defmodule WhisperServer.Application do
  use Application

  def start(_type, _args) do
    args = parse_args(System.argv())

    # Runtime configuration: CLI flags take precedence over environment
    # variables, which in turn fall back to hard-coded defaults.
    client = args[:client] || System.get_env("CLIENT") || "cuda"
    Application.put_env(:whisper_server, :client, String.to_atom(client))
    Application.put_env(:whisper_server, :model_name, args[:model] || System.get_env("MODEL") || "openai/whisper-base")
    Application.put_env(:whisper_server, :batch_size, args[:batch_size] || String.to_integer(System.get_env("BATCH_SIZE") || "3"))
    Application.put_env(:whisper_server, :batch_timeout, args[:batch_timeout] || String.to_integer(System.get_env("BATCH_TIMEOUT") || "3000"))
    Application.put_env(:whisper_server, :port, args[:port] || String.to_integer(System.get_env("PORT") || "4000"))

    # Supervise the inference processes and the HTTP endpoint.
    children = [
      WhisperServer.WhisperInference,
      WhisperServer.Large,
      {Plug.Cowboy,
       scheme: :http,
       plug: WhisperServer,
       options: [port: Application.get_env(:whisper_server, :port)]}
    ]

    opts = [strategy: :one_for_one, name: WhisperServer.Supervisor]
    Supervisor.start_link(children, opts)
  end

  defp parse_args(argv) do
    {opts, _, _} =
      OptionParser.parse(argv,
        switches: [
          batch_size: :integer,
          batch_timeout: :integer,
          client: :string,
          model: :string,
          port: :integer
        ],
        aliases: [
          b: :batch_size,
          t: :batch_timeout,
          c: :client,
          m: :model,
          p: :port
        ]
      )

    opts
  end
end
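# A minimal sketch of how the flags above get parsed; the argv values here are
# hypothetical and not part of the original listing:
#
#     iex> OptionParser.parse(["--model", "openai/whisper-small", "-p", "8080"],
#     ...>   switches: [model: :string, port: :integer],
#     ...>   aliases: [p: :port]
#     ...> )
#     {[model: "openai/whisper-small", port: 8080], [], []}
#
# Downstream modules can then read the stored settings with, for example,
# Application.get_env(:whisper_server, :model_name).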