server:
  port: 8080
  host: 0.0.0.0

whisper:
  length_ms: 5000
  keep_ms: 200
  step_ms: 500
  model: "models/ggml-large-v3.bin"
  # model: "models/ggml-base.bin"
  # model: "models/ggml-medium.en.bin"
  max_prompt_tokens: 32
  context_confidence_threshold: 0.5
  params:
    # n_threads: 8
    # max_tokens: 0
    # audio_ctx: 0
    speed_up: false
    single_segment: true
    translate: false
    # temperature_inc: 0.2 # 0.4
    # temperature_inc: 0
    # entropy_threshold: 2.5
    # entropy_threshold: 2.8
    n_max_text_ctx: 0 # 16384
    print_special: false
    print_progress: false
    print_realtime: false
    token_timestamps: false
    no_context: false
    no_timestamps: false
    suppress_non_speech_tokens: false
    tinydiarize: false
    language: "en"
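
# Rough meaning of the streaming window settings above, assuming the
# whisper.cpp-style sliding-window scheme (step/length/keep); this is a
# descriptive sketch, not additional keys read by the server:
#   step_ms   - how often a new chunk of audio is transcribed (here 0.5 s)
#   length_ms - total audio window fed to the model each step (here 5 s)
#   keep_ms   - audio carried over from the end of the previous window to
#               smooth the boundary between consecutive transcriptions (0.2 s)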