fix: tts base url runtime read (#3341) (thanks @hclsys)

This commit is contained in:
Shakker 2026-01-28 23:30:29 +00:00
parent a7534dc223
commit 67f1402703
2 changed files with 16 additions and 7 deletions

View File

@ -77,6 +77,7 @@ Status: beta.
- Agents: prevent retries on oversized image errors and surface size limits. (#2871) Thanks @Suksham-sharma.
- Agents: inherit provider baseUrl/api for inline models. (#2740) Thanks @lploc94.
- Memory Search: keep auto provider model defaults and only include remote when configured. (#2576) Thanks @papago2355.
- TTS: read OPENAI_TTS_BASE_URL at runtime instead of module load to honor config.env. (#3341) Thanks @hclsys.
- macOS: auto-scroll to bottom when sending a new message while scrolled up. (#2471) Thanks @kennyklee.
- Web UI: auto-expand the chat compose textarea while typing (with sensible max height). (#2950) Thanks @shivamraut101.
- Gateway: prevent crashes on transient network errors (fetch failures, timeouts, DNS). Added fatal error detection to only exit on truly critical errors. Fixes #2895, #2879, #2873. (#2980) Thanks @elliotsecops.

View File

@ -757,11 +757,19 @@ export const OPENAI_TTS_MODELS = ["gpt-4o-mini-tts", "tts-1", "tts-1-hd"] as con
 * Custom OpenAI-compatible TTS endpoint.
 * When set, model/voice validation is relaxed to allow non-OpenAI models.
 * Example: OPENAI_TTS_BASE_URL=http://localhost:8880/v1
*
* Note: Read at runtime (not module load) to support config.env loading.
 */
function getOpenAITtsBaseUrl(): string {
  return (process.env.OPENAI_TTS_BASE_URL?.trim() || "https://api.openai.com/v1").replace(
    /\/+$/,
    "",
  );
}
function isCustomOpenAIEndpoint(): boolean {
  return getOpenAITtsBaseUrl() !== "https://api.openai.com/v1";
}
export const OPENAI_TTS_VOICES = [
  "alloy",
  "ash",
@ -778,13 +786,13 @@ type OpenAiTtsVoice = (typeof OPENAI_TTS_VOICES)[number];
function isValidOpenAIModel(model: string): boolean {
  // Allow any model when using custom endpoint (e.g., Kokoro, LocalAI)
  if (isCustomOpenAIEndpoint()) return true;
  return OPENAI_TTS_MODELS.includes(model as (typeof OPENAI_TTS_MODELS)[number]);
}
function isValidOpenAIVoice(voice: string): voice is OpenAiTtsVoice {
  // Allow any voice when using custom endpoint (e.g., Kokoro Chinese voices)
  if (isCustomOpenAIEndpoint()) return true;
  return OPENAI_TTS_VOICES.includes(voice as OpenAiTtsVoice);
}
@ -1011,7 +1019,7 @@ async function openaiTTS(params: {
  const timeout = setTimeout(() => controller.abort(), timeoutMs);
  try {
    const response = await fetch(`${getOpenAITtsBaseUrl()}/audio/speech`, {
      method: "POST",
      headers: {
        Authorization: `Bearer ${apiKey}`,