From 8c9d022a8892decd4e737375f2aec78ff076b0c2 Mon Sep 17 00:00:00 2001 From: Peter Steinberger Date: Sat, 24 Jan 2026 22:35:17 +0000 Subject: [PATCH] feat: add Ollama provider discovery parity (#1606) (thanks @abhaymundhara) --- CHANGELOG.md | 1 + docs/concepts/model-providers.md | 24 +++ docs/providers/index.md | 1 + docs/providers/ollama.md | 171 ++++++++++++++++++ src/agents/model-auth.ts | 1 + .../models-config.providers.ollama.test.ts | 106 +++++++++++ src/agents/models-config.providers.ts | 150 ++++++++++++++- src/agents/models-config.ts | 2 +- ...-runner.applygoogleturnorderingfix.test.ts | 2 +- ...ed-runner.buildembeddedsandboxinfo.test.ts | 2 +- ...-runner.createsystempromptoverride.test.ts | 2 +- ...-undefined-sessionkey-is-undefined.test.ts | 2 +- ...-embedded-runner.limithistoryturns.test.ts | 2 +- ...dded-runner.resolvesessionagentids.test.ts | 2 +- .../pi-embedded-runner.splitsdktools.test.ts | 2 +- src/agents/pi-embedded-runner.test.ts | 2 +- test/setup.ts | 3 + 17 files changed, 465 insertions(+), 10 deletions(-) create mode 100644 docs/providers/ollama.md create mode 100644 src/agents/models-config.providers.ollama.test.ts diff --git a/CHANGELOG.md b/CHANGELOG.md index 36c20af76..b5806d304 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,6 +10,7 @@ Docs: https://docs.clawd.bot - Docs: update Fly.io guide notes. - Docs: add Bedrock EC2 instance role setup + IAM steps. (#1625) Thanks @sergical. https://docs.clawd.bot/bedrock - Exec approvals: forward approval prompts to chat with `/approve` for all channels (including plugins). (#1621) Thanks @czekaj. https://docs.clawd.bot/tools/exec-approvals https://docs.clawd.bot/tools/slash-commands +- Models: add Ollama provider discovery + docs. (#1606) Thanks @abhaymundhara. https://docs.clawd.bot/providers/ollama ### Fixes - Web UI: hide internal `message_id` hints in chat bubbles. 
diff --git a/docs/concepts/model-providers.md b/docs/concepts/model-providers.md index fd21e4a57..9b5f31149 100644 --- a/docs/concepts/model-providers.md +++ b/docs/concepts/model-providers.md @@ -236,6 +236,30 @@ MiniMax is configured via `models.providers` because it uses custom endpoints: See [/providers/minimax](/providers/minimax) for setup details, model options, and config snippets. +### Ollama + +Ollama is a local LLM runtime that provides an OpenAI-compatible API: + +- Provider: `ollama` +- Auth: `OLLAMA_API_KEY` (any value; Ollama runs locally) +- Example model: `ollama/llama3.3` +- Installation: https://ollama.ai + +```bash +# Install Ollama, then pull a model: +ollama pull llama3.3 +``` + +```json5 +{ + agents: { + defaults: { model: { primary: "ollama/llama3.3" } } + } +} +``` + +Ollama is auto-discovered when `OLLAMA_API_KEY` (or an auth profile) is set and no explicit `models.providers.ollama` entry exists. Discovery probes `http://127.0.0.1:11434` and filters to tool-capable models. See [/providers/ollama](/providers/ollama) for model recommendations and custom configuration. + ### Local proxies (LM Studio, vLLM, LiteLLM, etc.) 
Example (OpenAI‑compatible): diff --git a/docs/providers/index.md b/docs/providers/index.md index 6f66fe726..e7d4b9260 100644 --- a/docs/providers/index.md +++ b/docs/providers/index.md @@ -35,6 +35,7 @@ Looking for chat channel docs (WhatsApp/Telegram/Discord/Slack/Mattermost (plugi - [Z.AI](/providers/zai) - [GLM models](/providers/glm) - [MiniMax](/providers/minimax) +- [Ollama (local models)](/providers/ollama) ## Transcription providers diff --git a/docs/providers/ollama.md b/docs/providers/ollama.md new file mode 100644 index 000000000..cdc0d0622 --- /dev/null +++ b/docs/providers/ollama.md @@ -0,0 +1,171 @@ +--- +summary: "Run Clawdbot with Ollama (local LLM runtime)" +read_when: + - You want to run Clawdbot with local models via Ollama + - You need Ollama setup and configuration guidance +--- +# Ollama + +Ollama is a local LLM runtime that makes it easy to run open-source models on your machine. Clawdbot integrates with Ollama's OpenAI-compatible API and can **auto-discover tool-capable models** when enabled via `OLLAMA_API_KEY` (or an auth profile) and no explicit `models.providers.ollama` config is set. 
+ +## Quick start + +1) Install Ollama: https://ollama.ai + +2) Pull a model: + +```bash +ollama pull llama3.3 +# or +ollama pull qwen2.5-coder:32b +# or +ollama pull deepseek-r1:32b +``` + +3) Enable Ollama for Clawdbot (any value works; Ollama doesn't require a real key): + +```bash +# Set environment variable +export OLLAMA_API_KEY="ollama-local" + +# Or configure in your config file +clawdbot config set models.providers.ollama.apiKey "ollama-local" +``` + +4) Use Ollama models: + +```json5 +{ + agents: { + defaults: { + model: { primary: "ollama/llama3.3" } + } + } +} +``` + +## Model Discovery + +When Ollama is enabled via `OLLAMA_API_KEY` (or an auth profile) and no explicit `models.providers.ollama` entry exists, Clawdbot automatically detects models installed on your Ollama instance by querying `/api/tags` and `/api/show` at `http://127.0.0.1:11434`. It only keeps models that report tool support, so you don't need to manually configure them. + +To see what models are available: + +```bash +ollama list +clawdbot models list +``` + +To add a new model, simply pull it with Ollama: + +```bash +ollama pull mistral +``` + +The new model will be automatically discovered and available to use. + +If you set `models.providers.ollama` explicitly, auto-discovery is skipped. Define your models manually in that case. 
+ +## Configuration + +### Basic Setup + +The simplest way to enable Ollama is via environment variable: + +```bash +export OLLAMA_API_KEY="ollama-local" +``` + +### Custom Base URL + +If Ollama is running on a different host or port (note: explicit config skips auto-discovery, so define models manually): + +```json5 +{ + models: { + providers: { + ollama: { + apiKey: "ollama-local", + baseUrl: "http://192.168.1.100:11434/v1" + } + } + } +} +``` + +### Model Selection + +Once configured, all your Ollama models are available: + +```json5 +{ + agents: { + defaults: { + model: { + primary: "ollama/llama3.3", + fallback: ["ollama/qwen2.5-coder:32b"] + } + } + } +} +``` + +## Advanced + +### Reasoning Models + +Models with "r1" or "reasoning" in their name are automatically detected as reasoning models and will use extended thinking features: + +```bash +ollama pull deepseek-r1:32b +``` + +### Model Costs + +Ollama is free and runs locally, so all model costs are set to $0. + +### Context Windows + +Ollama models use default context windows. You can customize these in your provider configuration if needed. 
+ +## Troubleshooting + +### Ollama not detected + +Make sure Ollama is running: + +```bash +ollama serve +``` + +And that the API is accessible: + +```bash +curl http://localhost:11434/api/tags +``` + +### No models available + +Pull at least one model: + +```bash +ollama list # See what's installed +ollama pull llama3.3 # Pull a model +``` + +### Connection refused + +Check that Ollama is running on the correct port: + +```bash +# Check if Ollama is running +ps aux | grep ollama + +# Or restart Ollama +ollama serve +``` + +## See Also + +- [Model Providers](/concepts/model-providers) - Overview of all providers +- [Model Selection](/agents/model-selection) - How to choose models +- [Configuration](/configuration) - Full config reference diff --git a/src/agents/model-auth.ts b/src/agents/model-auth.ts index dbad539ee..4fff8ff59 100644 --- a/src/agents/model-auth.ts +++ b/src/agents/model-auth.ts @@ -284,6 +284,7 @@ export function resolveEnvApiKey(provider: string): EnvApiKeyResult | null { synthetic: "SYNTHETIC_API_KEY", mistral: "MISTRAL_API_KEY", opencode: "OPENCODE_API_KEY", + ollama: "OLLAMA_API_KEY", }; const envVar = envMap[normalized]; if (!envVar) return null; diff --git a/src/agents/models-config.providers.ollama.test.ts b/src/agents/models-config.providers.ollama.test.ts new file mode 100644 index 000000000..c06657ad5 --- /dev/null +++ b/src/agents/models-config.providers.ollama.test.ts @@ -0,0 +1,106 @@ +import { afterEach, describe, expect, it, vi } from "vitest"; +import { resolveImplicitProviders } from "./models-config.providers.js"; +import { mkdtempSync } from "node:fs"; +import { join } from "node:path"; +import { tmpdir } from "node:os"; + +describe("Ollama provider", () => { + const previousEnv = { ...process.env }; + + afterEach(() => { + for (const key of Object.keys(process.env)) { + if (!(key in previousEnv)) delete process.env[key]; + } + for (const [key, value] of Object.entries(previousEnv)) { + process.env[key] = value; + } + 
vi.restoreAllMocks(); + vi.unstubAllGlobals(); + }); + + it("should not include ollama when no API key is configured", async () => { + const agentDir = mkdtempSync(join(tmpdir(), "clawd-test-")); + const providers = await resolveImplicitProviders({ agentDir }); + + // Ollama requires explicit configuration via OLLAMA_API_KEY env var or profile + expect(providers?.ollama).toBeUndefined(); + }); + + it("discovers tool-capable models when OLLAMA_API_KEY is set", async () => { + process.env.OLLAMA_API_KEY = "ollama-local"; + delete process.env.VITEST; + process.env.NODE_ENV = "development"; + + const fetchMock = vi + .fn() + .mockResolvedValueOnce({ + ok: true, + status: 200, + json: async () => ({ + models: [{ name: "llama3.3" }, { name: "no-tools-model" }], + }), + }) + .mockResolvedValueOnce({ + ok: true, + status: 200, + json: async () => ({ + capabilities: ["tools", "thinking"], + model_info: { + "general.architecture": "llama", + "llama.context_length": "4096", + }, + }), + }) + .mockResolvedValueOnce({ + ok: true, + status: 200, + json: async () => ({ + capabilities: ["thinking"], + model_info: { + "general.architecture": "llama", + "llama.context_length": "2048", + }, + }), + }); + vi.stubGlobal("fetch", fetchMock as unknown as typeof fetch); + + const agentDir = mkdtempSync(join(tmpdir(), "clawd-test-")); + const providers = await resolveImplicitProviders({ agentDir }); + + expect(fetchMock).toHaveBeenCalledTimes(3); + expect(fetchMock.mock.calls[0]?.[0]).toBe("http://127.0.0.1:11434/api/tags"); + expect(fetchMock.mock.calls[1]?.[0]).toBe("http://127.0.0.1:11434/api/show"); + + const provider = providers?.ollama; + expect(provider?.baseUrl).toBe("http://127.0.0.1:11434/v1"); + expect(provider?.models).toHaveLength(1); + expect(provider?.models?.[0]?.id).toBe("llama3.3"); + expect(provider?.models?.[0]?.reasoning).toBe(true); + expect(provider?.models?.[0]?.contextWindow).toBe(4096); + expect(provider?.models?.[0]?.maxTokens).toBe(4096 * 10); + }); + + 
it("skips discovery when ollama is explicitly configured", async () => { + process.env.OLLAMA_API_KEY = "ollama-local"; + delete process.env.VITEST; + process.env.NODE_ENV = "development"; + + const fetchMock = vi.fn(); + vi.stubGlobal("fetch", fetchMock as unknown as typeof fetch); + + const agentDir = mkdtempSync(join(tmpdir(), "clawd-test-")); + const providers = await resolveImplicitProviders({ + agentDir, + explicitProviders: { + ollama: { + baseUrl: "http://example.com/v1", + api: "openai-completions", + models: [], + }, + }, + }); + + expect(fetchMock).not.toHaveBeenCalled(); + expect(providers?.ollama).toBeUndefined(); + }); +}); diff --git a/src/agents/models-config.providers.ts b/src/agents/models-config.providers.ts index 0425324fa..31fb1006c 100644 --- a/src/agents/models-config.providers.ts +++ b/src/agents/models-config.providers.ts @@ -1,9 +1,11 @@ import type { ClawdbotConfig } from "../config/config.js"; +import type { ModelDefinitionConfig } from "../config/types.models.js"; import { DEFAULT_COPILOT_API_BASE_URL, resolveCopilotApiToken, } from "../providers/github-copilot-token.js"; import { ensureAuthProfileStore, listProfilesForProvider } from "./auth-profiles.js"; +import { normalizeProviderId } from "./model-selection.js"; import { resolveAwsSdkEnvVarName, resolveEnvApiKey } from "./model-auth.js"; import { discoverBedrockModels } from "./bedrock-discovery.js"; import { @@ -62,6 +64,127 @@ const QWEN_PORTAL_DEFAULT_COST = { cacheWrite: 0, }; +const OLLAMA_HOST_BASE_URL = "http://127.0.0.1:11434"; +const OLLAMA_DEFAULT_CONTEXT_WINDOW = 8192; +const OLLAMA_MAX_TOKENS_MULTIPLIER = 10; +const OLLAMA_DISCOVERY_TIMEOUT_MS = 5000; +const OLLAMA_DEFAULT_COST = { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, +}; + +interface OllamaModel { + name: string; + modified_at: string; + size: number; + digest: string; + details?: { + family?: string; + parameter_size?: string; + }; +} + +interface OllamaTagsResponse { + models: OllamaModel[]; +} + 
+interface OllamaShowResponse { + capabilities?: string[]; + model_info?: Record; +} + +function parseOllamaNumber(value: unknown): number | undefined { + if (typeof value === "number" && Number.isFinite(value)) return value; + if (typeof value === "string" && value.trim()) { + const parsed = Number(value); + if (Number.isFinite(parsed)) return parsed; + } + return undefined; +} + +function resolveOllamaContextWindow( + modelInfo: Record | undefined, +): number { + if (!modelInfo) return OLLAMA_DEFAULT_CONTEXT_WINDOW; + const architecture = String(modelInfo["general.architecture"] ?? "").trim(); + const contextKey = architecture ? `${architecture}.context_length` : ""; + const contextWindow = + (contextKey ? parseOllamaNumber(modelInfo[contextKey]) : undefined) ?? + parseOllamaNumber(modelInfo["context_length"]); + return contextWindow ?? OLLAMA_DEFAULT_CONTEXT_WINDOW; +} + +function normalizeOllamaHostBaseUrl(baseUrl: string): string { + const trimmed = baseUrl.trim().replace(/\/+$/, ""); + return trimmed.endsWith("/v1") ? 
trimmed.slice(0, -3) : trimmed; +} + +async function discoverOllamaModels(baseUrl: string): Promise { + // Skip Ollama discovery in test environments + if (process.env.VITEST || process.env.NODE_ENV === "test") { + return []; + } + try { + const response = await fetch(`${baseUrl}/api/tags`, { + signal: AbortSignal.timeout(OLLAMA_DISCOVERY_TIMEOUT_MS), + }); + if (!response.ok) { + console.warn(`Failed to discover Ollama models: ${response.status}`); + return []; + } + const data = (await response.json()) as OllamaTagsResponse; + if (!data.models || data.models.length === 0) { + console.warn("No Ollama models found on local instance"); + return []; + } + const models = await Promise.all( + data.models.map(async (model) => { + try { + const detailsResponse = await fetch(`${baseUrl}/api/show`, { + method: "POST", + headers: { + "Content-Type": "application/json", + }, + body: JSON.stringify({ name: model.name }), + signal: AbortSignal.timeout(OLLAMA_DISCOVERY_TIMEOUT_MS), + }); + if (!detailsResponse.ok) { + console.warn( + `Failed to fetch Ollama model details for ${model.name}: ${detailsResponse.status}`, + ); + return null; + } + const details = (await detailsResponse.json()) as OllamaShowResponse; + const capabilities = Array.isArray(details.capabilities) ? 
details.capabilities : []; + if (!capabilities.includes("tools")) { + console.debug(`Skipping Ollama model ${model.name}: does not support tools`); + return null; + } + const contextWindow = resolveOllamaContextWindow(details.model_info); + return { + id: model.name, + name: model.name, + reasoning: capabilities.includes("thinking"), + input: ["text"], + cost: OLLAMA_DEFAULT_COST, + contextWindow, + maxTokens: contextWindow * OLLAMA_MAX_TOKENS_MULTIPLIER, + }; + } catch (error) { + console.warn(`Failed to fetch Ollama model details for ${model.name}: ${String(error)}`); + return null; + } + }), + ); + return models.filter((model): model is ModelDefinitionConfig => Boolean(model)); + } catch (error) { + console.warn(`Failed to discover Ollama models: ${String(error)}`); + return []; + } +} + function normalizeApiKeyConfig(value: string): string { const trimmed = value.trim(); const match = /^\$\{([A-Z0-9_]+)\}$/.exec(trimmed); @@ -275,11 +398,28 @@ function buildSyntheticProvider(): ProviderConfig { }; } -export function resolveImplicitProviders(params: { agentDir: string }): ModelsConfig["providers"] { +async function buildOllamaProvider(baseUrl: string): Promise { + const hostBaseUrl = normalizeOllamaHostBaseUrl(baseUrl); + const models = await discoverOllamaModels(hostBaseUrl); + return { + baseUrl: `${hostBaseUrl}/v1`, + api: "openai-completions", + models, + }; +} + +export async function resolveImplicitProviders(params: { + agentDir: string; + explicitProviders?: ModelsConfig["providers"]; +}): Promise { const providers: Record = {}; const authStore = ensureAuthProfileStore(params.agentDir, { allowKeychainPrompt: false, }); + const explicitProviders = params.explicitProviders ?? {}; + const hasExplicitOllama = Object.keys(explicitProviders).some( + (key) => normalizeProviderId(key) === "ollama", + ); const minimaxKey = resolveEnvApiKeyVarName("minimax") ?? 
@@ -317,6 +457,14 @@ export function resolveImplicitProviders(params: { agentDir: string }): ModelsCo }; } + // Ollama provider - auto-discovered when enabled via env key/auth profile and no explicit config exists + const ollamaKey = + resolveEnvApiKeyVarName("ollama") ?? + resolveApiKeyFromProfiles({ provider: "ollama", store: authStore }); + if (ollamaKey && !hasExplicitOllama) { + providers.ollama = { ...(await buildOllamaProvider(OLLAMA_HOST_BASE_URL)), apiKey: ollamaKey }; + } + return providers; } diff --git a/src/agents/models-config.ts b/src/agents/models-config.ts index 63fb63f3d..6f6caa7b6 100644 --- a/src/agents/models-config.ts +++ b/src/agents/models-config.ts @@ -80,7 +80,7 @@ export async function ensureClawdbotModelsJson( const agentDir = agentDirOverride?.trim() ? agentDirOverride.trim() : resolveClawdbotAgentDir(); const explicitProviders = (cfg.models?.providers ?? {}) as Record; - const implicitProviders = resolveImplicitProviders({ agentDir }); + const implicitProviders = await resolveImplicitProviders({ agentDir, explicitProviders }); const providers: Record = mergeProviders({ implicit: implicitProviders, explicit: explicitProviders, diff --git a/src/agents/pi-embedded-runner.applygoogleturnorderingfix.test.ts b/src/agents/pi-embedded-runner.applygoogleturnorderingfix.test.ts index 00fde9ccc..18df4e184 100644 --- a/src/agents/pi-embedded-runner.applygoogleturnorderingfix.test.ts +++ b/src/agents/pi-embedded-runner.applygoogleturnorderingfix.test.ts @@ -72,7 +72,7 @@ const _makeOpenAiConfig = (modelIds: string[]) => }) satisfies ClawdbotConfig; const _ensureModels = (cfg: ClawdbotConfig, agentDir: string) => - ensureClawdbotModelsJson(cfg, agentDir); + ensureClawdbotModelsJson(cfg, agentDir) as unknown; const _textFromContent = (content: unknown) => { if (typeof content === "string") return content; diff --git a/src/agents/pi-embedded-runner.buildembeddedsandboxinfo.test.ts b/src/agents/pi-embedded-runner.buildembeddedsandboxinfo.test.ts index a19bd64b7..6602a8c20 100644 --- 
a/src/agents/pi-embedded-runner.buildembeddedsandboxinfo.test.ts +++ b/src/agents/pi-embedded-runner.buildembeddedsandboxinfo.test.ts @@ -71,7 +71,7 @@ const _makeOpenAiConfig = (modelIds: string[]) => }) satisfies ClawdbotConfig; const _ensureModels = (cfg: ClawdbotConfig, agentDir: string) => - ensureClawdbotModelsJson(cfg, agentDir); + ensureClawdbotModelsJson(cfg, agentDir) as unknown; const _textFromContent = (content: unknown) => { if (typeof content === "string") return content; diff --git a/src/agents/pi-embedded-runner.createsystempromptoverride.test.ts b/src/agents/pi-embedded-runner.createsystempromptoverride.test.ts index 92a261d2d..5d96dcb33 100644 --- a/src/agents/pi-embedded-runner.createsystempromptoverride.test.ts +++ b/src/agents/pi-embedded-runner.createsystempromptoverride.test.ts @@ -70,7 +70,7 @@ const _makeOpenAiConfig = (modelIds: string[]) => }) satisfies ClawdbotConfig; const _ensureModels = (cfg: ClawdbotConfig, agentDir: string) => - ensureClawdbotModelsJson(cfg, agentDir); + ensureClawdbotModelsJson(cfg, agentDir) as unknown; const _textFromContent = (content: unknown) => { if (typeof content === "string") return content; diff --git a/src/agents/pi-embedded-runner.get-dm-history-limit-from-session-key.returns-undefined-sessionkey-is-undefined.test.ts b/src/agents/pi-embedded-runner.get-dm-history-limit-from-session-key.returns-undefined-sessionkey-is-undefined.test.ts index 5d33ef490..4b6f082b1 100644 --- a/src/agents/pi-embedded-runner.get-dm-history-limit-from-session-key.returns-undefined-sessionkey-is-undefined.test.ts +++ b/src/agents/pi-embedded-runner.get-dm-history-limit-from-session-key.returns-undefined-sessionkey-is-undefined.test.ts @@ -70,7 +70,7 @@ const _makeOpenAiConfig = (modelIds: string[]) => }) satisfies ClawdbotConfig; const _ensureModels = (cfg: ClawdbotConfig, agentDir: string) => - ensureClawdbotModelsJson(cfg, agentDir); + ensureClawdbotModelsJson(cfg, agentDir) as unknown; const _textFromContent = (content: 
unknown) => { if (typeof content === "string") return content; diff --git a/src/agents/pi-embedded-runner.limithistoryturns.test.ts b/src/agents/pi-embedded-runner.limithistoryturns.test.ts index cf95f31b1..bbcf5e84c 100644 --- a/src/agents/pi-embedded-runner.limithistoryturns.test.ts +++ b/src/agents/pi-embedded-runner.limithistoryturns.test.ts @@ -71,7 +71,7 @@ const _makeOpenAiConfig = (modelIds: string[]) => }) satisfies ClawdbotConfig; const _ensureModels = (cfg: ClawdbotConfig, agentDir: string) => - ensureClawdbotModelsJson(cfg, agentDir); + ensureClawdbotModelsJson(cfg, agentDir) as unknown; const _textFromContent = (content: unknown) => { if (typeof content === "string") return content; diff --git a/src/agents/pi-embedded-runner.resolvesessionagentids.test.ts b/src/agents/pi-embedded-runner.resolvesessionagentids.test.ts index 3889ae976..4679d92d5 100644 --- a/src/agents/pi-embedded-runner.resolvesessionagentids.test.ts +++ b/src/agents/pi-embedded-runner.resolvesessionagentids.test.ts @@ -70,7 +70,7 @@ const _makeOpenAiConfig = (modelIds: string[]) => }) satisfies ClawdbotConfig; const _ensureModels = (cfg: ClawdbotConfig, agentDir: string) => - ensureClawdbotModelsJson(cfg, agentDir); + ensureClawdbotModelsJson(cfg, agentDir) as unknown; const _textFromContent = (content: unknown) => { if (typeof content === "string") return content; diff --git a/src/agents/pi-embedded-runner.splitsdktools.test.ts b/src/agents/pi-embedded-runner.splitsdktools.test.ts index 813cfe976..1446845d8 100644 --- a/src/agents/pi-embedded-runner.splitsdktools.test.ts +++ b/src/agents/pi-embedded-runner.splitsdktools.test.ts @@ -71,7 +71,7 @@ const _makeOpenAiConfig = (modelIds: string[]) => }) satisfies ClawdbotConfig; const _ensureModels = (cfg: ClawdbotConfig, agentDir: string) => - ensureClawdbotModelsJson(cfg, agentDir); + ensureClawdbotModelsJson(cfg, agentDir) as unknown; const _textFromContent = (content: unknown) => { if (typeof content === "string") return content; diff 
--git a/src/agents/pi-embedded-runner.test.ts b/src/agents/pi-embedded-runner.test.ts index 169d095a6..ee4b60c30 100644 --- a/src/agents/pi-embedded-runner.test.ts +++ b/src/agents/pi-embedded-runner.test.ts @@ -130,7 +130,7 @@ const makeOpenAiConfig = (modelIds: string[]) => }, }) satisfies ClawdbotConfig; -const ensureModels = (cfg: ClawdbotConfig) => ensureClawdbotModelsJson(cfg, agentDir); +const ensureModels = (cfg: ClawdbotConfig) => ensureClawdbotModelsJson(cfg, agentDir) as unknown; const nextSessionFile = () => { sessionCounter += 1; diff --git a/test/setup.ts b/test/setup.ts index b96e8d611..f2fc2756e 100644 --- a/test/setup.ts +++ b/test/setup.ts @@ -1,5 +1,8 @@ import { afterAll, afterEach, beforeEach, vi } from "vitest"; +// Ensure Vitest environment is properly set +process.env.VITEST = "true"; + import type { ChannelId, ChannelOutboundAdapter,