diff --git a/packages/opencode/src/provider/transform.ts b/packages/opencode/src/provider/transform.ts index 3f433e0eb3..e49aaf875b 100644 --- a/packages/opencode/src/provider/transform.ts +++ b/packages/opencode/src/provider/transform.ts @@ -17,7 +17,31 @@ function mimeToModality(mime: string): Modality | undefined { } export namespace ProviderTransform { - function normalizeMessages(msgs: ModelMessage[], model: Provider.Model): ModelMessage[] { + function normalizeMessages( + msgs: ModelMessage[], + model: Provider.Model, + options: Record<string, any>, + ): ModelMessage[] { + // Strip openai itemId metadata when store=false to avoid sending stale IDs + if (model.api.npm === "@ai-sdk/openai" && options.store === false) { + msgs = msgs.map((msg) => { + if (!Array.isArray(msg.content)) return msg + const content = msg.content.map((part) => { + if (!part.providerOptions?.openai) return part + const { itemId, reasoningEncryptedContent, ...rest } = part.providerOptions.openai as Record<string, any> + const openai = Object.keys(rest).length > 0 ? 
rest : undefined + return { + ...part, + providerOptions: { + ...part.providerOptions, + openai, + }, + } + }) + return { ...msg, content } as typeof msg + }) + } + // Anthropic rejects messages with empty content - filter out empty string messages // and remove empty text/reasoning parts from array content if (model.api.npm === "@ai-sdk/anthropic") { @@ -219,9 +243,9 @@ export namespace ProviderTransform { }) } - export function message(msgs: ModelMessage[], model: Provider.Model) { + export function message(msgs: ModelMessage[], model: Provider.Model, options: Record<string, any>) { msgs = unsupportedParts(msgs, model) - msgs = normalizeMessages(msgs, model) + msgs = normalizeMessages(msgs, model, options) if ( model.providerID === "anthropic" || model.api.id.includes("anthropic") || diff --git a/packages/opencode/src/session/llm.ts b/packages/opencode/src/session/llm.ts index c8852edad0..7ad856bf0d 100644 --- a/packages/opencode/src/session/llm.ts +++ b/packages/opencode/src/session/llm.ts @@ -218,7 +218,7 @@ export namespace LLM { async transformParams(args) { if (args.type === "stream") { // @ts-expect-error - args.params.prompt = ProviderTransform.message(args.params.prompt, input.model) + args.params.prompt = ProviderTransform.message(args.params.prompt, input.model, options) } return args.params }, diff --git a/packages/opencode/test/provider/transform.test.ts b/packages/opencode/test/provider/transform.test.ts index 61da69332e..6a28d77c93 100644 --- a/packages/opencode/test/provider/transform.test.ts +++ b/packages/opencode/test/provider/transform.test.ts @@ -261,40 +261,44 @@ describe("ProviderTransform.message - DeepSeek reasoning content", () => { }, ] as any[] - const result = ProviderTransform.message(msgs, { - id: "deepseek/deepseek-chat", - providerID: "deepseek", - api: { - id: "deepseek-chat", - url: "https://api.deepseek.com", - npm: "@ai-sdk/openai-compatible", - }, - name: "DeepSeek Chat", - capabilities: { - temperature: true, - reasoning: true, - 
attachment: false, - toolcall: true, - input: { text: true, audio: false, image: false, video: false, pdf: false }, - output: { text: true, audio: false, image: false, video: false, pdf: false }, - interleaved: { - field: "reasoning_content", + const result = ProviderTransform.message( + msgs, + { + id: "deepseek/deepseek-chat", + providerID: "deepseek", + api: { + id: "deepseek-chat", + url: "https://api.deepseek.com", + npm: "@ai-sdk/openai-compatible", }, + name: "DeepSeek Chat", + capabilities: { + temperature: true, + reasoning: true, + attachment: false, + toolcall: true, + input: { text: true, audio: false, image: false, video: false, pdf: false }, + output: { text: true, audio: false, image: false, video: false, pdf: false }, + interleaved: { + field: "reasoning_content", + }, + }, + cost: { + input: 0.001, + output: 0.002, + cache: { read: 0.0001, write: 0.0002 }, + }, + limit: { + context: 128000, + output: 8192, + }, + status: "active", + options: {}, + headers: {}, + release_date: "2023-04-01", }, - cost: { - input: 0.001, - output: 0.002, - cache: { read: 0.0001, write: 0.0002 }, - }, - limit: { - context: 128000, - output: 8192, - }, - status: "active", - options: {}, - headers: {}, - release_date: "2023-04-01", - }) + {}, + ) expect(result).toHaveLength(1) expect(result[0].content).toEqual([ @@ -319,38 +323,42 @@ describe("ProviderTransform.message - DeepSeek reasoning content", () => { }, ] as any[] - const result = ProviderTransform.message(msgs, { - id: "openai/gpt-4", - providerID: "openai", - api: { - id: "gpt-4", - url: "https://api.openai.com", - npm: "@ai-sdk/openai", + const result = ProviderTransform.message( + msgs, + { + id: "openai/gpt-4", + providerID: "openai", + api: { + id: "gpt-4", + url: "https://api.openai.com", + npm: "@ai-sdk/openai", + }, + name: "GPT-4", + capabilities: { + temperature: true, + reasoning: false, + attachment: true, + toolcall: true, + input: { text: true, audio: false, image: true, video: false, pdf: false }, 
+ output: { text: true, audio: false, image: false, video: false, pdf: false }, + interleaved: false, + }, + cost: { + input: 0.03, + output: 0.06, + cache: { read: 0.001, write: 0.002 }, + }, + limit: { + context: 128000, + output: 4096, + }, + status: "active", + options: {}, + headers: {}, + release_date: "2023-04-01", }, - name: "GPT-4", - capabilities: { - temperature: true, - reasoning: false, - attachment: true, - toolcall: true, - input: { text: true, audio: false, image: true, video: false, pdf: false }, - output: { text: true, audio: false, image: false, video: false, pdf: false }, - interleaved: false, - }, - cost: { - input: 0.03, - output: 0.06, - cache: { read: 0.001, write: 0.002 }, - }, - limit: { - context: 128000, - output: 4096, - }, - status: "active", - options: {}, - headers: {}, - release_date: "2023-04-01", - }) + {}, + ) expect(result[0].content).toEqual([ { type: "reasoning", text: "Should not be processed" }, @@ -404,7 +412,7 @@ describe("ProviderTransform.message - empty image handling", () => { }, ] as any[] - const result = ProviderTransform.message(msgs, mockModel) + const result = ProviderTransform.message(msgs, mockModel, {}) expect(result).toHaveLength(1) expect(result[0].content).toHaveLength(2) @@ -428,7 +436,7 @@ describe("ProviderTransform.message - empty image handling", () => { }, ] as any[] - const result = ProviderTransform.message(msgs, mockModel) + const result = ProviderTransform.message(msgs, mockModel, {}) expect(result).toHaveLength(1) expect(result[0].content).toHaveLength(2) @@ -450,7 +458,7 @@ describe("ProviderTransform.message - empty image handling", () => { }, ] as any[] - const result = ProviderTransform.message(msgs, mockModel) + const result = ProviderTransform.message(msgs, mockModel, {}) expect(result).toHaveLength(1) expect(result[0].content).toHaveLength(3) @@ -503,7 +511,7 @@ describe("ProviderTransform.message - anthropic empty content filtering", () => { role: "user", content: "World" }, ] as any[] - 
const result = ProviderTransform.message(msgs, anthropicModel) + const result = ProviderTransform.message(msgs, anthropicModel, {}) expect(result).toHaveLength(2) expect(result[0].content).toBe("Hello") @@ -522,7 +530,7 @@ describe("ProviderTransform.message - anthropic empty content filtering", () => }, ] as any[] - const result = ProviderTransform.message(msgs, anthropicModel) + const result = ProviderTransform.message(msgs, anthropicModel, {}) expect(result).toHaveLength(1) expect(result[0].content).toHaveLength(1) @@ -541,7 +549,7 @@ describe("ProviderTransform.message - anthropic empty content filtering", () => }, ] as any[] - const result = ProviderTransform.message(msgs, anthropicModel) + const result = ProviderTransform.message(msgs, anthropicModel, {}) expect(result).toHaveLength(1) expect(result[0].content).toHaveLength(1) @@ -561,7 +569,7 @@ describe("ProviderTransform.message - anthropic empty content filtering", () => { role: "user", content: "World" }, ] as any[] - const result = ProviderTransform.message(msgs, anthropicModel) + const result = ProviderTransform.message(msgs, anthropicModel, {}) expect(result).toHaveLength(2) expect(result[0].content).toBe("Hello") @@ -579,7 +587,7 @@ describe("ProviderTransform.message - anthropic empty content filtering", () => }, ] as any[] - const result = ProviderTransform.message(msgs, anthropicModel) + const result = ProviderTransform.message(msgs, anthropicModel, {}) expect(result).toHaveLength(1) expect(result[0].content).toHaveLength(1) @@ -603,7 +611,7 @@ describe("ProviderTransform.message - anthropic empty content filtering", () => }, ] as any[] - const result = ProviderTransform.message(msgs, anthropicModel) + const result = ProviderTransform.message(msgs, anthropicModel, {}) expect(result).toHaveLength(1) expect(result[0].content).toHaveLength(2) @@ -630,7 +638,7 @@ describe("ProviderTransform.message - anthropic empty content filtering", () => }, ] as any[] - const result = 
ProviderTransform.message(msgs, openaiModel) + const result = ProviderTransform.message(msgs, openaiModel, {}) expect(result).toHaveLength(2) expect(result[0].content).toBe("") @@ -638,6 +646,189 @@ describe("ProviderTransform.message - anthropic empty content filtering", () => }) }) +describe("ProviderTransform.message - strip openai metadata when store=false", () => { + const openaiModel = { + id: "openai/gpt-5", + providerID: "openai", + api: { + id: "gpt-5", + url: "https://api.openai.com", + npm: "@ai-sdk/openai", + }, + name: "GPT-5", + capabilities: { + temperature: true, + reasoning: true, + attachment: true, + toolcall: true, + input: { text: true, audio: false, image: true, video: false, pdf: false }, + output: { text: true, audio: false, image: false, video: false, pdf: false }, + interleaved: false, + }, + cost: { input: 0.03, output: 0.06, cache: { read: 0.001, write: 0.002 } }, + limit: { context: 128000, output: 4096 }, + status: "active", + options: {}, + headers: {}, + } as any + + test("strips itemId and reasoningEncryptedContent when store=false", () => { + const msgs = [ + { + role: "assistant", + content: [ + { + type: "reasoning", + text: "thinking...", + providerOptions: { + openai: { + itemId: "rs_123", + reasoningEncryptedContent: "encrypted", + }, + }, + }, + { + type: "text", + text: "Hello", + providerOptions: { + openai: { + itemId: "msg_456", + }, + }, + }, + ], + }, + ] as any[] + + const result = ProviderTransform.message(msgs, openaiModel, { store: false }) as any[] + + expect(result).toHaveLength(1) + expect(result[0].content[0].providerOptions?.openai?.itemId).toBeUndefined() + expect(result[0].content[0].providerOptions?.openai?.reasoningEncryptedContent).toBeUndefined() + expect(result[0].content[1].providerOptions?.openai?.itemId).toBeUndefined() + }) + + test("strips itemId and reasoningEncryptedContent when store=false even when not openai", () => { + const zenModel = { + ...openaiModel, + providerID: "zen", + } + const msgs 
= [ + { + role: "assistant", + content: [ + { + type: "reasoning", + text: "thinking...", + providerOptions: { + openai: { + itemId: "rs_123", + reasoningEncryptedContent: "encrypted", + }, + }, + }, + { + type: "text", + text: "Hello", + providerOptions: { + openai: { + itemId: "msg_456", + }, + }, + }, + ], + }, + ] as any[] + + const result = ProviderTransform.message(msgs, zenModel, { store: false }) as any[] + + expect(result).toHaveLength(1) + expect(result[0].content[0].providerOptions?.openai?.itemId).toBeUndefined() + expect(result[0].content[0].providerOptions?.openai?.reasoningEncryptedContent).toBeUndefined() + expect(result[0].content[1].providerOptions?.openai?.itemId).toBeUndefined() + }) + + test("preserves other openai options when stripping itemId", () => { + const msgs = [ + { + role: "assistant", + content: [ + { + type: "text", + text: "Hello", + providerOptions: { + openai: { + itemId: "msg_123", + otherOption: "value", + }, + }, + }, + ], + }, + ] as any[] + + const result = ProviderTransform.message(msgs, openaiModel, { store: false }) as any[] + + expect(result[0].content[0].providerOptions?.openai?.itemId).toBeUndefined() + expect(result[0].content[0].providerOptions?.openai?.otherOption).toBe("value") + }) + + test("does not strip metadata when store is not false", () => { + const msgs = [ + { + role: "assistant", + content: [ + { + type: "text", + text: "Hello", + providerOptions: { + openai: { + itemId: "msg_123", + }, + }, + }, + ], + }, + ] as any[] + + const result = ProviderTransform.message(msgs, openaiModel, { store: true }) as any[] + + expect(result[0].content[0].providerOptions?.openai?.itemId).toBe("msg_123") + }) + + test("does not strip metadata for non-openai packages", () => { + const anthropicModel = { + ...openaiModel, + providerID: "anthropic", + api: { + id: "claude-3", + url: "https://api.anthropic.com", + npm: "@ai-sdk/anthropic", + }, + } + const msgs = [ + { + role: "assistant", + content: [ + { + type: "text", + 
text: "Hello", + providerOptions: { + openai: { + itemId: "msg_123", + }, + }, + }, + ], + }, + ] as any[] + + const result = ProviderTransform.message(msgs, anthropicModel, { store: false }) as any[] + + expect(result[0].content[0].providerOptions?.openai?.itemId).toBe("msg_123") + }) +}) + describe("ProviderTransform.variants", () => { const createMockModel = (overrides: Partial = {}): any => ({ id: "test/test-model",