Compare commits
5 Commits
dev...fix-id-iss
| Author | SHA1 | Date |
|---|---|---|
| | bab5b318da | |
| | 4ce5ee161a | |
| | ce4b246cc0 | |
| | f64177e5ec | |
| | 2522ae6e41 | |
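The compared branches differ in three areas: `ProviderTransform.normalizeMessages` gains an `options` parameter and learns to strip OpenAI `itemId`/`reasoningEncryptedContent` metadata from message parts, `ProviderTransform.options` is refactored from positional arguments to a single `input` object and now defaults `store` to `false` for OpenAI, and the `LLM` call sites plus the test suite are updated to match.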
```diff
@@ -16,7 +16,31 @@ function mimeToModality(mime: string): Modality | undefined {
 }
 
 export namespace ProviderTransform {
-  function normalizeMessages(msgs: ModelMessage[], model: Provider.Model): ModelMessage[] {
+  function normalizeMessages(
+    msgs: ModelMessage[],
+    model: Provider.Model,
+    options: Record<string, unknown>,
+  ): ModelMessage[] {
+    // Strip openai itemId metadata following what codex does
+    if (model.api.npm === "@ai-sdk/openai" || options.store === false) {
+      msgs = msgs.map((msg) => {
+        if (!Array.isArray(msg.content)) return msg
+        const content = msg.content.map((part) => {
+          if (!part.providerOptions?.openai) return part
+          const { itemId, reasoningEncryptedContent, ...rest } = part.providerOptions.openai as Record<string, unknown>
+          const openai = Object.keys(rest).length > 0 ? rest : undefined
+          return {
+            ...part,
+            providerOptions: {
+              ...part.providerOptions,
+              openai,
+            },
+          }
+        })
+        return { ...msg, content } as typeof msg
+      })
+    }
+
     // Anthropic rejects messages with empty content - filter out empty string messages
     // and remove empty text/reasoning parts from array content
     if (model.api.npm === "@ai-sdk/anthropic") {
```
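For readers skimming the hunk above, here is a minimal, self-contained sketch of the per-part stripping it introduces. The `Part` type and the `stripOpenAIMetadata` name are simplified stand-ins for illustration, not the project's actual definitions:

```ts
// Sketch only: simplified stand-in types, not the project's real message-part types.
type Part = {
  type: string
  text?: string
  providerOptions?: Record<string, Record<string, unknown> | undefined>
}

function stripOpenAIMetadata(part: Part): Part {
  if (!part.providerOptions?.openai) return part
  // Destructure out the two response-linked keys and keep everything else.
  const { itemId, reasoningEncryptedContent, ...rest } = part.providerOptions.openai
  // Replace the openai bag with undefined when nothing else remains.
  const openai = Object.keys(rest).length > 0 ? rest : undefined
  return { ...part, providerOptions: { ...part.providerOptions, openai } }
}

const sample: Part = {
  type: "text",
  text: "Hello",
  providerOptions: { openai: { itemId: "msg_123", otherOption: "value" } },
}
console.log(stripOpenAIMetadata(sample).providerOptions?.openai) // { otherOption: "value" }
```

The `Object.keys(rest).length > 0` guard mirrors the diff: an `openai` bag left empty after stripping becomes `undefined` rather than `{}`.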
```diff
@@ -218,9 +242,9 @@ export namespace ProviderTransform {
     })
   }
 
-  export function message(msgs: ModelMessage[], model: Provider.Model) {
+  export function message(msgs: ModelMessage[], model: Provider.Model, options: Record<string, unknown>) {
     msgs = unsupportedParts(msgs, model)
-    msgs = normalizeMessages(msgs, model)
+    msgs = normalizeMessages(msgs, model, options)
     if (
       model.providerID === "anthropic" ||
       model.api.id.includes("anthropic") ||
```
```diff
@@ -453,64 +477,69 @@ export namespace ProviderTransform {
     return {}
   }
 
-  export function options(
-    model: Provider.Model,
-    sessionID: string,
-    providerOptions?: Record<string, any>,
-  ): Record<string, any> {
+  export function options(input: {
+    model: Provider.Model
+    sessionID: string
+    providerOptions?: Record<string, any>
+  }): Record<string, any> {
     const result: Record<string, any> = {}
 
-    if (model.api.npm === "@openrouter/ai-sdk-provider") {
+    // openai and providers using openai package should set store to false by default.
+    if (input.model.providerID === "openai" || input.model.api.npm === "@ai-sdk/openai") {
+      result["store"] = false
+    }
+
+    if (input.model.api.npm === "@openrouter/ai-sdk-provider") {
       result["usage"] = {
         include: true,
       }
-      if (model.api.id.includes("gemini-3")) {
+      if (input.model.api.id.includes("gemini-3")) {
        result["reasoning"] = { effort: "high" }
      }
    }
 
     if (
-      model.providerID === "baseten" ||
-      (model.providerID === "opencode" && ["kimi-k2-thinking", "glm-4.6"].includes(model.api.id))
+      input.model.providerID === "baseten" ||
+      (input.model.providerID === "opencode" && ["kimi-k2-thinking", "glm-4.6"].includes(input.model.api.id))
     ) {
       result["chat_template_args"] = { enable_thinking: true }
     }
 
-    if (["zai", "zhipuai"].includes(model.providerID) && model.api.npm === "@ai-sdk/openai-compatible") {
+    if (["zai", "zhipuai"].includes(input.model.providerID) && input.model.api.npm === "@ai-sdk/openai-compatible") {
       result["thinking"] = {
         type: "enabled",
         clear_thinking: false,
       }
     }
 
-    if (model.providerID === "openai" || providerOptions?.setCacheKey) {
-      result["promptCacheKey"] = sessionID
+    if (input.model.providerID === "openai" || input.providerOptions?.setCacheKey) {
+      result["promptCacheKey"] = input.sessionID
     }
 
-    if (model.api.npm === "@ai-sdk/google" || model.api.npm === "@ai-sdk/google-vertex") {
+    if (input.model.api.npm === "@ai-sdk/google" || input.model.api.npm === "@ai-sdk/google-vertex") {
       result["thinkingConfig"] = {
         includeThoughts: true,
       }
-      if (model.api.id.includes("gemini-3")) {
+      if (input.model.api.id.includes("gemini-3")) {
         result["thinkingConfig"]["thinkingLevel"] = "high"
       }
     }
 
-    if (model.api.id.includes("gpt-5") && !model.api.id.includes("gpt-5-chat")) {
-      if (model.providerID.includes("codex")) {
+    if (input.model.api.id.includes("gpt-5") && !input.model.api.id.includes("gpt-5-chat")) {
+      if (input.model.providerID.includes("codex")) {
         result["store"] = false
       }
 
-      if (!model.api.id.includes("codex") && !model.api.id.includes("gpt-5-pro")) {
+      if (!input.model.api.id.includes("codex") && !input.model.api.id.includes("gpt-5-pro")) {
         result["reasoningEffort"] = "medium"
       }
 
-      if (model.api.id.endsWith("gpt-5.") && model.providerID !== "azure") {
+      if (input.model.api.id.endsWith("gpt-5.") && input.model.providerID !== "azure") {
         result["textVerbosity"] = "low"
       }
 
-      if (model.providerID.startsWith("opencode")) {
-        result["promptCacheKey"] = sessionID
+      if (input.model.providerID.startsWith("opencode")) {
+        result["promptCacheKey"] = input.sessionID
         result["include"] = ["reasoning.encrypted_content"]
         result["reasoningSummary"] = "auto"
       }
```
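The signature change from positional parameters to a single `input` object is mechanical, but it makes every call self-documenting. A rough sketch of the shape, using a simplified stand-in for `Provider.Model` (illustrative only, not the real type):

```ts
// Simplified stand-in; the real Provider.Model carries far more fields.
type Model = { providerID: string; api: { id: string; npm: string } }

function options(input: { model: Model; sessionID: string; providerOptions?: Record<string, any> }) {
  const result: Record<string, any> = {}
  // openai and providers on the openai package default store to false.
  if (input.model.providerID === "openai" || input.model.api.npm === "@ai-sdk/openai") {
    result.store = false
  }
  if (input.model.providerID === "openai" || input.providerOptions?.setCacheKey) {
    result.promptCacheKey = input.sessionID
  }
  return result
}

// Named fields cannot be silently transposed the way positionals can:
options({ model: { providerID: "openai", api: { id: "gpt-4", npm: "@ai-sdk/openai" } }, sessionID: "ses_1" })
// => { store: false, promptCacheKey: "ses_1" }
```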
```diff
@@ -95,7 +95,11 @@ export namespace LLM {
       !input.small && input.model.variants && input.user.variant ? input.model.variants[input.user.variant] : {}
     const base = input.small
       ? ProviderTransform.smallOptions(input.model)
-      : ProviderTransform.options(input.model, input.sessionID, provider.options)
+      : ProviderTransform.options({
+          model: input.model,
+          sessionID: input.sessionID,
+          providerOptions: provider.options,
+        })
     const options: Record<string, any> = pipe(
       base,
       mergeDeep(input.model.options),
```
```diff
@@ -104,7 +108,6 @@ export namespace LLM {
     )
     if (isCodex) {
       options.instructions = SystemPrompt.instructions()
-      options.store = false
     }
 
     const params = await Plugin.trigger(
```
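With `ProviderTransform.options` now setting `store: false` for the openai provider and the `@ai-sdk/openai` package (and the codex branch of the gpt-5 block doing the same), the hard-coded `options.store = false` in the `isCodex` path appears redundant and is dropped here.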
```diff
@@ -214,7 +217,7 @@ export namespace LLM {
         async transformParams(args) {
           if (args.type === "stream") {
             // @ts-expect-error
-            args.params.prompt = ProviderTransform.message(args.params.prompt, input.model)
+            args.params.prompt = ProviderTransform.message(args.params.prompt, input.model, options)
           }
           return args.params
         },
```
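Note that `options` here is the merged options object computed earlier in this file from `ProviderTransform.options` plus model and config overrides, so a `store: false` default set there now flows through `message` into `normalizeMessages` and triggers metadata stripping even for models that are not on the openai package.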
```diff
@@ -39,22 +39,34 @@ describe("ProviderTransform.options - setCacheKey", () => {
   } as any
 
   test("should set promptCacheKey when providerOptions.setCacheKey is true", () => {
-    const result = ProviderTransform.options(mockModel, sessionID, { setCacheKey: true })
+    const result = ProviderTransform.options({
+      model: mockModel,
+      sessionID,
+      providerOptions: { setCacheKey: true },
+    })
     expect(result.promptCacheKey).toBe(sessionID)
   })
 
   test("should not set promptCacheKey when providerOptions.setCacheKey is false", () => {
-    const result = ProviderTransform.options(mockModel, sessionID, { setCacheKey: false })
+    const result = ProviderTransform.options({
+      model: mockModel,
+      sessionID,
+      providerOptions: { setCacheKey: false },
+    })
     expect(result.promptCacheKey).toBeUndefined()
   })
 
   test("should not set promptCacheKey when providerOptions is undefined", () => {
-    const result = ProviderTransform.options(mockModel, sessionID, undefined)
+    const result = ProviderTransform.options({
+      model: mockModel,
+      sessionID,
+      providerOptions: undefined,
+    })
     expect(result.promptCacheKey).toBeUndefined()
   })
 
   test("should not set promptCacheKey when providerOptions does not have setCacheKey", () => {
-    const result = ProviderTransform.options(mockModel, sessionID, {})
+    const result = ProviderTransform.options({ model: mockModel, sessionID, providerOptions: {} })
     expect(result.promptCacheKey).toBeUndefined()
   })
 
```
```diff
@@ -68,9 +80,27 @@ describe("ProviderTransform.options - setCacheKey", () => {
         npm: "@ai-sdk/openai",
       },
     }
-    const result = ProviderTransform.options(openaiModel, sessionID, {})
+    const result = ProviderTransform.options({ model: openaiModel, sessionID, providerOptions: {} })
     expect(result.promptCacheKey).toBe(sessionID)
   })
+
+  test("should set store=false for openai provider", () => {
+    const openaiModel = {
+      ...mockModel,
+      providerID: "openai",
+      api: {
+        id: "gpt-4",
+        url: "https://api.openai.com",
+        npm: "@ai-sdk/openai",
+      },
+    }
+    const result = ProviderTransform.options({
+      model: openaiModel,
+      sessionID,
+      providerOptions: {},
+    })
+    expect(result.store).toBe(false)
+  })
 })
 
 describe("ProviderTransform.maxOutputTokens", () => {
```
```diff
@@ -208,40 +238,44 @@ describe("ProviderTransform.message - DeepSeek reasoning content", () => {
       },
     ] as any[]
 
-    const result = ProviderTransform.message(msgs, {
-      id: "deepseek/deepseek-chat",
-      providerID: "deepseek",
-      api: {
-        id: "deepseek-chat",
-        url: "https://api.deepseek.com",
-        npm: "@ai-sdk/openai-compatible",
-      },
-      name: "DeepSeek Chat",
-      capabilities: {
-        temperature: true,
-        reasoning: true,
-        attachment: false,
-        toolcall: true,
-        input: { text: true, audio: false, image: false, video: false, pdf: false },
-        output: { text: true, audio: false, image: false, video: false, pdf: false },
-        interleaved: {
-          field: "reasoning_content",
-        },
-      },
-      cost: {
-        input: 0.001,
-        output: 0.002,
-        cache: { read: 0.0001, write: 0.0002 },
-      },
-      limit: {
-        context: 128000,
-        output: 8192,
-      },
-      status: "active",
-      options: {},
-      headers: {},
-      release_date: "2023-04-01",
-    })
+    const result = ProviderTransform.message(
+      msgs,
+      {
+        id: "deepseek/deepseek-chat",
+        providerID: "deepseek",
+        api: {
+          id: "deepseek-chat",
+          url: "https://api.deepseek.com",
+          npm: "@ai-sdk/openai-compatible",
+        },
+        name: "DeepSeek Chat",
+        capabilities: {
+          temperature: true,
+          reasoning: true,
+          attachment: false,
+          toolcall: true,
+          input: { text: true, audio: false, image: false, video: false, pdf: false },
+          output: { text: true, audio: false, image: false, video: false, pdf: false },
+          interleaved: {
+            field: "reasoning_content",
+          },
+        },
+        cost: {
+          input: 0.001,
+          output: 0.002,
+          cache: { read: 0.0001, write: 0.0002 },
+        },
+        limit: {
+          context: 128000,
+          output: 8192,
+        },
+        status: "active",
+        options: {},
+        headers: {},
+        release_date: "2023-04-01",
+      },
+      {},
+    )
 
     expect(result).toHaveLength(1)
     expect(result[0].content).toEqual([
```
```diff
@@ -266,38 +300,42 @@ describe("ProviderTransform.message - DeepSeek reasoning content", () => {
       },
     ] as any[]
 
-    const result = ProviderTransform.message(msgs, {
-      id: "openai/gpt-4",
-      providerID: "openai",
-      api: {
-        id: "gpt-4",
-        url: "https://api.openai.com",
-        npm: "@ai-sdk/openai",
-      },
-      name: "GPT-4",
-      capabilities: {
-        temperature: true,
-        reasoning: false,
-        attachment: true,
-        toolcall: true,
-        input: { text: true, audio: false, image: true, video: false, pdf: false },
-        output: { text: true, audio: false, image: false, video: false, pdf: false },
-        interleaved: false,
-      },
-      cost: {
-        input: 0.03,
-        output: 0.06,
-        cache: { read: 0.001, write: 0.002 },
-      },
-      limit: {
-        context: 128000,
-        output: 4096,
-      },
-      status: "active",
-      options: {},
-      headers: {},
-      release_date: "2023-04-01",
-    })
+    const result = ProviderTransform.message(
+      msgs,
+      {
+        id: "openai/gpt-4",
+        providerID: "openai",
+        api: {
+          id: "gpt-4",
+          url: "https://api.openai.com",
+          npm: "@ai-sdk/openai",
+        },
+        name: "GPT-4",
+        capabilities: {
+          temperature: true,
+          reasoning: false,
+          attachment: true,
+          toolcall: true,
+          input: { text: true, audio: false, image: true, video: false, pdf: false },
+          output: { text: true, audio: false, image: false, video: false, pdf: false },
+          interleaved: false,
+        },
+        cost: {
+          input: 0.03,
+          output: 0.06,
+          cache: { read: 0.001, write: 0.002 },
+        },
+        limit: {
+          context: 128000,
+          output: 4096,
+        },
+        status: "active",
+        options: {},
+        headers: {},
+        release_date: "2023-04-01",
+      },
+      {},
+    )
 
     expect(result[0].content).toEqual([
       { type: "reasoning", text: "Should not be processed" },
```
```diff
@@ -351,7 +389,7 @@ describe("ProviderTransform.message - empty image handling", () => {
       },
     ] as any[]
 
-    const result = ProviderTransform.message(msgs, mockModel)
+    const result = ProviderTransform.message(msgs, mockModel, {})
 
     expect(result).toHaveLength(1)
     expect(result[0].content).toHaveLength(2)
```
```diff
@@ -375,7 +413,7 @@ describe("ProviderTransform.message - empty image handling", () => {
       },
     ] as any[]
 
-    const result = ProviderTransform.message(msgs, mockModel)
+    const result = ProviderTransform.message(msgs, mockModel, {})
 
     expect(result).toHaveLength(1)
     expect(result[0].content).toHaveLength(2)
```
```diff
@@ -397,7 +435,7 @@ describe("ProviderTransform.message - empty image handling", () => {
       },
     ] as any[]
 
-    const result = ProviderTransform.message(msgs, mockModel)
+    const result = ProviderTransform.message(msgs, mockModel, {})
 
     expect(result).toHaveLength(1)
     expect(result[0].content).toHaveLength(3)
```
```diff
@@ -450,7 +488,7 @@ describe("ProviderTransform.message - anthropic empty content filtering", () =>
       { role: "user", content: "World" },
     ] as any[]
 
-    const result = ProviderTransform.message(msgs, anthropicModel)
+    const result = ProviderTransform.message(msgs, anthropicModel, {})
 
     expect(result).toHaveLength(2)
     expect(result[0].content).toBe("Hello")
```
```diff
@@ -469,7 +507,7 @@ describe("ProviderTransform.message - anthropic empty content filtering", () =>
       },
     ] as any[]
 
-    const result = ProviderTransform.message(msgs, anthropicModel)
+    const result = ProviderTransform.message(msgs, anthropicModel, {})
 
     expect(result).toHaveLength(1)
     expect(result[0].content).toHaveLength(1)
```
```diff
@@ -488,7 +526,7 @@ describe("ProviderTransform.message - anthropic empty content filtering", () =>
       },
     ] as any[]
 
-    const result = ProviderTransform.message(msgs, anthropicModel)
+    const result = ProviderTransform.message(msgs, anthropicModel, {})
 
     expect(result).toHaveLength(1)
     expect(result[0].content).toHaveLength(1)
```
```diff
@@ -508,7 +546,7 @@ describe("ProviderTransform.message - anthropic empty content filtering", () =>
       { role: "user", content: "World" },
     ] as any[]
 
-    const result = ProviderTransform.message(msgs, anthropicModel)
+    const result = ProviderTransform.message(msgs, anthropicModel, {})
 
     expect(result).toHaveLength(2)
     expect(result[0].content).toBe("Hello")
```
```diff
@@ -526,7 +564,7 @@ describe("ProviderTransform.message - anthropic empty content filtering", () =>
       },
     ] as any[]
 
-    const result = ProviderTransform.message(msgs, anthropicModel)
+    const result = ProviderTransform.message(msgs, anthropicModel, {})
 
     expect(result).toHaveLength(1)
     expect(result[0].content).toHaveLength(1)
```
```diff
@@ -550,7 +588,7 @@ describe("ProviderTransform.message - anthropic empty content filtering", () =>
       },
     ] as any[]
 
-    const result = ProviderTransform.message(msgs, anthropicModel)
+    const result = ProviderTransform.message(msgs, anthropicModel, {})
 
     expect(result).toHaveLength(1)
     expect(result[0].content).toHaveLength(2)
```
```diff
@@ -577,7 +615,7 @@ describe("ProviderTransform.message - anthropic empty content filtering", () =>
       },
     ] as any[]
 
-    const result = ProviderTransform.message(msgs, openaiModel)
+    const result = ProviderTransform.message(msgs, openaiModel, {})
 
     expect(result).toHaveLength(2)
     expect(result[0].content).toBe("")
```
```diff
@@ -585,6 +623,223 @@ describe("ProviderTransform.message - anthropic empty content filtering", () =>
   })
 })
 
+describe("ProviderTransform.message - strip openai metadata when store=false", () => {
+  const openaiModel = {
+    id: "openai/gpt-5",
+    providerID: "openai",
+    api: {
+      id: "gpt-5",
+      url: "https://api.openai.com",
+      npm: "@ai-sdk/openai",
+    },
+    name: "GPT-5",
+    capabilities: {
+      temperature: true,
+      reasoning: true,
+      attachment: true,
+      toolcall: true,
+      input: { text: true, audio: false, image: true, video: false, pdf: false },
+      output: { text: true, audio: false, image: false, video: false, pdf: false },
+      interleaved: false,
+    },
+    cost: { input: 0.03, output: 0.06, cache: { read: 0.001, write: 0.002 } },
+    limit: { context: 128000, output: 4096 },
+    status: "active",
+    options: {},
+    headers: {},
+  } as any
+
+  test("strips itemId and reasoningEncryptedContent when store=false", () => {
+    const msgs = [
+      {
+        role: "assistant",
+        content: [
+          {
+            type: "reasoning",
+            text: "thinking...",
+            providerOptions: {
+              openai: {
+                itemId: "rs_123",
+                reasoningEncryptedContent: "encrypted",
+              },
+            },
+          },
+          {
+            type: "text",
+            text: "Hello",
+            providerOptions: {
+              openai: {
+                itemId: "msg_456",
+              },
+            },
+          },
+        ],
+      },
+    ] as any[]
+
+    const result = ProviderTransform.message(msgs, openaiModel, { store: false }) as any[]
+
+    expect(result).toHaveLength(1)
+    expect(result[0].content[0].providerOptions?.openai?.itemId).toBeUndefined()
+    expect(result[0].content[0].providerOptions?.openai?.reasoningEncryptedContent).toBeUndefined()
+    expect(result[0].content[1].providerOptions?.openai?.itemId).toBeUndefined()
+  })
+
+  test("strips itemId and reasoningEncryptedContent when store=false even when not openai", () => {
+    const zenModel = {
+      ...openaiModel,
+      providerID: "zen",
+    }
+    const msgs = [
+      {
+        role: "assistant",
+        content: [
+          {
+            type: "reasoning",
+            text: "thinking...",
+            providerOptions: {
+              openai: {
+                itemId: "rs_123",
+                reasoningEncryptedContent: "encrypted",
+              },
+            },
+          },
+          {
+            type: "text",
+            text: "Hello",
+            providerOptions: {
+              openai: {
+                itemId: "msg_456",
+              },
+            },
+          },
+        ],
+      },
+    ] as any[]
+
+    const result = ProviderTransform.message(msgs, zenModel, { store: false }) as any[]
+
+    expect(result).toHaveLength(1)
+    expect(result[0].content[0].providerOptions?.openai?.itemId).toBeUndefined()
+    expect(result[0].content[0].providerOptions?.openai?.reasoningEncryptedContent).toBeUndefined()
+    expect(result[0].content[1].providerOptions?.openai?.itemId).toBeUndefined()
+  })
+
+  test("preserves other openai options when stripping itemId", () => {
+    const msgs = [
+      {
+        role: "assistant",
+        content: [
+          {
+            type: "text",
+            text: "Hello",
+            providerOptions: {
+              openai: {
+                itemId: "msg_123",
+                otherOption: "value",
+              },
+            },
+          },
+        ],
+      },
+    ] as any[]
+
+    const result = ProviderTransform.message(msgs, openaiModel, { store: false }) as any[]
+
+    expect(result[0].content[0].providerOptions?.openai?.itemId).toBeUndefined()
+    expect(result[0].content[0].providerOptions?.openai?.otherOption).toBe("value")
+  })
+
+  test("strips metadata for openai package even when store is true", () => {
+    const msgs = [
+      {
+        role: "assistant",
+        content: [
+          {
+            type: "text",
+            text: "Hello",
+            providerOptions: {
+              openai: {
+                itemId: "msg_123",
+              },
+            },
+          },
+        ],
+      },
+    ] as any[]
+
+    // openai package always strips itemId regardless of store value
+    const result = ProviderTransform.message(msgs, openaiModel, { store: true }) as any[]
+
+    expect(result[0].content[0].providerOptions?.openai?.itemId).toBeUndefined()
+  })
+
+  test("strips metadata for non-openai packages when store is false", () => {
+    const anthropicModel = {
+      ...openaiModel,
+      providerID: "anthropic",
+      api: {
+        id: "claude-3",
+        url: "https://api.anthropic.com",
+        npm: "@ai-sdk/anthropic",
+      },
+    }
+    const msgs = [
+      {
+        role: "assistant",
+        content: [
+          {
+            type: "text",
+            text: "Hello",
+            providerOptions: {
+              openai: {
+                itemId: "msg_123",
+              },
+            },
+          },
+        ],
+      },
+    ] as any[]
+
+    // store=false triggers stripping even for non-openai packages
+    const result = ProviderTransform.message(msgs, anthropicModel, { store: false }) as any[]
+
+    expect(result[0].content[0].providerOptions?.openai?.itemId).toBeUndefined()
+  })
+
+  test("does not strip metadata for non-openai packages when store is not false", () => {
+    const anthropicModel = {
+      ...openaiModel,
+      providerID: "anthropic",
+      api: {
+        id: "claude-3",
+        url: "https://api.anthropic.com",
+        npm: "@ai-sdk/anthropic",
+      },
+    }
+    const msgs = [
+      {
+        role: "assistant",
+        content: [
+          {
+            type: "text",
+            text: "Hello",
+            providerOptions: {
+              openai: {
+                itemId: "msg_123",
+              },
+            },
+          },
+        ],
+      },
+    ] as any[]
+
+    const result = ProviderTransform.message(msgs, anthropicModel, {}) as any[]
+
+    expect(result[0].content[0].providerOptions?.openai?.itemId).toBe("msg_123")
+  })
+})
+
 describe("ProviderTransform.variants", () => {
   const createMockModel = (overrides: Partial<any> = {}): any => ({
     id: "test/test-model",
```
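Taken together, the new suite pins down the stripping matrix: the `@ai-sdk/openai` package strips regardless of `store`, other packages strip only when `store === false`, unrelated keys under `providerOptions.openai` survive the strip, and nothing is removed when `store` is not `false`.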