feat: AI SDK v6 support (#18433)

parent 7a7643c86a
commit c33d9996f0

bun.lock | 246 changed lines
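This commit is a coordinated major-version bump across the Vercel AI SDK family: `ai` 5.0.124 → 6.0.138, `@ai-sdk/provider` 2.x → 3.x, `@ai-sdk/provider-utils` 3.x → 4.x, and every provider package up one major. As a minimal sketch of the shared calling convention these packages keep across the bump (model id, API key lookup, and prompt below are illustrative, not values from this diff):

```ts
// Hedged sketch against ai@6 / @ai-sdk/openai@3: the high-level generateText
// call shape is unchanged from v5, so most of this diff is version plumbing.
import { createOpenAI } from "@ai-sdk/openai"
import { generateText } from "ai"

const openai = createOpenAI({ apiKey: process.env.OPENAI_API_KEY })

const { text } = await generateText({
  model: openai("gpt-4o-mini"), // illustrative model id
  prompt: "Summarize this change in one sentence.",
})
console.log(text)
```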
@@ -142,9 +142,9 @@
       "name": "@opencode-ai/console-function",
       "version": "1.3.3",
       "dependencies": {
-        "@ai-sdk/anthropic": "2.0.0",
-        "@ai-sdk/openai": "2.0.2",
-        "@ai-sdk/openai-compatible": "1.0.1",
+        "@ai-sdk/anthropic": "3.0.64",
+        "@ai-sdk/openai": "3.0.48",
+        "@ai-sdk/openai-compatible": "2.0.37",
         "@hono/zod-validator": "catalog:",
         "@openauthjs/openauth": "0.0.0-20250322224806",
         "@opencode-ai/console-core": "workspace:*",
@@ -305,25 +305,25 @@
         "@actions/core": "1.11.1",
         "@actions/github": "6.0.1",
         "@agentclientprotocol/sdk": "0.14.1",
-        "@ai-sdk/amazon-bedrock": "3.0.82",
-        "@ai-sdk/anthropic": "2.0.65",
-        "@ai-sdk/azure": "2.0.91",
-        "@ai-sdk/cerebras": "1.0.36",
-        "@ai-sdk/cohere": "2.0.22",
-        "@ai-sdk/deepinfra": "1.0.36",
-        "@ai-sdk/gateway": "2.0.30",
-        "@ai-sdk/google": "2.0.54",
-        "@ai-sdk/google-vertex": "3.0.106",
-        "@ai-sdk/groq": "2.0.34",
-        "@ai-sdk/mistral": "2.0.27",
-        "@ai-sdk/openai": "2.0.89",
-        "@ai-sdk/openai-compatible": "1.0.32",
-        "@ai-sdk/perplexity": "2.0.23",
-        "@ai-sdk/provider": "2.0.1",
-        "@ai-sdk/provider-utils": "3.0.21",
-        "@ai-sdk/togetherai": "1.0.34",
-        "@ai-sdk/vercel": "1.0.33",
-        "@ai-sdk/xai": "2.0.51",
+        "@ai-sdk/amazon-bedrock": "4.0.83",
+        "@ai-sdk/anthropic": "3.0.64",
+        "@ai-sdk/azure": "3.0.49",
+        "@ai-sdk/cerebras": "2.0.41",
+        "@ai-sdk/cohere": "3.0.27",
+        "@ai-sdk/deepinfra": "2.0.41",
+        "@ai-sdk/gateway": "3.0.80",
+        "@ai-sdk/google": "3.0.53",
+        "@ai-sdk/google-vertex": "4.0.95",
+        "@ai-sdk/groq": "3.0.31",
+        "@ai-sdk/mistral": "3.0.27",
+        "@ai-sdk/openai": "3.0.48",
+        "@ai-sdk/openai-compatible": "2.0.37",
+        "@ai-sdk/perplexity": "3.0.26",
+        "@ai-sdk/provider": "3.0.8",
+        "@ai-sdk/provider-utils": "4.0.21",
+        "@ai-sdk/togetherai": "2.0.41",
+        "@ai-sdk/vercel": "2.0.39",
+        "@ai-sdk/xai": "3.0.74",
         "@aws-sdk/credential-providers": "3.993.0",
         "@clack/prompts": "1.0.0-alpha.1",
         "@effect/platform-node": "catalog:",
@@ -337,7 +337,7 @@
         "@opencode-ai/script": "workspace:*",
         "@opencode-ai/sdk": "workspace:*",
         "@opencode-ai/util": "workspace:*",
-        "@openrouter/ai-sdk-provider": "1.5.4",
+        "@openrouter/ai-sdk-provider": "2.3.3",
         "@opentui/core": "0.1.90",
         "@opentui/solid": "0.1.90",
         "@parcel/watcher": "2.5.1",
@@ -347,7 +347,7 @@
         "@standard-schema/spec": "1.0.0",
         "@zip.js/zip.js": "2.7.62",
         "ai": "catalog:",
-        "ai-gateway-provider": "2.3.1",
+        "ai-gateway-provider": "3.1.2",
         "bonjour-service": "1.3.0",
         "bun-pty": "0.4.8",
         "chokidar": "4.0.3",
@@ -358,7 +358,7 @@
         "drizzle-orm": "catalog:",
         "effect": "catalog:",
         "fuzzysort": "3.1.0",
-        "gitlab-ai-provider": "5.3.3",
+        "gitlab-ai-provider": "6.0.0",
         "glob": "13.0.5",
         "google-auth-library": "10.5.0",
         "gray-matter": "4.0.3",
@@ -599,10 +599,10 @@
     "tree-sitter-bash",
   ],
   "patchedDependencies": {
-    "@openrouter/ai-sdk-provider@1.5.4": "patches/@openrouter%2Fai-sdk-provider@1.5.4.patch",
     "solid-js@1.9.10": "patches/solid-js@1.9.10.patch",
-    "@ai-sdk/xai@2.0.51": "patches/@ai-sdk%2Fxai@2.0.51.patch",
     "@standard-community/standard-openapi@0.2.9": "patches/@standard-community%2Fstandard-openapi@0.2.9.patch",
+    "@ai-sdk/anthropic@3.0.64": "patches/@ai-sdk%2Fanthropic@3.0.64.patch",
+    "@ai-sdk/provider-utils@4.0.21": "patches/@ai-sdk%2Fprovider-utils@4.0.21.patch",
   },
   "overrides": {
     "@types/bun": "catalog:",
@@ -629,7 +629,7 @@
     "@types/node": "22.13.9",
     "@types/semver": "7.7.1",
     "@typescript/native-preview": "7.0.0-dev.20251207.1",
-    "ai": "5.0.124",
+    "ai": "6.0.138",
    "diff": "8.0.2",
    "dompurify": "3.3.1",
    "drizzle-kit": "1.0.0-beta.19-d95b7a4",
@@ -673,51 +673,51 @@

    "@agentclientprotocol/sdk": ["@agentclientprotocol/sdk@0.14.1", "", { "peerDependencies": { "zod": "^3.25.0 || ^4.0.0" } }, "sha512-b6r3PS3Nly+Wyw9U+0nOr47bV8tfS476EgyEMhoKvJCZLbgqoDFN7DJwkxL88RR0aiOqOYV1ZnESHqb+RmdH8w=="],

-    "@ai-sdk/amazon-bedrock": ["@ai-sdk/amazon-bedrock@3.0.82", "", { "dependencies": { "@ai-sdk/anthropic": "2.0.65", "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.21", "@smithy/eventstream-codec": "^4.0.1", "@smithy/util-utf8": "^4.0.0", "aws4fetch": "^1.0.20" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-yb1EkRCMWex0tnpHPLGQxoJEiJvMGOizuxzlXFOpuGFiYgE679NsWE/F8pHwtoAWsqLlylgGAJvJDIJ8us8LEw=="],
+    "@ai-sdk/amazon-bedrock": ["@ai-sdk/amazon-bedrock@4.0.83", "", { "dependencies": { "@ai-sdk/anthropic": "3.0.64", "@ai-sdk/provider": "3.0.8", "@ai-sdk/provider-utils": "4.0.21", "@smithy/eventstream-codec": "^4.0.1", "@smithy/util-utf8": "^4.0.0", "aws4fetch": "^1.0.20" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-DoRpvIWGU/r83UeJAM9L93Lca8Kf/yP5fIhfEOltMPGP/PXrGe0BZaz0maLSRn8djJ6+HzWIsgu5ZI6bZqXEXg=="],

-    "@ai-sdk/anthropic": ["@ai-sdk/anthropic@2.0.0", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@ai-sdk/provider-utils": "3.0.0" }, "peerDependencies": { "zod": "^3.25.76 || ^4" } }, "sha512-uyyaO4KhxoIKZztREqLPh+6/K3ZJx/rp72JKoUEL9/kC+vfQTThUfPnY/bUryUpcnawx8IY/tSoYNOi/8PCv7w=="],
+    "@ai-sdk/anthropic": ["@ai-sdk/anthropic@3.0.64", "", { "dependencies": { "@ai-sdk/provider": "3.0.8", "@ai-sdk/provider-utils": "4.0.21" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-rwLi/Rsuj2pYniQXIrvClHvXDzgM4UQHHnvHTWEF14efnlKclG/1ghpNC+adsRujAbCTr6gRsSbDE2vEqriV7g=="],

-    "@ai-sdk/azure": ["@ai-sdk/azure@2.0.91", "", { "dependencies": { "@ai-sdk/openai": "2.0.89", "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.20" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-9tznVSs6LGQNKKxb8pKd7CkBV9yk+a/ENpFicHCj2CmBUKefxzwJ9JbUqrlK3VF6dGZw3LXq0dWxt7/Yekaj1w=="],
+    "@ai-sdk/azure": ["@ai-sdk/azure@3.0.49", "", { "dependencies": { "@ai-sdk/openai": "3.0.48", "@ai-sdk/provider": "3.0.8", "@ai-sdk/provider-utils": "4.0.21" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-wskgAL+OmrHG7by/iWIxEBQCEdc1mDudha/UZav46i0auzdFfsDB/k2rXZaC4/3nWSgMZkxr0W3ncyouEGX/eg=="],

-    "@ai-sdk/cerebras": ["@ai-sdk/cerebras@1.0.36", "", { "dependencies": { "@ai-sdk/openai-compatible": "1.0.32", "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.20" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-zoJYL33+ieyd86FSP0Whm86D79d1lKPR7wUzh1SZ1oTxwYmsGyvIrmMf2Ll0JA9Ds2Es6qik4VaFCrjwGYRTIQ=="],
+    "@ai-sdk/cerebras": ["@ai-sdk/cerebras@2.0.41", "", { "dependencies": { "@ai-sdk/openai-compatible": "2.0.37", "@ai-sdk/provider": "3.0.8", "@ai-sdk/provider-utils": "4.0.21" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-kDMEpjaRdRXIUi1EH8WHwLRahyDTYv9SAJnP6VCCeq8X+tVqZbMLCqqxSG5dRknrI65ucjvzQt+FiDKTAa7AHg=="],

-    "@ai-sdk/cohere": ["@ai-sdk/cohere@2.0.22", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.20" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-yJ9kP5cEDJwo8qpITq5TQFD8YNfNtW+HbyvWwrKMbFzmiMvIZuk95HIaFXE7PCTuZsqMA05yYu+qX/vQ3rNKjA=="],
+    "@ai-sdk/cohere": ["@ai-sdk/cohere@3.0.27", "", { "dependencies": { "@ai-sdk/provider": "3.0.8", "@ai-sdk/provider-utils": "4.0.21" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-OqcCq2PiFY1dbK/0Ck45KuvE8jfdxRuuAE9Y5w46dAk6U+9vPOeg1CDcmR+ncqmrYrhRl3nmyDttyDahyjCzAw=="],

-    "@ai-sdk/deepgram": ["@ai-sdk/deepgram@1.0.24", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.22" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-E+wzGPSa/XHmajO3WtX8mtq0ewy04tsHSpU6/SGwqbiykwWba/emi7ayZ4ir89s5OzbAen2g7T9zZiEchMfkHQ=="],
+    "@ai-sdk/deepgram": ["@ai-sdk/deepgram@2.0.24", "", { "dependencies": { "@ai-sdk/provider": "3.0.8", "@ai-sdk/provider-utils": "4.0.19" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-htT1Y7vBN0cRu/1pGnhx6DNH3xaNr0o0MjDkmii48X2+6S/WkOzVNtMjn7V3vLWEQIWNio5vw1hG/F43K8WLHA=="],

-    "@ai-sdk/deepinfra": ["@ai-sdk/deepinfra@1.0.36", "", { "dependencies": { "@ai-sdk/openai-compatible": "1.0.33", "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.21" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-LndvRktEgY2IFu4peDJMEXcjhHEEFtM0upLx/J64kCpFHCifalXpK4PPSX3PVndnn0bJzvamO5+fc0z2ooqBZw=="],
+    "@ai-sdk/deepinfra": ["@ai-sdk/deepinfra@2.0.41", "", { "dependencies": { "@ai-sdk/openai-compatible": "2.0.37", "@ai-sdk/provider": "3.0.8", "@ai-sdk/provider-utils": "4.0.21" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-y6RoOP7DGWmDSiSxrUSt5p18sbz+Ixe5lMVPmdE7x+Tr5rlrzvftyHhjWHfqlAtoYERZTGFbP6tPW1OfQcrb4A=="],

-    "@ai-sdk/deepseek": ["@ai-sdk/deepseek@1.0.35", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.22" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-Qvh2yxL5zJS9RO/Bf12pyYBIDmn+9GR1hT6e28IYWQWnt2Xq0h9XGps6XagLAv3VYYFg8c/ozkWVd4kXLZ25HA=="],
+    "@ai-sdk/deepseek": ["@ai-sdk/deepseek@2.0.24", "", { "dependencies": { "@ai-sdk/provider": "3.0.8", "@ai-sdk/provider-utils": "4.0.19" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-4vOEekW4TAYVHN0qgiwoUOQZhguGwZBiEw8LDeUmpWBm07QkLRAtxYCaSoMiA4hZZojao5mj6NRGEBW1CnDPtg=="],

-    "@ai-sdk/elevenlabs": ["@ai-sdk/elevenlabs@1.0.24", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.22" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-ee2At5jgV+SqC6nrtPq20iH7N/aN+O36LrA4gkzVM4cmhM7bvQKVkOXhC1XxG+wsYG6UZi3Nekoi8MEjNWuRrw=="],
+    "@ai-sdk/elevenlabs": ["@ai-sdk/elevenlabs@2.0.24", "", { "dependencies": { "@ai-sdk/provider": "3.0.8", "@ai-sdk/provider-utils": "4.0.19" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-K+1YprVMO8R6vTcNhqTqUWhOzX5V/hEY0pFx9KQL0/+MJjOgRi6DcOLoNBd7ONcjxYTyiFLRfk/0a/pHTtSgFA=="],

-    "@ai-sdk/fireworks": ["@ai-sdk/fireworks@1.0.35", "", { "dependencies": { "@ai-sdk/openai-compatible": "1.0.34", "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.22" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-inUq29XvSVDer6JIeOkwAmCFxOtHPU0OZEhwaWoe3PI59naHIW4RIFA9wppLLV5fJI9WQcAfDKy0ZHW9nV3UJw=="],
+    "@ai-sdk/fireworks": ["@ai-sdk/fireworks@2.0.40", "", { "dependencies": { "@ai-sdk/openai-compatible": "2.0.35", "@ai-sdk/provider": "3.0.8", "@ai-sdk/provider-utils": "4.0.19" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-ARjygiBQtVSgNBp3Sag+Bkwn68ub+cZPC05UpRGG+VY8/Q896K2yU1j4I0+S1eU0BQW/9DKbRG04d9Ayi2DUmA=="],

-    "@ai-sdk/gateway": ["@ai-sdk/gateway@2.0.30", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.20", "@vercel/oidc": "3.1.0" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-5Nrkj8B4MzkkOfjjA+Cs5pamkbkK4lI11bx80QV7TFcen/hWA8wEC+UVzwuM5H2zpekoNMjvl6GonHnR62XIZw=="],
+    "@ai-sdk/gateway": ["@ai-sdk/gateway@3.0.80", "", { "dependencies": { "@ai-sdk/provider": "3.0.8", "@ai-sdk/provider-utils": "4.0.21", "@vercel/oidc": "3.1.0" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-uM7kpZB5l977lW7+2X1+klBUxIZQ78+1a9jHlaHFEzcOcmmslTl3sdP0QqfuuBcO0YBM2gwOiqVdp8i4TRQYcw=="],

-    "@ai-sdk/google": ["@ai-sdk/google@2.0.54", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.21" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-VKguP0x/PUYpdQyuA/uy5pDGJy6reL0X/yDKxHfL207aCUXpFIBmyMhVs4US39dkEVhtmIFSwXauY0Pt170JRw=="],
+    "@ai-sdk/google": ["@ai-sdk/google@3.0.53", "", { "dependencies": { "@ai-sdk/provider": "3.0.8", "@ai-sdk/provider-utils": "4.0.21" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-uz8tIlkDgQJG9Js2Wh9JHzd4kI9+hYJqf9XXJLx60vyN5mRIqhr49iwR5zGP5Gl8odp2PeR3Gh2k+5bh3Z1HHw=="],

-    "@ai-sdk/google-vertex": ["@ai-sdk/google-vertex@3.0.106", "", { "dependencies": { "@ai-sdk/anthropic": "2.0.65", "@ai-sdk/google": "2.0.54", "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.21", "google-auth-library": "^10.5.0" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-f9sA66bmhgJoTwa+pHWFSdYxPa0lgdQ/MgYNxZptzVyGptoziTf1a9EIXEL3jiCD0qIBAg+IhDAaYalbvZaDqQ=="],
+    "@ai-sdk/google-vertex": ["@ai-sdk/google-vertex@4.0.95", "", { "dependencies": { "@ai-sdk/anthropic": "3.0.64", "@ai-sdk/google": "3.0.53", "@ai-sdk/provider": "3.0.8", "@ai-sdk/provider-utils": "4.0.21", "google-auth-library": "^10.5.0" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-xL44fHlTtDM7RLkMTgyqMfkfthA38JS91bbMaHItObIhte1PAIY936ZV1PLl/Z9A/oBAXjHWbXo5xDoHzB7LEg=="],

-    "@ai-sdk/groq": ["@ai-sdk/groq@2.0.34", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.20" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-wfCYkVgmVjxNA32T57KbLabVnv9aFUflJ4urJ7eWgTwbnmGQHElCTu+rJ3ydxkXSqxOkXPwMOttDm7XNrvPjmg=="],
+    "@ai-sdk/groq": ["@ai-sdk/groq@3.0.31", "", { "dependencies": { "@ai-sdk/provider": "3.0.8", "@ai-sdk/provider-utils": "4.0.21" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-XbbugpnFmXGu2TlXiq8KUJskP6/VVbuFcnFIGDzDIB/Chg6XHsNnqrTF80Zxkh0Pd3+NvbM+2Uqrtsndk6bDAg=="],

-    "@ai-sdk/mistral": ["@ai-sdk/mistral@2.0.27", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.20" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-gaptHgaXjMw3+eA0Q4FABcsj5nQNP6EpFaGUR+Pj5WJy7Kn6mApl975/x57224MfeJIShNpt8wFKK3tvh5ewKg=="],
+    "@ai-sdk/mistral": ["@ai-sdk/mistral@3.0.27", "", { "dependencies": { "@ai-sdk/provider": "3.0.8", "@ai-sdk/provider-utils": "4.0.21" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-ZXe7nZQgliDdjz5ufH5RKpHWxbN72AzmzzKGbF/z+0K9GN5tUCnftrQRvTRFHA5jAzTapcm2BEevmGLVbMkW+A=="],

-    "@ai-sdk/openai": ["@ai-sdk/openai@2.0.2", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@ai-sdk/provider-utils": "3.0.0" }, "peerDependencies": { "zod": "^3.25.76 || ^4" } }, "sha512-D4zYz2uR90aooKQvX1XnS00Z7PkbrcY+snUvPfm5bCabTG7bzLrVtD56nJ5bSaZG8lmuOMfXpyiEEArYLyWPpw=="],
+    "@ai-sdk/openai": ["@ai-sdk/openai@3.0.48", "", { "dependencies": { "@ai-sdk/provider": "3.0.8", "@ai-sdk/provider-utils": "4.0.21" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-ALmj/53EXpcRqMbGpPJPP4UOSWw0q4VGpnDo7YctvsynjkrKDmoneDG/1a7VQnSPYHnJp6tTRMf5ZdxZ5whulg=="],

-    "@ai-sdk/openai-compatible": ["@ai-sdk/openai-compatible@1.0.1", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@ai-sdk/provider-utils": "3.0.0" }, "peerDependencies": { "zod": "^3.25.76 || ^4" } }, "sha512-luHVcU+yKzwv3ekKgbP3v+elUVxb2Rt+8c6w9qi7g2NYG2/pEL21oIrnaEnc6UtTZLLZX9EFBcpq2N1FQKDIMw=="],
+    "@ai-sdk/openai-compatible": ["@ai-sdk/openai-compatible@2.0.37", "", { "dependencies": { "@ai-sdk/provider": "3.0.8", "@ai-sdk/provider-utils": "4.0.21" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-+POSFVcgiu47BK64dhsI6OpcDC0/VAE2ZSaXdXGNNhpC/ava++uSRJYks0k2bpfY0wwCTgpAWZsXn/dG2Yppiw=="],

-    "@ai-sdk/perplexity": ["@ai-sdk/perplexity@2.0.23", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.20" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-aiaRvnc6mhQZKhTTSXPCjPH8Iqr5D/PfCN1hgVP/3RGTBbJtsd9HemIBSABeSdAKbsMH/PwJxgnqH75HEamcBA=="],
+    "@ai-sdk/perplexity": ["@ai-sdk/perplexity@3.0.26", "", { "dependencies": { "@ai-sdk/provider": "3.0.8", "@ai-sdk/provider-utils": "4.0.21" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-dXzrVsLR5f6tr+U04jq4AXoRroGFBTvODnLgss0SWbzNjGGQg3XqtQ9j7rCLo6o8qbYGuAHvqUrIpUCuiscuFg=="],

-    "@ai-sdk/provider": ["@ai-sdk/provider@2.0.1", "", { "dependencies": { "json-schema": "^0.4.0" } }, "sha512-KCUwswvsC5VsW2PWFqF8eJgSCu5Ysj7m1TxiHTVA6g7k360bk0RNQENT8KTMAYEs+8fWPD3Uu4dEmzGHc+jGng=="],
+    "@ai-sdk/provider": ["@ai-sdk/provider@3.0.8", "", { "dependencies": { "json-schema": "^0.4.0" } }, "sha512-oGMAgGoQdBXbZqNG0Ze56CHjDZ1IDYOwGYxYjO5KLSlz5HiNQ9udIXsPZ61VWaHGZ5XW/jyjmr6t2xz2jGVwbQ=="],

-    "@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.21", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-veuMwTLxsgh31Jjn0SnBABnM1f7ebHhRWcV2ZuY3hP3iJDCZ8VXBaYqcHXoOQDqUXTCas08sKQcHyWK+zl882Q=="],
+    "@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@4.0.21", "", { "dependencies": { "@ai-sdk/provider": "3.0.8", "@standard-schema/spec": "^1.1.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-MtFUYI1/8mgDvRmaBDjbLJPFFrMG777AvSgyIFQtZHIMzm88R/12vYBBpnk7pfiWLFE1DSZzY4WDYzGbKAcmiw=="],

-    "@ai-sdk/togetherai": ["@ai-sdk/togetherai@1.0.34", "", { "dependencies": { "@ai-sdk/openai-compatible": "1.0.32", "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.20" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-jjJmJms6kdEc4nC3MDGFJfhV8F1ifY4nolV2dbnT7BM4ab+Wkskc0GwCsJ7G7WdRMk7xDbFh4he3DPL8KJ/cyA=="],
+    "@ai-sdk/togetherai": ["@ai-sdk/togetherai@2.0.41", "", { "dependencies": { "@ai-sdk/openai-compatible": "2.0.37", "@ai-sdk/provider": "3.0.8", "@ai-sdk/provider-utils": "4.0.21" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-k3p9e3k0/gpDDyTtvafsK4HYR4D/aUQW/kzCwWo1+CzdBU84i4L14gWISC/mv6tgSicMXHcEUd521fPufQwNlg=="],

-    "@ai-sdk/vercel": ["@ai-sdk/vercel@1.0.33", "", { "dependencies": { "@ai-sdk/openai-compatible": "1.0.32", "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.20" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-Qwjm+HdwKasu7L9bDUryBMGKDMscIEzMUkjw/33uGdJpktzyNW13YaNIObOZ2HkskqDMIQJSd4Ao2BBT8fEYLw=="],
+    "@ai-sdk/vercel": ["@ai-sdk/vercel@2.0.39", "", { "dependencies": { "@ai-sdk/openai-compatible": "2.0.37", "@ai-sdk/provider": "3.0.8", "@ai-sdk/provider-utils": "4.0.21" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-8eu3ljJpkCTP4ppcyYB+NcBrkcBoSOFthCSgk5VnjaxnDaOJFaxnPwfddM7wx3RwMk2CiK1O61Px/LlqNc7QkQ=="],

-    "@ai-sdk/xai": ["@ai-sdk/xai@2.0.51", "", { "dependencies": { "@ai-sdk/openai-compatible": "1.0.30", "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.20" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-AI3le03qiegkZvn9hpnpDwez49lOvQLj4QUBT8H41SMbrdTYOxn3ktTwrsSu90cNDdzKGMvoH0u2GHju1EdnCg=="],
+    "@ai-sdk/xai": ["@ai-sdk/xai@3.0.74", "", { "dependencies": { "@ai-sdk/openai-compatible": "2.0.37", "@ai-sdk/provider": "3.0.8", "@ai-sdk/provider-utils": "4.0.21" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-HDDLsT+QrzE3c2QZLRV/HKAwMtXDb0PMDdk1PYUXLJ3r9Qv76zGKGyvJLX7Pu6c8TOHD1mwLrOVYrsTpC/eTMw=="],

    "@alloc/quick-lru": ["@alloc/quick-lru@5.2.0", "", {}, "sha512-UrcABB+4bUrFABwbluTIBErXwvbsU/V7TZWfmbgJfbkwiBuziS9gxdODUyuiecfdGQ85jglMW6juS3+z5TsKLw=="],

@@ -1455,9 +1455,7 @@

    "@opencode-ai/web": ["@opencode-ai/web@workspace:packages/web"],

-    "@openrouter/ai-sdk-provider": ["@openrouter/ai-sdk-provider@1.5.4", "", { "dependencies": { "@openrouter/sdk": "^0.1.27" }, "peerDependencies": { "ai": "^5.0.0", "zod": "^3.24.1 || ^v4" } }, "sha512-xrSQPUIH8n9zuyYZR0XK7Ba0h2KsjJcMkxnwaYfmv13pKs3sDkjPzVPPhlhzqBGddHb5cFEwJ9VFuFeDcxCDSw=="],
-
-    "@openrouter/sdk": ["@openrouter/sdk@0.1.27", "", { "dependencies": { "zod": "^3.25.0 || ^4.0.0" } }, "sha512-RH//L10bSmc81q25zAZudiI4kNkLgxF2E+WU42vghp3N6TEvZ6F0jK7uT3tOxkEn91gzmMw9YVmDENy7SJsajQ=="],
+    "@openrouter/ai-sdk-provider": ["@openrouter/ai-sdk-provider@2.3.3", "", { "peerDependencies": { "ai": "^6.0.0", "zod": "^3.25.0 || ^4.0.0" } }, "sha512-4fVteGkVedc7fGoA9+qJs4tpYwALezMq14m2Sjub3KmyRlksCbK+WJf67NPdGem8+NZrV2tAN42A1NU3+SiV3w=="],

    "@opentelemetry/api": ["@opentelemetry/api@1.9.0", "", {}, "sha512-3giAOQvZiH5F9bMlMiv8+GSPMeqg0dbaeo58/0SlA9sxSqZhnUtxzX9/2FzyhS9sWQf5S0GJE0AKBrFqjpeYcg=="],

@@ -2271,9 +2269,9 @@

    "agentkeepalive": ["agentkeepalive@4.6.0", "", { "dependencies": { "humanize-ms": "^1.2.1" } }, "sha512-kja8j7PjmncONqaTsB8fQ+wE2mSU2DJ9D4XKoJ5PFWIdRMa6SLSN1ff4mOr4jCbfRSsxR4keIiySJU0N9T5hIQ=="],

-    "ai": ["ai@5.0.124", "", { "dependencies": { "@ai-sdk/gateway": "2.0.30", "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.20", "@opentelemetry/api": "1.9.0" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-Li6Jw9F9qsvFJXZPBfxj38ddP2iURCnMs96f9Q3OeQzrDVcl1hvtwSEAuxA/qmfh6SDV2ERqFUOFzigvr0697g=="],
+    "ai": ["ai@6.0.138", "", { "dependencies": { "@ai-sdk/gateway": "3.0.80", "@ai-sdk/provider": "3.0.8", "@ai-sdk/provider-utils": "4.0.21", "@opentelemetry/api": "1.9.0" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-49OfPe0f5uxJ6jUdA5BBXjIinP6+ZdYfAtpF2aEH64GA5wPcxH2rf/TBUQQ0bbamBz/D+TLMV18xilZqOC+zaA=="],

-    "ai-gateway-provider": ["ai-gateway-provider@2.3.1", "", { "dependencies": { "@ai-sdk/provider": "^2.0.0", "@ai-sdk/provider-utils": "^3.0.19", "ai": "^5.0.116" }, "optionalDependencies": { "@ai-sdk/amazon-bedrock": "^3.0.71", "@ai-sdk/anthropic": "^2.0.56", "@ai-sdk/azure": "^2.0.90", "@ai-sdk/cerebras": "^1.0.33", "@ai-sdk/cohere": "^2.0.21", "@ai-sdk/deepgram": "^1.0.21", "@ai-sdk/deepseek": "^1.0.32", "@ai-sdk/elevenlabs": "^1.0.21", "@ai-sdk/fireworks": "^1.0.30", "@ai-sdk/google": "^2.0.51", "@ai-sdk/google-vertex": "3.0.90", "@ai-sdk/groq": "^2.0.33", "@ai-sdk/mistral": "^2.0.26", "@ai-sdk/openai": "^2.0.88", "@ai-sdk/perplexity": "^2.0.22", "@ai-sdk/xai": "^2.0.42", "@openrouter/ai-sdk-provider": "^1.5.3" }, "peerDependencies": { "@ai-sdk/openai-compatible": "^1.0.29" } }, "sha512-PqI6TVNEDNwr7kOhy7XUGnA8XJB1SpeA9aLqGjr0CyWkKgH+y+ofPm8MZGZ74DOwVejDF+POZq0Qs9jKEKUeYg=="],
+    "ai-gateway-provider": ["ai-gateway-provider@3.1.2", "", { "optionalDependencies": { "@ai-sdk/amazon-bedrock": "^4.0.62", "@ai-sdk/anthropic": "^3.0.46", "@ai-sdk/azure": "^3.0.31", "@ai-sdk/cerebras": "^2.0.34", "@ai-sdk/cohere": "^3.0.21", "@ai-sdk/deepgram": "^2.0.20", "@ai-sdk/deepseek": "^2.0.20", "@ai-sdk/elevenlabs": "^2.0.20", "@ai-sdk/fireworks": "^2.0.34", "@ai-sdk/google": "^3.0.30", "@ai-sdk/google-vertex": "^4.0.61", "@ai-sdk/groq": "^3.0.24", "@ai-sdk/mistral": "^3.0.20", "@ai-sdk/openai": "^3.0.30", "@ai-sdk/perplexity": "^3.0.19", "@ai-sdk/xai": "^3.0.57", "@openrouter/ai-sdk-provider": "^2.2.3" }, "peerDependencies": { "@ai-sdk/openai-compatible": "^2.0.0", "@ai-sdk/provider": "^3.0.0", "@ai-sdk/provider-utils": "^4.0.0", "ai": "^6.0.0" } }, "sha512-krGNnJSoO/gJ7Hbe5nQDlsBpDUGIBGtMQTRUaW7s1MylsfvLduba0TLWzQaGtOmNRkP0pGhtGlwsnS6FNQMlyw=="],

    "ajv": ["ajv@8.18.0", "", { "dependencies": { "fast-deep-equal": "^3.1.3", "fast-uri": "^3.0.1", "json-schema-traverse": "^1.0.0", "require-from-string": "^2.0.2" } }, "sha512-PlXPeEWMXMZ7sPYOHqmDyCJzcfNrUr3fGNKtezX14ykXOEIvyK81d+qydx89KY5O71FKMPaQ2vBfBFI5NHR63A=="],

@@ -3049,7 +3047,7 @@

    "github-slugger": ["github-slugger@2.0.0", "", {}, "sha512-IaOQ9puYtjrkq7Y0Ygl9KDZnrf/aiUJYUpVf89y8kyaxbRG7Y1SrX/jaumrv81vc61+kiMempujsM3Yw7w5qcw=="],

-    "gitlab-ai-provider": ["gitlab-ai-provider@5.3.3", "", { "dependencies": { "@anthropic-ai/sdk": "^0.71.0", "@anycable/core": "^0.9.2", "graphql-request": "^6.1.0", "isomorphic-ws": "^5.0.0", "openai": "^6.16.0", "socket.io-client": "^4.8.1", "vscode-jsonrpc": "^8.2.1", "zod": "^3.25.76" }, "peerDependencies": { "@ai-sdk/provider": ">=2.0.0", "@ai-sdk/provider-utils": ">=3.0.0" } }, "sha512-k0kRUoAhDvoRC28hQW4sPp+A3cfpT5c/oL9Ng10S0oBiF2Tci1AtsX1iclJM5Os8C1nIIAXBW8LMr0GY7rwcGA=="],
+    "gitlab-ai-provider": ["gitlab-ai-provider@6.0.0", "", { "dependencies": { "@anthropic-ai/sdk": "^0.71.0", "@anycable/core": "^0.9.2", "graphql-request": "^6.1.0", "isomorphic-ws": "^5.0.0", "openai": "^6.16.0", "socket.io-client": "^4.8.1", "vscode-jsonrpc": "^8.2.1", "zod": "^3.25.76" }, "peerDependencies": { "@ai-sdk/provider": ">=3.0.0", "@ai-sdk/provider-utils": ">=4.0.0" } }, "sha512-683GcJdrer/GhnljkbVcGsndCEhvGB8f9fUdCxQBlkuyt8rzf0G9DpSh+iMBYp9HpcSvYmYG0Qv5ks9dLrNxwQ=="],

    "glob": ["glob@13.0.5", "", { "dependencies": { "minimatch": "^10.2.1", "minipass": "^7.1.2", "path-scurry": "^2.0.0" } }, "sha512-BzXxZg24Ibra1pbQ/zE7Kys4Ua1ks7Bn6pKLkVPZ9FZe4JQS6/Q7ef3LG1H+k7lUf5l4T3PLSyYyYJVYUvfgTw=="],

@@ -4799,63 +4797,21 @@

    "@actions/http-client/undici": ["undici@6.23.0", "", {}, "sha512-VfQPToRA5FZs/qJxLIinmU59u0r7LXqoJkCzinq3ckNJp3vKEh7jTWN589YQ5+aoAC/TGRLyJLCPKcLQbM8r9g=="],

-    "@ai-sdk/amazon-bedrock/@ai-sdk/anthropic": ["@ai-sdk/anthropic@2.0.65", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.21" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-HqTPP59mLQ9U6jXQcx6EORkdc5FyZu34Sitkg6jNpyMYcRjStvfx4+NWq/qaR+OTwBFcccv8hvVii0CYkH2Lag=="],
+    "@ai-sdk/amazon-bedrock/@smithy/eventstream-codec": ["@smithy/eventstream-codec@4.2.11", "", { "dependencies": { "@aws-crypto/crc32": "5.2.0", "@smithy/types": "^4.13.0", "@smithy/util-hex-encoding": "^4.2.2", "tslib": "^2.6.2" } }, "sha512-Sf39Ml0iVX+ba/bgMPxaXWAAFmHqYLTmbjAPfLPLY8CrYkRDEqZdUsKC1OwVMCdJXfAt0v4j49GIJ8DoSYAe6w=="],

-    "@ai-sdk/anthropic/@ai-sdk/provider": ["@ai-sdk/provider@2.0.0", "", { "dependencies": { "json-schema": "^0.4.0" } }, "sha512-6o7Y2SeO9vFKB8lArHXehNuusnpddKPk7xqL7T2/b+OvXMRIXUO1rR4wcv1hAFUAT9avGZshty3Wlua/XA7TvA=="],
+    "@ai-sdk/amazon-bedrock/@smithy/util-utf8": ["@smithy/util-utf8@4.2.2", "", { "dependencies": { "@smithy/util-buffer-from": "^4.2.2", "tslib": "^2.6.2" } }, "sha512-75MeYpjdWRe8M5E3AW0O4Cx3UadweS+cwdXjwYGBW5h/gxxnbeZ877sLPX/ZJA9GVTlL/qG0dXP29JWFCD1Ayw=="],

-    "@ai-sdk/anthropic/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.0", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.3", "zod-to-json-schema": "^3.24.1" }, "peerDependencies": { "zod": "^3.25.76 || ^4" } }, "sha512-BoQZtGcBxkeSH1zK+SRYNDtJPIPpacTeiMZqnG4Rv6xXjEwM0FH4MGs9c+PlhyEWmQCzjRM2HAotEydFhD4dYw=="],
+    "@ai-sdk/deepgram/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@4.0.19", "", { "dependencies": { "@ai-sdk/provider": "3.0.8", "@standard-schema/spec": "^1.1.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-3eG55CrSWCu2SXlqq2QCsFjo3+E7+Gmg7i/oRVoSZzIodTuDSfLb3MRje67xE9RFea73Zao7Lm4mADIfUETKGg=="],

-    "@ai-sdk/azure/@ai-sdk/openai": ["@ai-sdk/openai@2.0.89", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.20" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-4+qWkBCbL9HPKbgrUO/F2uXZ8GqrYxHa8SWEYIzxEJ9zvWw3ISr3t1/27O1i8MGSym+PzEyHBT48EV4LAwWaEw=="],
+    "@ai-sdk/deepseek/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@4.0.19", "", { "dependencies": { "@ai-sdk/provider": "3.0.8", "@standard-schema/spec": "^1.1.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-3eG55CrSWCu2SXlqq2QCsFjo3+E7+Gmg7i/oRVoSZzIodTuDSfLb3MRje67xE9RFea73Zao7Lm4mADIfUETKGg=="],

-    "@ai-sdk/azure/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.20", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-iXHVe0apM2zUEzauqJwqmpC37A5rihrStAih5Ks+JE32iTe4LZ58y17UGBjpQQTCRw9YxMeo2UFLxLpBluyvLQ=="],
+    "@ai-sdk/elevenlabs/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@4.0.19", "", { "dependencies": { "@ai-sdk/provider": "3.0.8", "@standard-schema/spec": "^1.1.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-3eG55CrSWCu2SXlqq2QCsFjo3+E7+Gmg7i/oRVoSZzIodTuDSfLb3MRje67xE9RFea73Zao7Lm4mADIfUETKGg=="],

-    "@ai-sdk/cerebras/@ai-sdk/openai-compatible": ["@ai-sdk/openai-compatible@1.0.32", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.20" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-YspqqyJPzHjqWrjt4y/Wgc2aJgCcQj5uIJgZpq2Ar/lH30cEVhgE+keePDbjKpetD9UwNggCj7u6kO3unS23OQ=="],
+    "@ai-sdk/fireworks/@ai-sdk/openai-compatible": ["@ai-sdk/openai-compatible@2.0.35", "", { "dependencies": { "@ai-sdk/provider": "3.0.8", "@ai-sdk/provider-utils": "4.0.19" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-g3wA57IAQFb+3j4YuFndgkUdXyRETZVvbfAWM+UX7bZSxA3xjes0v3XKgIdKdekPtDGsh4ZX2byHD0gJIMPfiA=="],

-    "@ai-sdk/cerebras/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.20", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-iXHVe0apM2zUEzauqJwqmpC37A5rihrStAih5Ks+JE32iTe4LZ58y17UGBjpQQTCRw9YxMeo2UFLxLpBluyvLQ=="],
+    "@ai-sdk/fireworks/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@4.0.19", "", { "dependencies": { "@ai-sdk/provider": "3.0.8", "@standard-schema/spec": "^1.1.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-3eG55CrSWCu2SXlqq2QCsFjo3+E7+Gmg7i/oRVoSZzIodTuDSfLb3MRje67xE9RFea73Zao7Lm4mADIfUETKGg=="],

-    "@ai-sdk/cohere/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.20", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-iXHVe0apM2zUEzauqJwqmpC37A5rihrStAih5Ks+JE32iTe4LZ58y17UGBjpQQTCRw9YxMeo2UFLxLpBluyvLQ=="],
-
-    "@ai-sdk/deepgram/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.22", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-fFT1KfUUKktfAFm5mClJhS1oux9tP2qgzmEZVl5UdwltQ1LO/s8hd7znVrgKzivwv1s1FIPza0s9OpJaNB/vHw=="],
-
-    "@ai-sdk/deepinfra/@ai-sdk/openai-compatible": ["@ai-sdk/openai-compatible@1.0.33", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.21" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-2KMcR2xAul3u5dGZD7gONgbIki3Hg7Ey+sFu7gsiJ4U2iRU0GDV3ccNq79dTuAEXPDFcOWCUpW8A8jXc0kxJxQ=="],
-
-    "@ai-sdk/deepseek/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.22", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-fFT1KfUUKktfAFm5mClJhS1oux9tP2qgzmEZVl5UdwltQ1LO/s8hd7znVrgKzivwv1s1FIPza0s9OpJaNB/vHw=="],
-
-    "@ai-sdk/elevenlabs/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.22", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-fFT1KfUUKktfAFm5mClJhS1oux9tP2qgzmEZVl5UdwltQ1LO/s8hd7znVrgKzivwv1s1FIPza0s9OpJaNB/vHw=="],
-
-    "@ai-sdk/fireworks/@ai-sdk/openai-compatible": ["@ai-sdk/openai-compatible@1.0.34", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.22" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-AnGoxVNZ/E3EU4lW12rrufI6riqL2cEv4jk3OrjJ/i54XwR0CJU1V26jXAwxb+Pc+uZmYG++HM+gzXxPQZkMNQ=="],
-
-    "@ai-sdk/fireworks/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.22", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-fFT1KfUUKktfAFm5mClJhS1oux9tP2qgzmEZVl5UdwltQ1LO/s8hd7znVrgKzivwv1s1FIPza0s9OpJaNB/vHw=="],
-
-    "@ai-sdk/gateway/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.20", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-iXHVe0apM2zUEzauqJwqmpC37A5rihrStAih5Ks+JE32iTe4LZ58y17UGBjpQQTCRw9YxMeo2UFLxLpBluyvLQ=="],
-
-    "@ai-sdk/google-vertex/@ai-sdk/anthropic": ["@ai-sdk/anthropic@2.0.65", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.21" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-HqTPP59mLQ9U6jXQcx6EORkdc5FyZu34Sitkg6jNpyMYcRjStvfx4+NWq/qaR+OTwBFcccv8hvVii0CYkH2Lag=="],
-
-    "@ai-sdk/groq/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.20", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-iXHVe0apM2zUEzauqJwqmpC37A5rihrStAih5Ks+JE32iTe4LZ58y17UGBjpQQTCRw9YxMeo2UFLxLpBluyvLQ=="],
-
-    "@ai-sdk/mistral/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.20", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-iXHVe0apM2zUEzauqJwqmpC37A5rihrStAih5Ks+JE32iTe4LZ58y17UGBjpQQTCRw9YxMeo2UFLxLpBluyvLQ=="],
-
-    "@ai-sdk/openai/@ai-sdk/provider": ["@ai-sdk/provider@2.0.0", "", { "dependencies": { "json-schema": "^0.4.0" } }, "sha512-6o7Y2SeO9vFKB8lArHXehNuusnpddKPk7xqL7T2/b+OvXMRIXUO1rR4wcv1hAFUAT9avGZshty3Wlua/XA7TvA=="],
-
-    "@ai-sdk/openai/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.0", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.3", "zod-to-json-schema": "^3.24.1" }, "peerDependencies": { "zod": "^3.25.76 || ^4" } }, "sha512-BoQZtGcBxkeSH1zK+SRYNDtJPIPpacTeiMZqnG4Rv6xXjEwM0FH4MGs9c+PlhyEWmQCzjRM2HAotEydFhD4dYw=="],
-
-    "@ai-sdk/openai-compatible/@ai-sdk/provider": ["@ai-sdk/provider@2.0.0", "", { "dependencies": { "json-schema": "^0.4.0" } }, "sha512-6o7Y2SeO9vFKB8lArHXehNuusnpddKPk7xqL7T2/b+OvXMRIXUO1rR4wcv1hAFUAT9avGZshty3Wlua/XA7TvA=="],
-
-    "@ai-sdk/openai-compatible/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.0", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.3", "zod-to-json-schema": "^3.24.1" }, "peerDependencies": { "zod": "^3.25.76 || ^4" } }, "sha512-BoQZtGcBxkeSH1zK+SRYNDtJPIPpacTeiMZqnG4Rv6xXjEwM0FH4MGs9c+PlhyEWmQCzjRM2HAotEydFhD4dYw=="],
-
-    "@ai-sdk/perplexity/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.20", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-iXHVe0apM2zUEzauqJwqmpC37A5rihrStAih5Ks+JE32iTe4LZ58y17UGBjpQQTCRw9YxMeo2UFLxLpBluyvLQ=="],
-
-    "@ai-sdk/togetherai/@ai-sdk/openai-compatible": ["@ai-sdk/openai-compatible@1.0.32", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.20" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-YspqqyJPzHjqWrjt4y/Wgc2aJgCcQj5uIJgZpq2Ar/lH30cEVhgE+keePDbjKpetD9UwNggCj7u6kO3unS23OQ=="],
-
-    "@ai-sdk/togetherai/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.20", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-iXHVe0apM2zUEzauqJwqmpC37A5rihrStAih5Ks+JE32iTe4LZ58y17UGBjpQQTCRw9YxMeo2UFLxLpBluyvLQ=="],
-
-    "@ai-sdk/vercel/@ai-sdk/openai-compatible": ["@ai-sdk/openai-compatible@1.0.32", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.20" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-YspqqyJPzHjqWrjt4y/Wgc2aJgCcQj5uIJgZpq2Ar/lH30cEVhgE+keePDbjKpetD9UwNggCj7u6kO3unS23OQ=="],
-
-    "@ai-sdk/vercel/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.20", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-iXHVe0apM2zUEzauqJwqmpC37A5rihrStAih5Ks+JE32iTe4LZ58y17UGBjpQQTCRw9YxMeo2UFLxLpBluyvLQ=="],
-
-    "@ai-sdk/xai/@ai-sdk/openai-compatible": ["@ai-sdk/openai-compatible@1.0.30", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.20" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-thubwhRtv9uicAxSWwNpinM7hiL/0CkhL/ymPaHuKvI494J7HIzn8KQZQ2ymRz284WTIZnI7VMyyejxW4RMM6w=="],
-
-    "@ai-sdk/xai/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.20", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-iXHVe0apM2zUEzauqJwqmpC37A5rihrStAih5Ks+JE32iTe4LZ58y17UGBjpQQTCRw9YxMeo2UFLxLpBluyvLQ=="],
+    "@ai-sdk/provider-utils/@standard-schema/spec": ["@standard-schema/spec@1.1.0", "", {}, "sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w=="],

    "@astrojs/check/yargs": ["yargs@17.7.2", "", { "dependencies": { "cliui": "^8.0.1", "escalade": "^3.1.1", "get-caller-file": "^2.0.5", "require-directory": "^2.1.1", "string-width": "^4.2.3", "y18n": "^5.0.5", "yargs-parser": "^21.1.1" } }, "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w=="],

@@ -5329,16 +5285,6 @@

-    "accepts/mime-types": ["mime-types@2.1.35", "", { "dependencies": { "mime-db": "1.52.0" } }, "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw=="],
-
-    "ai/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.20", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-iXHVe0apM2zUEzauqJwqmpC37A5rihrStAih5Ks+JE32iTe4LZ58y17UGBjpQQTCRw9YxMeo2UFLxLpBluyvLQ=="],
-
-    "ai-gateway-provider/@ai-sdk/anthropic": ["@ai-sdk/anthropic@2.0.65", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.21" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-HqTPP59mLQ9U6jXQcx6EORkdc5FyZu34Sitkg6jNpyMYcRjStvfx4+NWq/qaR+OTwBFcccv8hvVii0CYkH2Lag=="],
-
-    "ai-gateway-provider/@ai-sdk/google-vertex": ["@ai-sdk/google-vertex@3.0.90", "", { "dependencies": { "@ai-sdk/anthropic": "2.0.56", "@ai-sdk/google": "2.0.46", "@ai-sdk/provider": "2.0.0", "@ai-sdk/provider-utils": "3.0.19", "google-auth-library": "^10.5.0" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-C9MLe1KZGg1ZbupV2osygHtL5qngyCDA6ATatunyfTbIe8TXKG8HGni/3O6ifbnI5qxTidIn150Ox7eIFZVMYg=="],
-
-    "ai-gateway-provider/@ai-sdk/openai": ["@ai-sdk/openai@2.0.89", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.20" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-4+qWkBCbL9HPKbgrUO/F2uXZ8GqrYxHa8SWEYIzxEJ9zvWw3ISr3t1/27O1i8MGSym+PzEyHBT48EV4LAwWaEw=="],
-
-    "ai-gateway-provider/@ai-sdk/openai-compatible": ["@ai-sdk/openai-compatible@1.0.34", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.22" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-AnGoxVNZ/E3EU4lW12rrufI6riqL2cEv4jk3OrjJ/i54XwR0CJU1V26jXAwxb+Pc+uZmYG++HM+gzXxPQZkMNQ=="],

    "ajv-keywords/ajv": ["ajv@6.14.0", "", { "dependencies": { "fast-deep-equal": "^3.1.1", "fast-json-stable-stringify": "^2.0.0", "json-schema-traverse": "^0.4.1", "uri-js": "^4.2.2" } }, "sha512-IWrosm/yrn43eiKqkfkHis7QioDleaXQHdDVPKg0FSwwd/DuvyX79TZnFOnYpB7dcsFAMmtFztZuXPDvSePkFw=="],

    "ansi-align/string-width": ["string-width@4.2.3", "", { "dependencies": { "emoji-regex": "^8.0.0", "is-fullwidth-code-point": "^3.0.0", "strip-ansi": "^6.0.1" } }, "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g=="],

@@ -5557,12 +5503,6 @@

    "nypm/tinyexec": ["tinyexec@1.0.2", "", {}, "sha512-W/KYk+NFhkmsYpuHq5JykngiOCnxeVL8v8dFnqxSD8qEEdRfXk1SDM6JzNqcERbcGYj9tMrDQBYV9cjgnunFIg=="],

-    "opencode/@ai-sdk/anthropic": ["@ai-sdk/anthropic@2.0.65", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.21" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-HqTPP59mLQ9U6jXQcx6EORkdc5FyZu34Sitkg6jNpyMYcRjStvfx4+NWq/qaR+OTwBFcccv8hvVii0CYkH2Lag=="],
-
-    "opencode/@ai-sdk/openai": ["@ai-sdk/openai@2.0.89", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.20" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-4+qWkBCbL9HPKbgrUO/F2uXZ8GqrYxHa8SWEYIzxEJ9zvWw3ISr3t1/27O1i8MGSym+PzEyHBT48EV4LAwWaEw=="],
-
-    "opencode/@ai-sdk/openai-compatible": ["@ai-sdk/openai-compatible@1.0.32", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.20" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-YspqqyJPzHjqWrjt4y/Wgc2aJgCcQj5uIJgZpq2Ar/lH30cEVhgE+keePDbjKpetD9UwNggCj7u6kO3unS23OQ=="],

    "opencode-gitlab-auth/open": ["open@10.2.0", "", { "dependencies": { "default-browser": "^5.2.1", "define-lazy-prop": "^3.0.0", "is-inside-container": "^1.0.0", "wsl-utils": "^0.1.0" } }, "sha512-YgBpdJHPyQ2UE5x+hlSXcnejzAvD0b22U2OuAP+8OnlJT+PjWPxtgmGqKKc+RgTM63U9gN0YzrYc71R2WT/hTA=="],

    "opencode-poe-auth/open": ["open@10.2.0", "", { "dependencies": { "default-browser": "^5.2.1", "define-lazy-prop": "^3.0.0", "is-inside-container": "^1.0.0", "wsl-utils": "^0.1.0" } }, "sha512-YgBpdJHPyQ2UE5x+hlSXcnejzAvD0b22U2OuAP+8OnlJT+PjWPxtgmGqKKc+RgTM63U9gN0YzrYc71R2WT/hTA=="],

@@ -5739,16 +5679,6 @@

    "@actions/github/@octokit/plugin-rest-endpoint-methods/@octokit/types": ["@octokit/types@12.6.0", "", { "dependencies": { "@octokit/openapi-types": "^20.0.0" } }, "sha512-1rhSOfRa6H9w4YwK0yrf5faDaDTb+yLyBUKOCV4xtCDB5VmIPqd/v9yr9o6SAzOAlRxMiRiCic6JVM1/kunVkw=="],

-    "@ai-sdk/anthropic/@ai-sdk/provider-utils/@standard-schema/spec": ["@standard-schema/spec@1.1.0", "", {}, "sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w=="],
-
-    "@ai-sdk/anthropic/@ai-sdk/provider-utils/zod-to-json-schema": ["zod-to-json-schema@3.25.1", "", { "peerDependencies": { "zod": "^3.25 || ^4" } }, "sha512-pM/SU9d3YAggzi6MtR4h7ruuQlqKtad8e9S0fmxcMi+ueAK5Korys/aWcV9LIIHTVbj01NdzxcnXSN+O74ZIVA=="],
-
-    "@ai-sdk/azure/@ai-sdk/provider-utils/@standard-schema/spec": ["@standard-schema/spec@1.1.0", "", {}, "sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w=="],
-
-    "@ai-sdk/cerebras/@ai-sdk/provider-utils/@standard-schema/spec": ["@standard-schema/spec@1.1.0", "", {}, "sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w=="],
-
-    "@ai-sdk/cohere/@ai-sdk/provider-utils/@standard-schema/spec": ["@standard-schema/spec@1.1.0", "", {}, "sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w=="],
-
-    "@ai-sdk/deepgram/@ai-sdk/provider-utils/@standard-schema/spec": ["@standard-schema/spec@1.1.0", "", {}, "sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w=="],
-
-    "@ai-sdk/deepseek/@ai-sdk/provider-utils/@standard-schema/spec": ["@standard-schema/spec@1.1.0", "", {}, "sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w=="],

@@ -5757,28 +5687,6 @@

-    "@ai-sdk/fireworks/@ai-sdk/provider-utils/@standard-schema/spec": ["@standard-schema/spec@1.1.0", "", {}, "sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w=="],
-
-    "@ai-sdk/gateway/@ai-sdk/provider-utils/@standard-schema/spec": ["@standard-schema/spec@1.1.0", "", {}, "sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w=="],
-
-    "@ai-sdk/groq/@ai-sdk/provider-utils/@standard-schema/spec": ["@standard-schema/spec@1.1.0", "", {}, "sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w=="],
-
-    "@ai-sdk/mistral/@ai-sdk/provider-utils/@standard-schema/spec": ["@standard-schema/spec@1.1.0", "", {}, "sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w=="],
-
-    "@ai-sdk/openai-compatible/@ai-sdk/provider-utils/@standard-schema/spec": ["@standard-schema/spec@1.1.0", "", {}, "sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w=="],
-
-    "@ai-sdk/openai-compatible/@ai-sdk/provider-utils/zod-to-json-schema": ["zod-to-json-schema@3.25.1", "", { "peerDependencies": { "zod": "^3.25 || ^4" } }, "sha512-pM/SU9d3YAggzi6MtR4h7ruuQlqKtad8e9S0fmxcMi+ueAK5Korys/aWcV9LIIHTVbj01NdzxcnXSN+O74ZIVA=="],
-
-    "@ai-sdk/openai/@ai-sdk/provider-utils/@standard-schema/spec": ["@standard-schema/spec@1.1.0", "", {}, "sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w=="],
-
-    "@ai-sdk/openai/@ai-sdk/provider-utils/zod-to-json-schema": ["zod-to-json-schema@3.25.1", "", { "peerDependencies": { "zod": "^3.25 || ^4" } }, "sha512-pM/SU9d3YAggzi6MtR4h7ruuQlqKtad8e9S0fmxcMi+ueAK5Korys/aWcV9LIIHTVbj01NdzxcnXSN+O74ZIVA=="],
-
-    "@ai-sdk/perplexity/@ai-sdk/provider-utils/@standard-schema/spec": ["@standard-schema/spec@1.1.0", "", {}, "sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w=="],
-
-    "@ai-sdk/togetherai/@ai-sdk/provider-utils/@standard-schema/spec": ["@standard-schema/spec@1.1.0", "", {}, "sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w=="],
-
-    "@ai-sdk/vercel/@ai-sdk/provider-utils/@standard-schema/spec": ["@standard-schema/spec@1.1.0", "", {}, "sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w=="],
-
-    "@ai-sdk/xai/@ai-sdk/provider-utils/@standard-schema/spec": ["@standard-schema/spec@1.1.0", "", {}, "sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w=="],

    "@astrojs/check/yargs/cliui": ["cliui@8.0.1", "", { "dependencies": { "string-width": "^4.2.0", "strip-ansi": "^6.0.1", "wrap-ansi": "^7.0.0" } }, "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ=="],

    "@astrojs/check/yargs/string-width": ["string-width@4.2.3", "", { "dependencies": { "emoji-regex": "^8.0.0", "is-fullwidth-code-point": "^3.0.0", "strip-ansi": "^6.0.1" } }, "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g=="],

@@ -6211,20 +6119,6 @@

-    "accepts/mime-types/mime-db": ["mime-db@1.52.0", "", {}, "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg=="],
-
-    "ai-gateway-provider/@ai-sdk/google-vertex/@ai-sdk/anthropic": ["@ai-sdk/anthropic@2.0.56", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@ai-sdk/provider-utils": "3.0.19" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-XHJKu0Yvfu9SPzRfsAFESa+9T7f2YJY6TxykKMfRsAwpeWAiX/Gbx5J5uM15AzYC3Rw8tVP3oH+j7jEivENirQ=="],
-
-    "ai-gateway-provider/@ai-sdk/google-vertex/@ai-sdk/google": ["@ai-sdk/google@2.0.46", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@ai-sdk/provider-utils": "3.0.19" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-8PK6u4sGE/kXebd7ZkTp+0aya4kNqzoqpS5m7cHY2NfTK6fhPc6GNvE+MZIZIoHQTp5ed86wGBdeBPpFaaUtyg=="],
-
-    "ai-gateway-provider/@ai-sdk/google-vertex/@ai-sdk/provider": ["@ai-sdk/provider@2.0.0", "", { "dependencies": { "json-schema": "^0.4.0" } }, "sha512-6o7Y2SeO9vFKB8lArHXehNuusnpddKPk7xqL7T2/b+OvXMRIXUO1rR4wcv1hAFUAT9avGZshty3Wlua/XA7TvA=="],
-
-    "ai-gateway-provider/@ai-sdk/google-vertex/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.19", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-W41Wc9/jbUVXVwCN/7bWa4IKe8MtxO3EyA0Hfhx6grnmiYlCvpI8neSYWFE0zScXJkgA/YK3BRybzgyiXuu6JA=="],
-
-    "ai-gateway-provider/@ai-sdk/openai/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.20", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-iXHVe0apM2zUEzauqJwqmpC37A5rihrStAih5Ks+JE32iTe4LZ58y17UGBjpQQTCRw9YxMeo2UFLxLpBluyvLQ=="],
-
-    "ai-gateway-provider/@ai-sdk/openai-compatible/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.22", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-fFT1KfUUKktfAFm5mClJhS1oux9tP2qgzmEZVl5UdwltQ1LO/s8hd7znVrgKzivwv1s1FIPza0s9OpJaNB/vHw=="],
-
-    "ai/@ai-sdk/provider-utils/@standard-schema/spec": ["@standard-schema/spec@1.1.0", "", {}, "sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w=="],

    "ajv-keywords/ajv/json-schema-traverse": ["json-schema-traverse@0.4.1", "", {}, "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg=="],

    "ansi-align/string-width/emoji-regex": ["emoji-regex@8.0.0", "", {}, "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A=="],

@@ -6321,10 +6215,6 @@

    "opencode-poe-auth/open/wsl-utils": ["wsl-utils@0.1.0", "", { "dependencies": { "is-wsl": "^3.1.0" } }, "sha512-h3Fbisa2nKGPxCpm89Hk33lBLsnaGBvctQopaBSOW/uIs6FTe1ATyAnKFJrzVs9vpGdsTe73WF3V4lIsk4Gacw=="],

-    "opencode/@ai-sdk/openai/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.20", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-iXHVe0apM2zUEzauqJwqmpC37A5rihrStAih5Ks+JE32iTe4LZ58y17UGBjpQQTCRw9YxMeo2UFLxLpBluyvLQ=="],
-
-    "opencode/@ai-sdk/openai-compatible/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.20", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-iXHVe0apM2zUEzauqJwqmpC37A5rihrStAih5Ks+JE32iTe4LZ58y17UGBjpQQTCRw9YxMeo2UFLxLpBluyvLQ=="],

    "opencontrol/@modelcontextprotocol/sdk/express": ["express@5.2.1", "", { "dependencies": { "accepts": "^2.0.0", "body-parser": "^2.2.1", "content-disposition": "^1.0.0", "content-type": "^1.0.5", "cookie": "^0.7.1", "cookie-signature": "^1.2.1", "debug": "^4.4.0", "depd": "^2.0.0", "encodeurl": "^2.0.0", "escape-html": "^1.0.3", "etag": "^1.8.1", "finalhandler": "^2.1.0", "fresh": "^2.0.0", "http-errors": "^2.0.0", "merge-descriptors": "^2.0.0", "mime-types": "^3.0.0", "on-finished": "^2.4.1", "once": "^1.4.0", "parseurl": "^1.3.3", "proxy-addr": "^2.0.7", "qs": "^6.14.0", "range-parser": "^1.2.1", "router": "^2.2.0", "send": "^1.1.0", "serve-static": "^2.2.0", "statuses": "^2.0.1", "type-is": "^2.0.1", "vary": "^1.1.2" } }, "sha512-hIS4idWWai69NezIdRt2xFVofaF4j+6INOpJlVOLDO8zXGpUVEVzIYk12UUi2JzjEzWL3IOAxcTubgz9Po0yXw=="],

    "opencontrol/@modelcontextprotocol/sdk/express-rate-limit": ["express-rate-limit@7.5.1", "", { "peerDependencies": { "express": ">= 4.11" } }, "sha512-7iN8iPMDzOMHPUYllBEsQdWVB6fPDMPqwjBaFrgr4Jgr/+okjvzAy+UHlYYL/Vs0OsOrMkwS6PJDkFlJwoxUnw=="],

@@ -6581,12 +6471,6 @@

    "@solidjs/start/shiki/@shikijs/engine-javascript/oniguruma-to-es": ["oniguruma-to-es@2.3.0", "", { "dependencies": { "emoji-regex-xs": "^1.0.0", "regex": "^5.1.1", "regex-recursion": "^5.1.1" } }, "sha512-bwALDxriqfKGfUufKGGepCzu9x7nJQuoRoAFp4AnwehhC2crqrDIAP/uN2qdlsAvSMpeRC3+Yzhqc7hLmle5+g=="],

-    "ai-gateway-provider/@ai-sdk/google-vertex/@ai-sdk/provider-utils/@standard-schema/spec": ["@standard-schema/spec@1.1.0", "", {}, "sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w=="],
-
-    "ai-gateway-provider/@ai-sdk/openai-compatible/@ai-sdk/provider-utils/@standard-schema/spec": ["@standard-schema/spec@1.1.0", "", {}, "sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w=="],
-
-    "ai-gateway-provider/@ai-sdk/openai/@ai-sdk/provider-utils/@standard-schema/spec": ["@standard-schema/spec@1.1.0", "", {}, "sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w=="],

    "ansi-align/string-width/strip-ansi/ansi-regex": ["ansi-regex@5.0.1", "", {}, "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ=="],

    "app-builder-lib/@electron/get/fs-extra/universalify": ["universalify@0.1.2", "", {}, "sha512-rBJeI5CXAlmy1pV+617WB9J63U6XcazHHF2f2dbJix4XzpUF0RS3Zbj0FGIOCAva5P/d/GBOYaACQ1w+0azUkg=="],

@@ -6639,10 +6523,6 @@

    "js-beautify/glob/path-scurry/lru-cache": ["lru-cache@10.4.3", "", {}, "sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ=="],

-    "opencode/@ai-sdk/openai-compatible/@ai-sdk/provider-utils/@standard-schema/spec": ["@standard-schema/spec@1.1.0", "", {}, "sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w=="],
-
-    "opencode/@ai-sdk/openai/@ai-sdk/provider-utils/@standard-schema/spec": ["@standard-schema/spec@1.1.0", "", {}, "sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w=="],

    "opencontrol/@modelcontextprotocol/sdk/express/accepts": ["accepts@2.0.0", "", { "dependencies": { "mime-types": "^3.0.0", "negotiator": "^1.0.0" } }, "sha512-5cvg6CtKwfgdmVqY1WIiXKc3Q1bkRqGLi+2W/6ao+6Y7gu/RCwRuAhGEzh5B4KlszSuTLgZYuqFqo5bImjNKng=="],

    "opencontrol/@modelcontextprotocol/sdk/express/body-parser": ["body-parser@2.2.2", "", { "dependencies": { "bytes": "^3.1.2", "content-type": "^1.0.5", "debug": "^4.4.3", "http-errors": "^2.0.0", "iconv-lite": "^0.7.0", "on-finished": "^2.4.1", "qs": "^6.14.1", "raw-body": "^3.0.1", "type-is": "^2.0.1" } }, "sha512-oP5VkATKlNwcgvxi0vM0p/D3n2C3EReYVX+DNYs5TjZFn/oQt2j+4sVJtSMr18pdRr8wjTcBl6LoV+FUwzPmNA=="],

@@ -46,7 +46,7 @@
    "drizzle-kit": "1.0.0-beta.19-d95b7a4",
    "drizzle-orm": "1.0.0-beta.19-d95b7a4",
    "effect": "4.0.0-beta.37",
-    "ai": "5.0.124",
+    "ai": "6.0.138",
    "hono": "4.10.7",
    "hono-openapi": "1.1.2",
    "fuzzysort": "3.1.0",
@@ -113,8 +113,8 @@
  },
  "patchedDependencies": {
    "@standard-community/standard-openapi@0.2.9": "patches/@standard-community%2Fstandard-openapi@0.2.9.patch",
-    "@openrouter/ai-sdk-provider@1.5.4": "patches/@openrouter%2Fai-sdk-provider@1.5.4.patch",
-    "@ai-sdk/xai@2.0.51": "patches/@ai-sdk%2Fxai@2.0.51.patch",
-    "solid-js@1.9.10": "patches/solid-js@1.9.10.patch"
+    "solid-js@1.9.10": "patches/solid-js@1.9.10.patch",
+    "@ai-sdk/provider-utils@4.0.21": "patches/@ai-sdk%2Fprovider-utils@4.0.21.patch",
+    "@ai-sdk/anthropic@3.0.64": "patches/@ai-sdk%2Fanthropic@3.0.64.patch"
  }
}
@@ -17,9 +17,9 @@
    "@typescript/native-preview": "catalog:"
  },
  "dependencies": {
-    "@ai-sdk/anthropic": "2.0.0",
-    "@ai-sdk/openai": "2.0.2",
-    "@ai-sdk/openai-compatible": "1.0.1",
+    "@ai-sdk/anthropic": "3.0.64",
+    "@ai-sdk/openai": "3.0.48",
+    "@ai-sdk/openai-compatible": "2.0.37",
    "@hono/zod-validator": "catalog:",
    "@opencode-ai/console-core": "workspace:*",
    "@opencode-ai/console-resource": "workspace:*",
|
|
|
|||
|
|
@@ -68,25 +68,25 @@
"@actions/core": "1.11.1",
"@actions/github": "6.0.1",
"@agentclientprotocol/sdk": "0.14.1",
"@ai-sdk/amazon-bedrock": "3.0.82",
"@ai-sdk/anthropic": "2.0.65",
"@ai-sdk/azure": "2.0.91",
"@ai-sdk/cerebras": "1.0.36",
"@ai-sdk/cohere": "2.0.22",
"@ai-sdk/deepinfra": "1.0.36",
"@ai-sdk/gateway": "2.0.30",
"@ai-sdk/google": "2.0.54",
"@ai-sdk/google-vertex": "3.0.106",
"@ai-sdk/groq": "2.0.34",
"@ai-sdk/mistral": "2.0.27",
"@ai-sdk/openai": "2.0.89",
"@ai-sdk/openai-compatible": "1.0.32",
"@ai-sdk/perplexity": "2.0.23",
"@ai-sdk/provider": "2.0.1",
"@ai-sdk/provider-utils": "3.0.21",
"@ai-sdk/togetherai": "1.0.34",
"@ai-sdk/vercel": "1.0.33",
"@ai-sdk/xai": "2.0.51",
"@ai-sdk/amazon-bedrock": "4.0.83",
"@ai-sdk/anthropic": "3.0.64",
"@ai-sdk/azure": "3.0.49",
"@ai-sdk/cerebras": "2.0.41",
"@ai-sdk/cohere": "3.0.27",
"@ai-sdk/deepinfra": "2.0.41",
"@ai-sdk/gateway": "3.0.80",
"@ai-sdk/google": "3.0.53",
"@ai-sdk/google-vertex": "4.0.95",
"@ai-sdk/groq": "3.0.31",
"@ai-sdk/mistral": "3.0.27",
"@ai-sdk/openai": "3.0.48",
"@ai-sdk/openai-compatible": "2.0.37",
"@ai-sdk/perplexity": "3.0.26",
"@ai-sdk/provider": "3.0.8",
"@ai-sdk/provider-utils": "4.0.21",
"@ai-sdk/togetherai": "2.0.41",
"@ai-sdk/vercel": "2.0.39",
"@ai-sdk/xai": "3.0.74",
"@aws-sdk/credential-providers": "3.993.0",
"@clack/prompts": "1.0.0-alpha.1",
"@effect/platform-node": "catalog:",

@@ -100,7 +100,7 @@
"@opencode-ai/script": "workspace:*",
"@opencode-ai/sdk": "workspace:*",
"@opencode-ai/util": "workspace:*",
"@openrouter/ai-sdk-provider": "1.5.4",
"@openrouter/ai-sdk-provider": "2.3.3",
"@opentui/core": "0.1.90",
"@opentui/solid": "0.1.90",
"@parcel/watcher": "2.5.1",

@@ -110,7 +110,7 @@
"@standard-schema/spec": "1.0.0",
"@zip.js/zip.js": "2.7.62",
"ai": "catalog:",
"ai-gateway-provider": "2.3.1",
"ai-gateway-provider": "3.1.2",
"bonjour-service": "1.3.0",
"bun-pty": "0.4.8",
"chokidar": "4.0.3",

@@ -121,7 +121,7 @@
"drizzle-orm": "catalog:",
"effect": "catalog:",
"fuzzysort": "3.1.0",
"gitlab-ai-provider": "5.3.3",
"gitlab-ai-provider": "6.0.0",
"glob": "13.0.5",
"google-auth-library": "10.5.0",
"gray-matter": "4.0.3",
@@ -9,6 +9,7 @@ import { BunProc } from "../bun"
import { Hash } from "../util/hash"
import { Plugin } from "../plugin"
import { NamedError } from "@opencode-ai/util/error"
import { type LanguageModelV3 } from "@ai-sdk/provider"
import { ModelsDev } from "./models"
import { Auth } from "../auth"
import { Env } from "../env"

@@ -28,7 +29,7 @@ import { createVertex } from "@ai-sdk/google-vertex"
import { createVertexAnthropic } from "@ai-sdk/google-vertex/anthropic"
import { createOpenAI } from "@ai-sdk/openai"
import { createOpenAICompatible } from "@ai-sdk/openai-compatible"
import { createOpenRouter, type LanguageModelV2 } from "@openrouter/ai-sdk-provider"
import { createOpenRouter } from "@openrouter/ai-sdk-provider"
import { createOpenaiCompatible as createGitHubCopilotOpenAICompatible } from "./sdk/copilot"
import { createXai } from "@ai-sdk/xai"
import { createMistral } from "@ai-sdk/mistral"

@@ -109,7 +110,11 @@ export namespace Provider {
})
}

const BUNDLED_PROVIDERS: Record<string, (options: any) => SDK> = {
type BundledSDK = {
languageModel(modelId: string): LanguageModelV3
}

const BUNDLED_PROVIDERS: Record<string, (options: any) => BundledSDK> = {
"@ai-sdk/amazon-bedrock": createAmazonBedrock,
"@ai-sdk/anthropic": createAnthropic,
"@ai-sdk/azure": createAzure,

@@ -130,7 +135,6 @@ export namespace Provider {
"@ai-sdk/perplexity": createPerplexity,
"@ai-sdk/vercel": createVercel,
"gitlab-ai-provider": createGitLab,
// @ts-ignore (TODO: kill this code so we dont have to maintain it)
"@ai-sdk/github-copilot": createGitHubCopilotOpenAICompatible,
}

@@ -591,7 +595,12 @@ export namespace Provider {

if (!result.models.length) {
log.info("gitlab model discovery skipped: no models found", {
project: result.project ? { id: result.project.id, path: result.project.pathWithNamespace } : null,
project: result.project
? {
id: result.project.id,
path: result.project.pathWithNamespace,
}
: null,
})
return {}
}

@@ -619,8 +628,20 @@ export namespace Provider {
reasoning: true,
attachment: true,
toolcall: true,
input: { text: true, audio: false, image: true, video: false, pdf: true },
output: { text: true, audio: false, image: false, video: false, pdf: false },
input: {
text: true,
audio: false,
image: true,
video: false,
pdf: true,
},
output: {
text: true,
audio: false,
image: false,
video: false,
pdf: false,
},
interleaved: false,
},
release_date: "",

@@ -930,17 +951,17 @@ export namespace Provider {
}

const providers: Record<ProviderID, Info> = {} as Record<ProviderID, Info>
const languages = new Map<string, LanguageModelV2>()
const languages = new Map<string, LanguageModelV3>()
const modelLoaders: {
[providerID: string]: CustomModelLoader
} = {}
const varsLoaders: {
[providerID: string]: CustomVarsLoader
} = {}
const sdk = new Map<string, BundledSDK>()
const discoveryLoaders: {
[providerID: string]: CustomDiscoverModels
} = {}
const sdk = new Map<string, SDK>()

log.info("init")

@@ -1232,7 +1253,13 @@ export namespace Provider {
...model.headers,
}

const key = Hash.fast(JSON.stringify({ providerID: model.providerID, npm: model.api.npm, options }))
const key = Hash.fast(
JSON.stringify({
providerID: model.providerID,
npm: model.api.npm,
options,
}),
)
const existing = s.sdk.get(key)
if (existing) return existing

@@ -1285,7 +1312,10 @@ export namespace Provider {

const bundledFn = BUNDLED_PROVIDERS[model.api.npm]
if (bundledFn) {
log.info("using bundled provider", { providerID: model.providerID, pkg: model.api.npm })
log.info("using bundled provider", {
providerID: model.providerID,
pkg: model.api.npm,
})
const loaded = bundledFn({
name: model.providerID,
...options,

@@ -1325,7 +1355,10 @@
const provider = s.providers[providerID]
if (!provider) {
const availableProviders = Object.keys(s.providers)
const matches = fuzzysort.go(providerID, availableProviders, { limit: 3, threshold: -10000 })
const matches = fuzzysort.go(providerID, availableProviders, {
limit: 3,
threshold: -10000,
})
const suggestions = matches.map((m) => m.target)
throw new ModelNotFoundError({ providerID, modelID, suggestions })
}

@@ -1333,14 +1366,17 @@
const info = provider.models[modelID]
if (!info) {
const availableModels = Object.keys(provider.models)
const matches = fuzzysort.go(modelID, availableModels, { limit: 3, threshold: -10000 })
const matches = fuzzysort.go(modelID, availableModels, {
limit: 3,
threshold: -10000,
})
const suggestions = matches.map((m) => m.target)
throw new ModelNotFoundError({ providerID, modelID, suggestions })
}
return info
}

export async function getLanguage(model: Model): Promise<LanguageModelV2> {
export async function getLanguage(model: Model): Promise<LanguageModelV3> {
const s = await state()
const key = `${model.providerID}/${model.id}`
if (s.models.has(key)) return s.models.get(key)!

@@ -1350,7 +1386,10 @@

try {
const language = s.modelLoaders[model.providerID]
? await s.modelLoaders[model.providerID](sdk, model.api.id, { ...provider.options, ...model.options })
? await s.modelLoaders[model.providerID](sdk, model.api.id, {
...provider.options,
...model.options,
})
: sdk.languageModel(model.api.id)
s.models.set(key, language)
return language

@@ -1457,9 +1496,9 @@
if (cfg.model) return parseModel(cfg.model)

const providers = await list()
const recent = (await Filesystem.readJson<{ recent?: { providerID: ProviderID; modelID: ModelID }[] }>(
path.join(Global.Path.state, "model.json"),
)
const recent = (await Filesystem.readJson<{
recent?: { providerID: ProviderID; modelID: ModelID }[]
}>(path.join(Global.Path.state, "model.json"))
.then((x) => (Array.isArray(x.recent) ? x.recent : []))
.catch(() => [])) as { providerID: ProviderID; modelID: ModelID }[]
for (const entry of recent) {
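The provider hunks above memoize one SDK instance per provider/package/options tuple, keyed by a hash of that tuple. A minimal sketch of the caching pattern, assuming the BundledSDK type and Hash.fast helper shown in the diff (sdkCache and getSDK are illustrative names, not part of the change):

// Sketch only: reuse a constructed SDK when the same provider + options recur.
const sdkCache = new Map<string, BundledSDK>()
function getSDK(model: { providerID: string; api: { npm: string } }, options: Record<string, unknown>): BundledSDK {
  // Identical inputs hash to the same key, so repeated lookups hit the cache.
  const key = Hash.fast(JSON.stringify({ providerID: model.providerID, npm: model.api.npm, options }))
  const existing = sdkCache.get(key)
  if (existing) return existing
  const factory = BUNDLED_PROVIDERS[model.api.npm] // factories keyed by npm package name
  if (!factory) throw new Error(`no bundled provider for ${model.api.npm}`)
  const sdk = factory({ name: model.providerID, ...options })
  sdkCache.set(key, sdk)
  return sdk
}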
@@ -1,16 +1,16 @@
import {
type LanguageModelV2Prompt,
type SharedV2ProviderMetadata,
type LanguageModelV3Prompt,
type SharedV3ProviderOptions,
UnsupportedFunctionalityError,
} from "@ai-sdk/provider"
import type { OpenAICompatibleChatPrompt } from "./openai-compatible-api-types"
import { convertToBase64 } from "@ai-sdk/provider-utils"

function getOpenAIMetadata(message: { providerOptions?: SharedV2ProviderMetadata }) {
function getOpenAIMetadata(message: { providerOptions?: SharedV3ProviderOptions }) {
return message?.providerOptions?.copilot ?? {}
}

export function convertToOpenAICompatibleChatMessages(prompt: LanguageModelV2Prompt): OpenAICompatibleChatPrompt {
export function convertToOpenAICompatibleChatMessages(prompt: LanguageModelV3Prompt): OpenAICompatibleChatPrompt {
const messages: OpenAICompatibleChatPrompt = []
for (const { role, content, ...message } of prompt) {
const metadata = getOpenAIMetadata({ ...message })

@@ -127,6 +127,9 @@ export function convertToOpenAICompatibleChatMessages(prompt: LanguageModelV2Prompt

case "tool": {
for (const toolResponse of content) {
if (toolResponse.type === "tool-approval-response") {
continue
}
const output = toolResponse.output

let contentValue: string

@@ -135,6 +138,9 @@ export function convertToOpenAICompatibleChatMessages(prompt: LanguageModelV2Prompt
case "error-text":
contentValue = output.value
break
case "execution-denied":
contentValue = output.reason ?? "Tool execution denied."
break
case "content":
case "json":
case "error-json":
@@ -1,6 +1,8 @@
import type { LanguageModelV2FinishReason } from "@ai-sdk/provider"
import type { LanguageModelV3FinishReason } from "@ai-sdk/provider"

export function mapOpenAICompatibleFinishReason(finishReason: string | null | undefined): LanguageModelV2FinishReason {
export function mapOpenAICompatibleFinishReason(
finishReason: string | null | undefined,
): LanguageModelV3FinishReason["unified"] {
switch (finishReason) {
case "stop":
return "stop"

@@ -12,6 +14,6 @@ export function mapOpenAICompatibleFinishReason(finishReason: string | null | undefined
case "tool_calls":
return "tool-calls"
default:
return "unknown"
return "other"
}
}
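In AI SDK v6 a finish reason is no longer a bare string: providers report a { unified, raw } pair, and the mapper above now returns only the unified member. A hedged sketch of how a caller assembles the full value, mirroring the chat-model hunks below (choice stands in for a parsed API response choice):

// Sketch: pair the normalized reason with the provider's untranslated string.
const finishReason = {
  unified: mapOpenAICompatibleFinishReason(choice.finish_reason), // "stop" | "tool-calls" | "other" | ...
  raw: choice.finish_reason ?? undefined, // raw provider value, kept for debugging
}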
@@ -1,12 +1,12 @@
import {
APICallError,
InvalidResponseDataError,
type LanguageModelV2,
type LanguageModelV2CallWarning,
type LanguageModelV2Content,
type LanguageModelV2FinishReason,
type LanguageModelV2StreamPart,
type SharedV2ProviderMetadata,
type LanguageModelV3,
type LanguageModelV3CallOptions,
type LanguageModelV3Content,
type LanguageModelV3StreamPart,
type SharedV3ProviderMetadata,
type SharedV3Warning,
} from "@ai-sdk/provider"
import {
combineHeaders,

@@ -47,11 +47,11 @@ export type OpenAICompatibleChatConfig = {
/**
* The supported URLs for the model.
*/
supportedUrls?: () => LanguageModelV2["supportedUrls"]
supportedUrls?: () => LanguageModelV3["supportedUrls"]
}

export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
readonly specificationVersion = "v2"
export class OpenAICompatibleChatLanguageModel implements LanguageModelV3 {
readonly specificationVersion = "v3"

readonly supportsStructuredOutputs: boolean

@@ -98,8 +98,8 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
seed,
toolChoice,
tools,
}: Parameters<LanguageModelV2["doGenerate"]>[0]) {
const warnings: LanguageModelV2CallWarning[] = []
}: LanguageModelV3CallOptions) {
const warnings: SharedV3Warning[] = []

// Parse provider options
const compatibleOptions = Object.assign(

@@ -116,13 +116,13 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
)

if (topK != null) {
warnings.push({ type: "unsupported-setting", setting: "topK" })
warnings.push({ type: "unsupported", feature: "topK" })
}

if (responseFormat?.type === "json" && responseFormat.schema != null && !this.supportsStructuredOutputs) {
warnings.push({
type: "unsupported-setting",
setting: "responseFormat",
type: "unsupported",
feature: "responseFormat",
details: "JSON response format schema is only supported with structuredOutputs",
})
}

@@ -189,9 +189,7 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
}
}

async doGenerate(
options: Parameters<LanguageModelV2["doGenerate"]>[0],
): Promise<Awaited<ReturnType<LanguageModelV2["doGenerate"]>>> {
async doGenerate(options: LanguageModelV3CallOptions) {
const { args, warnings } = await this.getArgs({ ...options })

const body = JSON.stringify(args)

@@ -214,7 +212,7 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
})

const choice = responseBody.choices[0]
const content: Array<LanguageModelV2Content> = []
const content: Array<LanguageModelV3Content> = []

// text content:
const text = choice.message.content

@@ -257,7 +255,7 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
}

// provider metadata:
const providerMetadata: SharedV2ProviderMetadata = {
const providerMetadata: SharedV3ProviderMetadata = {
[this.providerOptionsName]: {},
...(await this.config.metadataExtractor?.extractMetadata?.({
parsedBody: rawResponse,

@@ -275,13 +273,23 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {

return {
content,
finishReason: mapOpenAICompatibleFinishReason(choice.finish_reason),
finishReason: {
unified: mapOpenAICompatibleFinishReason(choice.finish_reason),
raw: choice.finish_reason ?? undefined,
},
usage: {
inputTokens: responseBody.usage?.prompt_tokens ?? undefined,
outputTokens: responseBody.usage?.completion_tokens ?? undefined,
totalTokens: responseBody.usage?.total_tokens ?? undefined,
reasoningTokens: responseBody.usage?.completion_tokens_details?.reasoning_tokens ?? undefined,
cachedInputTokens: responseBody.usage?.prompt_tokens_details?.cached_tokens ?? undefined,
inputTokens: {
total: responseBody.usage?.prompt_tokens ?? undefined,
noCache: undefined,
cacheRead: responseBody.usage?.prompt_tokens_details?.cached_tokens ?? undefined,
cacheWrite: undefined,
},
outputTokens: {
total: responseBody.usage?.completion_tokens ?? undefined,
text: undefined,
reasoning: responseBody.usage?.completion_tokens_details?.reasoning_tokens ?? undefined,
},
raw: responseBody.usage ?? undefined,
},
providerMetadata,
request: { body },

@@ -294,9 +302,7 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
}
}

async doStream(
options: Parameters<LanguageModelV2["doStream"]>[0],
): Promise<Awaited<ReturnType<LanguageModelV2["doStream"]>>> {
async doStream(options: LanguageModelV3CallOptions) {
const { args, warnings } = await this.getArgs({ ...options })

const body = {

@@ -332,7 +338,13 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
hasFinished: boolean
}> = []

let finishReason: LanguageModelV2FinishReason = "unknown"
let finishReason: {
unified: ReturnType<typeof mapOpenAICompatibleFinishReason>
raw: string | undefined
} = {
unified: "other",
raw: undefined,
}
const usage: {
completionTokens: number | undefined
completionTokensDetails: {

@@ -366,7 +378,7 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {

return {
stream: response.pipeThrough(
new TransformStream<ParseResult<z.infer<typeof this.chunkSchema>>, LanguageModelV2StreamPart>({
new TransformStream<ParseResult<z.infer<typeof this.chunkSchema>>, LanguageModelV3StreamPart>({
start(controller) {
controller.enqueue({ type: "stream-start", warnings })
},

@@ -380,7 +392,10 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {

// handle failed chunk parsing / validation:
if (!chunk.success) {
finishReason = "error"
finishReason = {
unified: "error",
raw: undefined,
}
controller.enqueue({ type: "error", error: chunk.error })
return
}

@@ -390,7 +405,10 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {

// handle error chunks:
if ("error" in value) {
finishReason = "error"
finishReason = {
unified: "error",
raw: undefined,
}
controller.enqueue({ type: "error", error: value.error.message })
return
}

@@ -435,7 +453,10 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
const choice = value.choices[0]

if (choice?.finish_reason != null) {
finishReason = mapOpenAICompatibleFinishReason(choice.finish_reason)
finishReason = {
unified: mapOpenAICompatibleFinishReason(choice.finish_reason),
raw: choice.finish_reason ?? undefined,
}
}

if (choice?.delta == null) {

@@ -652,7 +673,7 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
})
}

const providerMetadata: SharedV2ProviderMetadata = {
const providerMetadata: SharedV3ProviderMetadata = {
[providerOptionsName]: {},
// Include reasoning_opaque for Copilot multi-turn reasoning
...(reasoningOpaque ? { copilot: { reasoningOpaque } } : {}),

@@ -671,11 +692,25 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
type: "finish",
finishReason,
usage: {
inputTokens: usage.promptTokens ?? undefined,
outputTokens: usage.completionTokens ?? undefined,
totalTokens: usage.totalTokens ?? undefined,
reasoningTokens: usage.completionTokensDetails.reasoningTokens ?? undefined,
cachedInputTokens: usage.promptTokensDetails.cachedTokens ?? undefined,
inputTokens: {
total: usage.promptTokens,
noCache:
usage.promptTokens != undefined && usage.promptTokensDetails.cachedTokens != undefined
? usage.promptTokens - usage.promptTokensDetails.cachedTokens
: undefined,
cacheRead: usage.promptTokensDetails.cachedTokens,
cacheWrite: undefined,
},
outputTokens: {
total: usage.completionTokens,
text: undefined,
reasoning: usage.completionTokensDetails.reasoningTokens,
},
raw: {
prompt_tokens: usage.promptTokens ?? null,
completion_tokens: usage.completionTokens ?? null,
total_tokens: usage.totalTokens ?? null,
},
},
providerMetadata,
})
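The v6 usage report in the hunks above replaces flat token counters with nested input/output objects; noCache is derived by subtracting cached reads from the prompt total when both are known. A small worked sketch under that assumption (the numbers are illustrative, not from the diff):

// 120 prompt tokens of which 80 were served from cache -> 40 fresh input tokens.
const promptTokens = 120
const cachedTokens = 80
const usage = {
  inputTokens: { total: promptTokens, noCache: promptTokens - cachedTokens, cacheRead: cachedTokens, cacheWrite: undefined },
  outputTokens: { total: 25, text: undefined, reasoning: 5 },
}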
@@ -1,4 +1,4 @@
import type { SharedV2ProviderMetadata } from "@ai-sdk/provider"
import type { SharedV3ProviderMetadata } from "@ai-sdk/provider"

/**
Extracts provider-specific metadata from API responses.

@@ -14,7 +14,7 @@ export type MetadataExtractor = {
* @returns Provider-specific metadata or undefined if no metadata is available.
* The metadata should be under a key indicating the provider id.
*/
extractMetadata: ({ parsedBody }: { parsedBody: unknown }) => Promise<SharedV2ProviderMetadata | undefined>
extractMetadata: ({ parsedBody }: { parsedBody: unknown }) => Promise<SharedV3ProviderMetadata | undefined>

/**
* Creates an extractor for handling streaming responses. The returned object provides

@@ -39,6 +39,6 @@ export type MetadataExtractor = {
* @returns Provider-specific metadata or undefined if no metadata is available.
* The metadata should be under a key indicating the provider id.
*/
buildMetadata(): SharedV2ProviderMetadata | undefined
buildMetadata(): SharedV3ProviderMetadata | undefined
}
}
@@ -1,15 +1,11 @@
import {
type LanguageModelV2CallOptions,
type LanguageModelV2CallWarning,
UnsupportedFunctionalityError,
} from "@ai-sdk/provider"
import { type LanguageModelV3CallOptions, type SharedV3Warning, UnsupportedFunctionalityError } from "@ai-sdk/provider"

export function prepareTools({
tools,
toolChoice,
}: {
tools: LanguageModelV2CallOptions["tools"]
toolChoice?: LanguageModelV2CallOptions["toolChoice"]
tools: LanguageModelV3CallOptions["tools"]
toolChoice?: LanguageModelV3CallOptions["toolChoice"]
}): {
tools:
| undefined

@@ -22,12 +18,12 @@ export function prepareTools({
}
}>
toolChoice: { type: "function"; function: { name: string } } | "auto" | "none" | "required" | undefined
toolWarnings: LanguageModelV2CallWarning[]
toolWarnings: SharedV3Warning[]
} {
// when the tools array is empty, change it to undefined to prevent errors:
tools = tools?.length ? tools : undefined

const toolWarnings: LanguageModelV2CallWarning[] = []
const toolWarnings: SharedV3Warning[] = []

if (tools == null) {
return { tools: undefined, toolChoice: undefined, toolWarnings }

@@ -43,8 +39,8 @@ export function prepareTools({
}> = []

for (const tool of tools) {
if (tool.type === "provider-defined") {
toolWarnings.push({ type: "unsupported-tool", tool })
if (tool.type === "provider") {
toolWarnings.push({ type: "unsupported", feature: `tool type: ${tool.type}` })
} else {
openaiCompatTools.push({
type: "function",
@@ -1,4 +1,4 @@
import type { LanguageModelV2 } from "@ai-sdk/provider"
import type { LanguageModelV3 } from "@ai-sdk/provider"
import { type FetchFunction, withoutTrailingSlash, withUserAgentSuffix } from "@ai-sdk/provider-utils"
import { OpenAICompatibleChatLanguageModel } from "./chat/openai-compatible-chat-language-model"
import { OpenAIResponsesLanguageModel } from "./responses/openai-responses-language-model"

@@ -36,10 +36,10 @@ export interface OpenaiCompatibleProviderSettings {
}

export interface OpenaiCompatibleProvider {
(modelId: OpenaiCompatibleModelId): LanguageModelV2
chat(modelId: OpenaiCompatibleModelId): LanguageModelV2
responses(modelId: OpenaiCompatibleModelId): LanguageModelV2
languageModel(modelId: OpenaiCompatibleModelId): LanguageModelV2
(modelId: OpenaiCompatibleModelId): LanguageModelV3
chat(modelId: OpenaiCompatibleModelId): LanguageModelV3
responses(modelId: OpenaiCompatibleModelId): LanguageModelV3
languageModel(modelId: OpenaiCompatibleModelId): LanguageModelV3

// embeddingModel(modelId: any): EmbeddingModelV2
@@ -1,7 +1,7 @@
import {
type LanguageModelV2CallWarning,
type LanguageModelV2Prompt,
type LanguageModelV2ToolCallPart,
type LanguageModelV3Prompt,
type LanguageModelV3ToolCallPart,
type SharedV3Warning,
UnsupportedFunctionalityError,
} from "@ai-sdk/provider"
import { convertToBase64, parseProviderOptions } from "@ai-sdk/provider-utils"

@@ -25,17 +25,18 @@ export async function convertToOpenAIResponsesInput({
store,
hasLocalShellTool = false,
}: {
prompt: LanguageModelV2Prompt
prompt: LanguageModelV3Prompt
systemMessageMode: "system" | "developer" | "remove"
fileIdPrefixes?: readonly string[]
store: boolean
hasLocalShellTool?: boolean
}): Promise<{
input: OpenAIResponsesInput
warnings: Array<LanguageModelV2CallWarning>
warnings: Array<SharedV3Warning>
}> {
const input: OpenAIResponsesInput = []
const warnings: Array<LanguageModelV2CallWarning> = []
const warnings: Array<SharedV3Warning> = []
const processedApprovalIds = new Set<string>()

for (const { role, content } of prompt) {
switch (role) {

@@ -118,7 +119,7 @@ export async function convertToOpenAIResponsesInput({

case "assistant": {
const reasoningMessages: Record<string, OpenAIResponsesReasoning> = {}
const toolCallParts: Record<string, LanguageModelV2ToolCallPart> = {}
const toolCallParts: Record<string, LanguageModelV3ToolCallPart> = {}

for (const part of content) {
switch (part.type) {

@@ -251,8 +252,36 @@ export async function convertToOpenAIResponsesInput({

case "tool": {
for (const part of content) {
if (part.type === "tool-approval-response") {
if (processedApprovalIds.has(part.approvalId)) {
continue
}
processedApprovalIds.add(part.approvalId)

if (store) {
input.push({
type: "item_reference",
id: part.approvalId,
})
}

input.push({
type: "mcp_approval_response",
approval_request_id: part.approvalId,
approve: part.approved,
})
continue
}
const output = part.output

if (output.type === "execution-denied") {
const approvalId = (output.providerOptions?.openai as { approvalId?: string } | undefined)?.approvalId

if (approvalId) {
continue
}
}

if (hasLocalShellTool && part.toolName === "local_shell" && output.type === "json") {
input.push({
type: "local_shell_call_output",

@@ -268,6 +297,9 @@ export async function convertToOpenAIResponsesInput({
case "error-text":
contentValue = output.value
break
case "execution-denied":
contentValue = output.reason ?? "Tool execution denied."
break
case "content":
case "json":
case "error-json":
@@ -1,4 +1,4 @@
import type { LanguageModelV2FinishReason } from "@ai-sdk/provider"
import type { LanguageModelV3FinishReason } from "@ai-sdk/provider"

export function mapOpenAIResponseFinishReason({
finishReason,

@@ -7,7 +7,7 @@ export function mapOpenAIResponseFinishReason({
finishReason: string | null | undefined
// flag that checks if there have been client-side tool calls (not executed by openai)
hasFunctionCall: boolean
}): LanguageModelV2FinishReason {
}): LanguageModelV3FinishReason["unified"] {
switch (finishReason) {
case undefined:
case null:

@@ -17,6 +17,6 @@ export function mapOpenAIResponseFinishReason({
case "content_filter":
return "content-filter"
default:
return hasFunctionCall ? "tool-calls" : "unknown"
return hasFunctionCall ? "tool-calls" : "other"
}
}
@@ -13,6 +13,7 @@ export type OpenAIResponsesInputItem =
| OpenAIResponsesLocalShellCallOutput
| OpenAIResponsesReasoning
| OpenAIResponsesItemReference
| OpenAIResponsesMcpApprovalResponse

export type OpenAIResponsesIncludeValue =
| "web_search_call.action.sources"

@@ -93,6 +94,12 @@ export type OpenAIResponsesItemReference = {
id: string
}

export type OpenAIResponsesMcpApprovalResponse = {
type: "mcp_approval_response"
approval_request_id: string
approve: boolean
}

/**
* A filter used to compare a specified attribute key to a given value using a defined comparison operation.
*/
@@ -1,13 +1,13 @@
import {
APICallError,
type LanguageModelV2,
type LanguageModelV2CallWarning,
type LanguageModelV2Content,
type LanguageModelV2FinishReason,
type LanguageModelV2ProviderDefinedTool,
type LanguageModelV2StreamPart,
type LanguageModelV2Usage,
type SharedV2ProviderMetadata,
type JSONValue,
type LanguageModelV3,
type LanguageModelV3CallOptions,
type LanguageModelV3Content,
type LanguageModelV3ProviderTool,
type LanguageModelV3StreamPart,
type SharedV3ProviderMetadata,
type SharedV3Warning,
} from "@ai-sdk/provider"
import {
combineHeaders,

@@ -128,8 +128,8 @@ const LOGPROBS_SCHEMA = z.array(
}),
)

export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
readonly specificationVersion = "v2"
export class OpenAIResponsesLanguageModel implements LanguageModelV3 {
readonly specificationVersion = "v3"

readonly modelId: OpenAIResponsesModelId

@@ -163,34 +163,34 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
tools,
toolChoice,
responseFormat,
}: Parameters<LanguageModelV2["doGenerate"]>[0]) {
const warnings: LanguageModelV2CallWarning[] = []
}: LanguageModelV3CallOptions) {
const warnings: SharedV3Warning[] = []
const modelConfig = getResponsesModelConfig(this.modelId)

if (topK != null) {
warnings.push({ type: "unsupported-setting", setting: "topK" })
warnings.push({ type: "unsupported", feature: "topK" })
}

if (seed != null) {
warnings.push({ type: "unsupported-setting", setting: "seed" })
warnings.push({ type: "unsupported", feature: "seed" })
}

if (presencePenalty != null) {
warnings.push({
type: "unsupported-setting",
setting: "presencePenalty",
type: "unsupported",
feature: "presencePenalty",
})
}

if (frequencyPenalty != null) {
warnings.push({
type: "unsupported-setting",
setting: "frequencyPenalty",
type: "unsupported",
feature: "frequencyPenalty",
})
}

if (stopSequences != null) {
warnings.push({ type: "unsupported-setting", setting: "stopSequences" })
warnings.push({ type: "unsupported", feature: "stopSequences" })
}

const openaiOptions = await parseProviderOptions({

@@ -218,7 +218,7 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
}

function hasOpenAITool(id: string) {
return tools?.find((tool) => tool.type === "provider-defined" && tool.id === id) != null
return tools?.find((tool) => tool.type === "provider" && tool.id === id) != null
}

// when logprobs are requested, automatically include them:

@@ -237,9 +237,8 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
const webSearchToolName = (
tools?.find(
(tool) =>
tool.type === "provider-defined" &&
(tool.id === "openai.web_search" || tool.id === "openai.web_search_preview"),
) as LanguageModelV2ProviderDefinedTool | undefined
tool.type === "provider" && (tool.id === "openai.web_search" || tool.id === "openai.web_search_preview"),
) as LanguageModelV3ProviderTool | undefined
)?.name

if (webSearchToolName) {

@@ -315,8 +314,8 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
if (baseArgs.temperature != null) {
baseArgs.temperature = undefined
warnings.push({
type: "unsupported-setting",
setting: "temperature",
type: "unsupported",
feature: "temperature",
details: "temperature is not supported for reasoning models",
})
}

@@ -324,24 +323,24 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
if (baseArgs.top_p != null) {
baseArgs.top_p = undefined
warnings.push({
type: "unsupported-setting",
setting: "topP",
type: "unsupported",
feature: "topP",
details: "topP is not supported for reasoning models",
})
}
} else {
if (openaiOptions?.reasoningEffort != null) {
warnings.push({
type: "unsupported-setting",
setting: "reasoningEffort",
type: "unsupported",
feature: "reasoningEffort",
details: "reasoningEffort is not supported for non-reasoning models",
})
}

if (openaiOptions?.reasoningSummary != null) {
warnings.push({
type: "unsupported-setting",
setting: "reasoningSummary",
type: "unsupported",
feature: "reasoningSummary",
details: "reasoningSummary is not supported for non-reasoning models",
})
}

@@ -350,8 +349,8 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
// Validate flex processing support
if (openaiOptions?.serviceTier === "flex" && !modelConfig.supportsFlexProcessing) {
warnings.push({
type: "unsupported-setting",
setting: "serviceTier",
type: "unsupported",
feature: "serviceTier",
details: "flex processing is only available for o3, o4-mini, and gpt-5 models",
})
// Remove from args if not supported

@@ -361,8 +360,8 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
// Validate priority processing support
if (openaiOptions?.serviceTier === "priority" && !modelConfig.supportsPriorityProcessing) {
warnings.push({
type: "unsupported-setting",
setting: "serviceTier",
type: "unsupported",
feature: "serviceTier",
details:
"priority processing is only available for supported models (gpt-4, gpt-5, gpt-5-mini, o3, o4-mini) and requires Enterprise access. gpt-5-nano is not supported",
})

@@ -391,9 +390,7 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
}
}

async doGenerate(
options: Parameters<LanguageModelV2["doGenerate"]>[0],
): Promise<Awaited<ReturnType<LanguageModelV2["doGenerate"]>>> {
async doGenerate(options: LanguageModelV3CallOptions) {
const { args: body, warnings, webSearchToolName } = await this.getArgs(options)
const url = this.config.url({
path: "/responses",

@@ -508,7 +505,7 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
})
}

const content: Array<LanguageModelV2Content> = []
const content: Array<LanguageModelV3Content> = []
const logprobs: Array<z.infer<typeof LOGPROBS_SCHEMA>> = []

// flag that checks if there have been client-side tool calls (not executed by openai)

@@ -554,7 +551,6 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
result: {
result: part.result,
} satisfies z.infer<typeof imageGenerationOutputSchema>,
providerExecuted: true,
})

break

@@ -648,7 +644,6 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
toolCallId: part.id,
toolName: webSearchToolName ?? "web_search",
result: { status: part.status },
providerExecuted: true,
})

break

@@ -671,7 +666,6 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
type: "computer_use_tool_result",
status: part.status || "completed",
},
providerExecuted: true,
})
break
}

@@ -693,14 +687,13 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
queries: part.queries,
results:
part.results?.map((result) => ({
attributes: result.attributes,
attributes: result.attributes as Record<string, JSONValue>,
fileId: result.file_id,
filename: result.filename,
score: result.score,
text: result.text,
})) ?? null,
} satisfies z.infer<typeof fileSearchOutputSchema>,
providerExecuted: true,
})
break
}

@@ -724,14 +717,13 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
result: {
outputs: part.outputs,
} satisfies z.infer<typeof codeInterpreterOutputSchema>,
providerExecuted: true,
})
break
}
}
}

const providerMetadata: SharedV2ProviderMetadata = {
const providerMetadata: SharedV3ProviderMetadata = {
openai: { responseId: response.id },
}

@@ -745,16 +737,29 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {

return {
content,
finishReason: mapOpenAIResponseFinishReason({
finishReason: response.incomplete_details?.reason,
hasFunctionCall,
}),
finishReason: {
unified: mapOpenAIResponseFinishReason({
finishReason: response.incomplete_details?.reason,
hasFunctionCall,
}),
raw: response.incomplete_details?.reason,
},
usage: {
inputTokens: response.usage.input_tokens,
outputTokens: response.usage.output_tokens,
totalTokens: response.usage.input_tokens + response.usage.output_tokens,
reasoningTokens: response.usage.output_tokens_details?.reasoning_tokens ?? undefined,
cachedInputTokens: response.usage.input_tokens_details?.cached_tokens ?? undefined,
inputTokens: {
total: response.usage.input_tokens,
noCache:
response.usage.input_tokens_details?.cached_tokens != null
? response.usage.input_tokens - response.usage.input_tokens_details.cached_tokens
: undefined,
cacheRead: response.usage.input_tokens_details?.cached_tokens ?? undefined,
cacheWrite: undefined,
},
outputTokens: {
total: response.usage.output_tokens,
text: undefined,
reasoning: response.usage.output_tokens_details?.reasoning_tokens ?? undefined,
},
raw: response.usage,
},
request: { body },
response: {

@@ -769,9 +774,7 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
}
}

async doStream(
options: Parameters<LanguageModelV2["doStream"]>[0],
): Promise<Awaited<ReturnType<LanguageModelV2["doStream"]>>> {
async doStream(options: LanguageModelV3CallOptions) {
const { args: body, warnings, webSearchToolName } = await this.getArgs(options)

const { responseHeaders, value: response } = await postJsonToApi({

@@ -792,11 +795,25 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {

const self = this

let finishReason: LanguageModelV2FinishReason = "unknown"
const usage: LanguageModelV2Usage = {
let finishReason: {
unified: ReturnType<typeof mapOpenAIResponseFinishReason>
raw: string | undefined
} = {
unified: "other",
raw: undefined,
}
const usage: {
inputTokens: number | undefined
outputTokens: number | undefined
totalTokens: number | undefined
reasoningTokens: number | undefined
cachedInputTokens: number | undefined
} = {
inputTokens: undefined,
outputTokens: undefined,
totalTokens: undefined,
reasoningTokens: undefined,
cachedInputTokens: undefined,
}
const logprobs: Array<z.infer<typeof LOGPROBS_SCHEMA>> = []
let responseId: string | null = null

@@ -837,7 +854,7 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {

return {
stream: response.pipeThrough(
new TransformStream<ParseResult<z.infer<typeof openaiResponsesChunkSchema>>, LanguageModelV2StreamPart>({
new TransformStream<ParseResult<z.infer<typeof openaiResponsesChunkSchema>>, LanguageModelV3StreamPart>({
start(controller) {
controller.enqueue({ type: "stream-start", warnings })
},

@@ -849,7 +866,10 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {

// handle failed chunk parsing / validation:
if (!chunk.success) {
finishReason = "error"
finishReason = {
unified: "error",
raw: undefined,
}
controller.enqueue({ type: "error", error: chunk.error })
return
}

@@ -999,7 +1019,6 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
toolCallId: value.item.id,
toolName: "web_search",
result: { status: value.item.status },
providerExecuted: true,
})
} else if (value.item.type === "computer_call") {
ongoingToolCalls[value.output_index] = undefined

@@ -1025,7 +1044,6 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
type: "computer_use_tool_result",
status: value.item.status || "completed",
},
providerExecuted: true,
})
} else if (value.item.type === "file_search_call") {
ongoingToolCalls[value.output_index] = undefined

@@ -1038,14 +1056,13 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
queries: value.item.queries,
results:
value.item.results?.map((result) => ({
attributes: result.attributes,
attributes: result.attributes as Record<string, JSONValue>,
fileId: result.file_id,
filename: result.filename,
score: result.score,
text: result.text,
})) ?? null,
} satisfies z.infer<typeof fileSearchOutputSchema>,
providerExecuted: true,
})
} else if (value.item.type === "code_interpreter_call") {
ongoingToolCalls[value.output_index] = undefined

@@ -1057,7 +1074,6 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
result: {
outputs: value.item.outputs,
} satisfies z.infer<typeof codeInterpreterOutputSchema>,
providerExecuted: true,
})
} else if (value.item.type === "image_generation_call") {
controller.enqueue({

@@ -1067,7 +1083,6 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
result: {
result: value.item.result,
} satisfies z.infer<typeof imageGenerationOutputSchema>,
providerExecuted: true,
})
} else if (value.item.type === "local_shell_call") {
ongoingToolCalls[value.output_index] = undefined

@@ -1137,7 +1152,6 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
result: {
result: value.partial_image_b64,
} satisfies z.infer<typeof imageGenerationOutputSchema>,
providerExecuted: true,
})
} else if (isResponseCodeInterpreterCallCodeDeltaChunk(value)) {
const toolCall = ongoingToolCalls[value.output_index]

@@ -1244,10 +1258,13 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
})
}
} else if (isResponseFinishedChunk(value)) {
finishReason = mapOpenAIResponseFinishReason({
finishReason: value.response.incomplete_details?.reason,
hasFunctionCall,
})
finishReason = {
unified: mapOpenAIResponseFinishReason({
finishReason: value.response.incomplete_details?.reason,
hasFunctionCall,
}),
raw: value.response.incomplete_details?.reason ?? undefined,
}
usage.inputTokens = value.response.usage.input_tokens
usage.outputTokens = value.response.usage.output_tokens
usage.totalTokens = value.response.usage.input_tokens + value.response.usage.output_tokens

@@ -1287,7 +1304,7 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
currentTextId = null
}

const providerMetadata: SharedV2ProviderMetadata = {
const providerMetadata: SharedV3ProviderMetadata = {
openai: {
responseId,
},

@@ -1304,7 +1321,27 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
controller.enqueue({
type: "finish",
finishReason,
usage,
usage: {
inputTokens: {
total: usage.inputTokens,
noCache:
usage.inputTokens != null && usage.cachedInputTokens != null
? usage.inputTokens - usage.cachedInputTokens
: undefined,
cacheRead: usage.cachedInputTokens,
cacheWrite: undefined,
},
outputTokens: {
total: usage.outputTokens,
text: undefined,
reasoning: usage.reasoningTokens,
},
raw: {
input_tokens: usage.inputTokens,
output_tokens: usage.outputTokens,
total_tokens: usage.totalTokens,
},
},
providerMetadata,
})
},
@@ -1,8 +1,4 @@
import {
type LanguageModelV2CallOptions,
type LanguageModelV2CallWarning,
UnsupportedFunctionalityError,
} from "@ai-sdk/provider"
import { type LanguageModelV3CallOptions, type SharedV3Warning, UnsupportedFunctionalityError } from "@ai-sdk/provider"
import { codeInterpreterArgsSchema } from "./tool/code-interpreter"
import { fileSearchArgsSchema } from "./tool/file-search"
import { webSearchArgsSchema } from "./tool/web-search"

@@ -15,8 +11,8 @@ export function prepareResponsesTools({
toolChoice,
strictJsonSchema,
}: {
tools: LanguageModelV2CallOptions["tools"]
toolChoice?: LanguageModelV2CallOptions["toolChoice"]
tools: LanguageModelV3CallOptions["tools"]
toolChoice?: LanguageModelV3CallOptions["toolChoice"]
strictJsonSchema: boolean
}): {
tools?: Array<OpenAIResponsesTool>

@@ -30,12 +26,12 @@ export function prepareResponsesTools({
| { type: "function"; name: string }
| { type: "code_interpreter" }
| { type: "image_generation" }
toolWarnings: LanguageModelV2CallWarning[]
toolWarnings: SharedV3Warning[]
} {
// when the tools array is empty, change it to undefined to prevent errors:
tools = tools?.length ? tools : undefined

const toolWarnings: LanguageModelV2CallWarning[] = []
const toolWarnings: SharedV3Warning[] = []

if (tools == null) {
return { tools: undefined, toolChoice: undefined, toolWarnings }

@@ -54,7 +50,7 @@ export function prepareResponsesTools({
strict: strictJsonSchema,
})
break
case "provider-defined": {
case "provider": {
switch (tool.id) {
case "openai.file_search": {
const args = fileSearchArgsSchema.parse(tool.args)

@@ -138,7 +134,7 @@ export function prepareResponsesTools({
break
}
default:
toolWarnings.push({ type: "unsupported-tool", tool })
toolWarnings.push({ type: "unsupported", feature: "tool type" })
break
}
}
@@ -1,4 +1,4 @@
import { createProviderDefinedToolFactoryWithOutputSchema } from "@ai-sdk/provider-utils"
import { createProviderToolFactoryWithOutputSchema } from "@ai-sdk/provider-utils"
import { z } from "zod/v4"

export const codeInterpreterInputSchema = z.object({

@@ -37,7 +37,7 @@ type CodeInterpreterArgs = {
container?: string | { fileIds?: string[] }
}

export const codeInterpreterToolFactory = createProviderDefinedToolFactoryWithOutputSchema<
export const codeInterpreterToolFactory = createProviderToolFactoryWithOutputSchema<
{
/**
* The code to run, or null if not available.

@@ -76,7 +76,6 @@ export const codeInterpreterToolFactory = createProviderDefinedToolFactoryWithOutputSchema<
CodeInterpreterArgs
>({
id: "openai.code_interpreter",
name: "code_interpreter",
inputSchema: codeInterpreterInputSchema,
outputSchema: codeInterpreterOutputSchema,
})
@@ -1,4 +1,4 @@
import { createProviderDefinedToolFactoryWithOutputSchema } from "@ai-sdk/provider-utils"
import { createProviderToolFactoryWithOutputSchema } from "@ai-sdk/provider-utils"
import type {
OpenAIResponsesFileSearchToolComparisonFilter,
OpenAIResponsesFileSearchToolCompoundFilter,

@@ -43,7 +43,7 @@ export const fileSearchOutputSchema = z.object({
.nullable(),
})

export const fileSearch = createProviderDefinedToolFactoryWithOutputSchema<
export const fileSearch = createProviderToolFactoryWithOutputSchema<
{},
{
/**

@@ -122,7 +122,6 @@ export const fileSearch = createProviderDefinedToolFactoryWithOutputSchema<
}
>({
id: "openai.file_search",
name: "file_search",
inputSchema: z.object({}),
outputSchema: fileSearchOutputSchema,
})
@@ -1,4 +1,4 @@
import { createProviderDefinedToolFactoryWithOutputSchema } from "@ai-sdk/provider-utils"
import { createProviderToolFactoryWithOutputSchema } from "@ai-sdk/provider-utils"
import { z } from "zod/v4"

export const imageGenerationArgsSchema = z

@@ -92,7 +92,7 @@ type ImageGenerationArgs = {
size?: "auto" | "1024x1024" | "1024x1536" | "1536x1024"
}

const imageGenerationToolFactory = createProviderDefinedToolFactoryWithOutputSchema<
const imageGenerationToolFactory = createProviderToolFactoryWithOutputSchema<
{},
{
/**

@@ -103,7 +103,6 @@ const imageGenerationToolFactory = createProviderDefinedToolFactoryWithOutputSchema<
ImageGenerationArgs
>({
id: "openai.image_generation",
name: "image_generation",
inputSchema: z.object({}),
outputSchema: imageGenerationOutputSchema,
})
@@ -1,4 +1,4 @@
import { createProviderDefinedToolFactoryWithOutputSchema } from "@ai-sdk/provider-utils"
import { createProviderToolFactoryWithOutputSchema } from "@ai-sdk/provider-utils"
import { z } from "zod/v4"

export const localShellInputSchema = z.object({

@@ -16,7 +16,7 @@ export const localShellOutputSchema = z.object({
output: z.string(),
})

export const localShell = createProviderDefinedToolFactoryWithOutputSchema<
export const localShell = createProviderToolFactoryWithOutputSchema<
{
/**
* Execute a shell command on the server.

@@ -59,7 +59,6 @@ export const localShell = createProviderDefinedToolFactoryWithOutputSchema<
{}
>({
id: "openai.local_shell",
name: "local_shell",
inputSchema: localShellInputSchema,
outputSchema: localShellOutputSchema,
})
@@ -1,4 +1,4 @@
import { createProviderDefinedToolFactory } from "@ai-sdk/provider-utils"
import { createProviderToolFactory } from "@ai-sdk/provider-utils"
import { z } from "zod/v4"

// Args validation schema

@@ -40,7 +40,7 @@ export const webSearchPreviewArgsSchema = z.object({
.optional(),
})

export const webSearchPreview = createProviderDefinedToolFactory<
export const webSearchPreview = createProviderToolFactory<
{
// Web search doesn't take input parameters - it's controlled by the prompt
},

@@ -81,7 +81,6 @@ export const webSearchPreview = createProviderDefinedToolFactory<
}
>({
id: "openai.web_search_preview",
name: "web_search_preview",
inputSchema: z.object({
action: z
.discriminatedUnion("type", [
@@ -1,4 +1,4 @@
import { createProviderDefinedToolFactory } from "@ai-sdk/provider-utils"
import { createProviderToolFactory } from "@ai-sdk/provider-utils"
import { z } from "zod/v4"

export const webSearchArgsSchema = z.object({

@@ -21,7 +21,7 @@ export const webSearchArgsSchema = z.object({
.optional(),
})

export const webSearchToolFactory = createProviderDefinedToolFactory<
export const webSearchToolFactory = createProviderToolFactory<
{
// Web search doesn't take input parameters - it's controlled by the prompt
},

@@ -74,7 +74,6 @@ export const webSearchToolFactory = createProviderDefinedToolFactory<
}
>({
id: "openai.web_search",
name: "web_search",
inputSchema: z.object({
action: z
.discriminatedUnion("type", [
@@ -25,8 +25,9 @@ export namespace ProviderTransform {
    switch (npm) {
      case "@ai-sdk/github-copilot":
        return "copilot"
      case "@ai-sdk/openai":
      case "@ai-sdk/azure":
        return "azure"
      case "@ai-sdk/openai":
        return "openai"
      case "@ai-sdk/amazon-bedrock":
        return "bedrock"

@@ -34,6 +35,7 @@ export namespace ProviderTransform {
      case "@ai-sdk/google-vertex/anthropic":
        return "anthropic"
      case "@ai-sdk/google-vertex":
        return "vertex"
      case "@ai-sdk/google":
        return "google"
      case "@ai-sdk/gateway":

@@ -72,17 +74,29 @@ export namespace ProviderTransform {
    }

    if (model.api.id.includes("claude")) {
      const scrub = (id: string) => id.replace(/[^a-zA-Z0-9_-]/g, "_")
      return msgs.map((msg) => {
        if ((msg.role === "assistant" || msg.role === "tool") && Array.isArray(msg.content)) {
          msg.content = msg.content.map((part) => {
            if ((part.type === "tool-call" || part.type === "tool-result") && "toolCallId" in part) {
              return {
                ...part,
                toolCallId: part.toolCallId.replace(/[^a-zA-Z0-9_-]/g, "_"),
        if (msg.role === "assistant" && Array.isArray(msg.content)) {
          return {
            ...msg,
            content: msg.content.map((part) => {
              if (part.type === "tool-call" || part.type === "tool-result") {
                return { ...part, toolCallId: scrub(part.toolCallId) }
              }
            }
            return part
          })
              return part
            }),
          }
        }
        if (msg.role === "tool" && Array.isArray(msg.content)) {
          return {
            ...msg,
            content: msg.content.map((part) => {
              if (part.type === "tool-result") {
                return { ...part, toolCallId: scrub(part.toolCallId) }
              }
              return part
            }),
          }
        }
        return msg
      })

@@ -92,29 +106,33 @@ export namespace ProviderTransform {
      model.api.id.toLowerCase().includes("mistral") ||
      model.api.id.toLocaleLowerCase().includes("devstral")
    ) {
      const scrub = (id: string) => {
        return id
          .replace(/[^a-zA-Z0-9]/g, "") // Remove non-alphanumeric characters
          .substring(0, 9) // Take first 9 characters
          .padEnd(9, "0") // Pad with zeros if less than 9 characters
      }
      const result: ModelMessage[] = []
      for (let i = 0; i < msgs.length; i++) {
        const msg = msgs[i]
        const nextMsg = msgs[i + 1]

        if ((msg.role === "assistant" || msg.role === "tool") && Array.isArray(msg.content)) {
        if (msg.role === "assistant" && Array.isArray(msg.content)) {
          msg.content = msg.content.map((part) => {
            if ((part.type === "tool-call" || part.type === "tool-result") && "toolCallId" in part) {
              // Mistral requires alphanumeric tool call IDs with exactly 9 characters
              const normalizedId = part.toolCallId
                .replace(/[^a-zA-Z0-9]/g, "") // Remove non-alphanumeric characters
                .substring(0, 9) // Take first 9 characters
                .padEnd(9, "0") // Pad with zeros if less than 9 characters

              return {
                ...part,
                toolCallId: normalizedId,
              }
            if (part.type === "tool-call" || part.type === "tool-result") {
              return { ...part, toolCallId: scrub(part.toolCallId) }
            }
            return part
          })
        }
        if (msg.role === "tool" && Array.isArray(msg.content)) {
          msg.content = msg.content.map((part) => {
            if (part.type === "tool-result") {
              return { ...part, toolCallId: scrub(part.toolCallId) }
            }
            return part
          })
        }

        result.push(msg)

        // Fix message sequence: tool messages cannot be followed by user messages

@@ -202,7 +220,12 @@ export namespace ProviderTransform {

      if (shouldUseContentOptions) {
        const lastContent = msg.content[msg.content.length - 1]
        if (lastContent && typeof lastContent === "object") {
        if (
          lastContent &&
          typeof lastContent === "object" &&
          lastContent.type !== "tool-approval-request" &&
          lastContent.type !== "tool-approval-response"
        ) {
          lastContent.providerOptions = mergeDeep(lastContent.providerOptions ?? {}, providerOptions)
          continue
        }

@@ -284,7 +307,12 @@ export namespace ProviderTransform {
      return {
        ...msg,
        providerOptions: remap(msg.providerOptions),
        content: msg.content.map((part) => ({ ...part, providerOptions: remap(part.providerOptions) })),
        content: msg.content.map((part) => {
          if (part.type === "tool-approval-request" || part.type === "tool-approval-response") {
            return { ...part }
          }
          return { ...part, providerOptions: remap(part.providerOptions) }
        }),
      } as typeof msg
    })
  }
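Both normalizers above can be exercised standalone; a quick sketch of what each does to a hostile tool-call id, taken directly from the regexes in this diff:

// Claude path: any character outside [a-zA-Z0-9_-] becomes "_".
const scrubClaude = (id: string) => id.replace(/[^a-zA-Z0-9_-]/g, "_")

// Mistral path: alphanumeric only, exactly 9 characters, zero-padded.
const scrubMistral = (id: string) =>
  id
    .replace(/[^a-zA-Z0-9]/g, "")
    .substring(0, 9)
    .padEnd(9, "0")

console.log(scrubClaude("call.123/abc")) // "call_123_abc"
console.log(scrubMistral("id-42"))       // "id4200000"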
@@ -215,7 +215,7 @@ When constructing the summary, try to stick to this template:
      tools: {},
      system: [],
      messages: [
        ...MessageV2.toModelMessages(msgs, model, { stripMedia: true }),
        ...(await MessageV2.toModelMessages(msgs, model, { stripMedia: true })),
        {
          role: "user",
          content: [
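Since toModelMessages now returns a promise, spreading its result into a messages array moves behind an await. A standalone sketch of the pattern (the async producer stands in for MessageV2.toModelMessages):

type Msg = { role: "user"; content: string }

// Stand-in for MessageV2.toModelMessages: any async producer of messages.
const convert = async (): Promise<Msg[]> => [{ role: "user", content: "earlier turn" }]

const messages: Msg[] = [
  ...(await convert()),
  { role: "user", content: "latest turn" },
]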
@@ -1,16 +1,6 @@
import { Installation } from "@/installation"
import { Provider } from "@/provider/provider"
import { Log } from "@/util/log"
import {
  streamText,
  wrapLanguageModel,
  type ModelMessage,
  type StreamTextResult,
  type Tool,
  type ToolSet,
  tool,
  jsonSchema,
} from "ai"
import { streamText, wrapLanguageModel, type ModelMessage, type Tool, tool, jsonSchema } from "ai"
import { mergeDeep, pipe } from "remeda"
import { GitLabWorkflowLanguageModel } from "gitlab-ai-provider"
import { ProviderTransform } from "@/provider/transform"

@@ -23,6 +13,7 @@ import { SystemPrompt } from "./system"
import { Flag } from "@/flag/flag"
import { Permission } from "@/permission"
import { Auth } from "@/auth"
import { Installation } from "@/installation"

export namespace LLM {
  const log = Log.create({ service: "llm" })

@@ -43,8 +34,6 @@ export namespace LLM {
    toolChoice?: "auto" | "required" | "none"
  }

  export type StreamOutput = StreamTextResult<ToolSet, unknown>

  export async function stream(input: StreamInput) {
    const l = log
      .clone()

@@ -273,8 +262,10 @@ export namespace LLM {
      model: language,
      middleware: [
        {
          specificationVersion: "v3" as const,
          async transformParams(args) {
            if (args.type === "stream") {
              // TODO: verify that LanguageModelV3Prompt is still compatible here!
              // @ts-expect-error
              args.params.prompt = ProviderTransform.message(args.params.prompt, input.model, options)
            }
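The middleware block above is self-contained enough to sketch in isolation: a v3 middleware whose transformParams rewrites the prompt for stream calls. The wiring mirrors the diff; transform is a hypothetical stand-in for ProviderTransform.message, and the loose typing is deliberate since this sketch does not assume the exact v6 middleware types:

import { wrapLanguageModel } from "ai"

const transform = (prompt: unknown) => prompt // hypothetical prompt rewrite

const wrap = (model: Parameters<typeof wrapLanguageModel>[0]["model"]) =>
  wrapLanguageModel({
    model,
    middleware: [
      {
        specificationVersion: "v3" as const,
        async transformParams(args: any) {
          if (args.type === "stream") {
            // rewrite the outgoing prompt before the provider sees it
            args.params.prompt = transform(args.params.prompt)
          }
          return args.params
        },
      },
    ] as any, // sketch: sidesteps the precise v6 middleware type
  })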
@@ -573,11 +573,11 @@ export namespace MessageV2 {
    }))
  }

  export function toModelMessages(
  export async function toModelMessages(
    input: WithParts[],
    model: Provider.Model,
    options?: { stripMedia?: boolean },
  ): ModelMessage[] {
  ): Promise<ModelMessage[]> {
    const result: UIMessage[] = []
    const toolNames = new Set<string>()
    // Track media from tool results that need to be injected as user messages

@@ -601,7 +601,8 @@ export namespace MessageV2 {
      return false
    })()

    const toModelOutput = (output: unknown) => {
    const toModelOutput = (options: { toolCallId: string; input: unknown; output: unknown }) => {
      const output = options.output
      if (typeof output === "string") {
        return { type: "text", value: output }
      }

@@ -799,7 +800,7 @@ export namespace MessageV2 {

    const tools = Object.fromEntries(Array.from(toolNames).map((toolName) => [toolName, { toModelOutput }]))

    return convertToModelMessages(
    return await convertToModelMessages(
      result.filter((msg) => msg.parts.some((part) => part.type !== "step-start")),
      {
        //@ts-expect-error (convertToModelMessages expects a ToolSet but only actually needs tools[name]?.toModelOutput)

@@ -871,7 +872,13 @@ export namespace MessageV2 {
      db.select().from(PartTable).where(eq(PartTable.message_id, message_id)).orderBy(PartTable.id).all(),
    )
    return rows.map(
      (row) => ({ ...row.data, id: row.id, sessionID: row.session_id, messageID: row.message_id }) as MessageV2.Part,
      (row) =>
        ({
          ...row.data,
          id: row.id,
          sessionID: row.session_id,
          messageID: row.message_id,
        }) as MessageV2.Part,
    )
  })
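v6 calls toModelOutput with an options object rather than the bare output value. A standalone sketch of the new contract as it appears in the diff above; the json fallback branch is an assumption, not taken from this patch:

type ToModelOutputArgs = { toolCallId: string; input: unknown; output: unknown }

const toModelOutput = ({ output }: ToModelOutputArgs) =>
  typeof output === "string"
    ? { type: "text" as const, value: output }
    : { type: "json" as const, value: output } // assumed fallback

console.log(toModelOutput({ toolCallId: "call_1", input: {}, output: "done" }))
// -> { type: "text", value: "done" }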
@@ -11,7 +11,7 @@ import { Session } from "."
import { Agent } from "../agent/agent"
import { Provider } from "../provider/provider"
import { ModelID, ProviderID } from "../provider/schema"
import { type Tool as AITool, tool, jsonSchema, type ToolCallOptions, asSchema } from "ai"
import { type Tool as AITool, tool, jsonSchema, type ToolExecutionOptions, asSchema } from "ai"
import { SessionCompaction } from "./compaction"
import { Instance } from "../project/instance"
import { Bus } from "../bus"

@@ -321,7 +321,13 @@ export namespace SessionPrompt {
    if (!lastUser) throw new Error("No user message found in stream. This should never happen.")
    if (
      lastAssistant?.finish &&
      !["tool-calls", "unknown"].includes(lastAssistant.finish) &&
      ![
        "tool-calls",
        // in v6 "unknown" became "other", but "other" existed in v5 too and was distinctly different.
        // There are probably certain providers that used to have bad stop reasons; not really sure
        // which ones, if any, still have this.
        // "unknown",
      ].includes(lastAssistant.finish) &&
      lastUser.id < lastAssistant.id
    ) {
      log.info("exiting loop", { sessionID })

@@ -692,7 +698,7 @@ export namespace SessionPrompt {
      sessionID,
      system,
      messages: [
        ...MessageV2.toModelMessages(msgs, model),
        ...(await MessageV2.toModelMessages(msgs, model)),
        ...(isLastStep
          ? [
              {

@@ -775,7 +781,7 @@ export namespace SessionPrompt {
    using _ = log.time("resolveTools")
    const tools: Record<string, AITool> = {}

    const context = (args: any, options: ToolCallOptions): Tool.Context => ({
    const context = (args: any, options: ToolExecutionOptions): Tool.Context => ({
      sessionID: input.session.id,
      abort: options.abortSignal!,
      messageID: input.processor.message.id,

@@ -861,7 +867,8 @@ export namespace SessionPrompt {
      const execute = item.execute
      if (!execute) continue

      const transformed = ProviderTransform.schema(input.model, asSchema(item.inputSchema).jsonSchema)
      const schema = await asSchema(item.inputSchema).jsonSchema
      const transformed = ProviderTransform.schema(input.model, schema)
      item.inputSchema = jsonSchema(transformed)
      // Wrap execute to add plugin hooks and format output
      item.execute = async (args, opts) => {

@@ -974,10 +981,10 @@ export namespace SessionPrompt {
          metadata: { valid: true },
        }
      },
      toModelOutput(result) {
      toModelOutput({ output }) {
        return {
          type: "text",
          value: result.output,
          value: output.output,
        }
      },
    })

@@ -2010,28 +2017,28 @@ NOTE: At any point in time through this workflow you should feel free to ask the
        (await Provider.getSmallModel(input.providerID)) ?? (await Provider.getModel(input.providerID, input.modelID))
      )
    })
    const result = await LLM.stream({
      agent,
      user: firstRealUser.info as MessageV2.User,
      system: [],
      small: true,
      tools: {},
      model,
      abort: new AbortController().signal,
      sessionID: input.session.id,
      retries: 2,
      messages: [
        {
          role: "user",
          content: "Generate a title for this conversation:\n",
        },
        ...(hasOnlySubtaskParts
          ? [{ role: "user" as const, content: subtaskParts.map((p) => p.prompt).join("\n") }]
          : MessageV2.toModelMessages(contextMessages, model)),
      ],
    })
    const text = await result.text.catch((err) => log.error("failed to generate title", { error: err }))
    if (text) {
    try {
      const result = await LLM.stream({
        agent,
        user: firstRealUser.info as MessageV2.User,
        system: [],
        small: true,
        tools: {},
        model,
        abort: new AbortController().signal,
        sessionID: input.session.id,
        retries: 2,
        messages: [
          {
            role: "user",
            content: "Generate a title for this conversation:\n",
          },
          ...(hasOnlySubtaskParts
            ? [{ role: "user" as const, content: subtaskParts.map((p) => p.prompt).join("\n") }]
            : await MessageV2.toModelMessages(contextMessages, model)),
        ],
      })
      const text = await result.text
      const cleaned = text
        .replace(/<think>[\s\S]*?<\/think>\s*/g, "")
        .split("\n")

@@ -2044,6 +2051,8 @@ NOTE: At any point in time through this workflow you should feel free to ask the
        if (NotFoundError.isInstance(err)) return
        throw err
      })
    } catch (error) {
      log.error("failed to generate title", { error })
    }
  }
}
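In v6 the JSON schema resolved by asSchema is awaited before the provider-specific rewrite. A minimal sketch of that sequence, with the identity step standing in for ProviderTransform.schema:

import { asSchema, jsonSchema } from "ai"
import { z } from "zod/v4"

const inputSchema = z.object({ command: z.string() })

// v6: .jsonSchema resolves asynchronously, as the diff above awaits it.
const resolved = await asSchema(inputSchema).jsonSchema
const transformed = resolved // a provider-specific rewrite would happen here
const wrapped = jsonSchema(transformed)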
@@ -1,6 +1,6 @@
import { OpenAICompatibleChatLanguageModel } from "@/provider/sdk/copilot/chat/openai-compatible-chat-language-model"
import { describe, test, expect, mock } from "bun:test"
import type { LanguageModelV2Prompt } from "@ai-sdk/provider"
import type { LanguageModelV3Prompt } from "@ai-sdk/provider"

async function convertReadableStreamToArray<T>(stream: ReadableStream<T>): Promise<T[]> {
  const reader = stream.getReader()

@@ -13,7 +13,7 @@ async function convertReadableStreamToArray<T>(stream: ReadableStream<T>): Promi
  return result
}

const TEST_PROMPT: LanguageModelV2Prompt = [{ role: "user", content: [{ type: "text", text: "Hello" }] }]
const TEST_PROMPT: LanguageModelV3Prompt = [{ role: "user", content: [{ type: "text", text: "Hello" }] }]

// Fixtures from copilot_test.exs
const FIXTURES = {

@@ -123,7 +123,7 @@ describe("doStream", () => {
      { type: "text-delta", id: "txt-0", delta: " world" },
      { type: "text-delta", id: "txt-0", delta: "!" },
      { type: "text-end", id: "txt-0" },
      { type: "finish", finishReason: "stop" },
      { type: "finish", finishReason: { unified: "stop" } },
    ])
  })

@@ -201,10 +201,10 @@ describe("doStream", () => {
    const finish = parts.find((p) => p.type === "finish")
    expect(finish).toMatchObject({
      type: "finish",
      finishReason: "tool-calls",
      finishReason: { unified: "tool-calls" },
      usage: {
        inputTokens: 19581,
        outputTokens: 53,
        inputTokens: { total: 19581 },
        outputTokens: { total: 53 },
      },
    })
  })

@@ -256,10 +256,10 @@ describe("doStream", () => {
    const finish = parts.find((p) => p.type === "finish")
    expect(finish).toMatchObject({
      type: "finish",
      finishReason: "stop",
      finishReason: { unified: "stop" },
      usage: {
        inputTokens: 5778,
        outputTokens: 59,
        inputTokens: { total: 5778 },
        outputTokens: { total: 59 },
      },
      providerMetadata: {
        copilot: {

@@ -315,7 +315,7 @@ describe("doStream", () => {
    const finish = parts.find((p) => p.type === "finish")
    expect(finish).toMatchObject({
      type: "finish",
      finishReason: "stop",
      finishReason: { unified: "stop" },
    })
  })

@@ -388,10 +388,10 @@ describe("doStream", () => {
    const finish = parts.find((p) => p.type === "finish")
    expect(finish).toMatchObject({
      type: "finish",
      finishReason: "tool-calls",
      finishReason: { unified: "tool-calls" },
      usage: {
        inputTokens: 3767,
        outputTokens: 19,
        inputTokens: { total: 3767 },
        outputTokens: { total: 19 },
      },
    })
  })

@@ -449,7 +449,7 @@ describe("doStream", () => {
    const finish = parts.find((p) => p.type === "finish")
    expect(finish).toMatchObject({
      type: "finish",
      finishReason: "tool-calls",
      finishReason: { unified: "tool-calls" },
    })
  })
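The assertions above pin down the v6 stream-part shape: the stop reason nests under unified and token counts under total. A sketch of the two generations side by side; field values come from the tests, the type names are ad hoc:

type FinishV5 = {
  type: "finish"
  finishReason: string
  usage: { inputTokens: number; outputTokens: number }
}

type FinishV6 = {
  type: "finish"
  finishReason: { unified: string }
  usage: { inputTokens: { total: number }; outputTokens: { total: number } }
}

// The v6 shape asserted in the tool-call test above.
const finish: FinishV6 = {
  type: "finish",
  finishReason: { unified: "tool-calls" },
  usage: { inputTokens: { total: 3767 }, outputTokens: { total: 19 } },
}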
@@ -1,408 +1,412 @@
import { test, expect, describe } from "bun:test"
import path from "path"
// TODO: UNCOMMENT WHEN GITLAB SUPPORT IS COMPLETED
//
//
//
// import { test, expect, describe } from "bun:test"
// import path from "path"

import { ProviderID, ModelID } from "../../src/provider/schema"
import { tmpdir } from "../fixture/fixture"
import { Instance } from "../../src/project/instance"
import { Provider } from "../../src/provider/provider"
import { Env } from "../../src/env"
import { Global } from "../../src/global"
import { GitLabWorkflowLanguageModel } from "gitlab-ai-provider"
// import { ProviderID, ModelID } from "../../src/provider/schema"
// import { tmpdir } from "../fixture/fixture"
// import { Instance } from "../../src/project/instance"
// import { Provider } from "../../src/provider/provider"
// import { Env } from "../../src/env"
// import { Global } from "../../src/global"
// import { GitLabWorkflowLanguageModel } from "gitlab-ai-provider"

test("GitLab Duo: loads provider with API key from environment", async () => {
  await using tmp = await tmpdir({
    init: async (dir) => {
      await Bun.write(
        path.join(dir, "opencode.json"),
        JSON.stringify({
          $schema: "https://opencode.ai/config.json",
        }),
      )
    },
  })
  await Instance.provide({
    directory: tmp.path,
    init: async () => {
      Env.set("GITLAB_TOKEN", "test-gitlab-token")
    },
    fn: async () => {
      const providers = await Provider.list()
      expect(providers[ProviderID.gitlab]).toBeDefined()
      expect(providers[ProviderID.gitlab].key).toBe("test-gitlab-token")
    },
  })
})
// test("GitLab Duo: loads provider with API key from environment", async () => {
//   await using tmp = await tmpdir({
//     init: async (dir) => {
//       await Bun.write(
//         path.join(dir, "opencode.json"),
//         JSON.stringify({
//           $schema: "https://opencode.ai/config.json",
//         }),
//       )
//     },
//   })
//   await Instance.provide({
//     directory: tmp.path,
//     init: async () => {
//       Env.set("GITLAB_TOKEN", "test-gitlab-token")
//     },
//     fn: async () => {
//       const providers = await Provider.list()
//       expect(providers[ProviderID.gitlab]).toBeDefined()
//       expect(providers[ProviderID.gitlab].key).toBe("test-gitlab-token")
//     },
//   })
// })

test("GitLab Duo: config instanceUrl option sets baseURL", async () => {
  await using tmp = await tmpdir({
    init: async (dir) => {
      await Bun.write(
        path.join(dir, "opencode.json"),
        JSON.stringify({
          $schema: "https://opencode.ai/config.json",
          provider: {
            gitlab: {
              options: {
                instanceUrl: "https://gitlab.example.com",
              },
            },
          },
        }),
      )
    },
  })
  await Instance.provide({
    directory: tmp.path,
    init: async () => {
      Env.set("GITLAB_TOKEN", "test-token")
      Env.set("GITLAB_INSTANCE_URL", "https://gitlab.example.com")
    },
    fn: async () => {
      const providers = await Provider.list()
      expect(providers[ProviderID.gitlab]).toBeDefined()
      expect(providers[ProviderID.gitlab].options?.instanceUrl).toBe("https://gitlab.example.com")
    },
  })
})
// test("GitLab Duo: config instanceUrl option sets baseURL", async () => {
//   await using tmp = await tmpdir({
//     init: async (dir) => {
//       await Bun.write(
//         path.join(dir, "opencode.json"),
//         JSON.stringify({
//           $schema: "https://opencode.ai/config.json",
//           provider: {
//             gitlab: {
//               options: {
//                 instanceUrl: "https://gitlab.example.com",
//               },
//             },
//           },
//         }),
//       )
//     },
//   })
//   await Instance.provide({
//     directory: tmp.path,
//     init: async () => {
//       Env.set("GITLAB_TOKEN", "test-token")
//       Env.set("GITLAB_INSTANCE_URL", "https://gitlab.example.com")
//     },
//     fn: async () => {
//       const providers = await Provider.list()
//       expect(providers[ProviderID.gitlab]).toBeDefined()
//       expect(providers[ProviderID.gitlab].options?.instanceUrl).toBe("https://gitlab.example.com")
//     },
//   })
// })

test("GitLab Duo: loads with OAuth token from auth.json", async () => {
  await using tmp = await tmpdir({
    init: async (dir) => {
      await Bun.write(
        path.join(dir, "opencode.json"),
        JSON.stringify({
          $schema: "https://opencode.ai/config.json",
        }),
      )
    },
  })
// test("GitLab Duo: loads with OAuth token from auth.json", async () => {
//   await using tmp = await tmpdir({
//     init: async (dir) => {
//       await Bun.write(
//         path.join(dir, "opencode.json"),
//         JSON.stringify({
//           $schema: "https://opencode.ai/config.json",
//         }),
//       )
//     },
//   })

  const authPath = path.join(Global.Path.data, "auth.json")
  await Bun.write(
    authPath,
    JSON.stringify({
      gitlab: {
        type: "oauth",
        access: "test-access-token",
        refresh: "test-refresh-token",
        expires: Date.now() + 3600000,
      },
    }),
  )
//   const authPath = path.join(Global.Path.data, "auth.json")
//   await Bun.write(
//     authPath,
//     JSON.stringify({
//       gitlab: {
//         type: "oauth",
//         access: "test-access-token",
//         refresh: "test-refresh-token",
//         expires: Date.now() + 3600000,
//       },
//     }),
//   )

  await Instance.provide({
    directory: tmp.path,
    init: async () => {
      Env.set("GITLAB_TOKEN", "")
    },
    fn: async () => {
      const providers = await Provider.list()
      expect(providers[ProviderID.gitlab]).toBeDefined()
    },
  })
})
//   await Instance.provide({
//     directory: tmp.path,
//     init: async () => {
//       Env.set("GITLAB_TOKEN", "")
//     },
//     fn: async () => {
//       const providers = await Provider.list()
//       expect(providers[ProviderID.gitlab]).toBeDefined()
//     },
//   })
// })

test("GitLab Duo: loads with Personal Access Token from auth.json", async () => {
  await using tmp = await tmpdir({
    init: async (dir) => {
      await Bun.write(
        path.join(dir, "opencode.json"),
        JSON.stringify({
          $schema: "https://opencode.ai/config.json",
        }),
      )
    },
  })
// test("GitLab Duo: loads with Personal Access Token from auth.json", async () => {
//   await using tmp = await tmpdir({
//     init: async (dir) => {
//       await Bun.write(
//         path.join(dir, "opencode.json"),
//         JSON.stringify({
//           $schema: "https://opencode.ai/config.json",
//         }),
//       )
//     },
//   })

  const authPath2 = path.join(Global.Path.data, "auth.json")
  await Bun.write(
    authPath2,
    JSON.stringify({
      gitlab: {
        type: "api",
        key: "glpat-test-pat-token",
      },
    }),
  )
//   const authPath2 = path.join(Global.Path.data, "auth.json")
//   await Bun.write(
//     authPath2,
//     JSON.stringify({
//       gitlab: {
//         type: "api",
//         key: "glpat-test-pat-token",
//       },
//     }),
//   )

  await Instance.provide({
    directory: tmp.path,
    init: async () => {
      Env.set("GITLAB_TOKEN", "")
    },
    fn: async () => {
      const providers = await Provider.list()
      expect(providers[ProviderID.gitlab]).toBeDefined()
      expect(providers[ProviderID.gitlab].key).toBe("glpat-test-pat-token")
    },
  })
})
//   await Instance.provide({
//     directory: tmp.path,
//     init: async () => {
//       Env.set("GITLAB_TOKEN", "")
//     },
//     fn: async () => {
//       const providers = await Provider.list()
//       expect(providers[ProviderID.gitlab]).toBeDefined()
//       expect(providers[ProviderID.gitlab].key).toBe("glpat-test-pat-token")
//     },
//   })
// })

test("GitLab Duo: supports self-hosted instance configuration", async () => {
  await using tmp = await tmpdir({
    init: async (dir) => {
      await Bun.write(
        path.join(dir, "opencode.json"),
        JSON.stringify({
          $schema: "https://opencode.ai/config.json",
          provider: {
            gitlab: {
              options: {
                instanceUrl: "https://gitlab.company.internal",
                apiKey: "glpat-internal-token",
              },
            },
          },
        }),
      )
    },
  })
  await Instance.provide({
    directory: tmp.path,
    init: async () => {
      Env.set("GITLAB_INSTANCE_URL", "https://gitlab.company.internal")
    },
    fn: async () => {
      const providers = await Provider.list()
      expect(providers[ProviderID.gitlab]).toBeDefined()
      expect(providers[ProviderID.gitlab].options?.instanceUrl).toBe("https://gitlab.company.internal")
    },
  })
})
// test("GitLab Duo: supports self-hosted instance configuration", async () => {
//   await using tmp = await tmpdir({
//     init: async (dir) => {
//       await Bun.write(
//         path.join(dir, "opencode.json"),
//         JSON.stringify({
//           $schema: "https://opencode.ai/config.json",
//           provider: {
//             gitlab: {
//               options: {
//                 instanceUrl: "https://gitlab.company.internal",
//                 apiKey: "glpat-internal-token",
//               },
//             },
//           },
//         }),
//       )
//     },
//   })
//   await Instance.provide({
//     directory: tmp.path,
//     init: async () => {
//       Env.set("GITLAB_INSTANCE_URL", "https://gitlab.company.internal")
//     },
//     fn: async () => {
//       const providers = await Provider.list()
//       expect(providers[ProviderID.gitlab]).toBeDefined()
//       expect(providers[ProviderID.gitlab].options?.instanceUrl).toBe("https://gitlab.company.internal")
//     },
//   })
// })

test("GitLab Duo: config apiKey takes precedence over environment variable", async () => {
  await using tmp = await tmpdir({
    init: async (dir) => {
      await Bun.write(
        path.join(dir, "opencode.json"),
        JSON.stringify({
          $schema: "https://opencode.ai/config.json",
          provider: {
            gitlab: {
              options: {
                apiKey: "config-token",
              },
            },
          },
        }),
      )
    },
  })
  await Instance.provide({
    directory: tmp.path,
    init: async () => {
      Env.set("GITLAB_TOKEN", "env-token")
    },
    fn: async () => {
      const providers = await Provider.list()
      expect(providers[ProviderID.gitlab]).toBeDefined()
    },
  })
})
// test("GitLab Duo: config apiKey takes precedence over environment variable", async () => {
//   await using tmp = await tmpdir({
//     init: async (dir) => {
//       await Bun.write(
//         path.join(dir, "opencode.json"),
//         JSON.stringify({
//           $schema: "https://opencode.ai/config.json",
//           provider: {
//             gitlab: {
//               options: {
//                 apiKey: "config-token",
//               },
//             },
//           },
//         }),
//       )
//     },
//   })
//   await Instance.provide({
//     directory: tmp.path,
//     init: async () => {
//       Env.set("GITLAB_TOKEN", "env-token")
//     },
//     fn: async () => {
//       const providers = await Provider.list()
//       expect(providers[ProviderID.gitlab]).toBeDefined()
//     },
//   })
// })

test("GitLab Duo: includes context-1m beta header in aiGatewayHeaders", async () => {
  await using tmp = await tmpdir({
    init: async (dir) => {
      await Bun.write(
        path.join(dir, "opencode.json"),
        JSON.stringify({
          $schema: "https://opencode.ai/config.json",
        }),
      )
    },
  })
  await Instance.provide({
    directory: tmp.path,
    init: async () => {
      Env.set("GITLAB_TOKEN", "test-token")
    },
    fn: async () => {
      const providers = await Provider.list()
      expect(providers[ProviderID.gitlab]).toBeDefined()
      expect(providers[ProviderID.gitlab].options?.aiGatewayHeaders?.["anthropic-beta"]).toContain(
        "context-1m-2025-08-07",
      )
    },
  })
})
// test("GitLab Duo: includes context-1m beta header in aiGatewayHeaders", async () => {
//   await using tmp = await tmpdir({
//     init: async (dir) => {
//       await Bun.write(
//         path.join(dir, "opencode.json"),
//         JSON.stringify({
//           $schema: "https://opencode.ai/config.json",
//         }),
//       )
//     },
//   })
//   await Instance.provide({
//     directory: tmp.path,
//     init: async () => {
//       Env.set("GITLAB_TOKEN", "test-token")
//     },
//     fn: async () => {
//       const providers = await Provider.list()
//       expect(providers[ProviderID.gitlab]).toBeDefined()
//       expect(providers[ProviderID.gitlab].options?.aiGatewayHeaders?.["anthropic-beta"]).toContain(
//         "context-1m-2025-08-07",
//       )
//     },
//   })
// })

test("GitLab Duo: supports feature flags configuration", async () => {
  await using tmp = await tmpdir({
    init: async (dir) => {
      await Bun.write(
        path.join(dir, "opencode.json"),
        JSON.stringify({
          $schema: "https://opencode.ai/config.json",
          provider: {
            gitlab: {
              options: {
                featureFlags: {
                  duo_agent_platform_agentic_chat: true,
                  duo_agent_platform: true,
                },
              },
            },
          },
        }),
      )
    },
  })
  await Instance.provide({
    directory: tmp.path,
    init: async () => {
      Env.set("GITLAB_TOKEN", "test-token")
    },
    fn: async () => {
      const providers = await Provider.list()
      expect(providers[ProviderID.gitlab]).toBeDefined()
      expect(providers[ProviderID.gitlab].options?.featureFlags).toBeDefined()
      expect(providers[ProviderID.gitlab].options?.featureFlags?.duo_agent_platform_agentic_chat).toBe(true)
    },
  })
})
// test("GitLab Duo: supports feature flags configuration", async () => {
//   await using tmp = await tmpdir({
//     init: async (dir) => {
//       await Bun.write(
//         path.join(dir, "opencode.json"),
//         JSON.stringify({
//           $schema: "https://opencode.ai/config.json",
//           provider: {
//             gitlab: {
//               options: {
//                 featureFlags: {
//                   duo_agent_platform_agentic_chat: true,
//                   duo_agent_platform: true,
//                 },
//               },
//             },
//           },
//         }),
//       )
//     },
//   })
//   await Instance.provide({
//     directory: tmp.path,
//     init: async () => {
//       Env.set("GITLAB_TOKEN", "test-token")
//     },
//     fn: async () => {
//       const providers = await Provider.list()
//       expect(providers[ProviderID.gitlab]).toBeDefined()
//       expect(providers[ProviderID.gitlab].options?.featureFlags).toBeDefined()
//       expect(providers[ProviderID.gitlab].options?.featureFlags?.duo_agent_platform_agentic_chat).toBe(true)
//     },
//   })
// })

test("GitLab Duo: has multiple agentic chat models available", async () => {
  await using tmp = await tmpdir({
    init: async (dir) => {
      await Bun.write(
        path.join(dir, "opencode.json"),
        JSON.stringify({
          $schema: "https://opencode.ai/config.json",
        }),
      )
    },
  })
  await Instance.provide({
    directory: tmp.path,
    init: async () => {
      Env.set("GITLAB_TOKEN", "test-token")
    },
    fn: async () => {
      const providers = await Provider.list()
      expect(providers[ProviderID.gitlab]).toBeDefined()
      const models = Object.keys(providers[ProviderID.gitlab].models)
      expect(models.length).toBeGreaterThan(0)
      expect(models).toContain("duo-chat-haiku-4-5")
      expect(models).toContain("duo-chat-sonnet-4-5")
      expect(models).toContain("duo-chat-opus-4-5")
    },
  })
})
// test("GitLab Duo: has multiple agentic chat models available", async () => {
//   await using tmp = await tmpdir({
//     init: async (dir) => {
//       await Bun.write(
//         path.join(dir, "opencode.json"),
//         JSON.stringify({
//           $schema: "https://opencode.ai/config.json",
//         }),
//       )
//     },
//   })
//   await Instance.provide({
//     directory: tmp.path,
//     init: async () => {
//       Env.set("GITLAB_TOKEN", "test-token")
//     },
//     fn: async () => {
//       const providers = await Provider.list()
//       expect(providers[ProviderID.gitlab]).toBeDefined()
//       const models = Object.keys(providers[ProviderID.gitlab].models)
//       expect(models.length).toBeGreaterThan(0)
//       expect(models).toContain("duo-chat-haiku-4-5")
//       expect(models).toContain("duo-chat-sonnet-4-5")
//       expect(models).toContain("duo-chat-opus-4-5")
//     },
//   })
// })

describe("GitLab Duo: workflow model routing", () => {
  test("duo-workflow-* model routes through workflowChat", async () => {
    await using tmp = await tmpdir({
      init: async (dir) => {
        await Bun.write(path.join(dir, "opencode.json"), JSON.stringify({ $schema: "https://opencode.ai/config.json" }))
      },
    })
    await Instance.provide({
      directory: tmp.path,
      init: async () => {
        Env.set("GITLAB_TOKEN", "test-token")
      },
      fn: async () => {
        const providers = await Provider.list()
        const gitlab = providers[ProviderID.gitlab]
        expect(gitlab).toBeDefined()
        gitlab.models["duo-workflow-sonnet-4-6"] = {
          id: ModelID.make("duo-workflow-sonnet-4-6"),
          providerID: ProviderID.make("gitlab"),
          name: "Agent Platform (Claude Sonnet 4.6)",
          family: "",
          api: { id: "duo-workflow-sonnet-4-6", url: "https://gitlab.com", npm: "gitlab-ai-provider" },
          status: "active",
          headers: {},
          options: { workflowRef: "claude_sonnet_4_6" },
          cost: { input: 0, output: 0, cache: { read: 0, write: 0 } },
          limit: { context: 200000, output: 64000 },
          capabilities: {
            temperature: false,
            reasoning: true,
            attachment: true,
            toolcall: true,
            input: { text: true, audio: false, image: true, video: false, pdf: true },
            output: { text: true, audio: false, image: false, video: false, pdf: false },
            interleaved: false,
          },
          release_date: "",
          variants: {},
        }
        const model = await Provider.getModel(ProviderID.gitlab, ModelID.make("duo-workflow-sonnet-4-6"))
        expect(model).toBeDefined()
        expect(model.options?.workflowRef).toBe("claude_sonnet_4_6")
        const language = await Provider.getLanguage(model)
        expect(language).toBeDefined()
        expect(language).toBeInstanceOf(GitLabWorkflowLanguageModel)
      },
    })
  })
// describe("GitLab Duo: workflow model routing", () => {
//   test("duo-workflow-* model routes through workflowChat", async () => {
//     await using tmp = await tmpdir({
//       init: async (dir) => {
//         await Bun.write(path.join(dir, "opencode.json"), JSON.stringify({ $schema: "https://opencode.ai/config.json" }))
//       },
//     })
//     await Instance.provide({
//       directory: tmp.path,
//       init: async () => {
//         Env.set("GITLAB_TOKEN", "test-token")
//       },
//       fn: async () => {
//         const providers = await Provider.list()
//         const gitlab = providers[ProviderID.gitlab]
//         expect(gitlab).toBeDefined()
//         gitlab.models["duo-workflow-sonnet-4-6"] = {
//           id: ModelID.make("duo-workflow-sonnet-4-6"),
//           providerID: ProviderID.make("gitlab"),
//           name: "Agent Platform (Claude Sonnet 4.6)",
//           family: "",
//           api: { id: "duo-workflow-sonnet-4-6", url: "https://gitlab.com", npm: "gitlab-ai-provider" },
//           status: "active",
//           headers: {},
//           options: { workflowRef: "claude_sonnet_4_6" },
//           cost: { input: 0, output: 0, cache: { read: 0, write: 0 } },
//           limit: { context: 200000, output: 64000 },
//           capabilities: {
//             temperature: false,
//             reasoning: true,
//             attachment: true,
//             toolcall: true,
//             input: { text: true, audio: false, image: true, video: false, pdf: true },
//             output: { text: true, audio: false, image: false, video: false, pdf: false },
//             interleaved: false,
//           },
//           release_date: "",
//           variants: {},
//         }
//         const model = await Provider.getModel(ProviderID.gitlab, ModelID.make("duo-workflow-sonnet-4-6"))
//         expect(model).toBeDefined()
//         expect(model.options?.workflowRef).toBe("claude_sonnet_4_6")
//         const language = await Provider.getLanguage(model)
//         expect(language).toBeDefined()
//         expect(language).toBeInstanceOf(GitLabWorkflowLanguageModel)
//       },
//     })
//   })

  test("duo-chat-* model routes through agenticChat (not workflow)", async () => {
    await using tmp = await tmpdir({
      init: async (dir) => {
        await Bun.write(path.join(dir, "opencode.json"), JSON.stringify({ $schema: "https://opencode.ai/config.json" }))
      },
    })
    await Instance.provide({
      directory: tmp.path,
      init: async () => {
        Env.set("GITLAB_TOKEN", "test-token")
      },
      fn: async () => {
        const providers = await Provider.list()
        expect(providers[ProviderID.gitlab]).toBeDefined()
        const model = await Provider.getModel(ProviderID.gitlab, ModelID.make("duo-chat-sonnet-4-5"))
        expect(model).toBeDefined()
        const language = await Provider.getLanguage(model)
        expect(language).toBeDefined()
        expect(language).not.toBeInstanceOf(GitLabWorkflowLanguageModel)
      },
    })
  })
//   test("duo-chat-* model routes through agenticChat (not workflow)", async () => {
//     await using tmp = await tmpdir({
//       init: async (dir) => {
//         await Bun.write(path.join(dir, "opencode.json"), JSON.stringify({ $schema: "https://opencode.ai/config.json" }))
//       },
//     })
//     await Instance.provide({
//       directory: tmp.path,
//       init: async () => {
//         Env.set("GITLAB_TOKEN", "test-token")
//       },
//       fn: async () => {
//         const providers = await Provider.list()
//         expect(providers[ProviderID.gitlab]).toBeDefined()
//         const model = await Provider.getModel(ProviderID.gitlab, ModelID.make("duo-chat-sonnet-4-5"))
//         expect(model).toBeDefined()
//         const language = await Provider.getLanguage(model)
//         expect(language).toBeDefined()
//         expect(language).not.toBeInstanceOf(GitLabWorkflowLanguageModel)
//       },
//     })
//   })

  test("model.options merged with provider.options in getLanguage", async () => {
    await using tmp = await tmpdir({
      init: async (dir) => {
        await Bun.write(path.join(dir, "opencode.json"), JSON.stringify({ $schema: "https://opencode.ai/config.json" }))
      },
    })
    await Instance.provide({
      directory: tmp.path,
      init: async () => {
        Env.set("GITLAB_TOKEN", "test-token")
      },
      fn: async () => {
        const providers = await Provider.list()
        const gitlab = providers[ProviderID.gitlab]
        expect(gitlab.options?.featureFlags).toBeDefined()
        const model = await Provider.getModel(ProviderID.gitlab, ModelID.make("duo-chat-sonnet-4-5"))
        expect(model).toBeDefined()
        expect(model.options).toBeDefined()
      },
    })
  })
})
//   test("model.options merged with provider.options in getLanguage", async () => {
//     await using tmp = await tmpdir({
//       init: async (dir) => {
//         await Bun.write(path.join(dir, "opencode.json"), JSON.stringify({ $schema: "https://opencode.ai/config.json" }))
//       },
//     })
//     await Instance.provide({
//       directory: tmp.path,
//       init: async () => {
//         Env.set("GITLAB_TOKEN", "test-token")
//       },
//       fn: async () => {
//         const providers = await Provider.list()
//         const gitlab = providers[ProviderID.gitlab]
//         expect(gitlab.options?.featureFlags).toBeDefined()
//         const model = await Provider.getModel(ProviderID.gitlab, ModelID.make("duo-chat-sonnet-4-5"))
//         expect(model).toBeDefined()
//         expect(model.options).toBeDefined()
//       },
//     })
//   })
// })

describe("GitLab Duo: static models", () => {
  test("static duo-chat models always present regardless of discovery", async () => {
    await using tmp = await tmpdir({
      init: async (dir) => {
        await Bun.write(path.join(dir, "opencode.json"), JSON.stringify({ $schema: "https://opencode.ai/config.json" }))
      },
    })
    await Instance.provide({
      directory: tmp.path,
      init: async () => {
        Env.set("GITLAB_TOKEN", "test-token")
      },
      fn: async () => {
        const providers = await Provider.list()
        const models = Object.keys(providers[ProviderID.gitlab].models)
        expect(models).toContain("duo-chat-haiku-4-5")
        expect(models).toContain("duo-chat-sonnet-4-5")
        expect(models).toContain("duo-chat-opus-4-5")
      },
    })
  })
})
// describe("GitLab Duo: static models", () => {
//   test("static duo-chat models always present regardless of discovery", async () => {
//     await using tmp = await tmpdir({
//       init: async (dir) => {
//         await Bun.write(path.join(dir, "opencode.json"), JSON.stringify({ $schema: "https://opencode.ai/config.json" }))
//       },
//     })
//     await Instance.provide({
//       directory: tmp.path,
//       init: async () => {
//         Env.set("GITLAB_TOKEN", "test-token")
//       },
//       fn: async () => {
//         const providers = await Provider.list()
//         const models = Object.keys(providers[ProviderID.gitlab].models)
//         expect(models).toContain("duo-chat-haiku-4-5")
//         expect(models).toContain("duo-chat-sonnet-4-5")
//         expect(models).toContain("duo-chat-opus-4-5")
//       },
//     })
//   })
// })
@@ -3,7 +3,6 @@ import path from "path"
import { tool, type ModelMessage } from "ai"
import z from "zod"
import { LLM } from "../../src/session/llm"
import { Global } from "../../src/global"
import { Instance } from "../../src/project/instance"
import { Provider } from "../../src/provider/provider"
import { ProviderTransform } from "../../src/provider/transform"

@@ -535,6 +534,130 @@ describe("session.llm.stream", () => {
    })
  })

  test("accepts user image attachments as data URLs for OpenAI models", async () => {
    const server = state.server
    if (!server) {
      throw new Error("Server not initialized")
    }

    const source = await loadFixture("openai", "gpt-5.2")
    const model = source.model
    const chunks = [
      {
        type: "response.created",
        response: {
          id: "resp-data-url",
          created_at: Math.floor(Date.now() / 1000),
          model: model.id,
          service_tier: null,
        },
      },
      {
        type: "response.output_text.delta",
        item_id: "item-data-url",
        delta: "Looks good",
        logprobs: null,
      },
      {
        type: "response.completed",
        response: {
          incomplete_details: null,
          usage: {
            input_tokens: 1,
            input_tokens_details: null,
            output_tokens: 1,
            output_tokens_details: null,
          },
          service_tier: null,
        },
      },
    ]
    const request = waitRequest("/responses", createEventResponse(chunks, true))
    const image = `data:image/png;base64,${Buffer.from(
      await Bun.file(path.join(import.meta.dir, "../tool/fixtures/large-image.png")).arrayBuffer(),
    ).toString("base64")}`

    await using tmp = await tmpdir({
      init: async (dir) => {
        await Bun.write(
          path.join(dir, "opencode.json"),
          JSON.stringify({
            $schema: "https://opencode.ai/config.json",
            enabled_providers: ["openai"],
            provider: {
              openai: {
                name: "OpenAI",
                env: ["OPENAI_API_KEY"],
                npm: "@ai-sdk/openai",
                api: "https://api.openai.com/v1",
                models: {
                  [model.id]: model,
                },
                options: {
                  apiKey: "test-openai-key",
                  baseURL: `${server.url.origin}/v1`,
                },
              },
            },
          }),
        )
      },
    })

    await Instance.provide({
      directory: tmp.path,
      fn: async () => {
        const resolved = await Provider.getModel(ProviderID.openai, ModelID.make(model.id))
        const sessionID = SessionID.make("session-test-data-url")
        const agent = {
          name: "test",
          mode: "primary",
          options: {},
          permission: [{ permission: "*", pattern: "*", action: "allow" }],
        } satisfies Agent.Info

        const user = {
          id: MessageID.make("user-data-url"),
          sessionID,
          role: "user",
          time: { created: Date.now() },
          agent: agent.name,
          model: { providerID: ProviderID.make("openai"), modelID: resolved.id },
        } satisfies MessageV2.User

        const stream = await LLM.stream({
          user,
          sessionID,
          model: resolved,
          agent,
          system: ["You are a helpful assistant."],
          abort: new AbortController().signal,
          messages: [
            {
              role: "user",
              content: [
                { type: "text", text: "Describe this image" },
                {
                  type: "file",
                  mediaType: "image/png",
                  filename: "large-image.png",
                  data: image,
                },
              ],
            },
          ] as ModelMessage[],
          tools: {},
        })

        for await (const _ of stream.fullStream) {
        }

        const capture = await request
        expect(capture.url.pathname.endsWith("/responses")).toBe(true)
      },
    })
  })

  test("sends messages API payload for Anthropic models", async () => {
    const server = state.server
    if (!server) {

@@ -625,7 +748,7 @@ describe("session.llm.stream", () => {
        role: "user",
        time: { created: Date.now() },
        agent: agent.name,
        model: { providerID: ProviderID.make(providerID), modelID: resolved.id },
        model: { providerID: ProviderID.make("minimax"), modelID: ModelID.make("MiniMax-M2.7") },
      } satisfies MessageV2.User

      const stream = await LLM.stream({
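The new test drives an image attachment through as a file part whose data is a data: URL. A standalone sketch of the message shape it builds, using stand-in bytes rather than a real PNG:

import type { ModelMessage } from "ai"

const bytes = Buffer.from([0x89, 0x50, 0x4e, 0x47]) // stand-in bytes, not a real image
const messages: ModelMessage[] = [
  {
    role: "user",
    content: [
      { type: "text", text: "Describe this image" },
      {
        type: "file",
        mediaType: "image/png",
        filename: "example.png",
        data: `data:image/png;base64,${bytes.toString("base64")}`,
      },
    ],
  },
]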
@@ -108,7 +108,7 @@ function basePart(messageID: string, id: string) {
}

describe("session.message-v2.toModelMessage", () => {
  test("filters out messages with no parts", () => {
  test("filters out messages with no parts", async () => {
    const input: MessageV2.WithParts[] = [
      {
        info: userInfo("m-empty"),

@@ -126,7 +126,7 @@ describe("session.message-v2.toModelMessage", () => {
      },
    ]

    expect(MessageV2.toModelMessages(input, model)).toStrictEqual([
    expect(await MessageV2.toModelMessages(input, model)).toStrictEqual([
      {
        role: "user",
        content: [{ type: "text", text: "hello" }],

@@ -134,7 +134,7 @@ describe("session.message-v2.toModelMessage", () => {
    ])
  })

  test("filters out messages with only ignored parts", () => {
  test("filters out messages with only ignored parts", async () => {
    const messageID = "m-user"

    const input: MessageV2.WithParts[] = [

@@ -151,10 +151,10 @@ describe("session.message-v2.toModelMessage", () => {
      },
    ]

    expect(MessageV2.toModelMessages(input, model)).toStrictEqual([])
    expect(await MessageV2.toModelMessages(input, model)).toStrictEqual([])
  })

  test("includes synthetic text parts", () => {
  test("includes synthetic text parts", async () => {
    const messageID = "m-user"

    const input: MessageV2.WithParts[] = [

@@ -182,7 +182,7 @@ describe("session.message-v2.toModelMessage", () => {
      },
    ]

    expect(MessageV2.toModelMessages(input, model)).toStrictEqual([
    expect(await MessageV2.toModelMessages(input, model)).toStrictEqual([
      {
        role: "user",
        content: [{ type: "text", text: "hello" }],

@@ -194,7 +194,7 @@ describe("session.message-v2.toModelMessage", () => {
    ])
  })

  test("converts user text/file parts and injects compaction/subtask prompts", () => {
  test("converts user text/file parts and injects compaction/subtask prompts", async () => {
    const messageID = "m-user"

    const input: MessageV2.WithParts[] = [

@@ -249,7 +249,7 @@ describe("session.message-v2.toModelMessage", () => {
      },
    ]

    expect(MessageV2.toModelMessages(input, model)).toStrictEqual([
    expect(await MessageV2.toModelMessages(input, model)).toStrictEqual([
      {
        role: "user",
        content: [

@@ -267,7 +267,7 @@ describe("session.message-v2.toModelMessage", () => {
    ])
  })

  test("converts assistant tool completion into tool-call + tool-result messages with attachments", () => {
  test("converts assistant tool completion into tool-call + tool-result messages with attachments", async () => {
    const userID = "m-user"
    const assistantID = "m-assistant"

@@ -319,7 +319,7 @@ describe("session.message-v2.toModelMessage", () => {
      },
    ]

    expect(MessageV2.toModelMessages(input, model)).toStrictEqual([
    expect(await MessageV2.toModelMessages(input, model)).toStrictEqual([
      {
        role: "user",
        content: [{ type: "text", text: "run tool" }],

@@ -359,7 +359,7 @@ describe("session.message-v2.toModelMessage", () => {
    ])
  })

  test("omits provider metadata when assistant model differs", () => {
  test("omits provider metadata when assistant model differs", async () => {
    const userID = "m-user"
    const assistantID = "m-assistant"

@@ -402,7 +402,7 @@ describe("session.message-v2.toModelMessage", () => {
      },
    ]

    expect(MessageV2.toModelMessages(input, model)).toStrictEqual([
    expect(await MessageV2.toModelMessages(input, model)).toStrictEqual([
      {
        role: "user",
        content: [{ type: "text", text: "run tool" }],

@@ -434,7 +434,7 @@ describe("session.message-v2.toModelMessage", () => {
    ])
  })

  test("replaces compacted tool output with placeholder", () => {
  test("replaces compacted tool output with placeholder", async () => {
    const userID = "m-user"
    const assistantID = "m-assistant"

@@ -470,7 +470,7 @@ describe("session.message-v2.toModelMessage", () => {
      },
    ]

    expect(MessageV2.toModelMessages(input, model)).toStrictEqual([
    expect(await MessageV2.toModelMessages(input, model)).toStrictEqual([
      {
        role: "user",
        content: [{ type: "text", text: "run tool" }],

@@ -501,7 +501,7 @@ describe("session.message-v2.toModelMessage", () => {
    ])
  })

  test("converts assistant tool error into error-text tool result", () => {
  test("converts assistant tool error into error-text tool result", async () => {
    const userID = "m-user"
    const assistantID = "m-assistant"

@@ -537,7 +537,7 @@ describe("session.message-v2.toModelMessage", () => {
      },
    ]

    expect(MessageV2.toModelMessages(input, model)).toStrictEqual([
    expect(await MessageV2.toModelMessages(input, model)).toStrictEqual([
      {
        role: "user",
        content: [{ type: "text", text: "run tool" }],

@@ -570,7 +570,7 @@ describe("session.message-v2.toModelMessage", () => {
    ])
  })

  test("filters assistant messages with non-abort errors", () => {
  test("filters assistant messages with non-abort errors", async () => {
    const assistantID = "m-assistant"

    const input: MessageV2.WithParts[] = [

@@ -590,10 +590,10 @@ describe("session.message-v2.toModelMessage", () => {
      },
    ]

    expect(MessageV2.toModelMessages(input, model)).toStrictEqual([])
    expect(await MessageV2.toModelMessages(input, model)).toStrictEqual([])
  })

  test("includes aborted assistant messages only when they have non-step-start/reasoning content", () => {
  test("includes aborted assistant messages only when they have non-step-start/reasoning content", async () => {
    const assistantID1 = "m-assistant-1"
    const assistantID2 = "m-assistant-2"

@@ -633,7 +633,7 @@ describe("session.message-v2.toModelMessage", () => {
      },
    ]

    expect(MessageV2.toModelMessages(input, model)).toStrictEqual([
    expect(await MessageV2.toModelMessages(input, model)).toStrictEqual([
      {
        role: "assistant",
        content: [

@@ -644,7 +644,7 @@ describe("session.message-v2.toModelMessage", () => {
    ])
  })

  test("splits assistant messages on step-start boundaries", () => {
  test("splits assistant messages on step-start boundaries", async () => {
    const assistantID = "m-assistant"

    const input: MessageV2.WithParts[] = [

@@ -669,7 +669,7 @@ describe("session.message-v2.toModelMessage", () => {
      },
    ]

    expect(MessageV2.toModelMessages(input, model)).toStrictEqual([
    expect(await MessageV2.toModelMessages(input, model)).toStrictEqual([
      {
        role: "assistant",
        content: [{ type: "text", text: "first" }],

@@ -681,7 +681,7 @@ describe("session.message-v2.toModelMessage", () => {
    ])
  })

  test("drops messages that only contain step-start parts", () => {
  test("drops messages that only contain step-start parts", async () => {
    const assistantID = "m-assistant"

    const input: MessageV2.WithParts[] = [

@@ -696,10 +696,10 @@ describe("session.message-v2.toModelMessage", () => {
      },
    ]

    expect(MessageV2.toModelMessages(input, model)).toStrictEqual([])
    expect(await MessageV2.toModelMessages(input, model)).toStrictEqual([])
  })

  test("converts pending/running tool calls to error results to prevent dangling tool_use", () => {
  test("converts pending/running tool calls to error results to prevent dangling tool_use", async () => {
    const userID = "m-user"
    const assistantID = "m-assistant"

@@ -743,7 +743,7 @@ describe("session.message-v2.toModelMessage", () => {
      },
    ]

    const result = MessageV2.toModelMessages(input, model)
    const result = await MessageV2.toModelMessages(input, model)

    expect(result).toStrictEqual([
      {
@@ -363,20 +363,25 @@ describe("structured-output.createStructuredOutputTool", () => {
     expect(inputSchema.jsonSchema?.properties?.tags?.items?.type).toBe("string")
   })
 
-  test("toModelOutput returns text value", () => {
+  test("toModelOutput returns text value", async () => {
     const tool = SessionPrompt.createStructuredOutputTool({
       schema: { type: "object" },
       onSuccess: () => {},
     })
 
     expect(tool.toModelOutput).toBeDefined()
-    const modelOutput = tool.toModelOutput!({
-      output: "Test output",
-      title: "Test",
-      metadata: { valid: true },
-    })
+    const modelOutput = await Promise.resolve(
+      tool.toModelOutput!({
+        toolCallId: "test-call-id",
+        input: {},
+        output: {
+          output: "Test output",
+        },
+      }),
+    )
 
     expect(modelOutput.type).toBe("text")
+    if (modelOutput.type !== "text") throw new Error("expected text model output")
     expect(modelOutput.value).toBe("Test output")
   })
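Note: the reworked assertion tracks the AI SDK v6 tool-output contract: toModelOutput now receives an options object ({ toolCallId, input, output }) instead of the bare output value, and may return either a value or a promise, hence the Promise.resolve wrapper and the explicit narrowing before reading .value. A sketch of calling a v6-style toModelOutput (ids and values illustrative):

// Sketch: toModelOutput may be sync or async, so normalize with Promise.resolve.
const out = await Promise.resolve(
  tool.toModelOutput!({
    toolCallId: "call-1",
    input: {},
    output: { output: "hello" },
  }),
)
if (out.type === "text") {
  console.log(out.value) // "hello"
}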
@@ -0,0 +1,119 @@
--- a/dist/index.js
+++ b/dist/index.js
@@ -3155,15 +3155,6 @@
       });
     }
     baseArgs.max_tokens = maxTokens + (thinkingBudget != null ? thinkingBudget : 0);
-  } else {
-    if (topP != null && temperature != null) {
-      warnings.push({
-        type: "unsupported",
-        feature: "topP",
-        details: `topP is not supported when temperature is set. topP is ignored.`
-      });
-      baseArgs.top_p = void 0;
-    }
   }
   if (isKnownModel && baseArgs.max_tokens > maxOutputTokensForModel) {
     if (maxOutputTokens != null) {
@@ -5180,4 +5171,4 @@
   createAnthropic,
   forwardAnthropicContainerIdFromLastStep
 });
-//# sourceMappingURL=index.js.map
\ No newline at end of file
+//# sourceMappingURL=index.js.map
--- a/dist/index.mjs
+++ b/dist/index.mjs
@@ -3192,15 +3192,6 @@
       });
     }
     baseArgs.max_tokens = maxTokens + (thinkingBudget != null ? thinkingBudget : 0);
-  } else {
-    if (topP != null && temperature != null) {
-      warnings.push({
-        type: "unsupported",
-        feature: "topP",
-        details: `topP is not supported when temperature is set. topP is ignored.`
-      });
-      baseArgs.top_p = void 0;
-    }
   }
   if (isKnownModel && baseArgs.max_tokens > maxOutputTokensForModel) {
     if (maxOutputTokens != null) {
@@ -5256,4 +5247,4 @@
   createAnthropic,
   forwardAnthropicContainerIdFromLastStep
 };
-//# sourceMappingURL=index.mjs.map
\ No newline at end of file
+//# sourceMappingURL=index.mjs.map
--- a/dist/internal/index.js
+++ b/dist/internal/index.js
@@ -3147,15 +3147,6 @@
       });
     }
     baseArgs.max_tokens = maxTokens + (thinkingBudget != null ? thinkingBudget : 0);
-  } else {
-    if (topP != null && temperature != null) {
-      warnings.push({
-        type: "unsupported",
-        feature: "topP",
-        details: `topP is not supported when temperature is set. topP is ignored.`
-      });
-      baseArgs.top_p = void 0;
-    }
   }
   if (isKnownModel && baseArgs.max_tokens > maxOutputTokensForModel) {
     if (maxOutputTokens != null) {
@@ -5080,4 +5071,4 @@
   anthropicTools,
   prepareTools
 });
-//# sourceMappingURL=index.js.map
\ No newline at end of file
+//# sourceMappingURL=index.js.map
--- a/dist/internal/index.mjs
+++ b/dist/internal/index.mjs
@@ -3176,15 +3176,6 @@
       });
     }
     baseArgs.max_tokens = maxTokens + (thinkingBudget != null ? thinkingBudget : 0);
-  } else {
-    if (topP != null && temperature != null) {
-      warnings.push({
-        type: "unsupported",
-        feature: "topP",
-        details: `topP is not supported when temperature is set. topP is ignored.`
-      });
-      baseArgs.top_p = void 0;
-    }
   }
   if (isKnownModel && baseArgs.max_tokens > maxOutputTokensForModel) {
     if (maxOutputTokens != null) {
@@ -5148,4 +5139,4 @@
   anthropicTools,
   prepareTools
 };
-//# sourceMappingURL=index.mjs.map
\ No newline at end of file
+//# sourceMappingURL=index.mjs.map
--- a/src/anthropic-messages-language-model.ts
+++ b/src/anthropic-messages-language-model.ts
@@ -534,16 +534,6 @@
 
       // adjust max tokens to account for thinking:
       baseArgs.max_tokens = maxTokens + (thinkingBudget ?? 0);
-    } else {
-      // Only check temperature/topP mutual exclusivity when thinking is not enabled
-      if (topP != null && temperature != null) {
-        warnings.push({
-          type: 'unsupported',
-          feature: 'topP',
-          details: `topP is not supported when temperature is set. topP is ignored.`,
-        });
-        baseArgs.top_p = undefined;
-      }
     }
 
     // limit to max output tokens for known models to enable model switching without breaking it:
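Note: the vendored patch above removes the @ai-sdk/anthropic branch that, when extended thinking is not enabled, emits an "unsupported" warning and silently clears top_p whenever temperature is also set. With the patch applied, both sampling parameters are forwarded to the API. A sketch of the resulting behavior (model id and prompt are illustrative):

// Sketch, assuming an anthropic() instance obtained from createAnthropic():
import { generateText } from "ai"
import { createAnthropic } from "@ai-sdk/anthropic"

const anthropic = createAnthropic()
const result = await generateText({
  model: anthropic("claude-sonnet-4-5"), // model id illustrative
  prompt: "hello",
  temperature: 0.7,
  topP: 0.9, // with the patch, no longer cleared when temperature is set
})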
@@ -0,0 +1,61 @@
diff --git a/dist/index.js b/dist/index.js
index 9aa8e83684777e860d905ff7a6895995a7347a4f..820797581ac2a33e731e139da3ebc98b4d93fdcf 100644
--- a/dist/index.js
+++ b/dist/index.js
@@ -395,10 +395,13 @@ function validateDownloadUrl(url) {
       message: `Invalid URL: ${url}`
     });
   }
+  if (parsed.protocol === "data:") {
+    return;
+  }
   if (parsed.protocol !== "http:" && parsed.protocol !== "https:") {
     throw new DownloadError({
       url,
-      message: `URL scheme must be http or https, got ${parsed.protocol}`
+      message: `URL scheme must be http, https, or data, got ${parsed.protocol}`
     });
   }
   const hostname = parsed.hostname;
diff --git a/dist/index.mjs b/dist/index.mjs
index 095fdc188b1d7f227b42591c78ecb71fe2e2cf8b..ca5227d3b6e358aea8ecd85782a0a2b48130a2c9 100644
--- a/dist/index.mjs
+++ b/dist/index.mjs
@@ -299,10 +299,13 @@ function validateDownloadUrl(url) {
       message: `Invalid URL: ${url}`
     });
   }
+  if (parsed.protocol === "data:") {
+    return;
+  }
   if (parsed.protocol !== "http:" && parsed.protocol !== "https:") {
     throw new DownloadError({
       url,
-      message: `URL scheme must be http or https, got ${parsed.protocol}`
+      message: `URL scheme must be http, https, or data, got ${parsed.protocol}`
     });
   }
   const hostname = parsed.hostname;
diff --git a/src/validate-download-url.ts b/src/validate-download-url.ts
index 7c026ad6b400aef551ce3a424c343e1cedc60997..6a2f11398e58f80a8e11995ac1ce5f4d7c110561 100644
--- a/src/validate-download-url.ts
+++ b/src/validate-download-url.ts
@@ -18,11 +18,16 @@ export function validateDownloadUrl(url: string): void {
     });
   }
 
-  // Only allow http and https protocols
+  // data: URLs are inline content and do not make network requests.
+  if (parsed.protocol === 'data:') {
+    return;
+  }
+
+  // Only allow http and https network protocols
   if (parsed.protocol !== 'http:' && parsed.protocol !== 'https:') {
     throw new DownloadError({
       url,
-      message: `URL scheme must be http or https, got ${parsed.protocol}`,
+      message: `URL scheme must be http, https, or data, got ${parsed.protocol}`,
     });
   }
 
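Note: this patch widens validateDownloadUrl in the patched package so that data: URLs pass validation; they embed their content inline and never trigger a network request, so the http/https allowlist does not apply to them. The expected behavior after the patch, sketched:

// Sketch of the patched validator's behavior (URLs illustrative):
validateDownloadUrl("https://example.com/image.png") // ok
validateDownloadUrl("data:image/png;base64,iVBORw0KGgo=") // ok: inline content
validateDownloadUrl("ftp://example.com/file") // still throws DownloadError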
@@ -1,108 +0,0 @@
diff --git a/dist/index.mjs b/dist/index.mjs
--- a/dist/index.mjs
+++ b/dist/index.mjs
@@ -959,7 +959,7 @@
   model: z4.string().nullish(),
   object: z4.literal("response"),
   output: z4.array(outputItemSchema),
-  usage: xaiResponsesUsageSchema,
+  usage: xaiResponsesUsageSchema.nullish(),
   status: z4.string()
 });
 var xaiResponsesChunkSchema = z4.union([
\ No newline at end of file
@@ -1143,6 +1143,18 @@
   z4.object({
     type: z4.literal("response.completed"),
     response: xaiResponsesResponseSchema
+  }),
+  z4.object({
+    type: z4.literal("response.function_call_arguments.delta"),
+    item_id: z4.string(),
+    output_index: z4.number(),
+    delta: z4.string()
+  }),
+  z4.object({
+    type: z4.literal("response.function_call_arguments.done"),
+    item_id: z4.string(),
+    output_index: z4.number(),
+    arguments: z4.string()
   })
 ]);
 
\ No newline at end of file
@@ -1940,6 +1952,9 @@
   if (response2.status) {
     finishReason = mapXaiResponsesFinishReason(response2.status);
   }
+  if (seenToolCalls.size > 0 && finishReason !== "tool-calls") {
+    finishReason = "tool-calls";
+  }
   return;
 }
 if (event.type === "response.output_item.added" || event.type === "response.output_item.done") {
\ No newline at end of file
@@ -2024,7 +2039,7 @@
     }
   }
 } else if (part.type === "function_call") {
-  if (!seenToolCalls.has(part.call_id)) {
+  if (event.type === "response.output_item.done" && !seenToolCalls.has(part.call_id)) {
     seenToolCalls.add(part.call_id);
     controller.enqueue({
       type: "tool-input-start",
\ No newline at end of file
diff --git a/dist/index.js b/dist/index.js
--- a/dist/index.js
+++ b/dist/index.js
@@ -964,7 +964,7 @@
   model: import_v44.z.string().nullish(),
   object: import_v44.z.literal("response"),
   output: import_v44.z.array(outputItemSchema),
-  usage: xaiResponsesUsageSchema,
+  usage: xaiResponsesUsageSchema.nullish(),
   status: import_v44.z.string()
 });
 var xaiResponsesChunkSchema = import_v44.z.union([
\ No newline at end of file
@@ -1148,6 +1148,18 @@
   import_v44.z.object({
     type: import_v44.z.literal("response.completed"),
     response: xaiResponsesResponseSchema
+  }),
+  import_v44.z.object({
+    type: import_v44.z.literal("response.function_call_arguments.delta"),
+    item_id: import_v44.z.string(),
+    output_index: import_v44.z.number(),
+    delta: import_v44.z.string()
+  }),
+  import_v44.z.object({
+    type: import_v44.z.literal("response.function_call_arguments.done"),
+    item_id: import_v44.z.string(),
+    output_index: import_v44.z.number(),
+    arguments: import_v44.z.string()
   })
 ]);
 
\ No newline at end of file
@@ -1935,6 +1947,9 @@
   if (response2.status) {
     finishReason = mapXaiResponsesFinishReason(response2.status);
   }
+  if (seenToolCalls.size > 0 && finishReason !== "tool-calls") {
+    finishReason = "tool-calls";
+  }
   return;
 }
 if (event.type === "response.output_item.added" || event.type === "response.output_item.done") {
\ No newline at end of file
@@ -2019,7 +2034,7 @@
     }
   }
 } else if (part.type === "function_call") {
-  if (!seenToolCalls.has(part.call_id)) {
+  if (event.type === "response.output_item.done" && !seenToolCalls.has(part.call_id)) {
     seenToolCalls.add(part.call_id);
     controller.enqueue({
       type: "tool-input-start",
\ No newline at end of file
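Note: the patch deleted above had worked around several @ai-sdk/xai streaming issues against the Responses API: a usage field that could be absent from the response schema, function_call_arguments chunk types the schema did not know, duplicate tool-input-start events for the same call_id, and a finish reason that ignored observed tool calls. Removing the patch suggests the v6-era @ai-sdk/xai release covers these cases upstream. The finish-reason fix mattered for agent loops along these lines (model id and inputs illustrative):

// Sketch: tool-call loops keyed off finishReason; the deleted patch forced it
// to "tool-calls" whenever tool calls had been observed in the stream.
import { streamText } from "ai"
import { xai } from "@ai-sdk/xai"

const result = streamText({ model: xai("grok-3"), prompt: "...", tools: {} })
if ((await result.finishReason) === "tool-calls") {
  // execute the tools and continue the conversation
}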
@@ -1,128 +0,0 @@
diff --git a/dist/index.js b/dist/index.js
index f33510a50d11a2cb92a90ea70cc0ac84c89f29b9..e887a60352c0c08ab794b1e6821854dfeefd20cc 100644
--- a/dist/index.js
+++ b/dist/index.js
@@ -2110,7 +2110,12 @@ var OpenRouterChatLanguageModel = class {
   if (reasoningStarted && !textStarted) {
     controller.enqueue({
       type: "reasoning-end",
-      id: reasoningId || generateId()
+      id: reasoningId || generateId(),
+      providerMetadata: accumulatedReasoningDetails.length > 0 ? {
+        openrouter: {
+          reasoning_details: accumulatedReasoningDetails
+        }
+      } : undefined
     });
     reasoningStarted = false;
   }
@@ -2307,7 +2312,12 @@ var OpenRouterChatLanguageModel = class {
   if (reasoningStarted) {
     controller.enqueue({
       type: "reasoning-end",
-      id: reasoningId || generateId()
+      id: reasoningId || generateId(),
+      providerMetadata: accumulatedReasoningDetails.length > 0 ? {
+        openrouter: {
+          reasoning_details: accumulatedReasoningDetails
+        }
+      } : undefined
     });
   }
   if (textStarted) {
diff --git a/dist/index.mjs b/dist/index.mjs
index 8a688331b88b4af738ee4ca8062b5f24124d3d81..6310cb8b7c8d0a728d86e1eed09906c6b4c91ae2 100644
--- a/dist/index.mjs
+++ b/dist/index.mjs
@@ -2075,7 +2075,12 @@ var OpenRouterChatLanguageModel = class {
   if (reasoningStarted && !textStarted) {
     controller.enqueue({
       type: "reasoning-end",
-      id: reasoningId || generateId()
+      id: reasoningId || generateId(),
+      providerMetadata: accumulatedReasoningDetails.length > 0 ? {
+        openrouter: {
+          reasoning_details: accumulatedReasoningDetails
+        }
+      } : undefined
     });
     reasoningStarted = false;
   }
@@ -2272,7 +2277,12 @@ var OpenRouterChatLanguageModel = class {
   if (reasoningStarted) {
     controller.enqueue({
       type: "reasoning-end",
-      id: reasoningId || generateId()
+      id: reasoningId || generateId(),
+      providerMetadata: accumulatedReasoningDetails.length > 0 ? {
+        openrouter: {
+          reasoning_details: accumulatedReasoningDetails
+        }
+      } : undefined
     });
   }
   if (textStarted) {
diff --git a/dist/internal/index.js b/dist/internal/index.js
index d40fa66125941155ac13a4619503caba24d89f8a..8dd86d1b473f2fa31c1acd9881d72945b294a197 100644
--- a/dist/internal/index.js
+++ b/dist/internal/index.js
@@ -2064,7 +2064,12 @@ var OpenRouterChatLanguageModel = class {
   if (reasoningStarted && !textStarted) {
     controller.enqueue({
       type: "reasoning-end",
-      id: reasoningId || generateId()
+      id: reasoningId || generateId(),
+      providerMetadata: accumulatedReasoningDetails.length > 0 ? {
+        openrouter: {
+          reasoning_details: accumulatedReasoningDetails
+        }
+      } : undefined
     });
     reasoningStarted = false;
   }
@@ -2261,7 +2266,12 @@ var OpenRouterChatLanguageModel = class {
   if (reasoningStarted) {
     controller.enqueue({
       type: "reasoning-end",
-      id: reasoningId || generateId()
+      id: reasoningId || generateId(),
+      providerMetadata: accumulatedReasoningDetails.length > 0 ? {
+        openrouter: {
+          reasoning_details: accumulatedReasoningDetails
+        }
+      } : undefined
     });
   }
   if (textStarted) {
diff --git a/dist/internal/index.mjs b/dist/internal/index.mjs
index b0ed9d113549c5c55ea3b1e08abb3db6f92ae5a7..5695930a8e038facc071d58a4179a369a29be9c7 100644
--- a/dist/internal/index.mjs
+++ b/dist/internal/index.mjs
@@ -2030,7 +2030,12 @@ var OpenRouterChatLanguageModel = class {
   if (reasoningStarted && !textStarted) {
     controller.enqueue({
       type: "reasoning-end",
-      id: reasoningId || generateId()
+      id: reasoningId || generateId(),
+      providerMetadata: accumulatedReasoningDetails.length > 0 ? {
+        openrouter: {
+          reasoning_details: accumulatedReasoningDetails
+        }
+      } : undefined
     });
     reasoningStarted = false;
   }
@@ -2227,7 +2232,12 @@ var OpenRouterChatLanguageModel = class {
   if (reasoningStarted) {
     controller.enqueue({
       type: "reasoning-end",
-      id: reasoningId || generateId()
+      id: reasoningId || generateId(),
+      providerMetadata: accumulatedReasoningDetails.length > 0 ? {
+        openrouter: {
+          reasoning_details: accumulatedReasoningDetails
+        }
+      } : undefined
     });
   }
   if (textStarted) {
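Note: the deleted patch above had made the OpenRouter provider attach its accumulated reasoning_details as provider metadata on reasoning-end stream parts; removing it alongside the provider bump suggests the newer @openrouter/ai-sdk-provider ships that behavior (or its replacement) upstream. A hedged sketch of reading that metadata from a stream, assuming reasoning-end parts surface providerMetadata to fullStream consumers:

// Sketch: picking up OpenRouter reasoning details from the full stream.
for await (const part of result.fullStream) {
  if (part.type === "reasoning-end") {
    const details = part.providerMetadata?.openrouter?.reasoning_details
    // details, when present, is the accumulated reasoning_details array
  }
}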