diff --git a/bun.lock b/bun.lock
index 8beb9ae6f6..325eeb15f5 100644
--- a/bun.lock
+++ b/bun.lock
@@ -21,12 +21,12 @@
"prettier": "3.6.2",
"semver": "^7.6.0",
"sst": "3.18.10",
- "turbo": "2.5.6",
+ "turbo": "2.8.13",
},
},
"packages/app": {
"name": "@opencode-ai/app",
- "version": "1.2.17",
+ "version": "1.2.18",
"dependencies": {
"@kobalte/core": "catalog:",
"@opencode-ai/sdk": "workspace:*",
@@ -47,7 +47,7 @@
"@thisbeyond/solid-dnd": "0.7.5",
"diff": "catalog:",
"fuzzysort": "catalog:",
- "ghostty-web": "0.4.0",
+ "ghostty-web": "github:anomalyco/ghostty-web#main",
"luxon": "catalog:",
"marked": "catalog:",
"marked-shiki": "catalog:",
@@ -76,7 +76,7 @@
},
"packages/console/app": {
"name": "@opencode-ai/console-app",
- "version": "1.2.17",
+ "version": "1.2.18",
"dependencies": {
"@cloudflare/vite-plugin": "1.15.2",
"@ibm/plex": "6.4.1",
@@ -110,7 +110,7 @@
},
"packages/console/core": {
"name": "@opencode-ai/console-core",
- "version": "1.2.17",
+ "version": "1.2.18",
"dependencies": {
"@aws-sdk/client-sts": "3.782.0",
"@jsx-email/render": "1.1.1",
@@ -137,7 +137,7 @@
},
"packages/console/function": {
"name": "@opencode-ai/console-function",
- "version": "1.2.17",
+ "version": "1.2.18",
"dependencies": {
"@ai-sdk/anthropic": "2.0.0",
"@ai-sdk/openai": "2.0.2",
@@ -161,7 +161,7 @@
},
"packages/console/mail": {
"name": "@opencode-ai/console-mail",
- "version": "1.2.17",
+ "version": "1.2.18",
"dependencies": {
"@jsx-email/all": "2.2.3",
"@jsx-email/cli": "1.4.3",
@@ -185,7 +185,7 @@
},
"packages/desktop": {
"name": "@opencode-ai/desktop",
- "version": "1.2.17",
+ "version": "1.2.18",
"dependencies": {
"@opencode-ai/app": "workspace:*",
"@opencode-ai/ui": "workspace:*",
@@ -218,7 +218,7 @@
},
"packages/desktop-electron": {
"name": "@opencode-ai/desktop-electron",
- "version": "1.2.17",
+ "version": "1.2.18",
"dependencies": {
"@opencode-ai/app": "workspace:*",
"@opencode-ai/ui": "workspace:*",
@@ -248,7 +248,7 @@
},
"packages/enterprise": {
"name": "@opencode-ai/enterprise",
- "version": "1.2.17",
+ "version": "1.2.18",
"dependencies": {
"@opencode-ai/ui": "workspace:*",
"@opencode-ai/util": "workspace:*",
@@ -277,7 +277,7 @@
},
"packages/function": {
"name": "@opencode-ai/function",
- "version": "1.2.17",
+ "version": "1.2.18",
"dependencies": {
"@octokit/auth-app": "8.0.1",
"@octokit/rest": "catalog:",
@@ -293,7 +293,7 @@
},
"packages/opencode": {
"name": "opencode",
- "version": "1.2.17",
+ "version": "1.2.18",
"bin": {
"opencode": "./bin/opencode",
},
@@ -407,7 +407,7 @@
},
"packages/plugin": {
"name": "@opencode-ai/plugin",
- "version": "1.2.17",
+ "version": "1.2.18",
"dependencies": {
"@opencode-ai/sdk": "workspace:*",
"zod": "catalog:",
@@ -427,7 +427,7 @@
},
"packages/sdk/js": {
"name": "@opencode-ai/sdk",
- "version": "1.2.17",
+ "version": "1.2.18",
"devDependencies": {
"@hey-api/openapi-ts": "0.90.10",
"@tsconfig/node22": "catalog:",
@@ -438,7 +438,7 @@
},
"packages/slack": {
"name": "@opencode-ai/slack",
- "version": "1.2.17",
+ "version": "1.2.18",
"dependencies": {
"@opencode-ai/sdk": "workspace:*",
"@slack/bolt": "^3.17.1",
@@ -473,7 +473,7 @@
},
"packages/ui": {
"name": "@opencode-ai/ui",
- "version": "1.2.17",
+ "version": "1.2.18",
"dependencies": {
"@kobalte/core": "catalog:",
"@opencode-ai/sdk": "workspace:*",
@@ -484,6 +484,7 @@
"@solid-primitives/media": "2.3.3",
"@solid-primitives/resize-observer": "2.1.3",
"@solidjs/meta": "catalog:",
+ "@solidjs/router": "catalog:",
"dompurify": "3.3.1",
"fuzzysort": "catalog:",
"katex": "0.16.27",
@@ -518,7 +519,7 @@
},
"packages/util": {
"name": "@opencode-ai/util",
- "version": "1.2.17",
+ "version": "1.2.18",
"dependencies": {
"zod": "catalog:",
},
@@ -529,7 +530,7 @@
},
"packages/web": {
"name": "@opencode-ai/web",
- "version": "1.2.17",
+ "version": "1.2.18",
"dependencies": {
"@astrojs/cloudflare": "12.6.3",
"@astrojs/markdown-remark": "6.3.1",
@@ -2957,7 +2958,7 @@
"get-tsconfig": ["get-tsconfig@4.13.6", "", { "dependencies": { "resolve-pkg-maps": "^1.0.0" } }, "sha512-shZT/QMiSHc/YBLxxOkMtgSid5HFoauqCE3/exfsEcwg1WkeqjG+V40yBbBrsD+jW2HDXcs28xOfcbm2jI8Ddw=="],
- "ghostty-web": ["ghostty-web@0.4.0", "", {}, "sha512-0puDBik2qapbD/QQBW9o5ZHfXnZBqZWx/ctBiVtKZ6ZLds4NYb+wZuw1cRLXZk9zYovIQ908z3rvFhexAvc5Hg=="],
+ "ghostty-web": ["ghostty-web@github:anomalyco/ghostty-web#4af877d", {}, "anomalyco-ghostty-web-4af877d", "sha512-fbEK8mtr7ar4ySsF+JUGjhaZrane7dKphanN+SxHt5XXI6yLMAh/Hpf6sNCOyyVa2UlGCd7YpXG/T2v2RUAX+A=="],
"gifwrap": ["gifwrap@0.10.1", "", { "dependencies": { "image-q": "^4.0.0", "omggif": "^1.0.10" } }, "sha512-2760b1vpJHNmLzZ/ubTtNnEx5WApN/PYWJvXvgS+tL1egTTthayFYIQQNi136FLEDcN/IyEY2EcGpIITD6eYUw=="],
@@ -4385,19 +4386,19 @@
"tunnel": ["tunnel@0.0.6", "", {}, "sha512-1h/Lnq9yajKY2PEbBadPXj3VxsDDu844OnaAo52UVmIzIvwwtBPIuNvkjuzBlTWpfJyUbG3ez0KSBibQkj4ojg=="],
- "turbo": ["turbo@2.5.6", "", { "optionalDependencies": { "turbo-darwin-64": "2.5.6", "turbo-darwin-arm64": "2.5.6", "turbo-linux-64": "2.5.6", "turbo-linux-arm64": "2.5.6", "turbo-windows-64": "2.5.6", "turbo-windows-arm64": "2.5.6" }, "bin": { "turbo": "bin/turbo" } }, "sha512-gxToHmi9oTBNB05UjUsrWf0OyN5ZXtD0apOarC1KIx232Vp3WimRNy3810QzeNSgyD5rsaIDXlxlbnOzlouo+w=="],
+ "turbo": ["turbo@2.8.13", "", { "optionalDependencies": { "turbo-darwin-64": "2.8.13", "turbo-darwin-arm64": "2.8.13", "turbo-linux-64": "2.8.13", "turbo-linux-arm64": "2.8.13", "turbo-windows-64": "2.8.13", "turbo-windows-arm64": "2.8.13" }, "bin": { "turbo": "bin/turbo" } }, "sha512-nyM99hwFB9/DHaFyKEqatdayGjsMNYsQ/XBNO6MITc7roncZetKb97MpHxWf3uiU+LB9c9HUlU3Jp2Ixei2k1A=="],
- "turbo-darwin-64": ["turbo-darwin-64@2.5.6", "", { "os": "darwin", "cpu": "x64" }, "sha512-3C1xEdo4aFwMJAPvtlPqz1Sw/+cddWIOmsalHFMrsqqydcptwBfu26WW2cDm3u93bUzMbBJ8k3zNKFqxJ9ei2A=="],
+ "turbo-darwin-64": ["turbo-darwin-64@2.8.13", "", { "os": "darwin", "cpu": "x64" }, "sha512-PmOvodQNiOj77+Zwoqku70vwVjKzL34RTNxxoARjp5RU5FOj/CGiC6vcDQhNtFPUOWSAaogHF5qIka9TBhX4XA=="],
- "turbo-darwin-arm64": ["turbo-darwin-arm64@2.5.6", "", { "os": "darwin", "cpu": "arm64" }, "sha512-LyiG+rD7JhMfYwLqB6k3LZQtYn8CQQUePbpA8mF/hMLPAekXdJo1g0bUPw8RZLwQXUIU/3BU7tXENvhSGz5DPA=="],
+ "turbo-darwin-arm64": ["turbo-darwin-arm64@2.8.13", "", { "os": "darwin", "cpu": "arm64" }, "sha512-kI+anKcLIM4L8h+NsM7mtAUpElkCOxv5LgiQVQR8BASyDFfc8Efj5kCk3cqxuxOvIqx0sLfCX7atrHQ2kwuNJQ=="],
- "turbo-linux-64": ["turbo-linux-64@2.5.6", "", { "os": "linux", "cpu": "x64" }, "sha512-GOcUTT0xiT/pSnHL4YD6Yr3HreUhU8pUcGqcI2ksIF9b2/r/kRHwGFcsHgpG3+vtZF/kwsP0MV8FTlTObxsYIA=="],
+ "turbo-linux-64": ["turbo-linux-64@2.8.13", "", { "os": "linux", "cpu": "x64" }, "sha512-j29KnQhHyzdzgCykBFeBqUPS4Wj7lWMnZ8CHqytlYDap4Jy70l4RNG46pOL9+lGu6DepK2s1rE86zQfo0IOdPw=="],
- "turbo-linux-arm64": ["turbo-linux-arm64@2.5.6", "", { "os": "linux", "cpu": "arm64" }, "sha512-10Tm15bruJEA3m0V7iZcnQBpObGBcOgUcO+sY7/2vk1bweW34LMhkWi8svjV9iDF68+KJDThnYDlYE/bc7/zzQ=="],
+ "turbo-linux-arm64": ["turbo-linux-arm64@2.8.13", "", { "os": "linux", "cpu": "arm64" }, "sha512-OEl1YocXGZDRDh28doOUn49QwNe82kXljO1HXApjU0LapkDiGpfl3jkAlPKxEkGDSYWc8MH5Ll8S16Rf5tEBYg=="],
- "turbo-windows-64": ["turbo-windows-64@2.5.6", "", { "os": "win32", "cpu": "x64" }, "sha512-FyRsVpgaj76It0ludwZsNN40ytHN+17E4PFJyeliBEbxrGTc5BexlXVpufB7XlAaoaZVxbS6KT8RofLfDRyEPg=="],
+ "turbo-windows-64": ["turbo-windows-64@2.8.13", "", { "os": "win32", "cpu": "x64" }, "sha512-717bVk1+Pn2Jody7OmWludhEirEe0okoj1NpRbSm5kVZz/yNN/jfjbxWC6ilimXMz7xoMT3IDfQFJsFR3PMANA=="],
- "turbo-windows-arm64": ["turbo-windows-arm64@2.5.6", "", { "os": "win32", "cpu": "arm64" }, "sha512-j/tWu8cMeQ7HPpKri6jvKtyXg9K1gRyhdK4tKrrchH8GNHscPX/F71zax58yYtLRWTiK04zNzPcUJuoS0+v/+Q=="],
+ "turbo-windows-arm64": ["turbo-windows-arm64@2.8.13", "", { "os": "win32", "cpu": "arm64" }, "sha512-R819HShLIT0Wj6zWVnIsYvSNtRNj1q9VIyaUz0P24SMcLCbQZIm1sV09F4SDbg+KCCumqD2lcaR2UViQ8SnUJA=="],
"turndown": ["turndown@7.2.0", "", { "dependencies": { "@mixmark-io/domino": "^2.2.0" } }, "sha512-eCZGBN4nNNqM9Owkv9HAtWRYfLA4h909E/WGAWWBpmB275ehNhZyk87/Tpvjbp0jjNl9XwCsbe6bm6CqFsgD+A=="],
diff --git a/nix/hashes.json b/nix/hashes.json
index 0ef20b235d..47e3e240bb 100644
--- a/nix/hashes.json
+++ b/nix/hashes.json
@@ -1,8 +1,8 @@
{
"nodeModules": {
- "x86_64-linux": "sha256-jtBYpfiE9g0otqZEtOksW1Nbg+O8CJP9OEOEhsa7sa8=",
- "aarch64-linux": "sha256-m+YNZIB7I7EMPyfqkKsvDvmBX9R1szmEKxXpxTNFLH8=",
- "aarch64-darwin": "sha256-1gVmtkC1/I8sdHZcaeSFJheySVlpCyKCjf9zbVsVqAQ=",
- "x86_64-darwin": "sha256-Tvk5YL6Z0xRul4jopbGme/997iHBylXC0Cq3RnjQb+I="
+ "x86_64-linux": "sha256-v83hWzYVg/g4zJiBpGsQ71wTdndPk3BQVZ2mjMApUIQ=",
+ "aarch64-linux": "sha256-inpMwkQqwBFP2wL8w/pTOP7q3fg1aOqvE0wgzVd3/B8=",
+ "aarch64-darwin": "sha256-r42LGrQWqDyIy62mBSU5Nf3M22dJ3NNo7mjN/1h8d8Y=",
+ "x86_64-darwin": "sha256-J6XrrdK5qBK3sQBQOO/B3ZluOnsAf5f65l4q/K1nDTI="
}
}
diff --git a/package.json b/package.json
index ba84d10522..36cf31d346 100644
--- a/package.json
+++ b/package.json
@@ -77,7 +77,7 @@
"prettier": "3.6.2",
"semver": "^7.6.0",
"sst": "3.18.10",
- "turbo": "2.5.6"
+ "turbo": "2.8.13"
},
"dependencies": {
"@aws-sdk/client-s3": "3.933.0",
diff --git a/packages/app/e2e/files/file-viewer.spec.ts b/packages/app/e2e/files/file-viewer.spec.ts
index bee67c7d12..49fe1baa13 100644
--- a/packages/app/e2e/files/file-viewer.spec.ts
+++ b/packages/app/e2e/files/file-viewer.spec.ts
@@ -101,3 +101,56 @@ test("cmd+f opens text viewer search while prompt is focused", async ({ page, go
await expect(findInput).toBeVisible()
await expect(findInput).toBeFocused()
})
+
+test("cmd+f opens text viewer search while prompt is not focused", async ({ page, gotoSession }) => {
+ await gotoSession()
+
+ await page.locator(promptSelector).click()
+ await page.keyboard.type("/open")
+
+ const command = page.locator('[data-slash-id="file.open"]').first()
+ await expect(command).toBeVisible()
+ await page.keyboard.press("Enter")
+
+ const dialog = page
+ .getByRole("dialog")
+ .filter({ has: page.getByPlaceholder(/search files/i) })
+ .first()
+ await expect(dialog).toBeVisible()
+
+ const input = dialog.getByRole("textbox").first()
+ await input.fill("package.json")
+
+ const items = dialog.locator('[data-slot="list-item"][data-key^="file:"]')
+ let index = -1
+ await expect
+ .poll(
+ async () => {
+ const keys = await items.evaluateAll((nodes) => nodes.map((node) => node.getAttribute("data-key") ?? ""))
+ index = keys.findIndex((key) => /packages[\\/]+app[\\/]+package\.json$/i.test(key.replace(/^file:/, "")))
+ return index >= 0
+ },
+ { timeout: 30_000 },
+ )
+ .toBe(true)
+
+ const item = items.nth(index)
+ await expect(item).toBeVisible()
+ await item.click()
+
+ await expect(dialog).toHaveCount(0)
+
+ const tab = page.getByRole("tab", { name: "package.json" })
+ await expect(tab).toBeVisible()
+ await tab.click()
+
+ const viewer = page.locator('[data-component="file"][data-mode="text"]').first()
+ await expect(viewer).toBeVisible()
+
+ await viewer.click()
+ await page.keyboard.press(`${modKey}+f`)
+
+ const findInput = page.getByPlaceholder("Find")
+ await expect(findInput).toBeVisible()
+ await expect(findInput).toBeFocused()
+})
diff --git a/packages/app/package.json b/packages/app/package.json
index ed497a761f..37ccd9b53a 100644
--- a/packages/app/package.json
+++ b/packages/app/package.json
@@ -1,6 +1,6 @@
{
"name": "@opencode-ai/app",
- "version": "1.2.17",
+ "version": "1.2.18",
"description": "",
"type": "module",
"exports": {
@@ -57,7 +57,7 @@
"@thisbeyond/solid-dnd": "0.7.5",
"diff": "catalog:",
"fuzzysort": "catalog:",
- "ghostty-web": "0.4.0",
+ "ghostty-web": "github:anomalyco/ghostty-web#main",
"luxon": "catalog:",
"marked": "catalog:",
"marked-shiki": "catalog:",
diff --git a/packages/app/src/components/prompt-input/submit.ts b/packages/app/src/components/prompt-input/submit.ts
index a8c2609f43..db1b5a5ca1 100644
--- a/packages/app/src/components/prompt-input/submit.ts
+++ b/packages/app/src/components/prompt-input/submit.ts
@@ -16,6 +16,7 @@ import { Identifier } from "@/utils/id"
import { Worktree as WorktreeState } from "@/utils/worktree"
import { buildRequestParts } from "./build-request-parts"
import { setCursorPosition } from "./editor-dom"
+import { formatServerError } from "@/utils/server-errors"
type PendingPrompt = {
abort: AbortController
@@ -286,7 +287,7 @@ export function createPromptSubmit(input: PromptSubmitInput) {
.catch((err) => {
showToast({
title: language.t("prompt.toast.commandSendFailed.title"),
- description: errorMessage(err),
+ description: formatServerError(err, language.t, language.t("common.requestFailed")),
})
restoreInput()
})
diff --git a/packages/app/src/components/session-context-usage.tsx b/packages/app/src/components/session-context-usage.tsx
index 47030aa177..08ae4d3194 100644
--- a/packages/app/src/components/session-context-usage.tsx
+++ b/packages/app/src/components/session-context-usage.tsx
@@ -39,7 +39,7 @@ export function SessionContextUsage(props: SessionContextUsageProps) {
const usd = createMemo(
() =>
- new Intl.NumberFormat(language.locale(), {
+ new Intl.NumberFormat(language.intl(), {
style: "currency",
currency: "USD",
}),
@@ -77,7 +77,7 @@ export function SessionContextUsage(props: SessionContextUsageProps) {
{(ctx) => (
<>
- {ctx().total.toLocaleString(language.locale())}
+ {ctx().total.toLocaleString(language.intl())}
{language.t("context.usage.tokens")}
diff --git a/packages/app/src/components/session/session-context-tab.tsx b/packages/app/src/components/session/session-context-tab.tsx
index 582aa33911..39eb4b4c0e 100644
--- a/packages/app/src/components/session/session-context-tab.tsx
+++ b/packages/app/src/components/session/session-context-tab.tsx
@@ -128,7 +128,7 @@ export function SessionContextTab() {
const usd = createMemo(
() =>
- new Intl.NumberFormat(language.locale(), {
+ new Intl.NumberFormat(language.intl(), {
style: "currency",
currency: "USD",
}),
@@ -136,7 +136,7 @@ export function SessionContextTab() {
const metrics = createMemo(() => getSessionContextMetrics(messages(), sync.data.provider.all))
const ctx = createMemo(() => metrics().context)
- const formatter = createMemo(() => createSessionContextFormatter(language.locale()))
+ const formatter = createMemo(() => createSessionContextFormatter(language.intl()))
const cost = createMemo(() => {
return usd().format(metrics().totalCost)
@@ -200,7 +200,7 @@ export function SessionContextTab() {
const stats = [
{ label: "context.stats.session", value: () => info()?.title ?? params.id ?? "—" },
- { label: "context.stats.messages", value: () => counts().all.toLocaleString(language.locale()) },
+ { label: "context.stats.messages", value: () => counts().all.toLocaleString(language.intl()) },
{ label: "context.stats.provider", value: providerLabel },
{ label: "context.stats.model", value: modelLabel },
{ label: "context.stats.limit", value: () => formatter().number(ctx()?.limit) },
@@ -213,8 +213,8 @@ export function SessionContextTab() {
label: "context.stats.cacheTokens",
value: () => `${formatter().number(ctx()?.cacheRead)} / ${formatter().number(ctx()?.cacheWrite)}`,
},
- { label: "context.stats.userMessages", value: () => counts().user.toLocaleString(language.locale()) },
- { label: "context.stats.assistantMessages", value: () => counts().assistant.toLocaleString(language.locale()) },
+ { label: "context.stats.userMessages", value: () => counts().user.toLocaleString(language.intl()) },
+ { label: "context.stats.assistantMessages", value: () => counts().assistant.toLocaleString(language.intl()) },
{ label: "context.stats.totalCost", value: cost },
{ label: "context.stats.sessionCreated", value: () => formatter().time(info()?.time.created) },
{ label: "context.stats.lastActivity", value: () => formatter().time(ctx()?.message.time.created) },
@@ -307,7 +307,7 @@ export function SessionContextTab() {
{breakdownLabel(segment.key)}
-
{segment.percent.toLocaleString(language.locale())}%
+
{segment.percent.toLocaleString(language.intl())}%
)}
diff --git a/packages/app/src/components/session/session-new-view.tsx b/packages/app/src/components/session/session-new-view.tsx
index b7a544ba9a..f2ecd51501 100644
--- a/packages/app/src/components/session/session-new-view.tsx
+++ b/packages/app/src/components/session/session-new-view.tsx
@@ -51,26 +51,26 @@ export function NewSessionView(props: NewSessionViewProps) {
return (
{language.t("command.session.new")}
-
-
-
+
+
+
{getDirectory(projectRoot())}
{getFilename(projectRoot())}
-
-
-
{label(current())}
+
{(project) => (
-
-
-
+
+
+
{language.t("session.new.lastModified")}
{DateTime.fromMillis(project().time.updated ?? project().time.created)
- .setLocale(language.locale())
+ .setLocale(language.intl())
.toRelative()}
diff --git a/packages/app/src/components/settings-providers.tsx b/packages/app/src/components/settings-providers.tsx
index 21db62950d..a9839758b7 100644
--- a/packages/app/src/components/settings-providers.tsx
+++ b/packages/app/src/components/settings-providers.tsx
@@ -17,6 +17,7 @@ type ProviderItem = ReturnType
["connected"]>[num
const PROVIDER_NOTES = [
{ match: (id: string) => id === "opencode", key: "dialog.provider.opencode.note" },
+ { match: (id: string) => id === "opencode-go", key: "dialog.provider.opencodeGo.tagline" },
{ match: (id: string) => id === "anthropic", key: "dialog.provider.anthropic.note" },
{ match: (id: string) => id.startsWith("github-copilot"), key: "dialog.provider.copilot.note" },
{ match: (id: string) => id === "openai", key: "dialog.provider.openai.note" },
@@ -181,21 +182,11 @@ export const SettingsProviders: Component = () => {
{item.name}
-
-
- {language.t("dialog.provider.opencode.tagline")}
-
-
{language.t("dialog.provider.tag.recommended")}
- <>
-
- {language.t("dialog.provider.opencodeGo.tagline")}
-
- {language.t("dialog.provider.tag.recommended")}
- >
+ {language.t("dialog.provider.tag.recommended")}
diff --git a/packages/app/src/context/global-sync.tsx b/packages/app/src/context/global-sync.tsx
index 5749291157..b3a351382f 100644
--- a/packages/app/src/context/global-sync.tsx
+++ b/packages/app/src/context/global-sync.tsx
@@ -228,10 +228,7 @@ function createGlobalSync() {
showToast({
variant: "error",
title: language.t("toast.session.listFailed.title", { project }),
- description: formatServerError(err, {
- unknown: language.t("error.chain.unknown"),
- invalidConfiguration: language.t("error.server.invalidConfiguration"),
- }),
+ description: formatServerError(err, language.t),
})
})
@@ -261,8 +258,7 @@ function createGlobalSync() {
setStore: child[1],
vcsCache: cache,
loadSessions,
- unknownError: language.t("error.chain.unknown"),
- invalidConfigurationError: language.t("error.server.invalidConfiguration"),
+ translate: language.t,
})
})()
@@ -331,8 +327,7 @@ function createGlobalSync() {
url: globalSDK.url,
}),
requestFailedTitle: language.t("common.requestFailed"),
- unknownError: language.t("error.chain.unknown"),
- invalidConfigurationError: language.t("error.server.invalidConfiguration"),
+ translate: language.t,
formatMoreCount: (count) => language.t("common.moreCountSuffix", { count }),
setGlobalStore: setBootStore,
})
diff --git a/packages/app/src/context/global-sync/bootstrap.ts b/packages/app/src/context/global-sync/bootstrap.ts
index bc84eb1694..8b1a3c48c5 100644
--- a/packages/app/src/context/global-sync/bootstrap.ts
+++ b/packages/app/src/context/global-sync/bootstrap.ts
@@ -36,8 +36,7 @@ export async function bootstrapGlobal(input: {
connectErrorTitle: string
connectErrorDescription: string
requestFailedTitle: string
- unknownError: string
- invalidConfigurationError: string
+ translate: (key: string, vars?: Record) => string
formatMoreCount: (count: number) => string
setGlobalStore: SetStoreFunction
}) {
@@ -91,10 +90,7 @@ export async function bootstrapGlobal(input: {
const results = await Promise.allSettled(tasks)
const errors = results.filter((r): r is PromiseRejectedResult => r.status === "rejected").map((r) => r.reason)
if (errors.length) {
- const message = formatServerError(errors[0], {
- unknown: input.unknownError,
- invalidConfiguration: input.invalidConfigurationError,
- })
+ const message = formatServerError(errors[0], input.translate)
const more = errors.length > 1 ? input.formatMoreCount(errors.length - 1) : ""
showToast({
variant: "error",
@@ -122,8 +118,7 @@ export async function bootstrapDirectory(input: {
setStore: SetStoreFunction
vcsCache: VcsCache
loadSessions: (directory: string) => Promise | void
- unknownError: string
- invalidConfigurationError: string
+ translate: (key: string, vars?: Record) => string
}) {
if (input.store.status !== "complete") input.setStore("status", "loading")
@@ -145,10 +140,7 @@ export async function bootstrapDirectory(input: {
showToast({
variant: "error",
title: `Failed to reload ${project}`,
- description: formatServerError(err, {
- unknown: input.unknownError,
- invalidConfiguration: input.invalidConfigurationError,
- }),
+ description: formatServerError(err, input.translate),
})
input.setStore("status", "partial")
return
diff --git a/packages/app/src/context/language.tsx b/packages/app/src/context/language.tsx
index 50cc302f43..be1a1769bf 100644
--- a/packages/app/src/context/language.tsx
+++ b/packages/app/src/context/language.tsx
@@ -84,6 +84,26 @@ const LOCALES: readonly Locale[] = [
"tr",
]
+const INTL: Record = {
+ en: "en",
+ zh: "zh-Hans",
+ zht: "zh-Hant",
+ ko: "ko",
+ de: "de",
+ es: "es",
+ fr: "fr",
+ da: "da",
+ ja: "ja",
+ pl: "pl",
+ ru: "ru",
+ ar: "ar",
+ no: "nb-NO",
+ br: "pt-BR",
+ th: "th",
+ bs: "bs",
+ tr: "tr",
+}
+
const LABEL_KEY: Record = {
en: "language.en",
zh: "language.zh",
@@ -197,6 +217,7 @@ export const { use: useLanguage, provider: LanguageProvider } = createSimpleCont
)
const locale = createMemo(() => normalizeLocale(store.locale))
+ const intl = createMemo(() => INTL[locale()])
const dict = createMemo(() => DICT[locale()])
@@ -213,6 +234,7 @@ export const { use: useLanguage, provider: LanguageProvider } = createSimpleCont
return {
ready,
locale,
+ intl,
locales: LOCALES,
label,
t,
diff --git a/packages/app/src/context/local.tsx b/packages/app/src/context/local.tsx
index ac5da60e86..75d1334a5a 100644
--- a/packages/app/src/context/local.tsx
+++ b/packages/app/src/context/local.tsx
@@ -35,6 +35,8 @@ export const { use: useLocal, provider: LocalProvider } = createSimpleContext({
const agent = (() => {
const list = createMemo(() => sync.data.agent.filter((x) => x.mode !== "subagent" && !x.hidden))
+ const models = useModels()
+
const [store, setStore] = createStore<{
current?: string
}>({
@@ -53,11 +55,17 @@ export const { use: useLocal, provider: LocalProvider } = createSimpleContext({
setStore("current", undefined)
return
}
- if (name && available.some((x) => x.name === name)) {
- setStore("current", name)
- return
- }
- setStore("current", available[0].name)
+ const match = name ? available.find((x) => x.name === name) : undefined
+ const value = match ?? available[0]
+ if (!value) return
+ setStore("current", value.name)
+ if (!value.model) return
+ setModel({
+ providerID: value.model.providerID,
+ modelID: value.model.modelID,
+ })
+ if (value.variant)
+ models.variant.set({ providerID: value.model.providerID, modelID: value.model.modelID }, value.variant)
},
move(direction: 1 | -1) {
const available = list()
@@ -71,11 +79,13 @@ export const { use: useLocal, provider: LocalProvider } = createSimpleContext({
const value = available[next]
if (!value) return
setStore("current", value.name)
- if (value.model)
- setModel({
- providerID: value.model.providerID,
- modelID: value.model.modelID,
- })
+ if (!value.model) return
+ setModel({
+ providerID: value.model.providerID,
+ modelID: value.model.modelID,
+ })
+ if (value.variant)
+ models.variant.set({ providerID: value.model.providerID, modelID: value.model.modelID }, value.variant)
},
}
})()
diff --git a/packages/app/src/pages/layout.tsx b/packages/app/src/pages/layout.tsx
index 2fd2f2fe3d..bd0315efbf 100644
--- a/packages/app/src/pages/layout.tsx
+++ b/packages/app/src/pages/layout.tsx
@@ -22,7 +22,7 @@ import { ResizeHandle } from "@opencode-ai/ui/resize-handle"
import { Button } from "@opencode-ai/ui/button"
import { Icon } from "@opencode-ai/ui/icon"
import { IconButton } from "@opencode-ai/ui/icon-button"
-import { Tooltip, TooltipKeybind } from "@opencode-ai/ui/tooltip"
+import { Tooltip } from "@opencode-ai/ui/tooltip"
import { DropdownMenu } from "@opencode-ai/ui/dropdown-menu"
import { Dialog } from "@opencode-ai/ui/dialog"
import { getFilename } from "@opencode-ai/util/path"
@@ -44,6 +44,7 @@ import { playSound, soundSrc } from "@/utils/sound"
import { createAim } from "@/utils/aim"
import { setNavigate } from "@/utils/notification-click"
import { Worktree as WorktreeState } from "@/utils/worktree"
+import { setSessionHandoff } from "@/pages/session/handoff"
import { useDialog } from "@opencode-ai/ui/context/dialog"
import { useTheme, type ColorScheme } from "@opencode-ai/ui/theme"
@@ -67,7 +68,12 @@ import {
sortedRootSessions,
workspaceKey,
} from "./layout/helpers"
-import { collectOpenProjectDeepLinks, deepLinkEvent, drainPendingDeepLinks } from "./layout/deep-links"
+import {
+ collectNewSessionDeepLinks,
+ collectOpenProjectDeepLinks,
+ deepLinkEvent,
+ drainPendingDeepLinks,
+} from "./layout/deep-links"
import { createInlineEditorController } from "./layout/inline-editor"
import {
LocalWorkspace,
@@ -1177,9 +1183,20 @@ export default function Layout(props: ParentProps) {
const handleDeepLinks = (urls: string[]) => {
if (!server.isLocal()) return
+
for (const directory of collectOpenProjectDeepLinks(urls)) {
openProject(directory)
}
+
+ for (const link of collectNewSessionDeepLinks(urls)) {
+ openProject(link.directory, false)
+ const slug = base64Encode(link.directory)
+ if (link.prompt) {
+ setSessionHandoff(slug, { prompt: link.prompt })
+ }
+ const href = link.prompt ? `/${slug}/session?prompt=${encodeURIComponent(link.prompt)}` : `/${slug}/session`
+ navigateWithSidebarReset(href)
+ }
}
onMount(() => {
@@ -1829,7 +1846,7 @@ export default function Layout(props: ParentProps) {
}}
style={{ width: panelProps.mobile ? undefined : `${Math.max(layout.sidebar.width() - 64, 0)}px` }}
>
-
+
{(p) => (
<>
@@ -1838,7 +1855,7 @@ export default function Layout(props: ParentProps) {
renameProject(p(), next)}
+ onSave={(next) => renameProject(p, next)}
class="text-14-medium text-text-strong truncate"
displayClass="text-14-medium text-text-strong truncate"
stopPropagation
@@ -1847,7 +1864,7 @@ export default function Layout(props: ParentProps) {
- {p().worktree.replace(homedir(), "~")}
+ {p.worktree.replace(homedir(), "~")}
@@ -1866,7 +1883,7 @@ export default function Layout(props: ParentProps) {
icon="dot-grid"
variant="ghost"
data-action="project-menu"
- data-project={base64Encode(p().worktree)}
+ data-project={base64Encode(p.worktree)}
class="shrink-0 size-6 rounded-md data-[expanded]:bg-surface-base-active"
classList={{
"opacity-0 group-hover/project:opacity-100 data-[expanded]:opacity-100": !panelProps.mobile,
@@ -1875,24 +1892,24 @@ export default function Layout(props: ParentProps) {
/>
- showEditProjectDialog(p())}>
+ showEditProjectDialog(p)}>
{language.t("common.edit")}
toggleProjectWorkspaces(p())}
+ data-project={base64Encode(p.worktree)}
+ disabled={p.vcs !== "git" && !layout.sidebar.workspaces(p.worktree)()}
+ onSelect={() => toggleProjectWorkspaces(p)}
>
- {layout.sidebar.workspaces(p().worktree)()
+ {layout.sidebar.workspaces(p.worktree)()
? language.t("sidebar.workspaces.disable")
: language.t("sidebar.workspaces.enable")}
@@ -1903,8 +1920,8 @@ export default function Layout(props: ParentProps) {
closeProject(p().worktree)}
+ data-project={base64Encode(p.worktree)}
+ onSelect={() => closeProject(p.worktree)}
>
{language.t("common.close")}
@@ -1920,25 +1937,19 @@ export default function Layout(props: ParentProps) {
fallback={
<>
- navigateWithSidebarReset(`/${base64Encode(p.worktree)}/session`)}
>
-
-
+ {language.t("command.session.new")}
+
@@ -1948,15 +1959,9 @@ export default function Layout(props: ParentProps) {
>
<>
-
-
-
+
diff --git a/packages/app/src/pages/layout/deep-links.ts b/packages/app/src/pages/layout/deep-links.ts
index 7bdb002a36..5dca421f74 100644
--- a/packages/app/src/pages/layout/deep-links.ts
+++ b/packages/app/src/pages/layout/deep-links.ts
@@ -1,15 +1,17 @@
export const deepLinkEvent = "opencode:deep-link"
-export const parseDeepLink = (input: string) => {
+const parseUrl = (input: string) => {
if (!input.startsWith("opencode://")) return
if (typeof URL.canParse === "function" && !URL.canParse(input)) return
- const url = (() => {
- try {
- return new URL(input)
- } catch {
- return undefined
- }
- })()
+ try {
+ return new URL(input)
+ } catch {
+ return
+ }
+}
+
+export const parseDeepLink = (input: string) => {
+ const url = parseUrl(input)
if (!url) return
if (url.hostname !== "open-project") return
const directory = url.searchParams.get("directory")
@@ -17,9 +19,23 @@ export const parseDeepLink = (input: string) => {
return directory
}
+export const parseNewSessionDeepLink = (input: string) => {
+ const url = parseUrl(input)
+ if (!url) return
+ if (url.hostname !== "new-session") return
+ const directory = url.searchParams.get("directory")
+ if (!directory) return
+ const prompt = url.searchParams.get("prompt") || undefined
+ if (!prompt) return { directory }
+ return { directory, prompt }
+}
+
export const collectOpenProjectDeepLinks = (urls: string[]) =>
urls.map(parseDeepLink).filter((directory): directory is string => !!directory)
+export const collectNewSessionDeepLinks = (urls: string[]) =>
+ urls.map(parseNewSessionDeepLink).filter((link): link is { directory: string; prompt?: string } => !!link)
+
type OpenCodeWindow = Window & {
__OPENCODE__?: {
deepLinks?: string[]
diff --git a/packages/app/src/pages/layout/helpers.test.ts b/packages/app/src/pages/layout/helpers.test.ts
index 29517b6248..d1569dbd9a 100644
--- a/packages/app/src/pages/layout/helpers.test.ts
+++ b/packages/app/src/pages/layout/helpers.test.ts
@@ -1,15 +1,14 @@
import { describe, expect, test } from "bun:test"
-import { type Session } from "@opencode-ai/sdk/v2/client"
-import { collectOpenProjectDeepLinks, drainPendingDeepLinks, parseDeepLink } from "./deep-links"
import {
- displayName,
- errorMessage,
- getDraggableId,
- hasProjectPermissions,
- latestRootSession,
- syncWorkspaceOrder,
- workspaceKey,
-} from "./helpers"
+ collectNewSessionDeepLinks,
+ collectOpenProjectDeepLinks,
+ drainPendingDeepLinks,
+ parseDeepLink,
+ parseNewSessionDeepLink,
+} from "./deep-links"
+import { displayName, errorMessage, getDraggableId, syncWorkspaceOrder, workspaceKey } from "./helpers"
+import { type Session } from "@opencode-ai/sdk/v2/client"
+import { hasProjectPermissions, latestRootSession } from "./helpers"
const session = (input: Partial
& Pick) =>
({
@@ -62,6 +61,28 @@ describe("layout deep links", () => {
expect(result).toEqual(["/a", "/c"])
})
+ test("parses new-session deep links with optional prompt", () => {
+ expect(parseNewSessionDeepLink("opencode://new-session?directory=/tmp/demo")).toEqual({ directory: "/tmp/demo" })
+ expect(parseNewSessionDeepLink("opencode://new-session?directory=/tmp/demo&prompt=hello%20world")).toEqual({
+ directory: "/tmp/demo",
+ prompt: "hello world",
+ })
+ })
+
+ test("ignores new-session deep links without directory", () => {
+ expect(parseNewSessionDeepLink("opencode://new-session")).toBeUndefined()
+ expect(parseNewSessionDeepLink("opencode://new-session?directory=")).toBeUndefined()
+ })
+
+ test("collects only valid new-session deep links", () => {
+ const result = collectNewSessionDeepLinks([
+ "opencode://new-session?directory=/a",
+ "opencode://open-project?directory=/b",
+ "opencode://new-session?directory=/c&prompt=ship%20it",
+ ])
+ expect(result).toEqual([{ directory: "/a" }, { directory: "/c", prompt: "ship it" }])
+ })
+
test("drains global deep links once", () => {
const target = {
__OPENCODE__: {
diff --git a/packages/app/src/pages/layout/sidebar-items.tsx b/packages/app/src/pages/layout/sidebar-items.tsx
index 0aaabc03ba..e991d8225d 100644
--- a/packages/app/src/pages/layout/sidebar-items.tsx
+++ b/packages/app/src/pages/layout/sidebar-items.tsx
@@ -1,10 +1,4 @@
-import { A, useNavigate, useParams } from "@solidjs/router"
-import { useGlobalSync } from "@/context/global-sync"
-import { useLanguage } from "@/context/language"
-import { useLayout, type LocalProject, getAvatarColors } from "@/context/layout"
-import { useNotification } from "@/context/notification"
-import { usePermission } from "@/context/permission"
-import { base64Encode } from "@opencode-ai/util/encode"
+import type { Message, Session, TextPart, UserMessage } from "@opencode-ai/sdk/v2/client"
import { Avatar } from "@opencode-ai/ui/avatar"
import { HoverCard } from "@opencode-ai/ui/hover-card"
import { Icon } from "@opencode-ai/ui/icon"
@@ -12,12 +6,18 @@ import { IconButton } from "@opencode-ai/ui/icon-button"
import { MessageNav } from "@opencode-ai/ui/message-nav"
import { Spinner } from "@opencode-ai/ui/spinner"
import { Tooltip } from "@opencode-ai/ui/tooltip"
+import { base64Encode } from "@opencode-ai/util/encode"
import { getFilename } from "@opencode-ai/util/path"
-import { type Message, type Session, type TextPart, type UserMessage } from "@opencode-ai/sdk/v2/client"
-import { For, Match, Show, Switch, createMemo, onCleanup, type Accessor, type JSX } from "solid-js"
+import { A, useNavigate, useParams } from "@solidjs/router"
+import { type Accessor, createMemo, For, type JSX, Match, onCleanup, Show, Switch } from "solid-js"
+import { useGlobalSync } from "@/context/global-sync"
+import { useLanguage } from "@/context/language"
+import { getAvatarColors, type LocalProject, useLayout } from "@/context/layout"
+import { useNotification } from "@/context/notification"
+import { usePermission } from "@/context/permission"
import { agentColor } from "@/utils/agent"
-import { hasProjectPermissions } from "./helpers"
import { sessionPermissionRequest } from "../session/composer/session-request-tree"
+import { hasProjectPermissions } from "./helpers"
const OPENCODE_PROJECT_ID = "4b0ea68d7af9a6031a7ffda7ad66e0cb83315750"
@@ -231,7 +231,9 @@ export const SessionItem = (props: SessionItemProps): JSX.Element => {
const hoverEnabled = createMemo(() => (props.popover ?? true) && hoverAllowed())
const isActive = createMemo(() => props.session.id === params.id)
- const hoverPrefetch = { current: undefined as ReturnType | undefined }
+ const hoverPrefetch = {
+ current: undefined as ReturnType | undefined,
+ }
const cancelHoverPrefetch = () => {
if (hoverPrefetch.current === undefined) return
clearTimeout(hoverPrefetch.current)
@@ -300,17 +302,15 @@ export const SessionItem = (props: SessionItemProps): JSX.Element => {
setHoverSession={props.setHoverSession}
messageLabel={messageLabel}
onMessageSelect={(message) => {
- if (!isActive()) {
+ if (!isActive())
layout.pendingMessage.set(`${base64Encode(props.session.directory)}/${props.session.id}`, message.id)
- navigate(`${props.slug}/session/${props.session.id}`)
- return
- }
- window.history.replaceState(null, "", `#message-${message.id}`)
- window.dispatchEvent(new HashChangeEvent("hashchange"))
+
+ navigate(`${props.slug}/session/${props.session.id}#message-${message.id}`)
}}
trigger={item}
/>
+
()
+
+ createEffect(() => {
+ if (!untrack(() => prompt.ready())) return
+ prompt.ready()
+ untrack(() => {
+ if (params.id || !prompt.ready()) return
+ const text = searchParams.prompt
+ if (!text) return
+ prompt.set([{ type: "text", content: text, start: 0, end: text.length }], text.length)
+ setSearchParams({ ...searchParams, prompt: undefined })
+ })
+ })
const [ui, setUi] = createStore({
pendingMessage: undefined as string | undefined,
@@ -405,7 +416,10 @@ export default function Page() {
() => {
const msg = lastUserMessage()
if (!msg) return
- if (msg.agent) local.agent.set(msg.agent)
+ if (msg.agent) {
+ local.agent.set(msg.agent)
+ if (local.agent.current()?.model) return
+ }
if (msg.model) local.model.set(msg.model)
},
),
@@ -679,7 +693,11 @@ export default function Page() {
on(
sessionKey,
() => {
- setTree({ reviewScroll: undefined, pendingDiff: undefined, activeDiff: undefined })
+ setTree({
+ reviewScroll: undefined,
+ pendingDiff: undefined,
+ activeDiff: undefined,
+ })
},
{ defer: true },
),
@@ -702,6 +720,7 @@ export default function Page() {
showAllFiles,
tabForPath: file.tab,
openTab: tabs().open,
+ setActive: tabs().setActive,
loadFile: file.load,
})
diff --git a/packages/app/src/pages/session/composer/session-question-dock.tsx b/packages/app/src/pages/session/composer/session-question-dock.tsx
index fd2ced3dc8..b22a92eb0a 100644
--- a/packages/app/src/pages/session/composer/session-question-dock.tsx
+++ b/packages/app/src/pages/session/composer/session-question-dock.tsx
@@ -8,6 +8,8 @@ import type { QuestionAnswer, QuestionRequest } from "@opencode-ai/sdk/v2"
import { useLanguage } from "@/context/language"
import { useSDK } from "@/context/sdk"
+const cache = new Map()
+
export const SessionQuestionDock: Component<{ request: QuestionRequest; onSubmit: () => void }> = (props) => {
const sdk = useSDK()
const language = useLanguage()
@@ -15,16 +17,18 @@ export const SessionQuestionDock: Component<{ request: QuestionRequest; onSubmit
const questions = createMemo(() => props.request.questions)
const total = createMemo(() => questions().length)
+ const cached = cache.get(props.request.id)
const [store, setStore] = createStore({
- tab: 0,
- answers: [] as QuestionAnswer[],
- custom: [] as string[],
- customOn: [] as boolean[],
+ tab: cached?.tab ?? 0,
+ answers: cached?.answers ?? ([] as QuestionAnswer[]),
+ custom: cached?.custom ?? ([] as string[]),
+ customOn: cached?.customOn ?? ([] as boolean[]),
editing: false,
sending: false,
})
let root: HTMLDivElement | undefined
+ let replied = false
const question = createMemo(() => questions()[store.tab])
const options = createMemo(() => question()?.options ?? [])
@@ -107,6 +111,16 @@ export const SessionQuestionDock: Component<{ request: QuestionRequest; onSubmit
})
})
+ onCleanup(() => {
+ if (replied) return
+ cache.set(props.request.id, {
+ tab: store.tab,
+ answers: store.answers.map((a) => (a ? [...a] : [])),
+ custom: store.custom.map((s) => s ?? ""),
+ customOn: store.customOn.map((b) => b ?? false),
+ })
+ })
+
const fail = (err: unknown) => {
const message = err instanceof Error ? err.message : String(err)
showToast({ title: language.t("common.requestFailed"), description: message })
@@ -119,6 +133,8 @@ export const SessionQuestionDock: Component<{ request: QuestionRequest; onSubmit
setStore("sending", true)
try {
await sdk.client.question.reply({ requestID: props.request.id, answers })
+ replied = true
+ cache.delete(props.request.id)
} catch (err) {
fail(err)
} finally {
@@ -133,6 +149,8 @@ export const SessionQuestionDock: Component<{ request: QuestionRequest; onSubmit
setStore("sending", true)
try {
await sdk.client.question.reject({ requestID: props.request.id })
+ replied = true
+ cache.delete(props.request.id)
} catch (err) {
fail(err)
} finally {
diff --git a/packages/app/src/pages/session/file-tabs.tsx b/packages/app/src/pages/session/file-tabs.tsx
index 93264353bd..77643789d0 100644
--- a/packages/app/src/pages/session/file-tabs.tsx
+++ b/packages/app/src/pages/session/file-tabs.tsx
@@ -234,7 +234,6 @@ export function FileTabContent(props: { tab: string }) {
if (typeof window === "undefined") return
const onKeyDown = (event: KeyboardEvent) => {
- if (event.defaultPrevented) return
if (tabs().active() !== props.tab) return
if (!(event.metaKey || event.ctrlKey) || event.altKey || event.shiftKey) return
if (event.key.toLowerCase() !== "f") return
diff --git a/packages/app/src/pages/session/helpers.test.ts b/packages/app/src/pages/session/helpers.test.ts
index aaa5b932fe..9c77c34af4 100644
--- a/packages/app/src/pages/session/helpers.test.ts
+++ b/packages/app/src/pages/session/helpers.test.ts
@@ -11,12 +11,13 @@ describe("createOpenReviewFile", () => {
return `file://${path}`
},
openTab: (tab) => calls.push(`open:${tab}`),
+ setActive: (tab) => calls.push(`active:${tab}`),
loadFile: (path) => calls.push(`load:${path}`),
})
openReviewFile("src/a.ts")
- expect(calls).toEqual(["show", "load:src/a.ts", "tab:src/a.ts", "open:file://src/a.ts"])
+ expect(calls).toEqual(["show", "load:src/a.ts", "tab:src/a.ts", "open:file://src/a.ts", "active:file://src/a.ts"])
})
})
diff --git a/packages/app/src/pages/session/helpers.ts b/packages/app/src/pages/session/helpers.ts
index 20f1d99a8b..60b26cdf47 100644
--- a/packages/app/src/pages/session/helpers.ts
+++ b/packages/app/src/pages/session/helpers.ts
@@ -24,15 +24,20 @@ export const createOpenReviewFile = (input: {
showAllFiles: () => void
tabForPath: (path: string) => string
openTab: (tab: string) => void
+ setActive: (tab: string) => void
loadFile: (path: string) => any | Promise
}) => {
return (path: string) => {
batch(() => {
input.showAllFiles()
const maybePromise = input.loadFile(path)
- const openTab = () => input.openTab(input.tabForPath(path))
- if (maybePromise instanceof Promise) maybePromise.then(openTab)
- else openTab()
+ const open = () => {
+ const tab = input.tabForPath(path)
+ input.openTab(tab)
+ input.setActive(tab)
+ }
+ if (maybePromise instanceof Promise) maybePromise.then(open)
+ else open()
})
}
}
diff --git a/packages/app/src/pages/session/message-id-from-hash.ts b/packages/app/src/pages/session/message-id-from-hash.ts
new file mode 100644
index 0000000000..2857f4b01d
--- /dev/null
+++ b/packages/app/src/pages/session/message-id-from-hash.ts
@@ -0,0 +1,6 @@
+export const messageIdFromHash = (hash: string) => {
+ const value = hash.startsWith("#") ? hash.slice(1) : hash
+ const match = value.match(/^message-(.+)$/)
+ if (!match) return
+ return match[1]
+}
diff --git a/packages/app/src/pages/session/message-timeline.tsx b/packages/app/src/pages/session/message-timeline.tsx
index 433c36e2e6..f320a2ebbf 100644
--- a/packages/app/src/pages/session/message-timeline.tsx
+++ b/packages/app/src/pages/session/message-timeline.tsx
@@ -1,4 +1,4 @@
-import { For, createEffect, createMemo, on, onCleanup, Show, startTransition, Index, type JSX } from "solid-js"
+import { For, createEffect, createMemo, on, onCleanup, Show, Index, type JSX } from "solid-js"
import { createStore, produce } from "solid-js/store"
import { useNavigate, useParams } from "@solidjs/router"
import { Button } from "@opencode-ai/ui/button"
@@ -160,7 +160,7 @@ function createTimelineStaging(input: TimelineStageInput) {
}
const currentTotal = input.messages().length
count = Math.min(currentTotal, count + input.config.batch)
- startTransition(() => setState("count", count))
+ setState("count", count)
if (count >= currentTotal) {
setState({ completedSession: sessionKey, activeSession: "" })
frame = undefined
@@ -611,7 +611,7 @@ export function MessageTimeline(props: {
-
+
{(id) => (
@@ -646,12 +646,12 @@ export function MessageTimeline(props: {
>
{language.t("common.rename")}
-
void archiveSession(id())}>
+ void archiveSession(id)}>
{language.t("common.archive")}
dialog.show(() => )}
+ onSelect={() => dialog.show(() => )}
>
{language.t("common.delete")}
diff --git a/packages/app/src/pages/session/session-side-panel.tsx b/packages/app/src/pages/session/session-side-panel.tsx
index ad802d15d1..55c1607a09 100644
--- a/packages/app/src/pages/session/session-side-panel.tsx
+++ b/packages/app/src/pages/session/session-side-panel.tsx
@@ -331,7 +331,9 @@ export function SessionSidePanel(props: {
const path = createMemo(() => file.pathFromTab(tab))
return (
- {(p) => }
+
+ {(p) => }
+
)
}}
diff --git a/packages/app/src/pages/session/terminal-panel.tsx b/packages/app/src/pages/session/terminal-panel.tsx
index cc4c17ee21..c8bfc14053 100644
--- a/packages/app/src/pages/session/terminal-panel.tsx
+++ b/packages/app/src/pages/session/terminal-panel.tsx
@@ -191,8 +191,8 @@ export function TerminalPanel() {
{(id) => (
-
- {(pty) => }
+
+ {(pty) => }
)}
@@ -217,10 +217,10 @@ export function TerminalPanel() {
{(id) => (
-
+
{(pty) => (
- terminal.clone(id)} />
+ terminal.clone(id)} />
)}
@@ -229,14 +229,14 @@ export function TerminalPanel() {
-
+
{(draggedId) => (
-
+
{(t) => (
{terminalTabLabel({
- title: t().title,
- titleNumber: t().titleNumber,
+ title: t.title,
+ titleNumber: t.titleNumber,
t: language.t as (key: string, vars?: Record) => string,
})}
diff --git a/packages/app/src/pages/session/use-session-hash-scroll.test.ts b/packages/app/src/pages/session/use-session-hash-scroll.test.ts
index 844f5451e3..7f3389baaa 100644
--- a/packages/app/src/pages/session/use-session-hash-scroll.test.ts
+++ b/packages/app/src/pages/session/use-session-hash-scroll.test.ts
@@ -1,5 +1,5 @@
import { describe, expect, test } from "bun:test"
-import { messageIdFromHash } from "./use-session-hash-scroll"
+import { messageIdFromHash } from "./message-id-from-hash"
describe("messageIdFromHash", () => {
test("parses hash with leading #", () => {
diff --git a/packages/app/src/pages/session/use-session-hash-scroll.ts b/packages/app/src/pages/session/use-session-hash-scroll.ts
index 473409fd99..20e88a3ea3 100644
--- a/packages/app/src/pages/session/use-session-hash-scroll.ts
+++ b/packages/app/src/pages/session/use-session-hash-scroll.ts
@@ -1,12 +1,9 @@
-import { createEffect, createMemo, onCleanup, onMount } from "solid-js"
-import { UserMessage } from "@opencode-ai/sdk/v2"
+import type { UserMessage } from "@opencode-ai/sdk/v2"
+import { useLocation, useNavigate } from "@solidjs/router"
+import { createEffect, createMemo, onMount } from "solid-js"
+import { messageIdFromHash } from "./message-id-from-hash"
-export const messageIdFromHash = (hash: string) => {
- const value = hash.startsWith("#") ? hash.slice(1) : hash
- const match = value.match(/^message-(.+)$/)
- if (!match) return
- return match[1]
-}
+export { messageIdFromHash } from "./message-id-from-hash"
export const useSessionHashScroll = (input: {
sessionKey: () => string
@@ -30,13 +27,18 @@ export const useSessionHashScroll = (input: {
const messageIndex = createMemo(() => new Map(visibleUserMessages().map((m, i) => [m.id, i])))
let pendingKey = ""
+ const location = useLocation()
+ const navigate = useNavigate()
+
const clearMessageHash = () => {
- if (!window.location.hash) return
- window.history.replaceState(null, "", window.location.href.replace(/#.*$/, ""))
+ if (!location.hash) return
+ navigate(location.pathname + location.search, { replace: true })
}
const updateHash = (id: string) => {
- window.history.replaceState(null, "", `#${input.anchor(id)}`)
+ navigate(location.pathname + location.search + `#${input.anchor(id)}`, {
+ replace: true,
+ })
}
const scrollToElement = (el: HTMLElement, behavior: ScrollBehavior) => {
@@ -53,6 +55,7 @@ export const useSessionHashScroll = (input: {
}
const scrollToMessage = (message: UserMessage, behavior: ScrollBehavior = "smooth") => {
+ console.log({ message, behavior })
if (input.currentMessageId() !== message.id) input.setActiveMessage(message)
const index = messageIndex().get(message.id) ?? -1
@@ -100,7 +103,7 @@ export const useSessionHashScroll = (input: {
}
const applyHash = (behavior: ScrollBehavior) => {
- const hash = window.location.hash.slice(1)
+ const hash = location.hash.slice(1)
if (!hash) {
input.autoScroll.forceScrollToBottom()
const el = input.scroller()
@@ -132,6 +135,7 @@ export const useSessionHashScroll = (input: {
}
createEffect(() => {
+ location.hash
if (!input.sessionID() || !input.messagesReady()) return
requestAnimationFrame(() => applyHash("auto"))
})
@@ -155,7 +159,7 @@ export const useSessionHashScroll = (input: {
}
}
- if (!targetId) targetId = messageIdFromHash(window.location.hash)
+ if (!targetId) targetId = messageIdFromHash(location.hash)
if (!targetId) return
if (input.currentMessageId() === targetId) return
@@ -171,14 +175,6 @@ export const useSessionHashScroll = (input: {
if (typeof window !== "undefined" && "scrollRestoration" in window.history) {
window.history.scrollRestoration = "manual"
}
-
- const handler = () => {
- if (!input.sessionID() || !input.messagesReady()) return
- requestAnimationFrame(() => applyHash("auto"))
- }
-
- window.addEventListener("hashchange", handler)
- onCleanup(() => window.removeEventListener("hashchange", handler))
})
return {
diff --git a/packages/app/src/utils/notification-click.ts b/packages/app/src/utils/notification-click.ts
index 94086c5959..316b278206 100644
--- a/packages/app/src/utils/notification-click.ts
+++ b/packages/app/src/utils/notification-click.ts
@@ -7,6 +7,7 @@ export const setNavigate = (fn: (href: string) => void) => {
export const handleNotificationClick = (href?: string) => {
window.focus()
if (!href) return
- if (nav) nav(href)
- else window.location.assign(href)
+ if (nav) return nav(href)
+ console.warn("notification-click: navigate function not set, falling back to window.location.assign")
+ window.location.assign(href)
}
diff --git a/packages/app/src/utils/server-errors.test.ts b/packages/app/src/utils/server-errors.test.ts
index 1969d1afc2..1f53bb8cf6 100644
--- a/packages/app/src/utils/server-errors.test.ts
+++ b/packages/app/src/utils/server-errors.test.ts
@@ -1,8 +1,37 @@
import { describe, expect, test } from "bun:test"
-import type { ConfigInvalidError } from "./server-errors"
-import { formatServerError, parseReabaleConfigInvalidError } from "./server-errors"
+import type { ConfigInvalidError, ProviderModelNotFoundError } from "./server-errors"
+import { formatServerError, parseReadableConfigInvalidError } from "./server-errors"
-describe("parseReabaleConfigInvalidError", () => {
+function fill(text: string, vars?: Record) {
+ if (!vars) return text
+ return text.replace(/{{\s*(\w+)\s*}}/g, (_, key: string) => {
+ const value = vars[key]
+ if (value === undefined) return ""
+ return String(value)
+ })
+}
+
+function useLanguageMock() {
+ const dict: Record = {
+ "error.chain.unknown": "Erro desconhecido",
+ "error.chain.configInvalid": "Arquivo de config em {{path}} invalido",
+ "error.chain.configInvalidWithMessage": "Arquivo de config em {{path}} invalido: {{message}}",
+ "error.chain.modelNotFound": "Modelo nao encontrado: {{provider}}/{{model}}",
+ "error.chain.didYouMean": "Voce quis dizer: {{suggestions}}",
+ "error.chain.checkConfig": "Revise provider/model no config",
+ }
+ return {
+ t(key: string, vars?: Record) {
+ const text = dict[key]
+ if (!text) return key
+ return fill(text, vars)
+ },
+ }
+}
+
+const language = useLanguageMock()
+
+describe("parseReadableConfigInvalidError", () => {
test("formats issues with file path", () => {
const error = {
name: "ConfigInvalidError",
@@ -15,10 +44,10 @@ describe("parseReabaleConfigInvalidError", () => {
},
} satisfies ConfigInvalidError
- const result = parseReabaleConfigInvalidError(error)
+ const result = parseReadableConfigInvalidError(error, language.t)
expect(result).toBe(
- ["Invalid configuration", "opencode.config.ts", "settings.host: Required", "mode: Invalid"].join("\n"),
+ ["Arquivo de config em opencode.config.ts invalido: settings.host: Required", "mode: Invalid"].join("\n"),
)
})
@@ -31,9 +60,9 @@ describe("parseReabaleConfigInvalidError", () => {
},
} satisfies ConfigInvalidError
- const result = parseReabaleConfigInvalidError(error)
+ const result = parseReadableConfigInvalidError(error, language.t)
- expect(result).toBe(["Invalid configuration", "Bad value"].join("\n"))
+ expect(result).toBe("Arquivo de config em config invalido: Bad value")
})
})
@@ -46,24 +75,57 @@ describe("formatServerError", () => {
},
} satisfies ConfigInvalidError
- const result = formatServerError(error)
+ const result = formatServerError(error, language.t)
- expect(result).toBe(["Invalid configuration", "Missing host"].join("\n"))
+ expect(result).toBe("Arquivo de config em config invalido: Missing host")
})
test("returns error messages", () => {
- expect(formatServerError(new Error("Request failed with status 503"))).toBe("Request failed with status 503")
+ expect(formatServerError(new Error("Request failed with status 503"), language.t)).toBe(
+ "Request failed with status 503",
+ )
})
test("returns provided string errors", () => {
- expect(formatServerError("Failed to connect to server")).toBe("Failed to connect to server")
+ expect(formatServerError("Failed to connect to server", language.t)).toBe("Failed to connect to server")
})
- test("falls back to unknown", () => {
- expect(formatServerError(0)).toBe("Unknown error")
+ test("uses translated unknown fallback", () => {
+ expect(formatServerError(0, language.t)).toBe("Erro desconhecido")
})
test("falls back for unknown error objects and names", () => {
- expect(formatServerError({ name: "ServerTimeoutError", data: { seconds: 30 } })).toBe("Unknown error")
+ expect(formatServerError({ name: "ServerTimeoutError", data: { seconds: 30 } }, language.t)).toBe(
+ "Erro desconhecido",
+ )
+ })
+
+ test("formats provider model errors using provider/model", () => {
+ const error = {
+ name: "ProviderModelNotFoundError",
+ data: {
+ providerID: "openai",
+ modelID: "gpt-4.1",
+ },
+ } satisfies ProviderModelNotFoundError
+
+ expect(formatServerError(error, language.t)).toBe(
+ ["Modelo nao encontrado: openai/gpt-4.1", "Revise provider/model no config"].join("\n"),
+ )
+ })
+
+ test("formats provider model suggestions", () => {
+ const error = {
+ name: "ProviderModelNotFoundError",
+ data: {
+ providerID: "x",
+ modelID: "y",
+ suggestions: ["x/y2", "x/y3"],
+ },
+ } satisfies ProviderModelNotFoundError
+
+ expect(formatServerError(error, language.t)).toBe(
+ ["Modelo nao encontrado: x/y", "Voce quis dizer: x/y2, x/y3", "Revise provider/model no config"].join("\n"),
+ )
})
})
diff --git a/packages/app/src/utils/server-errors.ts b/packages/app/src/utils/server-errors.ts
index 85ebca1320..2c3a8c54db 100644
--- a/packages/app/src/utils/server-errors.ts
+++ b/packages/app/src/utils/server-errors.ts
@@ -7,28 +7,31 @@ export type ConfigInvalidError = {
}
}
-type Label = {
- unknown: string
- invalidConfiguration: string
-}
-
-const fallback: Label = {
- unknown: "Unknown error",
- invalidConfiguration: "Invalid configuration",
-}
-
-function resolveLabel(labels: Partial
+ }
+ >
+ {content()}
{content()}
diff --git a/packages/ui/src/components/message-part.css b/packages/ui/src/components/message-part.css
index 3eee45c75f..8fc7090133 100644
--- a/packages/ui/src/components/message-part.css
+++ b/packages/ui/src/components/message-part.css
@@ -577,6 +577,46 @@
justify-content: center;
}
+[data-component="exa-tool-output"] {
+ width: 100%;
+ padding-top: 8px;
+ display: flex;
+ flex-direction: column;
+}
+
+[data-slot="basic-tool-tool-subtitle"].exa-tool-query {
+ display: block;
+ max-width: 100%;
+ overflow: hidden;
+ text-overflow: ellipsis;
+ white-space: nowrap;
+}
+
+[data-slot="exa-tool-links"] {
+ display: flex;
+ flex-direction: column;
+ gap: 4px;
+}
+
+[data-slot="exa-tool-link"] {
+ display: block;
+ max-width: 100%;
+ color: var(--text-interactive-base);
+ text-decoration: underline;
+ text-underline-offset: 2px;
+ overflow: hidden;
+ text-overflow: ellipsis;
+ white-space: nowrap;
+
+ &:hover {
+ color: var(--text-interactive-base);
+ }
+
+ &:visited {
+ color: var(--text-interactive-base);
+ }
+}
+
[data-component="todos"] {
padding: 10px 0 24px 0;
display: flex;
diff --git a/packages/ui/src/components/message-part.tsx b/packages/ui/src/components/message-part.tsx
index aecdbc8e41..fbeb8bda28 100644
--- a/packages/ui/src/components/message-part.tsx
+++ b/packages/ui/src/components/message-part.tsx
@@ -52,6 +52,7 @@ import { TextShimmer } from "./text-shimmer"
import { AnimatedCountList } from "./tool-count-summary"
import { ToolStatusTitle } from "./tool-status-title"
import { animate } from "motion"
+import { useLocation } from "@solidjs/router"
function ShellSubmessage(props: { text: string; animate?: boolean }) {
let widthRef: HTMLSpanElement | undefined
@@ -242,6 +243,18 @@ export function getToolInfo(tool: string, input: any = {}): ToolInfo {
title: i18n.t("ui.tool.webfetch"),
subtitle: input.url,
}
+ case "websearch":
+ return {
+ icon: "window-cursor",
+ title: i18n.t("ui.tool.websearch"),
+ subtitle: input.query,
+ }
+ case "codesearch":
+ return {
+ icon: "code",
+ title: i18n.t("ui.tool.codesearch"),
+ subtitle: input.query,
+ }
case "task":
return {
icon: "task",
@@ -302,6 +315,18 @@ export function getToolInfo(tool: string, input: any = {}): ToolInfo {
}
}
+function urls(text: string | undefined) {
+ if (!text) return []
+ const seen = new Set
()
+ return [...text.matchAll(/https?:\/\/[^\s<>"'`)\]]+/g)]
+ .map((item) => item[0].replace(/[),.;:!?]+$/g, ""))
+ .filter((item) => {
+ if (seen.has(item)) return false
+ seen.add(item)
+ return true
+ })
+}
+
const CONTEXT_GROUP_TOOLS = new Set(["read", "glob", "grep", "list"])
const HIDDEN_TOOLS = new Set(["todowrite", "todoread"])
@@ -597,6 +622,32 @@ function contextToolSummary(parts: ToolPart[]) {
return { read, search, list }
}
+function ExaOutput(props: { output?: string }) {
+ const links = createMemo(() => urls(props.output))
+
+ return (
+ 0}>
+
+
+ )
+}
+
export function registerPartComponent(type: string, component: PartComponent) {
PART_MAPPING[type] = component
}
@@ -1466,11 +1517,64 @@ ToolRegistry.register({
},
})
+ToolRegistry.register({
+ name: "websearch",
+ render(props) {
+ const i18n = useI18n()
+ const query = createMemo(() => {
+ const value = props.input.query
+ if (typeof value !== "string") return ""
+ return value
+ })
+
+ return (
+
+
+
+ )
+ },
+})
+
+ToolRegistry.register({
+ name: "codesearch",
+ render(props) {
+ const i18n = useI18n()
+ const query = createMemo(() => {
+ const value = props.input.query
+ if (typeof value !== "string") return ""
+ return value
+ })
+
+ return (
+
+
+
+ )
+ },
+})
+
ToolRegistry.register({
name: "task",
render(props) {
const data = useData()
const i18n = useI18n()
+ const location = useLocation()
const childSessionId = () => props.metadata.sessionId as string | undefined
const title = createMemo(() => i18n.t("ui.tool.agent", { type: props.input.subagent_type || props.tool }))
const description = createMemo(() => {
@@ -1487,8 +1591,7 @@ ToolRegistry.register({
const direct = data.sessionHref?.(sessionId)
if (direct) return direct
- if (typeof window === "undefined") return
- const path = window.location.pathname
+ const path = location.pathname
const idx = path.indexOf("/session")
if (idx === -1) return
return `${path.slice(0, idx)}/session/${sessionId}`
diff --git a/packages/ui/src/components/session-review.tsx b/packages/ui/src/components/session-review.tsx
index c75baf921a..ad9e5b2c33 100644
--- a/packages/ui/src/components/session-review.tsx
+++ b/packages/ui/src/components/session-review.tsx
@@ -355,8 +355,6 @@ export const SessionReview = (props: SessionReviewProps) => {
if (typeof window === "undefined") return
const onKeyDown = (event: KeyboardEvent) => {
- if (event.defaultPrevented) return
-
const mod = event.metaKey || event.ctrlKey
if (!mod) return
diff --git a/packages/ui/src/components/session-turn.css b/packages/ui/src/components/session-turn.css
index 15d7b50352..cf1e981159 100644
--- a/packages/ui/src/components/session-turn.css
+++ b/packages/ui/src/components/session-turn.css
@@ -171,10 +171,8 @@
[data-slot="session-turn-diff-path"] {
display: flex;
+ flex-grow: 1;
min-width: 0;
- align-items: baseline;
- overflow: hidden;
- white-space: nowrap;
font-family: var(--font-family-sans);
font-size: var(--font-size-small);
@@ -182,24 +180,16 @@
}
[data-slot="session-turn-diff-directory"] {
- flex: 1 1 auto;
- color: var(--text-weak);
- min-width: 0;
+ color: var(--text-base);
overflow: hidden;
text-overflow: ellipsis;
white-space: nowrap;
direction: rtl;
- unicode-bidi: plaintext;
text-align: left;
}
[data-slot="session-turn-diff-filename"] {
flex-shrink: 0;
- max-width: 100%;
- min-width: 0;
- overflow: hidden;
- text-overflow: ellipsis;
- white-space: nowrap;
color: var(--text-strong);
font-weight: var(--font-weight-medium);
}
diff --git a/packages/ui/src/components/tabs.css b/packages/ui/src/components/tabs.css
index f8045702fd..51917489e2 100644
--- a/packages/ui/src/components/tabs.css
+++ b/packages/ui/src/components/tabs.css
@@ -241,26 +241,26 @@
[data-slot="tabs-trigger"] {
.tab-fileicon-color,
.tab-fileicon-mono {
- transition: opacity 120ms ease;
+ pointer-events: none;
}
.tab-fileicon-color {
- opacity: 0;
+ display: none;
}
.tab-fileicon-mono {
- opacity: 1;
+ display: block;
color: currentColor;
}
&[data-selected],
&:hover {
.tab-fileicon-color {
- opacity: 1;
+ display: block;
}
.tab-fileicon-mono {
- opacity: 0;
+ display: none;
}
}
}
diff --git a/packages/ui/src/components/tooltip.tsx b/packages/ui/src/components/tooltip.tsx
index 055e504654..63105d00fc 100644
--- a/packages/ui/src/components/tooltip.tsx
+++ b/packages/ui/src/components/tooltip.tsx
@@ -47,7 +47,7 @@ export function Tooltip(props: TooltipProps) {
{local.children}
-
+
{local.children}
diff --git a/packages/ui/src/i18n/ar.ts b/packages/ui/src/i18n/ar.ts
index 3579eff5a8..f0a56f772c 100644
--- a/packages/ui/src/i18n/ar.ts
+++ b/packages/ui/src/i18n/ar.ts
@@ -94,6 +94,8 @@ export const dict = {
"ui.tool.glob": "Glob",
"ui.tool.grep": "Grep",
"ui.tool.webfetch": "جلب الويب",
+ "ui.tool.websearch": "بحث الويب",
+ "ui.tool.codesearch": "بحث الكود",
"ui.tool.shell": "Shell",
"ui.tool.patch": "تصحيح",
"ui.tool.todos": "المهام",
diff --git a/packages/ui/src/i18n/br.ts b/packages/ui/src/i18n/br.ts
index 76028878f9..d060506054 100644
--- a/packages/ui/src/i18n/br.ts
+++ b/packages/ui/src/i18n/br.ts
@@ -94,6 +94,8 @@ export const dict = {
"ui.tool.glob": "Glob",
"ui.tool.grep": "Grep",
"ui.tool.webfetch": "Buscar Web",
+ "ui.tool.websearch": "Pesquisa na Web",
+ "ui.tool.codesearch": "Pesquisa de Código",
"ui.tool.shell": "Shell",
"ui.tool.patch": "Patch",
"ui.tool.todos": "Tarefas",
diff --git a/packages/ui/src/i18n/bs.ts b/packages/ui/src/i18n/bs.ts
index 9bc2293361..754c6bcefe 100644
--- a/packages/ui/src/i18n/bs.ts
+++ b/packages/ui/src/i18n/bs.ts
@@ -98,6 +98,8 @@ export const dict = {
"ui.tool.glob": "Glob",
"ui.tool.grep": "Grep",
"ui.tool.webfetch": "Web preuzimanje",
+ "ui.tool.websearch": "Pretraga weba",
+ "ui.tool.codesearch": "Pretraga koda",
"ui.tool.shell": "Shell",
"ui.tool.patch": "Patch",
"ui.tool.todos": "Lista zadataka",
diff --git a/packages/ui/src/i18n/da.ts b/packages/ui/src/i18n/da.ts
index 1bb4758568..0126a60c89 100644
--- a/packages/ui/src/i18n/da.ts
+++ b/packages/ui/src/i18n/da.ts
@@ -93,6 +93,8 @@ export const dict = {
"ui.tool.glob": "Glob",
"ui.tool.grep": "Grep",
"ui.tool.webfetch": "Webhentning",
+ "ui.tool.websearch": "Websøgning",
+ "ui.tool.codesearch": "Kodesøgning",
"ui.tool.shell": "Shell",
"ui.tool.patch": "Patch",
"ui.tool.todos": "Opgaver",
diff --git a/packages/ui/src/i18n/de.ts b/packages/ui/src/i18n/de.ts
index 951833c309..24d99ef790 100644
--- a/packages/ui/src/i18n/de.ts
+++ b/packages/ui/src/i18n/de.ts
@@ -99,6 +99,8 @@ export const dict = {
"ui.tool.glob": "Glob",
"ui.tool.grep": "Grep",
"ui.tool.webfetch": "Webabruf",
+ "ui.tool.websearch": "Websuche",
+ "ui.tool.codesearch": "Codesuche",
"ui.tool.shell": "Shell",
"ui.tool.patch": "Patch",
"ui.tool.todos": "Aufgaben",
diff --git a/packages/ui/src/i18n/en.ts b/packages/ui/src/i18n/en.ts
index 9c9ae6e27a..1d92ea507c 100644
--- a/packages/ui/src/i18n/en.ts
+++ b/packages/ui/src/i18n/en.ts
@@ -95,6 +95,8 @@ export const dict: Record = {
"ui.tool.glob": "Glob",
"ui.tool.grep": "Grep",
"ui.tool.webfetch": "Webfetch",
+ "ui.tool.websearch": "Web Search",
+ "ui.tool.codesearch": "Code Search",
"ui.tool.shell": "Shell",
"ui.tool.patch": "Patch",
"ui.tool.todos": "To-dos",
diff --git a/packages/ui/src/i18n/es.ts b/packages/ui/src/i18n/es.ts
index 6fb6eea511..9ee95d8245 100644
--- a/packages/ui/src/i18n/es.ts
+++ b/packages/ui/src/i18n/es.ts
@@ -94,6 +94,8 @@ export const dict = {
"ui.tool.glob": "Glob",
"ui.tool.grep": "Grep",
"ui.tool.webfetch": "Webfetch",
+ "ui.tool.websearch": "Búsqueda web",
+ "ui.tool.codesearch": "Búsqueda de código",
"ui.tool.shell": "Shell",
"ui.tool.patch": "Parche",
"ui.tool.todos": "Tareas",
diff --git a/packages/ui/src/i18n/fr.ts b/packages/ui/src/i18n/fr.ts
index 3a77a3f5c6..431abe5683 100644
--- a/packages/ui/src/i18n/fr.ts
+++ b/packages/ui/src/i18n/fr.ts
@@ -94,6 +94,8 @@ export const dict = {
"ui.tool.glob": "Glob",
"ui.tool.grep": "Grep",
"ui.tool.webfetch": "Webfetch",
+ "ui.tool.websearch": "Recherche Web",
+ "ui.tool.codesearch": "Recherche de code",
"ui.tool.shell": "Shell",
"ui.tool.patch": "Patch",
"ui.tool.todos": "Tâches",
diff --git a/packages/ui/src/i18n/ja.ts b/packages/ui/src/i18n/ja.ts
index 9dfb03f76b..c6cb2ac401 100644
--- a/packages/ui/src/i18n/ja.ts
+++ b/packages/ui/src/i18n/ja.ts
@@ -93,6 +93,8 @@ export const dict = {
"ui.tool.glob": "Glob",
"ui.tool.grep": "Grep",
"ui.tool.webfetch": "Webfetch",
+ "ui.tool.websearch": "Web検索",
+ "ui.tool.codesearch": "コード検索",
"ui.tool.shell": "Shell",
"ui.tool.patch": "Patch",
"ui.tool.todos": "Todo",
diff --git a/packages/ui/src/i18n/ko.ts b/packages/ui/src/i18n/ko.ts
index 84d261ac89..cd306e879e 100644
--- a/packages/ui/src/i18n/ko.ts
+++ b/packages/ui/src/i18n/ko.ts
@@ -94,6 +94,8 @@ export const dict = {
"ui.tool.glob": "Glob",
"ui.tool.grep": "Grep",
"ui.tool.webfetch": "웹 가져오기",
+ "ui.tool.websearch": "웹 검색",
+ "ui.tool.codesearch": "코드 검색",
"ui.tool.shell": "셸",
"ui.tool.patch": "패치",
"ui.tool.todos": "할 일",
diff --git a/packages/ui/src/i18n/no.ts b/packages/ui/src/i18n/no.ts
index dd1822beee..ddfe094618 100644
--- a/packages/ui/src/i18n/no.ts
+++ b/packages/ui/src/i18n/no.ts
@@ -97,6 +97,8 @@ export const dict: Record = {
"ui.tool.glob": "Glob",
"ui.tool.grep": "Grep",
"ui.tool.webfetch": "Webhenting",
+ "ui.tool.websearch": "Nettsøk",
+ "ui.tool.codesearch": "Kodesøk",
"ui.tool.shell": "Shell",
"ui.tool.patch": "Patch",
"ui.tool.todos": "Gjøremål",
diff --git a/packages/ui/src/i18n/pl.ts b/packages/ui/src/i18n/pl.ts
index fcfedb2ef9..73fa96afae 100644
--- a/packages/ui/src/i18n/pl.ts
+++ b/packages/ui/src/i18n/pl.ts
@@ -93,6 +93,8 @@ export const dict = {
"ui.tool.glob": "Glob",
"ui.tool.grep": "Grep",
"ui.tool.webfetch": "Pobieranie sieciowe",
+ "ui.tool.websearch": "Wyszukiwanie w sieci",
+ "ui.tool.codesearch": "Wyszukiwanie kodu",
"ui.tool.shell": "Terminal",
"ui.tool.patch": "Patch",
"ui.tool.todos": "Zadania",
diff --git a/packages/ui/src/i18n/ru.ts b/packages/ui/src/i18n/ru.ts
index 713ff47d1e..085be28436 100644
--- a/packages/ui/src/i18n/ru.ts
+++ b/packages/ui/src/i18n/ru.ts
@@ -93,6 +93,8 @@ export const dict = {
"ui.tool.glob": "Glob",
"ui.tool.grep": "Grep",
"ui.tool.webfetch": "Webfetch",
+ "ui.tool.websearch": "Веб-поиск",
+ "ui.tool.codesearch": "Поиск кода",
"ui.tool.shell": "Оболочка",
"ui.tool.patch": "Патч",
"ui.tool.todos": "Задачи",
diff --git a/packages/ui/src/i18n/th.ts b/packages/ui/src/i18n/th.ts
index 44761a279e..705f68d1b4 100644
--- a/packages/ui/src/i18n/th.ts
+++ b/packages/ui/src/i18n/th.ts
@@ -95,6 +95,8 @@ export const dict = {
"ui.tool.glob": "Glob",
"ui.tool.grep": "Grep",
"ui.tool.webfetch": "ดึงจากเว็บ",
+ "ui.tool.websearch": "ค้นหาเว็บ",
+ "ui.tool.codesearch": "ค้นหาโค้ด",
"ui.tool.shell": "เชลล์",
"ui.tool.patch": "แพตช์",
"ui.tool.todos": "รายการงาน",
diff --git a/packages/ui/src/i18n/tr.ts b/packages/ui/src/i18n/tr.ts
index 5ec108d4aa..fa3bddb218 100644
--- a/packages/ui/src/i18n/tr.ts
+++ b/packages/ui/src/i18n/tr.ts
@@ -90,6 +90,8 @@ export const dict = {
"ui.tool.glob": "Glob",
"ui.tool.grep": "Grep",
"ui.tool.webfetch": "Web getir",
+ "ui.tool.websearch": "Web Araması",
+ "ui.tool.codesearch": "Kod Araması",
"ui.tool.shell": "Kabuk",
"ui.tool.patch": "Yama",
"ui.tool.todos": "Görevler",
diff --git a/packages/ui/src/i18n/zh.ts b/packages/ui/src/i18n/zh.ts
index 39226605b9..571574d92e 100644
--- a/packages/ui/src/i18n/zh.ts
+++ b/packages/ui/src/i18n/zh.ts
@@ -98,6 +98,8 @@ export const dict = {
"ui.tool.glob": "Glob",
"ui.tool.grep": "Grep",
"ui.tool.webfetch": "Webfetch",
+ "ui.tool.websearch": "网络搜索",
+ "ui.tool.codesearch": "代码搜索",
"ui.tool.shell": "Shell",
"ui.tool.patch": "补丁",
"ui.tool.todos": "待办",
diff --git a/packages/ui/src/i18n/zht.ts b/packages/ui/src/i18n/zht.ts
index 068e222d65..edbc96b12f 100644
--- a/packages/ui/src/i18n/zht.ts
+++ b/packages/ui/src/i18n/zht.ts
@@ -98,6 +98,8 @@ export const dict = {
"ui.tool.glob": "Glob",
"ui.tool.grep": "Grep",
"ui.tool.webfetch": "Webfetch",
+ "ui.tool.websearch": "網頁搜尋",
+ "ui.tool.codesearch": "程式碼搜尋",
"ui.tool.shell": "Shell",
"ui.tool.patch": "修補",
"ui.tool.todos": "待辦",
diff --git a/packages/util/package.json b/packages/util/package.json
index f6f2b5514a..87cf20591e 100644
--- a/packages/util/package.json
+++ b/packages/util/package.json
@@ -1,6 +1,6 @@
{
"name": "@opencode-ai/util",
- "version": "1.2.17",
+ "version": "1.2.18",
"private": true,
"type": "module",
"license": "MIT",
diff --git a/packages/web/package.json b/packages/web/package.json
index ee5671e3ec..4e7ac7cd8b 100644
--- a/packages/web/package.json
+++ b/packages/web/package.json
@@ -2,7 +2,7 @@
"name": "@opencode-ai/web",
"type": "module",
"license": "MIT",
- "version": "1.2.17",
+ "version": "1.2.18",
"scripts": {
"dev": "astro dev",
"dev:remote": "VITE_API_URL=https://api.opencode.ai astro dev",
diff --git a/packages/web/src/content/docs/ar/zen.mdx b/packages/web/src/content/docs/ar/zen.mdx
index 2810dea7dd..e155748fbf 100644
--- a/packages/web/src/content/docs/ar/zen.mdx
+++ b/packages/web/src/content/docs/ar/zen.mdx
@@ -59,6 +59,7 @@ OpenCode Zen هو بوابة للذكاء الاصطناعي تتيح لك ال
| النموذج | معرّف النموذج | نقطة النهاية | حزمة AI SDK |
| ------------------ | ------------------ | -------------------------------------------------- | --------------------------- |
+| GPT 5.4 | gpt-5.4 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
| GPT 5.3 Codex | gpt-5.3-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
| GPT 5.2 | gpt-5.2 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
| GPT 5.2 Codex | gpt-5.2-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
@@ -141,6 +142,7 @@ https://opencode.ai/zen/v1/models
| Gemini 3 Pro (≤ 200K tokens) | $2.00 | $12.00 | $0.20 | - |
| Gemini 3 Pro (> 200K tokens) | $4.00 | $18.00 | $0.40 | - |
| Gemini 3 Flash | $0.50 | $3.00 | $0.05 | - |
+| GPT 5.4 | $2.50 | $15.00 | $0.25 | - |
| GPT 5.3 Codex | $1.75 | $14.00 | $0.175 | - |
| GPT 5.2 | $1.75 | $14.00 | $0.175 | - |
| GPT 5.2 Codex | $1.75 | $14.00 | $0.175 | - |
@@ -184,6 +186,19 @@ https://opencode.ai/zen/v1/models
---
+### نماذج مهملة
+
+| النموذج | تاريخ الإيقاف |
+| ---------------- | ------------- |
+| Qwen3 Coder 480B | 6 فبراير 2026 |
+| Kimi K2 Thinking | 6 مارس 2026 |
+| Kimi K2 | 6 مارس 2026 |
+| MiniMax M2.1 | 15 مارس 2026 |
+| GLM 4.7 | 15 مارس 2026 |
+| GLM 4.6 | 15 مارس 2026 |
+
+---
+
## الخصوصية
تتم استضافة جميع نماذجنا في الولايات المتحدة. يلتزم مزوّدونا بسياسة عدم الاحتفاظ بالبيانات (zero-retention) ولا يستخدمون بياناتك لتدريب النماذج، مع الاستثناءات التالية:
diff --git a/packages/web/src/content/docs/bs/zen.mdx b/packages/web/src/content/docs/bs/zen.mdx
index ad428884d3..8da6697d09 100644
--- a/packages/web/src/content/docs/bs/zen.mdx
+++ b/packages/web/src/content/docs/bs/zen.mdx
@@ -55,6 +55,7 @@ Nasim modelima mozete pristupiti i preko sljedecih API endpointa.
| Model | Model ID | Endpoint | AI SDK Package |
| ------------------ | ------------------ | -------------------------------------------------- | --------------------------- |
+| GPT 5.4 | gpt-5.4 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
| GPT 5.3 Codex | gpt-5.3-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
| GPT 5.2 | gpt-5.2 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
| GPT 5.2 Codex | gpt-5.2-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
@@ -136,6 +137,7 @@ Podrzavamo pay-as-you-go model. Ispod su cijene **po 1M tokena**.
| Gemini 3 Pro (≤ 200K tokens) | $2.00 | $12.00 | $0.20 | - |
| Gemini 3 Pro (> 200K tokens) | $4.00 | $18.00 | $0.40 | - |
| Gemini 3 Flash | $0.50 | $3.00 | $0.05 | - |
+| GPT 5.4 | $2.50 | $15.00 | $0.25 | - |
| GPT 5.3 Codex | $1.75 | $14.00 | $0.175 | - |
| GPT 5.2 | $1.75 | $14.00 | $0.175 | - |
| GPT 5.2 Codex | $1.75 | $14.00 | $0.175 | - |
@@ -178,6 +180,19 @@ Na primjer, ako postavite mjesecni limit na $20, Zen nece potrositi vise od $20
---
+### Zastarjeli modeli
+
+| Model | Datum ukidanja |
+| ---------------- | -------------- |
+| Qwen3 Coder 480B | 6. feb. 2026. |
+| Kimi K2 Thinking | 6. mart 2026. |
+| Kimi K2 | 6. mart 2026. |
+| MiniMax M2.1 | 15. mart 2026. |
+| GLM 4.7 | 15. mart 2026. |
+| GLM 4.6 | 15. mart 2026. |
+
+---
+
## Privatnost
Svi nasi modeli su hostovani u SAD-u. Provajderi prate zero-retention politiku i ne koriste vase podatke za treniranje modela, uz sljedece izuzetke:
diff --git a/packages/web/src/content/docs/da/zen.mdx b/packages/web/src/content/docs/da/zen.mdx
index e99c626c57..dee93e3bea 100644
--- a/packages/web/src/content/docs/da/zen.mdx
+++ b/packages/web/src/content/docs/da/zen.mdx
@@ -64,6 +64,7 @@ Du kan også få adgang til vores modeller gennem følgende API-endpoints.
| Model | Model ID | Endpoint | AI SDK Pakke |
| ------------------- | ------------------ | -------------------------------------------------- | --------------------------- |
+| GPT 5.4 | gpt-5.4 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
| GPT 5.3 Codex | gpt-5.3-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
| GPT 5.2 | gpt-5.2 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
| GPT 5.2 Codex | gpt-5.2-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
@@ -147,6 +148,7 @@ Vi støtter en pay-as-you-go-model. Nedenfor er priserne **per 1 million tokens*
| Gemini 3 Pro (≤ 200K tokens) | $2,00 | $12,00 | $0,20 | - |
| Gemini 3 Pro (> 200K tokens) | $4,00 | $18,00 | $0,40 | - |
| Gemini 3 Flash | $0,50 | $3,00 | $0,05 | - |
+| GPT 5.4 | $2,50 | $15,00 | $0,25 | - |
| GPT 5.3 Codex | $1,75 | $14,00 | $0,175 | - |
| GPT 5.2 | $1,75 | $14,00 | $0,175 | - |
| GPT 5.2 Codex | $1,75 | $14,00 | $0,175 | - |
@@ -192,6 +194,19 @@ at opkræve dig mere end $20, hvis din saldo går under $5.
---
+### Udfasede modeller
+
+| Model | Udfasningsdato |
+| ---------------- | -------------- |
+| Qwen3 Coder 480B | 6. feb. 2026   |
+| Kimi K2 Thinking | 6. marts 2026  |
+| Kimi K2 | 6. marts 2026 |
+| MiniMax M2.1 | 15. marts 2026 |
+| GLM 4.7 | 15. marts 2026 |
+| GLM 4.6 | 15. marts 2026 |
+
+---
+
## Privatliv
Alle vores modeller er hostet i USA. Vores udbydere følger en nul-opbevaringspolitik og bruger ikke dine data til modeltræning, med følgende undtagelser:
diff --git a/packages/web/src/content/docs/de/zen.mdx b/packages/web/src/content/docs/de/zen.mdx
index 7545b10deb..e5661ad569 100644
--- a/packages/web/src/content/docs/de/zen.mdx
+++ b/packages/web/src/content/docs/de/zen.mdx
@@ -57,6 +57,7 @@ Du kannst unsere Modelle auch ueber die folgenden API-Endpunkte aufrufen.
| Model | Model ID | Endpoint | AI SDK Package |
| ------------------ | ------------------ | -------------------------------------------------- | --------------------------- |
+| GPT 5.4 | gpt-5.4 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
| GPT 5.3 Codex | gpt-5.3-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
| GPT 5.2 | gpt-5.2 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
| GPT 5.2 Codex | gpt-5.2-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
@@ -114,12 +115,12 @@ Unten siehst du die Preise **pro 1 Mio. Tokens**.
| --------------------------------- | ------ | ------ | ----------- | ------------ |
| Big Pickle | Free | Free | Free | - |
| MiniMax M2.5 Free | Free | Free | Free | - |
-| MiniMax M2.5 | $0.30 | $1.20 | $0.06 | - |
+| MiniMax M2.5 | $0.30 | $1.20 | $0.06 | $0.375 |
| MiniMax M2.1 | $0.30 | $1.20 | $0.10 | - |
| GLM 5 | $1.00 | $3.20 | $0.20 | - |
| GLM 4.7 | $0.60 | $2.20 | $0.10 | - |
| GLM 4.6 | $0.60 | $2.20 | $0.10 | - |
-| Kimi K2.5 | $0.60 | $3.00 | $0.08 | - |
+| Kimi K2.5 | $0.60 | $3.00 | $0.10 | - |
| Kimi K2 Thinking | $0.40 | $2.50 | - | - |
| Kimi K2 | $0.40 | $2.50 | - | - |
| Qwen3 Coder 480B | $0.45 | $1.50 | - | - |
@@ -140,6 +141,7 @@ Unten siehst du die Preise **pro 1 Mio. Tokens**.
| Gemini 3 Pro (≤ 200K tokens) | $2.00 | $12.00 | $0.20 | - |
| Gemini 3 Pro (> 200K tokens) | $4.00 | $18.00 | $0.40 | - |
| Gemini 3 Flash | $0.50 | $3.00 | $0.05 | - |
+| GPT 5.4 | $2.50 | $15.00 | $0.25 | - |
| GPT 5.3 Codex | $1.75 | $14.00 | $0.175 | - |
| GPT 5.2 | $1.75 | $14.00 | $0.175 | - |
| GPT 5.2 Codex | $1.75 | $14.00 | $0.175 | - |
@@ -184,6 +186,19 @@ Mit aktiviertem Auto-Reload kann die Abrechnung dennoch darueber liegen, falls d
---
+### Veraltete Modelle
+
+| Model | Datum der Abschaltung |
+| ---------------- | --------------------- |
+| Qwen3 Coder 480B | 6. Feb. 2026 |
+| Kimi K2 Thinking | 6. Maerz 2026 |
+| Kimi K2 | 6. Maerz 2026 |
+| MiniMax M2.1 | 15. Maerz 2026 |
+| GLM 4.7 | 15. Maerz 2026 |
+| GLM 4.6 | 15. Maerz 2026 |
+
+---
+
## Datenschutz
Alle Modelle werden in den USA gehostet.
diff --git a/packages/web/src/content/docs/es/zen.mdx b/packages/web/src/content/docs/es/zen.mdx
index 94838902a5..9848eb100a 100644
--- a/packages/web/src/content/docs/es/zen.mdx
+++ b/packages/web/src/content/docs/es/zen.mdx
@@ -62,6 +62,7 @@ También puede acceder a nuestros modelos a través de los siguientes puntos fin
| Modelo | Model ID | Endpoint | AI SDK package |
| ------------------ | ------------------ | -------------------------------------------------- | --------------------------- |
+| GPT 5.4 | gpt-5.4 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
| GPT 5.3 Codex | gpt-5.3-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
| GPT 5.2 | gpt-5.2 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
| GPT 5.2 Codex | gpt-5.2-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
@@ -145,6 +146,7 @@ Apoyamos un modelo de pago por uso. A continuación se muestran los precios **po
| Gemini 3 Pro (≤ 200K tokens) | $2.00 | $12.00 | $0,20 | - |
| Gemini 3 Pro (> 200K tokens) | $4.00 | $18.00 | $0,40 | - |
| Gemini 3 Flash | $0,50 | $3.00 | $0,05 | - |
+| GPT 5.4 | $2,50 | $15,00 | $0,25 | - |
| GPT 5.3 Codex | $1,75 | $14.00 | $0,175 | - |
| GPT 5.2 | $1,75 | $14.00 | $0,175 | - |
| GPT 5.2 Codex | $1,75 | $14.00 | $0,175 | - |
@@ -190,6 +192,19 @@ cobrarle más de $20 si su saldo es inferior a $5.
---
+### Modelos obsoletos
+
+| Modelo | Fecha de retiro |
+| ---------------- | ------------------- |
+| Qwen3 Coder 480B | 6 de feb. de 2026 |
+| Kimi K2 Thinking | 6 de marzo de 2026 |
+| Kimi K2 | 6 de marzo de 2026 |
+| MiniMax M2.1 | 15 de marzo de 2026 |
+| GLM 4.7 | 15 de marzo de 2026 |
+| GLM 4.6 | 15 de marzo de 2026 |
+
+---
+
## Privacidad
Todos nuestros modelos están alojados en los EE. UU. Nuestros proveedores siguen una política de retención cero y no utilizan sus datos para la capacitación de modelos, con las siguientes excepciones:
diff --git a/packages/web/src/content/docs/fr/zen.mdx b/packages/web/src/content/docs/fr/zen.mdx
index e40b1be77e..7310922aea 100644
--- a/packages/web/src/content/docs/fr/zen.mdx
+++ b/packages/web/src/content/docs/fr/zen.mdx
@@ -55,6 +55,7 @@ Vous pouvez également accéder à nos modèles via les points de terminaison AP
| Modèle | ID du modèle | Point de terminaison | Package SDK IA |
| ------------------ | ------------------ | -------------------------------------------------- | --------------------------- |
+| GPT 5.4 | gpt-5.4 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
| GPT 5.3 Codex | gpt-5.3-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
| GPT 5.2 | gpt-5.2 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
| GPT 5.2 Codex | gpt-5.2-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
@@ -136,6 +137,7 @@ Nous soutenons un modèle de paiement à l'utilisation. Vous trouverez ci-dessou
| Gemini 3 Pro (≤ 200K jetons) | 2,00 $ | 12,00 $ | 0,20 $ | - |
| Gemini 3 Pro (> 200K jetons) | 4,00 $ | 18,00 $ | 0,40 $ | - |
| Gemini 3 Flash | 0,50 $ | 3,00 $ | 0,05 $ | - |
+| GPT 5.4 | 2,50 $ | 15,00 $ | 0,25 $ | - |
| GPT 5.3 Codex | 1,75 $ | 14,00 $ | 0,175 $ | - |
| GPT 5.2 | 1,75 $ | 14,00 $ | 0,175 $ | - |
| GPT 5.2 Codex | 1,75 $ | 14,00 $ | 0,175 $ | - |
@@ -178,6 +180,19 @@ Par exemple, disons que vous définissez une limite d'utilisation mensuelle à 2
---
+### Modèles obsolètes
+
+| Modèle | Date de dépréciation |
+| ---------------- | -------------------- |
+| Qwen3 Coder 480B | 6 février 2026 |
+| Kimi K2 Thinking | 6 mars 2026 |
+| Kimi K2 | 6 mars 2026 |
+| MiniMax M2.1 | 15 mars 2026 |
+| GLM 4.7 | 15 mars 2026 |
+| GLM 4.6 | 15 mars 2026 |
+
+---
+
## Confidentialité
Tous nos modèles sont hébergés aux États-Unis. Nos fournisseurs suivent une politique de rétention zéro et n'utilisent pas vos données pour la formation de modèles, avec les exceptions suivantes :
diff --git a/packages/web/src/content/docs/it/zen.mdx b/packages/web/src/content/docs/it/zen.mdx
index db0434db50..3c892f0d48 100644
--- a/packages/web/src/content/docs/it/zen.mdx
+++ b/packages/web/src/content/docs/it/zen.mdx
@@ -55,6 +55,7 @@ Puoi anche accedere ai nostri modelli tramite i seguenti endpoint API.
| Modello | ID modello | Endpoint | Pacchetto AI SDK |
| ------------------ | ------------------ | -------------------------------------------------- | --------------------------- |
+| GPT 5.4 | gpt-5.4 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
| GPT 5.3 Codex | gpt-5.3-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
| GPT 5.2 | gpt-5.2 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
| GPT 5.2 Codex | gpt-5.2-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
@@ -136,6 +137,7 @@ Supportiamo un modello pay-as-you-go. Qui sotto trovi i prezzi **per 1M token**.
| Gemini 3 Pro (≤ 200K tokens) | $2.00 | $12.00 | $0.20 | - |
| Gemini 3 Pro (> 200K tokens) | $4.00 | $18.00 | $0.40 | - |
| Gemini 3 Flash | $0.50 | $3.00 | $0.05 | - |
+| GPT 5.4 | $2.50 | $15.00 | $0.25 | - |
| GPT 5.3 Codex | $1.75 | $14.00 | $0.175 | - |
| GPT 5.2 | $1.75 | $14.00 | $0.175 | - |
| GPT 5.2 Codex | $1.75 | $14.00 | $0.175 | - |
@@ -178,6 +180,19 @@ Per esempio, se imposti un limite mensile a $20, Zen non usera piu di $20 in un
---
+### Modelli deprecati
+
+| Modello | Data di deprecazione |
+| ---------------- | -------------------- |
+| Qwen3 Coder 480B | 6 feb 2026 |
+| Kimi K2 Thinking | 6 mar 2026 |
+| Kimi K2 | 6 mar 2026 |
+| MiniMax M2.1 | 15 mar 2026 |
+| GLM 4.7 | 15 mar 2026 |
+| GLM 4.6 | 15 mar 2026 |
+
+---
+
## Privacy
Tutti i nostri modelli sono ospitati negli US. I nostri provider seguono una policy di zero-retention e non usano i tuoi dati per training dei modelli, con le seguenti eccezioni:
diff --git a/packages/web/src/content/docs/ja/zen.mdx b/packages/web/src/content/docs/ja/zen.mdx
index c7121fb3b7..7a380aa9fb 100644
--- a/packages/web/src/content/docs/ja/zen.mdx
+++ b/packages/web/src/content/docs/ja/zen.mdx
@@ -54,6 +54,7 @@ OpenCode Zen は、OpenCode の他のプロバイダーと同様に機能しま
| Model | Model ID | Endpoint | AI SDK Package |
| ------------------ | ------------------ | -------------------------------------------------- | --------------------------- |
+| GPT 5.4 | gpt-5.4 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
| GPT 5.3 Codex | gpt-5.3-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
| GPT 5.2 | gpt-5.2 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
| GPT 5.2 Codex | gpt-5.2-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
@@ -137,6 +138,7 @@ https://opencode.ai/zen/v1/models
| Gemini 3 Pro (≤ 200K tokens) | $2.00 | $12.00 | $0.20 | - |
| Gemini 3 Pro (> 200K tokens) | $4.00 | $18.00 | $0.40 | - |
| Gemini 3 Flash | $0.50 | $3.00 | $0.05 | - |
+| GPT 5.4 | $2.50 | $15.00 | $0.25 | - |
| GPT 5.3 Codex | $1.75 | $14.00 | $0.175 | - |
| GPT 5.2 | $1.75 | $14.00 | $0.175 | - |
| GPT 5.2 Codex | $1.75 | $14.00 | $0.175 | - |
@@ -179,6 +181,19 @@ https://opencode.ai/zen/v1/models
---
+### 非推奨モデル
+
+| モデル           | 非推奨日         |
+| ---------------- | ---------------- |
+| Qwen3 Coder 480B | 2026年2月6日 |
+| Kimi K2 Thinking | 2026年3月6日 |
+| Kimi K2 | 2026年3月6日 |
+| MiniMax M2.1 | 2026年3月15日 |
+| GLM 4.7 | 2026年3月15日 |
+| GLM 4.6 | 2026年3月15日 |
+
+---
+
## プライバシー
すべてのモデルは米国でホストされています。当社のプロバイダーはゼロ保持ポリシーに従い、次の例外を除いて、モデルのトレーニングにデータを使用しません。
diff --git a/packages/web/src/content/docs/ko/zen.mdx b/packages/web/src/content/docs/ko/zen.mdx
index ae598cee18..5c2b9644ff 100644
--- a/packages/web/src/content/docs/ko/zen.mdx
+++ b/packages/web/src/content/docs/ko/zen.mdx
@@ -55,6 +55,7 @@ OpenCode Zen은 OpenCode의 다른 제공자와 동일한 방식으로 작동합
| 모델 | 모델 ID | 엔드포인트 | AI SDK 패키지 |
| ------------------ | ------------------ | -------------------------------------------------- | --------------------------- |
+| GPT 5.4 | gpt-5.4 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
| GPT 5.3 Codex | gpt-5.3-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
| GPT 5.2 | gpt-5.2 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
| GPT 5.2 Codex | gpt-5.2-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
@@ -111,12 +112,12 @@ https://opencode.ai/zen/v1/models
| --------------------------------- | ------ | ------ | --------- | --------- |
| Big Pickle | Free | Free | Free | - |
| MiniMax M2.5 Free | Free | Free | Free | - |
-| MiniMax M2.5 | $0.30 | $1.20 | $0.06 | - |
+| MiniMax M2.5 | $0.30 | $1.20 | $0.06 | $0.375 |
| MiniMax M2.1 | $0.30 | $1.20 | $0.10 | - |
| GLM 5 | $1.00 | $3.20 | $0.20 | - |
| GLM 4.7 | $0.60 | $2.20 | $0.10 | - |
| GLM 4.6 | $0.60 | $2.20 | $0.10 | - |
-| Kimi K2.5 | $0.60 | $3.00 | $0.08 | - |
+| Kimi K2.5 | $0.60 | $3.00 | $0.10 | - |
| Kimi K2 Thinking | $0.40 | $2.50 | - | - |
| Kimi K2 | $0.40 | $2.50 | - | - |
| Qwen3 Coder 480B | $0.45 | $1.50 | - | - |
@@ -137,6 +138,7 @@ https://opencode.ai/zen/v1/models
| Gemini 3 Pro (≤ 200K tokens) | $2.00 | $12.00 | $0.20 | - |
| Gemini 3 Pro (> 200K tokens) | $4.00 | $18.00 | $0.40 | - |
| Gemini 3 Flash | $0.50 | $3.00 | $0.05 | - |
+| GPT 5.4 | $2.50 | $15.00 | $0.25 | - |
| GPT 5.3 Codex | $1.75 | $14.00 | $0.175 | - |
| GPT 5.2 | $1.75 | $14.00 | $0.175 | - |
| GPT 5.2 Codex | $1.75 | $14.00 | $0.175 | - |
@@ -180,6 +182,19 @@ https://opencode.ai/zen/v1/models
---
+### 지원 중단 모델
+
+| 모델 | 지원 중단일 |
+| ---------------- | --------------- |
+| Qwen3 Coder 480B | 2026년 2월 6일 |
+| Kimi K2 Thinking | 2026년 3월 6일 |
+| Kimi K2 | 2026년 3월 6일 |
+| MiniMax M2.1 | 2026년 3월 15일 |
+| GLM 4.7 | 2026년 3월 15일 |
+| GLM 4.6 | 2026년 3월 15일 |
+
+---
+
## 개인정보 보호
당사의 모든 모델은 미국에서 호스팅됩니다. 당사 제공자는 데이터 무보존(zero-retention) 정책을 따르며, 아래의 예외를 제외하고는 귀하의 데이터를 모델 학습에 사용하지 않습니다.
diff --git a/packages/web/src/content/docs/nb/zen.mdx b/packages/web/src/content/docs/nb/zen.mdx
index 51399615e5..71dd0e9eaf 100644
--- a/packages/web/src/content/docs/nb/zen.mdx
+++ b/packages/web/src/content/docs/nb/zen.mdx
@@ -64,6 +64,7 @@ Du kan også få tilgang til modellene våre gjennom følgende API-endepunkter.
| Modell | Modell ID | Endepunkt | AI SDK Pakke |
| ------------------ | ------------------ | -------------------------------------------------- | --------------------------- |
+| GPT 5.4 | gpt-5.4 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
| GPT 5.3 Codex | gpt-5.3-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
| GPT 5.2 | gpt-5.2 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
| GPT 5.2 Codex | gpt-5.2-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
@@ -121,7 +122,7 @@ Vi støtter en pay-as-you-go-modell. Nedenfor er prisene **per 1 million tokens*
| --------------------------------- | ------- | ------ | ------------- | --------------- |
| Big Pickle | Gratis | Gratis | Gratis | - |
| MiniMax M2.5 Free | Gratis | Gratis | Gratis | - |
-| MiniMax M2.5 | $0,30 | $1,20 | $0,06 | - |
+| MiniMax M2.5 | $0,30 | $1,20 | $0,06 | $0,375 |
| MiniMax M2.1 | $0,30 | $1,20 | $0,10 | - |
| GLM 5 | $1,00 | $3,20 | $0,20 | - |
| GLM 4.7 | $0,60 | $2,20 | $0,10 | - |
@@ -147,6 +148,7 @@ Vi støtter en pay-as-you-go-modell. Nedenfor er prisene **per 1 million tokens*
| Gemini 3 Pro (≤ 200K tokens) | $2,00 | $12,00 | $0,20 | - |
| Gemini 3 Pro (> 200K tokens) | $4,00 | $18,00 | $0,40 | - |
| Gemini 3 Flash | $0,50 | $3,00 | $0,05 | - |
+| GPT 5.4 | $2,50 | $15,00 | $0,25 | - |
| GPT 5.3 Codex | $1,75 | $14,00 | $0,175 | - |
| GPT 5.2 | $1,75 | $14,00 | $0,175 | - |
| GPT 5.2 Codex | $1,75 | $14,00 | $0,175 | - |
@@ -192,6 +194,19 @@ belaster deg mer enn $20 hvis saldoen din går under $5.
---
+### Utfasede modeller
+
+| Modell           | Utfasingsdato |
+| ---------------- | ------------- |
+| Qwen3 Coder 480B | 6. feb. 2026 |
+| Kimi K2 Thinking | 6. mars 2026 |
+| Kimi K2 | 6. mars 2026 |
+| MiniMax M2.1 | 15. mars 2026 |
+| GLM 4.7 | 15. mars 2026 |
+| GLM 4.6 | 15. mars 2026 |
+
+---
+
## Personvern
Alle våre modeller er hostet i USA. Leverandørene våre følger retningslinjer om ingen datalagring og bruker ikke dataene dine til modellopplæring, med følgende unntak:
diff --git a/packages/web/src/content/docs/pl/zen.mdx b/packages/web/src/content/docs/pl/zen.mdx
index dbb75489cb..ddb7d2ff15 100644
--- a/packages/web/src/content/docs/pl/zen.mdx
+++ b/packages/web/src/content/docs/pl/zen.mdx
@@ -1,21 +1,21 @@
---
title: Zen
-description: Wyselekcjonowana lista modeli dostarczonych przez opencode.
+description: Wyselekcjonowana lista modeli dostarczonych przez OpenCode.
---
import config from "../../../../config.mjs"
export const console = config.console
export const email = `mailto:${config.email}`
-OpenCode Zen to lista przetestowanych i zweryfikowanych modeli udostępniona przez zespół opencode.
+OpenCode Zen to lista przetestowanych i zweryfikowanych modeli udostępniona przez zespół OpenCode.
:::note
-OpenCode Zen is currently in beta.
+OpenCode Zen jest obecnie w wersji beta.
:::
-Zen działa jak każdy inny dostawca opencode. Logujesz się do OpenCode Zen i dostajesz
-Twój klucz API. Jest **całkowicie opcjonalny** i nie musisz go używać, aby z niego korzystać
-opencode.
+Zen działa jak każdy inny dostawca w OpenCode. Logujesz się do OpenCode Zen i otrzymujesz
+swój klucz API. Jest to **całkowicie opcjonalne** i nie musisz tego używać, aby korzystać z
+OpenCode.
---
@@ -23,23 +23,23 @@ opencode.
Istnieje ogromna liczba modeli, ale tylko kilka z nich
działa dobrze jako agenci kodujący. Dodatkowo większość dostawców jest
-skonfigurowana bardzo różnie; więc otrzymujesz zupełnie inną wydajność i jakość.
+skonfigurowana bardzo różnie, więc otrzymujesz bardzo różną wydajność i jakość.
:::tip
-Przetestowaliśmy wybraną grupę modeli i dostawców, którzy dobrze współpracują z opencode.
+Przetestowaliśmy wybraną grupę modeli i dostawców, którzy dobrze współpracują z OpenCode.
:::
-Jeśli więc używasz modelu za pośrednictwem czegoś takiego jak OpenRouter, nigdy nie będzie to możliwe
+Jeśli więc używasz modelu za pośrednictwem czegoś takiego jak OpenRouter, nigdy nie możesz być
pewien, czy otrzymujesz najlepszą wersję modelu, jaki chcesz.
Aby to naprawić, zrobiliśmy kilka rzeczy:
-1. Przetestowaliśmy wybraną grupę modeli i rozmawialiśmy z ich zespołami o tym, jak to zrobić
- najlepiej je uruchom.
+1. Przetestowaliśmy wybraną grupę modeli i rozmawialiśmy z ich zespołami o tym, jak
+ najlepiej je uruchamiać.
2. Następnie współpracowaliśmy z kilkoma dostawcami, aby upewnić się, że są one obsługiwane
- correctly.
-3. Na koniec porównaliśmy kombinację modelu/dostawcy i otrzymaliśmy wynik
- z listą, którą z przyjemnością polecamy.
+ poprawnie.
+3. Na koniec sprawdziliśmy wydajność kombinacji modelu/dostawcy i stworzyliśmy
+ listę, którą z czystym sumieniem polecamy.
OpenCode Zen to brama AI, która zapewnia dostęp do tych modeli.
@@ -47,14 +47,14 @@ OpenCode Zen to brama AI, która zapewnia dostęp do tych modeli.
## Jak to działa
-OpenCode Zen działa jak każdy inny dostawca opencode.
+OpenCode Zen działa jak każdy inny dostawca w OpenCode.
-1. Logujesz się do **OpenCode Zen**, dodajesz swoje rozliczenia
- szczegóły i skopiuj klucz API.
+1. Logujesz się do **OpenCode Zen**, dodajesz dane rozliczeniowe
+ i kopiujesz swój klucz API.
2. Uruchamiasz polecenie `/connect` w TUI, wybierasz OpenCode Zen i wklejasz klucz API.
3. Uruchom `/models` w TUI, aby zobaczyć listę zalecanych przez nas modeli.
-Opłata jest pobierana za każde żądanie i możesz dodać kredyty do swojego konta.
+Opłata jest pobierana za każde żądanie i możesz dodać środki do swojego konta.
---
@@ -64,6 +64,7 @@ Dostęp do naszych modeli można również uzyskać za pośrednictwem następuj
| Model | Identyfikator modelu | Punkt końcowy | Pakiet SDK AI |
| ------------------ | -------------------- | -------------------------------------------------- | --------------------------- |
+| GPT 5.4 | gpt-5.4 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
| GPT 5.3 Codex | gpt-5.3-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
| GPT 5.2 | gpt-5.2 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
| GPT 5.2 Codex | gpt-5.2-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
@@ -97,9 +98,9 @@ Dostęp do naszych modeli można również uzyskać za pośrednictwem następuj
| Qwen3 Coder 480B | qwen3-coder | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
| Big Pickle | big-pickle | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-[Identyfikator modelu](/docs/config/#models) w konfiguracji opencode
-używa formatu `opencode/`. Na przykład w przypadku Kodeksu GPT 5.2 zrobiłbyś to
-użyj `opencode/gpt-5.2-codex` w swojej konfiguracji.
+[Identyfikator modelu](/docs/config/#models) w konfiguracji OpenCode
+używa formatu `opencode/`. Na przykład w przypadku GPT 5.2 Codex użyłbyś
+`opencode/gpt-5.2-codex` w swojej konfiguracji.
---
@@ -121,12 +122,12 @@ Wspieramy model pay-as-you-go. Poniżej znajdują się ceny **za 1M tokenów**.
| --------------------------------- | ------- | ------- | --------------------------- | -------------------------- |
| Big Pickle | Free | Free | Free | - |
| MiniMax M2.5 Free | Free | Free | Free | - |
-| MiniMax M2.5 | $0.30 | $1.20 | $0.06 | - |
+| MiniMax M2.5 | $0.30 | $1.20 | $0.06 | $0.375 |
| MiniMax M2.1 | $0.30 | $1.20 | $0.10 | - |
| GLM 5 | $1.00 | $3.20 | $0.20 | - |
| GLM 4.7 | $0.60 | $2.20 | $0.10 | - |
| GLM 4.6 | $0.60 | $2.20 | $0.10 | - |
-| Kimi K2.5 | $0.60 | $3.00 | $0.08 | - |
+| Kimi K2.5 | $0.60 | $3.00 | $0.10 | - |
| Kimi K2 Thinking | $0.40 | $2.50 | - | - |
| Kimi K2 | $0.40 | $2.50 | - | - |
| Qwen3 Coder 480B | $0.45 | $1.50 | - | - |
@@ -147,6 +148,7 @@ Wspieramy model pay-as-you-go. Poniżej znajdują się ceny **za 1M tokenów**.
| Gemini 3 Pro (≤ 200K tokens) | $2.00 | $12.00 | $0.20 | - |
| Gemini 3 Pro (> 200K tokens) | $4.00 | $18.00 | $0.40 | - |
| Gemini 3 Flash | $0.50 | $3.00 | $0.05 | - |
+| GPT 5.4 | $2.50 | $15.00 | $0.25 | - |
| GPT 5.3 Codex | $1.75 | $14.00 | $0.175 | - |
| GPT 5.2 | $1.75 | $14.00 | $0.175 | - |
| GPT 5.2 Codex | $1.75 | $14.00 | $0.175 | - |
@@ -158,10 +160,10 @@ Wspieramy model pay-as-you-go. Poniżej znajdują się ceny **za 1M tokenów**.
| GPT 5 Codex | $1.07 | $8.50 | $0.107 | - |
| GPT 5 Nano | Free | Free | Free | - |
-Możesz zauważyć _Claude Haiku 3.5_ w swojej historii użytkowania. To jest [model niskokosztowy](/docs/config/#models), który służy do generowania tytułów sesji.
+Możesz zauważyć _Claude Haiku 3.5_ w swojej historii użytkowania. Jest to [tani model](/docs/config/#models), który jest używany do generowania tytułów Twoich sesji.
:::note
-Opłaty za karty kredytowe są przenoszone na koszt (4,4% + 0,30 USD za transakcję); nie pobieramy żadnych dodatkowych opłat.
+Opłaty za karty kredytowe są przenoszone po kosztach (4,4% + 0,30 USD za transakcję); nie pobieramy nic poza tym.
:::
Darmowe modele:
@@ -177,18 +179,31 @@ Darmowe modele:
Jeśli Twoje saldo spadnie poniżej 5 USD, Zen automatycznie doładuje 20 USD.
-Możesz zmienić kwotę automatycznego doładowania. Możesz także całkowicie wyłączyć automatyczne przeładowywanie.
+Możesz zmienić kwotę automatycznego doładowania. Możesz także całkowicie wyłączyć automatyczne doładowanie.
---
### Limity miesięczne
-Możesz także ustawić miesięczny limit wykorzystania dla całego obszaru roboczego i dla każdego z nich
-członek Twojego zespołu.
+Możesz także ustawić miesięczny limit użytkowania dla całego obszaru roboczego i dla każdego
+członka Twojego zespołu.
-Załóżmy na przykład, że ustawiłeś miesięczny limit użytkowania na 20 USD, Zen nie będzie z niego korzystał
-ponad 20 dolarów miesięcznie. Ale jeśli masz włączone automatyczne przeładowywanie, Zen może się skończyć
-obciąży Cię kwotą wyższą niż 20 USD, jeśli saldo spadnie poniżej 5 USD.
+Na przykład, jeśli ustawisz miesięczny limit użytkowania na 20 USD, Zen nie zużyje
+więcej niż 20 dolarów w miesiącu. Ale jeśli masz włączone automatyczne doładowanie, Zen może
+obciążyć Cię kwotą wyższą niż 20 USD, jeśli saldo spadnie poniżej 5 USD.
+
+---
+
+### Przestarzałe modele
+
+| Model | Data wycofania |
+| ---------------- | -------------- |
+| Qwen3 Coder 480B | 6 lutego 2026 |
+| Kimi K2 Thinking | 6 marca 2026 |
+| Kimi K2 | 6 marca 2026 |
+| MiniMax M2.1 | 15 marca 2026 |
+| GLM 4.7 | 15 marca 2026 |
+| GLM 4.6 | 15 marca 2026 |
---
@@ -198,22 +213,22 @@ Wszystkie nasze modele są hostowane w USA. Nasi dostawcy przestrzegają polityk
- Big Pickle: W okresie bezpłatnym zebrane dane mogą zostać wykorzystane do udoskonalenia modelu.
- MiniMax M2.5 Free: W okresie bezpłatnym zebrane dane mogą zostać wykorzystane do udoskonalenia modelu.
-- Interfejsy API OpenAI: żądania są przechowywane przez 30 dni zgodnie z [Zasadami dotyczącymi danych OpenAI](https://platform.openai.com/docs/guides/your-data).
-- Interfejsy API Anthropic: żądania są przechowywane przez 30 dni zgodnie z [Zasadami dotyczącymi danych firmy Anthropic](https://docs.anthropic.com/en/docs/claude-code/data-usage).
+- API OpenAI: Żądania są przechowywane przez 30 dni zgodnie z [Zasadami dotyczącymi danych OpenAI](https://platform.openai.com/docs/guides/your-data).
+- API Anthropic: Żądania są przechowywane przez 30 dni zgodnie z [Zasadami dotyczącymi danych Anthropic](https://docs.anthropic.com/en/docs/claude-code/data-usage).
---
## Dla zespołów
-Zen świetnie sprawdza się także w zespołach. Możesz zapraszać członków zespołu, przypisywać role, zarządzać
+Zen działa świetnie także dla zespołów. Możesz zapraszać członków zespołu, przypisywać role, dobierać
modele, z których korzysta Twój zespół i nie tylko.
:::note
Obszary robocze są obecnie bezpłatne dla zespołów w ramach wersji beta.
:::
-Zarządzanie obszarem roboczym jest obecnie bezpłatne dla zespołów w ramach wersji beta. Będziemy
-wkrótce udostępnimy więcej szczegółów na temat cen.
+Zarządzanie obszarem roboczym jest obecnie bezpłatne dla zespołów w ramach wersji beta.
+Wkrótce udostępnimy więcej szczegółów na temat cen.
---
@@ -221,8 +236,8 @@ wkrótce udostępnimy więcej szczegółów na temat cen.
Możesz zapraszać członków zespołu do swojego obszaru roboczego i przypisywać role:
-- **Administrator**: Zarządzaj modelami, członkami, kluczami API i rozliczeniami
-- **Członek**: Zarządzaj tylko własnymi kluczami API
+- **Admin**: Zarządzanie modelami, członkami, kluczami API i rozliczeniami
+- **Członek**: Zarządzanie tylko własnymi kluczami API
Administratorzy mogą także ustawić miesięczne limity wydatków dla każdego członka, aby utrzymać koszty pod kontrolą.
@@ -233,7 +248,7 @@ Administratorzy mogą także ustawić miesięczne limity wydatków dla każdego
Administratorzy mogą włączać i wyłączać określone modele w obszarze roboczym. Żądania skierowane do wyłączonego modelu zwrócą błąd.
Jest to przydatne w przypadkach, gdy chcesz wyłączyć korzystanie z modelu, który
-collects data.
+zbiera dane.
---
@@ -253,6 +268,6 @@ i chcesz go używać zamiast tego, który zapewnia Zen.
Stworzyliśmy OpenCode Zen, aby:
1. **Testować** (Benchmark) najlepsze modele/dostawców dla agentów kodujących.
-2. Miej dostęp do opcji **najwyższej jakości**, a nie obniżaj wydajności ani nie kieruj się do tańszych dostawców.
-3. Przekaż wszelkie **obniżki cen**, sprzedając po kosztach; więc jedyną marżą jest pokrycie naszych opłat manipulacyjnych.
-4. Nie **nie blokuj**, umożliwiając używanie go z dowolnym innym agentem kodującym. I zawsze pozwalaj na korzystanie z opencode dowolnego innego dostawcy.
+2. Mieć dostęp do opcji **najwyższej jakości**, a nie obniżać wydajności ani nie kierować do tańszych dostawców.
+3. Przekazywać wszelkie **obniżki cen**, sprzedając po kosztach; więc jedyną marżą jest pokrycie naszych opłat manipulacyjnych.
+4. Nie **mieć blokady** (no lock-in), umożliwiając używanie go z dowolnym innym agentem kodującym. I zawsze pozwalać na korzystanie z dowolnego innego dostawcy w OpenCode.
diff --git a/packages/web/src/content/docs/pt-br/zen.mdx b/packages/web/src/content/docs/pt-br/zen.mdx
index ba029fb7fc..1ed92cbd78 100644
--- a/packages/web/src/content/docs/pt-br/zen.mdx
+++ b/packages/web/src/content/docs/pt-br/zen.mdx
@@ -55,6 +55,7 @@ Você também pode acessar nossos modelos através dos seguintes endpoints da AP
| Modelo | ID do Modelo | Endpoint | Pacote AI SDK |
| ------------------ | ------------------ | -------------------------------------------------- | --------------------------- |
+| GPT 5.4 | gpt-5.4 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
| GPT 5.3 Codex | gpt-5.3-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
| GPT 5.2 | gpt-5.2 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
| GPT 5.2 Codex | gpt-5.2-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
@@ -136,6 +137,7 @@ Nós suportamos um modelo de pagamento conforme o uso. Abaixo estão os preços
| Gemini 3 Pro (≤ 200K tokens) | $2.00 | $12.00 | $0.20 | - |
| Gemini 3 Pro (> 200K tokens) | $4.00 | $18.00 | $0.40 | - |
| Gemini 3 Flash | $0.50 | $3.00 | $0.05 | - |
+| GPT 5.4 | $2.50 | $15.00 | $0.25 | - |
| GPT 5.3 Codex | $1.75 | $14.00 | $0.175 | - |
| GPT 5.2 | $1.75 | $14.00 | $0.175 | - |
| GPT 5.2 Codex | $1.75 | $14.00 | $0.175 | - |
@@ -178,6 +180,19 @@ Por exemplo, digamos que você defina um limite de uso mensal de $20, o Zen não
---
+### Modelos obsoletos
+
+| Modelo | Data de descontinuação |
+| ---------------- | ---------------------- |
+| Qwen3 Coder 480B | 6 de fev. de 2026 |
+| Kimi K2 Thinking | 6 de mar. de 2026 |
+| Kimi K2 | 6 de mar. de 2026 |
+| MiniMax M2.1 | 15 de mar. de 2026 |
+| GLM 4.7 | 15 de mar. de 2026 |
+| GLM 4.6 | 15 de mar. de 2026 |
+
+---
+
## Privacidade
Todos os nossos modelos estão hospedados nos EUA. Nossos provedores seguem uma política de zero retenção e não usam seus dados para treinamento de modelos, com as seguintes exceções:
diff --git a/packages/web/src/content/docs/ru/zen.mdx b/packages/web/src/content/docs/ru/zen.mdx
index 078d1a3819..dff843d034 100644
--- a/packages/web/src/content/docs/ru/zen.mdx
+++ b/packages/web/src/content/docs/ru/zen.mdx
@@ -63,6 +63,7 @@ OpenCode Zen работает так же, как и любой другой п
| Модель | Идентификатор модели | Конечная точка | Пакет AI SDK |
| ------------------ | -------------------- | -------------------------------------------------- | --------------------------- |
+| GPT 5.4 | gpt-5.4 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
| GPT 5.3 Codex | gpt-5.3-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
| GPT 5.2 | gpt-5.2 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
| GPT 5.2 Codex | gpt-5.2-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
@@ -146,6 +147,7 @@ https://opencode.ai/zen/v1/models
| Gemini 3 Pro (≤ 200 тыс. токенов) | $2.00 | $12.00 | $0.20 | - |
| Gemini 3 Pro (> 200 тыс. токенов) | $4.00 | $18.00 | $0.40 | - |
| Gemini 3 Flash | $0.50 | $3.00 | $0.05 | - |
+| GPT 5.4 | $2.50 | $15.00 | $0.25 | - |
| GPT 5.3 Codex | $1.75 | $14.00 | $0.175 | - |
| GPT 5.2 | $1.75 | $14.00 | $0.175 | - |
| GPT 5.2 Codex | $1.75 | $14.00 | $0.175 | - |
@@ -191,6 +193,19 @@ https://opencode.ai/zen/v1/models
---
+### Устаревшие модели
+
+| Модель | Дата отключения |
+| ---------------- | ---------------- |
+| Qwen3 Coder 480B | 6 февр. 2026 г. |
+| Kimi K2 Thinking | 6 марта 2026 г. |
+| Kimi K2 | 6 марта 2026 г. |
+| MiniMax M2.1 | 15 марта 2026 г. |
+| GLM 4.7 | 15 марта 2026 г. |
+| GLM 4.6 | 15 марта 2026 г. |
+
+---
+
## Конфиденциальность
Все наши модели размещены в США. Наши поставщики придерживаются политики нулевого хранения и не используют ваши данные для обучения моделей, за следующими исключениями:
diff --git a/packages/web/src/content/docs/th/zen.mdx b/packages/web/src/content/docs/th/zen.mdx
index 7b9f172756..36b2090807 100644
--- a/packages/web/src/content/docs/th/zen.mdx
+++ b/packages/web/src/content/docs/th/zen.mdx
@@ -64,6 +64,7 @@ OpenCode Zen ทำงานเหมือนกับผู้ให้บร
| Model | Model ID | Endpoint | แพ็คเกจ AI SDK |
| ------------------ | ------------------ | -------------------------------------------------- | --------------------------- |
+| GPT 5.4 | gpt-5.4 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
| GPT 5.3 Codex | gpt-5.3-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
| GPT 5.2 | gpt-5.2 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
| GPT 5.2 Codex | gpt-5.2-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
@@ -121,12 +122,12 @@ https://opencode.ai/zen/v1/models
| --------------------------------- | ---------- | -------- | ------- | ---------- |
| Big Pickle | ฟรี | ฟรี | ฟรี | - |
| MiniMax M2.5 Free | ฟรี | ฟรี | ฟรี | - |
-| MiniMax M2.5 | $0.30 | $1.20 | $0.06 | - |
+| MiniMax M2.5 | $0.30 | $1.20 | $0.06 | $0.375 |
| MiniMax M2.1 | $0.30 | $1.20 | $0.10 | - |
| GLM 5 | $1.00 | $3.20 | $0.20 | - |
| GLM 4.7 | $0.60 | $2.20 | $0.10 | - |
| GLM 4.6 | $0.60 | $2.20 | $0.10 | - |
-| Kimi K2.5 | $0.60 | $3.00 | $0.08 | - |
+| Kimi K2.5 | $0.60 | $3.00 | $0.10 | - |
| Kimi K2 Thinking | $0.40 | $2.50 | - | - |
| Kimi K2 | $0.40 | $2.50 | - | - |
| Qwen3 Coder 480B | $0.45 | $1.50 | - | - |
@@ -147,6 +148,7 @@ https://opencode.ai/zen/v1/models
| Gemini 3 Pro (≤ 200K tokens) | $2.00 | $12.00 | $0.20 | - |
| Gemini 3 Pro (> 200K tokens) | $4.00 | $18.00 | $0.40 | - |
| Gemini 3 Flash | $0.50 | $3.00 | $0.05 | - |
+| GPT 5.4 | $2.50 | $15.00 | $0.25 | - |
| GPT 5.3 Codex | $1.75 | $14.00 | $0.175 | - |
| GPT 5.2 | $1.75 | $14.00 | $0.175 | - |
| GPT 5.2 Codex | $1.75 | $14.00 | $0.175 | - |
@@ -192,11 +194,24 @@ https://opencode.ai/zen/v1/models
---
+### โมเดลที่เลิกใช้แล้ว
+
+| Model | วันที่เลิกใช้ |
+| ---------------- | ------------- |
+| Qwen3 Coder 480B | 6 ก.พ. 2026 |
+| Kimi K2 Thinking | 6 มี.ค. 2026 |
+| Kimi K2 | 6 มี.ค. 2026 |
+| MiniMax M2.1 | 15 มี.ค. 2026 |
+| GLM 4.7 | 15 มี.ค. 2026 |
+| GLM 4.6 | 15 มี.ค. 2026 |
+
+---
+
## ความเป็นส่วนตัว
โมเดลทั้งหมดของเราโฮสต์ในสหรัฐอเมริกา ผู้ให้บริการของเราปฏิบัติตามนโยบายการเก็บรักษาเป็นศูนย์ และไม่ใช้ข้อมูลของคุณสำหรับการฝึกโมเดล โดยมีข้อยกเว้นต่อไปนี้:
-- Big Pickle: ในช่วงระยะเวลาว่าง ข้อมูลที่รวบรวมอาจนำไปใช้ในการปรับปรุงโมเดลได้
+- Big Pickle: ในช่วงระยะเวลาฟรี ข้อมูลที่รวบรวมอาจนำไปใช้ในการปรับปรุงโมเดลได้
- MiniMax M2.5 Free: ในช่วงระยะเวลาฟรี ข้อมูลที่รวบรวมอาจนำไปใช้ในการปรับปรุงโมเดล
- OpenAI API: คำขอจะถูกเก็บไว้เป็นเวลา 30 วันตาม [นโยบายข้อมูลของ OpenAI](https://platform.openai.com/docs/guides/your-data)
- Anthropic API: คำขอจะถูกเก็บไว้เป็นเวลา 30 วันตาม [นโยบายข้อมูลของ Anthropic](https://docs.anthropic.com/en/docs/claude-code/data-usage)
diff --git a/packages/web/src/content/docs/tr/zen.mdx b/packages/web/src/content/docs/tr/zen.mdx
index 9582a7b7dc..2b79bb9625 100644
--- a/packages/web/src/content/docs/tr/zen.mdx
+++ b/packages/web/src/content/docs/tr/zen.mdx
@@ -55,6 +55,7 @@ Modellerimize aşağıdaki API uç noktaları aracılığıyla da erişebilirsin
| Model | Model ID | Endpoint | AI SDK Package |
| ------------------ | ------------------ | -------------------------------------------------- | --------------------------- |
+| GPT 5.4 | gpt-5.4 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
| GPT 5.3 Codex | gpt-5.3-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
| GPT 5.2 | gpt-5.2 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
| GPT 5.2 Codex | gpt-5.2-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
@@ -136,6 +137,7 @@ Kullandıkça öde modelini destekliyoruz. Aşağıda **1 milyon token başına*
| Gemini 3 Pro (≤ 200K tokens) | $2.00 | $12.00 | $0.20 | - |
| Gemini 3 Pro (> 200K tokens) | $4.00 | $18.00 | $0.40 | - |
| Gemini 3 Flash | $0.50 | $3.00 | $0.05 | - |
+| GPT 5.4 | $2.50 | $15.00 | $0.25 | - |
| GPT 5.3 Codex | $1.75 | $14.00 | $0.175 | - |
| GPT 5.2 | $1.75 | $14.00 | $0.175 | - |
| GPT 5.2 Codex | $1.75 | $14.00 | $0.175 | - |
@@ -178,6 +180,19 @@ Ayrıca tüm çalışma alanı ve ekibinizin her üyesi için aylık kullanım l
---
+### Kullanımdan kaldırılan modeller
+
+| Model | Kullanımdan kaldırılma tarihi |
+| ---------------- | ----------------------------- |
+| Qwen3 Coder 480B | 6 Şub 2026 |
+| Kimi K2 Thinking | 6 Mar 2026 |
+| Kimi K2 | 6 Mar 2026 |
+| MiniMax M2.1 | 15 Mar 2026 |
+| GLM 4.7 | 15 Mar 2026 |
+| GLM 4.6 | 15 Mar 2026 |
+
+---
+
## Gizlilik
Tüm modellerimiz ABD'de barındırılmaktadır. Sağlayıcılarımız sıfır saklama politikasını izler ve aşağıdaki istisnalar dışında verilerinizi model eğitimi için kullanmaz:
diff --git a/packages/web/src/content/docs/zen.mdx b/packages/web/src/content/docs/zen.mdx
index 5ed2125cb1..330f90014d 100644
--- a/packages/web/src/content/docs/zen.mdx
+++ b/packages/web/src/content/docs/zen.mdx
@@ -62,44 +62,47 @@ You are charged per request and you can add credits to your account.
You can also access our models through the following API endpoints.
-| Model | Model ID | Endpoint | AI SDK Package |
-| ------------------ | ------------------ | -------------------------------------------------- | --------------------------- |
-| GPT 5.3 Codex | gpt-5.3-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
-| GPT 5.2 | gpt-5.2 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
-| GPT 5.2 Codex | gpt-5.2-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
-| GPT 5.1 | gpt-5.1 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
-| GPT 5.1 Codex | gpt-5.1-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
-| GPT 5.1 Codex Max | gpt-5.1-codex-max | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
-| GPT 5.1 Codex Mini | gpt-5.1-codex-mini | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
-| GPT 5 | gpt-5 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
-| GPT 5 Codex | gpt-5-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
-| GPT 5 Nano | gpt-5-nano | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
-| Claude Opus 4.6 | claude-opus-4-6 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
-| Claude Opus 4.5 | claude-opus-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
-| Claude Opus 4.1 | claude-opus-4-1 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
-| Claude Sonnet 4.6 | claude-sonnet-4-6 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
-| Claude Sonnet 4.5 | claude-sonnet-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
-| Claude Sonnet 4 | claude-sonnet-4 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
-| Claude Haiku 4.5 | claude-haiku-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
-| Claude Haiku 3.5 | claude-3-5-haiku | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
-| Gemini 3.1 Pro | gemini-3.1-pro | `https://opencode.ai/zen/v1/models/gemini-3.1-pro` | `@ai-sdk/google` |
-| Gemini 3 Pro | gemini-3-pro | `https://opencode.ai/zen/v1/models/gemini-3-pro` | `@ai-sdk/google` |
-| Gemini 3 Flash | gemini-3-flash | `https://opencode.ai/zen/v1/models/gemini-3-flash` | `@ai-sdk/google` |
-| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| MiniMax M2.5 Free | minimax-m2.5-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| MiniMax M2.1 | minimax-m2.1 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| GLM 5 | glm-5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| GLM 4.7 | glm-4.7 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| GLM 4.6 | glm-4.6 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| Kimi K2 Thinking | kimi-k2-thinking | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| Kimi K2 | kimi-k2 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| Qwen3 Coder 480B | qwen3-coder | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
-| Big Pickle | big-pickle | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| Model | Model ID | Endpoint | AI SDK Package |
+| ------------------- | ------------------- | -------------------------------------------------- | --------------------------- |
+| GPT 5.4 Pro | gpt-5.4-pro | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
+| GPT 5.4 | gpt-5.4 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
+| GPT 5.3 Codex | gpt-5.3-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
+| GPT 5.3 Codex Spark | gpt-5.3-codex-spark | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
+| GPT 5.2 | gpt-5.2 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
+| GPT 5.2 Codex | gpt-5.2-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
+| GPT 5.1 | gpt-5.1 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
+| GPT 5.1 Codex | gpt-5.1-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
+| GPT 5.1 Codex Max | gpt-5.1-codex-max | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
+| GPT 5.1 Codex Mini | gpt-5.1-codex-mini | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
+| GPT 5 | gpt-5 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
+| GPT 5 Codex | gpt-5-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
+| GPT 5 Nano | gpt-5-nano | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
+| Claude Opus 4.6 | claude-opus-4-6 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
+| Claude Opus 4.5 | claude-opus-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
+| Claude Opus 4.1 | claude-opus-4-1 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
+| Claude Sonnet 4.6 | claude-sonnet-4-6 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
+| Claude Sonnet 4.5 | claude-sonnet-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
+| Claude Sonnet 4 | claude-sonnet-4 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
+| Claude Haiku 4.5 | claude-haiku-4-5 | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
+| Claude Haiku 3.5 | claude-3-5-haiku | `https://opencode.ai/zen/v1/messages` | `@ai-sdk/anthropic` |
+| Gemini 3.1 Pro | gemini-3.1-pro | `https://opencode.ai/zen/v1/models/gemini-3.1-pro` | `@ai-sdk/google` |
+| Gemini 3 Pro | gemini-3-pro | `https://opencode.ai/zen/v1/models/gemini-3-pro` | `@ai-sdk/google` |
+| Gemini 3 Flash | gemini-3-flash | `https://opencode.ai/zen/v1/models/gemini-3-flash` | `@ai-sdk/google` |
+| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiniMax M2.5 Free | minimax-m2.5-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| MiniMax M2.1 | minimax-m2.1 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| GLM 5 | glm-5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| GLM 4.7 | glm-4.7 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| GLM 4.6 | glm-4.6 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| Kimi K2 Thinking | kimi-k2-thinking | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| Kimi K2 | kimi-k2 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| Qwen3 Coder 480B | qwen3-coder | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
+| Big Pickle | big-pickle | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` |
The [model id](/docs/config/#models) in your OpenCode config
-uses the format `opencode/`. For example, for GPT 5.2 Codex, you would
-use `opencode/gpt-5.2-codex` in your config.
+uses the format `opencode/`. For example, for GPT 5.3 Codex, you would
+use `opencode/gpt-5.3-codex` in your config.
---
@@ -117,46 +120,49 @@ https://opencode.ai/zen/v1/models
We support a pay-as-you-go model. Below are the prices **per 1M tokens**.
-| Model | Input | Output | Cached Read | Cached Write |
-| --------------------------------- | ------ | ------ | ----------- | ------------ |
-| Big Pickle | Free | Free | Free | - |
-| MiniMax M2.5 Free | Free | Free | Free | - |
-| MiniMax M2.5 | $0.30 | $1.20 | $0.06 | $0.375 |
-| MiniMax M2.1 | $0.30 | $1.20 | $0.10 | - |
-| GLM 5 | $1.00 | $3.20 | $0.20 | - |
-| GLM 4.7 | $0.60 | $2.20 | $0.10 | - |
-| GLM 4.6 | $0.60 | $2.20 | $0.10 | - |
-| Kimi K2.5 | $0.60 | $3.00 | $0.10 | - |
-| Kimi K2 Thinking | $0.40 | $2.50 | - | - |
-| Kimi K2 | $0.40 | $2.50 | - | - |
-| Qwen3 Coder 480B | $0.45 | $1.50 | - | - |
-| Claude Opus 4.6 (≤ 200K tokens) | $5.00 | $25.00 | $0.50 | $6.25 |
-| Claude Opus 4.6 (> 200K tokens) | $10.00 | $37.50 | $1.00 | $12.50 |
-| Claude Opus 4.5 | $5.00 | $25.00 | $0.50 | $6.25 |
-| Claude Opus 4.1 | $15.00 | $75.00 | $1.50 | $18.75 |
-| Claude Sonnet 4.6 (≤ 200K tokens) | $3.00 | $15.00 | $0.30 | $3.75 |
-| Claude Sonnet 4.6 (> 200K tokens) | $6.00 | $22.50 | $0.60 | $7.50 |
-| Claude Sonnet 4.5 (≤ 200K tokens) | $3.00 | $15.00 | $0.30 | $3.75 |
-| Claude Sonnet 4.5 (> 200K tokens) | $6.00 | $22.50 | $0.60 | $7.50 |
-| Claude Sonnet 4 (≤ 200K tokens) | $3.00 | $15.00 | $0.30 | $3.75 |
-| Claude Sonnet 4 (> 200K tokens) | $6.00 | $22.50 | $0.60 | $7.50 |
-| Claude Haiku 4.5 | $1.00 | $5.00 | $0.10 | $1.25 |
-| Claude Haiku 3.5 | $0.80 | $4.00 | $0.08 | $1.00 |
-| Gemini 3.1 Pro (≤ 200K tokens) | $2.00 | $12.00 | $0.20 | - |
-| Gemini 3.1 Pro (> 200K tokens) | $4.00 | $18.00 | $0.40 | - |
-| Gemini 3 Pro (≤ 200K tokens) | $2.00 | $12.00 | $0.20 | - |
-| Gemini 3 Pro (> 200K tokens) | $4.00 | $18.00 | $0.40 | - |
-| Gemini 3 Flash | $0.50 | $3.00 | $0.05 | - |
-| GPT 5.3 Codex | $1.75 | $14.00 | $0.175 | - |
-| GPT 5.2 | $1.75 | $14.00 | $0.175 | - |
-| GPT 5.2 Codex | $1.75 | $14.00 | $0.175 | - |
-| GPT 5.1 | $1.07 | $8.50 | $0.107 | - |
-| GPT 5.1 Codex | $1.07 | $8.50 | $0.107 | - |
-| GPT 5.1 Codex Max | $1.25 | $10.00 | $0.125 | - |
-| GPT 5.1 Codex Mini | $0.25 | $2.00 | $0.025 | - |
-| GPT 5 | $1.07 | $8.50 | $0.107 | - |
-| GPT 5 Codex | $1.07 | $8.50 | $0.107 | - |
-| GPT 5 Nano | Free | Free | Free | - |
+| Model | Input | Output | Cached Read | Cached Write |
+| --------------------------------- | ------ | ------- | ----------- | ------------ |
+| Big Pickle | Free | Free | Free | - |
+| MiniMax M2.5 Free | Free | Free | Free | - |
+| MiniMax M2.5 | $0.30 | $1.20 | $0.06 | $0.375 |
+| MiniMax M2.1 | $0.30 | $1.20 | $0.10 | - |
+| GLM 5 | $1.00 | $3.20 | $0.20 | - |
+| GLM 4.7 | $0.60 | $2.20 | $0.10 | - |
+| GLM 4.6 | $0.60 | $2.20 | $0.10 | - |
+| Kimi K2.5 | $0.60 | $3.00 | $0.10 | - |
+| Kimi K2 Thinking | $0.40 | $2.50 | - | - |
+| Kimi K2 | $0.40 | $2.50 | - | - |
+| Qwen3 Coder 480B | $0.45 | $1.50 | - | - |
+| Claude Opus 4.6 (≤ 200K tokens) | $5.00 | $25.00 | $0.50 | $6.25 |
+| Claude Opus 4.6 (> 200K tokens) | $10.00 | $37.50 | $1.00 | $12.50 |
+| Claude Opus 4.5 | $5.00 | $25.00 | $0.50 | $6.25 |
+| Claude Opus 4.1 | $15.00 | $75.00 | $1.50 | $18.75 |
+| Claude Sonnet 4.6 (≤ 200K tokens) | $3.00 | $15.00 | $0.30 | $3.75 |
+| Claude Sonnet 4.6 (> 200K tokens) | $6.00 | $22.50 | $0.60 | $7.50 |
+| Claude Sonnet 4.5 (≤ 200K tokens) | $3.00 | $15.00 | $0.30 | $3.75 |
+| Claude Sonnet 4.5 (> 200K tokens) | $6.00 | $22.50 | $0.60 | $7.50 |
+| Claude Sonnet 4 (≤ 200K tokens) | $3.00 | $15.00 | $0.30 | $3.75 |
+| Claude Sonnet 4 (> 200K tokens) | $6.00 | $22.50 | $0.60 | $7.50 |
+| Claude Haiku 4.5 | $1.00 | $5.00 | $0.10 | $1.25 |
+| Claude Haiku 3.5 | $0.80 | $4.00 | $0.08 | $1.00 |
+| Gemini 3.1 Pro (≤ 200K tokens) | $2.00 | $12.00 | $0.20 | - |
+| Gemini 3.1 Pro (> 200K tokens) | $4.00 | $18.00 | $0.40 | - |
+| Gemini 3 Pro (≤ 200K tokens) | $2.00 | $12.00 | $0.20 | - |
+| Gemini 3 Pro (> 200K tokens) | $4.00 | $18.00 | $0.40 | - |
+| Gemini 3 Flash | $0.50 | $3.00 | $0.05 | - |
+| GPT 5.4 Pro | $30.00 | $180.00 | $30.00 | - |
+| GPT 5.4 | $2.50 | $15.00 | $0.25 | - |
+| GPT 5.3 Codex Spark | $1.75 | $14.00 | $0.175 | - |
+| GPT 5.3 Codex | $1.75 | $14.00 | $0.175 | - |
+| GPT 5.2 | $1.75 | $14.00 | $0.175 | - |
+| GPT 5.2 Codex | $1.75 | $14.00 | $0.175 | - |
+| GPT 5.1 | $1.07 | $8.50 | $0.107 | - |
+| GPT 5.1 Codex | $1.07 | $8.50 | $0.107 | - |
+| GPT 5.1 Codex Max | $1.25 | $10.00 | $0.125 | - |
+| GPT 5.1 Codex Mini | $0.25 | $2.00 | $0.025 | - |
+| GPT 5 | $1.07 | $8.50 | $0.107 | - |
+| GPT 5 Codex | $1.07 | $8.50 | $0.107 | - |
+| GPT 5 Nano | Free | Free | Free | - |
You might notice _Claude Haiku 3.5_ in your usage history. This is a [low cost model](/docs/config/#models) that's used to generate the titles of your sessions.
diff --git a/packages/web/src/content/docs/zh-cn/zen.mdx b/packages/web/src/content/docs/zh-cn/zen.mdx
index 0c6c6b9d95..098fb5e35b 100644
--- a/packages/web/src/content/docs/zh-cn/zen.mdx
+++ b/packages/web/src/content/docs/zh-cn/zen.mdx
@@ -55,6 +55,7 @@ OpenCode Zen 的工作方式与 OpenCode 中的任何其他提供商相同。
| 模型 | 模型 ID | 端点 | AI SDK 包 |
| ------------------ | ------------------ | -------------------------------------------------- | --------------------------- |
+| GPT 5.4 | gpt-5.4 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
| GPT 5.3 Codex | gpt-5.3-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
| GPT 5.2 | gpt-5.2 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
| GPT 5.2 Codex | gpt-5.2-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
@@ -136,6 +137,7 @@ https://opencode.ai/zen/v1/models
| Gemini 3 Pro (≤ 200K tokens) | $2.00 | $12.00 | $0.20 | - |
| Gemini 3 Pro (> 200K tokens) | $4.00 | $18.00 | $0.40 | - |
| Gemini 3 Flash | $0.50 | $3.00 | $0.05 | - |
+| GPT 5.4 | $2.50 | $15.00 | $0.25 | - |
| GPT 5.3 Codex | $1.75 | $14.00 | $0.175 | - |
| GPT 5.2 | $1.75 | $14.00 | $0.175 | - |
| GPT 5.2 Codex | $1.75 | $14.00 | $0.175 | - |
@@ -178,6 +180,19 @@ https://opencode.ai/zen/v1/models
---
+### 已弃用模型
+
+| 模型 | 弃用日期 |
+| ---------------- | ------------------ |
+| Qwen3 Coder 480B | 2026 年 2 月 6 日 |
+| Kimi K2 Thinking | 2026 年 3 月 6 日 |
+| Kimi K2 | 2026 年 3 月 6 日 |
+| MiniMax M2.1 | 2026 年 3 月 15 日 |
+| GLM 4.7 | 2026 年 3 月 15 日 |
+| GLM 4.6 | 2026 年 3 月 15 日 |
+
+---
+
## 隐私
我们所有的模型都托管在美国。我们的提供商遵循零保留政策,不会将你的数据用于模型训练,但以下情况除外:
diff --git a/packages/web/src/content/docs/zh-tw/zen.mdx b/packages/web/src/content/docs/zh-tw/zen.mdx
index c38188280b..c0ef9d03bd 100644
--- a/packages/web/src/content/docs/zh-tw/zen.mdx
+++ b/packages/web/src/content/docs/zh-tw/zen.mdx
@@ -55,6 +55,7 @@ OpenCode Zen 的工作方式與 OpenCode 中的任何其他供應商相同。
| 模型 | 模型 ID | 端點 | AI SDK 套件 |
| ------------------ | ------------------ | -------------------------------------------------- | --------------------------- |
+| GPT 5.4 | gpt-5.4 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
| GPT 5.3 Codex | gpt-5.3-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
| GPT 5.2 | gpt-5.2 | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
| GPT 5.2 Codex | gpt-5.2-codex | `https://opencode.ai/zen/v1/responses` | `@ai-sdk/openai` |
@@ -136,6 +137,7 @@ https://opencode.ai/zen/v1/models
| Gemini 3 Pro (≤ 200K Token) | $2.00 | $12.00 | $0.20 | - |
| Gemini 3 Pro (> 200K Token) | $4.00 | $18.00 | $0.40 | - |
| Gemini 3 Flash | $0.50 | $3.00 | $0.05 | - |
+| GPT 5.4 | $2.50 | $15.00 | $0.25 | - |
| GPT 5.3 Codex | $1.75 | $14.00 | $0.175 | - |
| GPT 5.2 | $1.75 | $14.00 | $0.175 | - |
| GPT 5.2 Codex | $1.75 | $14.00 | $0.175 | - |
@@ -178,6 +180,19 @@ https://opencode.ai/zen/v1/models
---
+### 已棄用的模型
+
+| 模型 | 棄用日期 |
+| ---------------- | ------------------ |
+| Qwen3 Coder 480B | 2026 年 2 月 6 日 |
+| Kimi K2 Thinking | 2026 年 3 月 6 日 |
+| Kimi K2 | 2026 年 3 月 6 日 |
+| MiniMax M2.1 | 2026 年 3 月 15 日 |
+| GLM 4.7 | 2026 年 3 月 15 日 |
+| GLM 4.6 | 2026 年 3 月 15 日 |
+
+---
+
## 隱私
我們所有的模型都託管在美國。我們的供應商遵循零保留政策,不會將你的資料用於模型訓練,但以下情況除外:
diff --git a/sdks/vscode/package.json b/sdks/vscode/package.json
index a15de9fde4..64307f053d 100644
--- a/sdks/vscode/package.json
+++ b/sdks/vscode/package.json
@@ -2,7 +2,7 @@
"name": "opencode",
"displayName": "opencode",
"description": "opencode for VS Code",
- "version": "1.2.17",
+ "version": "1.2.18",
"publisher": "sst-dev",
"repository": {
"type": "git",
diff --git a/turbo.json b/turbo.json
index 4d31bc472b..57e4f11953 100644
--- a/turbo.json
+++ b/turbo.json
@@ -1,5 +1,5 @@
{
- "$schema": "https://turborepo.com/schema.json",
+ "$schema": "https://v2-8-13.turborepo.dev/schema.json",
"globalEnv": ["CI", "OPENCODE_DISABLE_SHARE"],
"globalPassThroughEnv": ["CI", "OPENCODE_DISABLE_SHARE"],
"tasks": {