diff --git a/.opencode/opencode.jsonc b/.opencode/opencode.jsonc
index dc0bee7c3f..88adf3762b 100644
--- a/.opencode/opencode.jsonc
+++ b/.opencode/opencode.jsonc
@@ -10,17 +10,5 @@
"options": {},
},
},
- "mcp": {
- "exa": {
- "type": "remote",
- "url": "https://mcp.exa.ai/mcp",
- },
- "morph": {
- "type": "local",
- "command": ["bunx", "@morphllm/morphmcp"],
- "environment": {
- "ENABLED_TOOLS": "warp_grep",
- },
- },
- },
+ "mcp": {},
}
diff --git a/packages/opencode/src/agent/agent.ts b/packages/opencode/src/agent/agent.ts
index 94127e51ce..04f50b0316 100644
--- a/packages/opencode/src/agent/agent.ts
+++ b/packages/opencode/src/agent/agent.ts
@@ -157,6 +157,51 @@ export namespace Agent {
mode: "primary",
builtIn: true,
},
+ summary: {
+ name: "summary",
+ mode: "subagent",
+ options: {},
+ builtIn: true,
+ permission: agentPermission,
+ prompt: `You are a title generator. You output ONLY a thread title. Nothing else.
+
+
+Generate a brief title that would help the user find this conversation later.
+
+Follow all rules below.
+Use the examples so you know what a good title looks like.
+Your output must be:
+- A single line
+- ≤50 characters
+- No explanations
+
+
+Rules:
+- Focus on the main topic or question the user needs to retrieve
+- Use -ing verbs for actions (Debugging, Implementing, Analyzing)
+- Keep exact: technical terms, numbers, filenames, HTTP codes
+- Remove: the, this, my, a, an
+- Never assume tech stack
+- Never use tools
+- NEVER respond to questions, just generate a title for the conversation
+- The title should NEVER include "summarizing" or "generating" when generating a title
+- DO NOT SAY YOU CANNOT GENERATE A TITLE OR COMPLAIN ABOUT THE INPUT
+- Always output something meaningful, even if the input is minimal.
+- If the user message is short or conversational (e.g. “hello”, “lol”, “whats up”, “hey”):
+ → create a title that reflects the user’s tone or intent (such as Greeting, Quick check-in, Light chat, Intro message, etc.)
+
+
+Examples:
+"hey" -> Greeting
+"debug 500 errors in production" → Debugging production 500 errors
+"refactor user service" → Refactoring user service
+"why is app.js failing" → Analyzing app.js failure
+"implement rate limiting" → Implementing rate limiting
+"how do I connect postgres to my API" → Connecting Postgres to API
+"best practices for React hooks" → React hooks best practices
+`,
+ tools: {},
+ },
plan: {
name: "plan",
options: {},
diff --git a/packages/opencode/src/session/compaction.ts b/packages/opencode/src/session/compaction.ts
index 54fe37ed4e..de3c3dca32 100644
--- a/packages/opencode/src/session/compaction.ts
+++ b/packages/opencode/src/session/compaction.ts
@@ -129,7 +129,7 @@ export namespace SessionCompaction {
})
const agent = await Agent.get(input.agent)
const result = await processor.process({
- requestID: input.parentID,
+ user: input.messages.findLast((m) => m.info.id === input.parentID)!.info as MessageV2.User,
agent,
abort: input.abort,
sessionID: input.sessionID,
diff --git a/packages/opencode/src/session/llm.ts b/packages/opencode/src/session/llm.ts
index cda310e3a9..8277f36b1a 100644
--- a/packages/opencode/src/session/llm.ts
+++ b/packages/opencode/src/session/llm.ts
@@ -8,6 +8,7 @@ import { Instance } from "@/project/instance"
import type { Agent } from "@/agent/agent"
import type { MessageV2 } from "./message-v2"
import { Plugin } from "@/plugin"
+import { SystemPrompt } from "./system"
export namespace LLM {
const log = Log.create({ service: "llm" })
@@ -22,6 +23,7 @@ export namespace LLM {
system: string[]
abort: AbortSignal
messages: ModelMessage[]
+ small?: boolean
tools: Record
retries?: number
}
@@ -29,9 +31,19 @@ export namespace LLM {
export type StreamOutput = StreamTextResult
export async function stream(input: StreamInput) {
+ const l = log
+ .clone()
+ .tag("providerID", input.model.providerID)
+ .tag("modelID", input.model.id)
+ .tag("sessionID", input.sessionID)
+ .tag("small", (input.small ?? false).toString())
+ l.info("stream", {
+ modelID: input.model.id,
+ providerID: input.model.providerID,
+ })
const [language, cfg] = await Promise.all([Provider.getLanguage(input.model), Config.get()])
- const [first, ...rest] = input.system
+ const [first, ...rest] = [...SystemPrompt.header(input.model.providerID), ...input.system]
const system = [first, rest.join("\n")]
const params = await Plugin.trigger(
@@ -49,13 +61,18 @@ export namespace LLM {
: undefined,
topP: input.agent.topP ?? ProviderTransform.topP(input.model),
options: pipe(
- ProviderTransform.options(input.model, input.sessionID),
+ mergeDeep(ProviderTransform.options(input.model, input.sessionID)),
+ input.small ? mergeDeep(ProviderTransform.smallOptions(input.model)) : mergeDeep({}),
mergeDeep(input.model.options),
mergeDeep(input.agent.options),
),
},
)
+ l.info("params", {
+ params,
+ })
+
const maxOutputTokens = ProviderTransform.maxOutputTokens(
input.model.api.npm,
params.options,
@@ -65,14 +82,14 @@ export namespace LLM {
return streamText({
onError(error) {
- log.error("stream error", {
+ l.error("stream error", {
error,
})
},
async experimental_repairToolCall(failed) {
const lower = failed.toolCall.toolName.toLowerCase()
if (lower !== failed.toolCall.toolName && input.tools[lower]) {
- log.info("repairing tool call", {
+ l.info("repairing tool call", {
tool: failed.toolCall.toolName,
repaired: lower,
})
@@ -94,6 +111,7 @@ export namespace LLM {
topP: params.topP,
providerOptions: ProviderTransform.providerOptions(input.model, params.options, input.messages),
activeTools: Object.keys(input.tools).filter((x) => x !== "invalid"),
+ tools: input.tools,
maxOutputTokens,
abortSignal: input.abort,
headers: {
diff --git a/packages/opencode/src/session/prompt.ts b/packages/opencode/src/session/prompt.ts
index 96a0435deb..8c4f4ecbad 100644
--- a/packages/opencode/src/session/prompt.ts
+++ b/packages/opencode/src/session/prompt.ts
@@ -41,6 +41,8 @@ import { fn } from "@/util/fn"
import { SessionProcessor } from "./processor"
import { TaskTool } from "@/tool/task"
import { SessionStatus } from "./status"
+import { LLM } from "./llm"
+import { iife } from "@/util/iife"
// @ts-ignore
globalThis.AI_SDK_LOG_WARNINGS = false
@@ -281,7 +283,6 @@ export namespace SessionPrompt {
})
const model = await Provider.getModel(lastUser.model.providerID, lastUser.model.modelID)
- const language = await Provider.getLanguage(model)
const task = tasks.pop()
// pending subtask
@@ -427,7 +428,6 @@ export namespace SessionPrompt {
}
// normal processing
- const cfg = await Config.get()
const agent = await Agent.get(lastUser.agent)
const maxSteps = agent.maxSteps ?? Infinity
const isLastStep = step >= maxSteps
@@ -435,6 +435,7 @@ export namespace SessionPrompt {
messages: msgs,
agent,
})
+
const processor = SessionProcessor.create({
assistantMessage: (await Session.updateMessage({
id: Identifier.ascending("message"),
@@ -467,7 +468,6 @@ export namespace SessionPrompt {
model,
agent,
system: lastUser.system,
- isLastStep,
})
const tools = await resolveTools({
agent,
@@ -526,13 +526,9 @@ export namespace SessionPrompt {
return Provider.defaultModel()
}
- async function resolveSystemPrompt(input: {
- system?: string
- agent: Agent.Info
- model: Provider.Model
- isLastStep?: boolean
- }) {
- let system = SystemPrompt.header(input.model.providerID)
+ async function resolveSystemPrompt(input: { system?: string; agent: Agent.Info; model: Provider.Model }) {
+ using _ = log.time("system")
+ let system = []
system.push(
...(() => {
if (input.system) return [input.system]
@@ -542,14 +538,6 @@ export namespace SessionPrompt {
)
system.push(...(await SystemPrompt.environment()))
system.push(...(await SystemPrompt.custom()))
-
- if (input.isLastStep) {
- system.push(MAX_STEPS)
- }
-
- // max 2 system prompt messages for caching purposes
- const [first, ...rest] = system
- system = [first, rest.join("\n")]
return system
}
@@ -560,6 +548,7 @@ export namespace SessionPrompt {
tools?: Record
processor: SessionProcessor.Info
}) {
+ using _ = log.time("resolveTools")
const tools: Record = {}
const enabledTools = pipe(
input.agent.tools,
@@ -1319,28 +1308,24 @@ export namespace SessionPrompt {
input.history.filter((m) => m.info.role === "user" && !m.parts.every((p) => "synthetic" in p && p.synthetic))
.length === 1
if (!isFirst) return
- const cfg = await Config.get()
- const small =
- (await Provider.getSmallModel(input.providerID)) ?? (await Provider.getModel(input.providerID, input.modelID))
- const language = await Provider.getLanguage(small)
- const provider = await Provider.getProvider(small.providerID)
- const options = pipe(
- {},
- mergeDeep(ProviderTransform.options(small, input.session.id, provider?.options)),
- mergeDeep(ProviderTransform.smallOptions(small)),
- mergeDeep(small.options),
- )
- await generateText({
- // use higher # for reasoning models since reasoning tokens eat up a lot of the budget
- maxOutputTokens: small.capabilities.reasoning ? 3000 : 20,
- providerOptions: ProviderTransform.providerOptions(small, options, []),
+ const agent = await Agent.get("summary")
+ if (!agent) return
+ const result = await LLM.stream({
+ agent,
+ user: input.message.info as MessageV2.User,
+ system: [agent.prompt!],
+ small: true,
+ tools: {},
+ model: await iife(async () => {
+ if (agent.model) return await Provider.getModel(agent.model.providerID, agent.model.modelID)
+ return (
+ (await Provider.getSmallModel(input.providerID)) ?? (await Provider.getModel(input.providerID, input.modelID))
+ )
+ }),
+ abort: new AbortController().signal,
+ sessionID: input.session.id,
+ retries: 2,
messages: [
- ...SystemPrompt.title(small.providerID).map(
- (x): ModelMessage => ({
- role: "system",
- content: x,
- }),
- ),
{
role: "user",
content: "Generate a title for this conversation:\n",
@@ -1364,32 +1349,19 @@ export namespace SessionPrompt {
},
]),
],
- headers: small.headers,
- model: language,
- experimental_telemetry: {
- isEnabled: cfg.experimental?.openTelemetry,
- metadata: {
- userId: cfg.username ?? "unknown",
- sessionId: input.session.id,
- },
- },
})
- .then((result) => {
- if (result.text)
- return Session.update(input.session.id, (draft) => {
- const cleaned = result.text
- .replace(/[\s\S]*?<\/think>\s*/g, "")
- .split("\n")
- .map((line) => line.trim())
- .find((line) => line.length > 0)
- if (!cleaned) return
+ const text = await result.text.catch((err) => log.error("failed to generate title", { error: err }))
+ if (text)
+ return Session.update(input.session.id, (draft) => {
+ const cleaned = text
+ .replace(/[\s\S]*?<\/think>\s*/g, "")
+ .split("\n")
+ .map((line) => line.trim())
+ .find((line) => line.length > 0)
+ if (!cleaned) return
- const title = cleaned.length > 100 ? cleaned.substring(0, 97) + "..." : cleaned
- draft.title = title
- })
- })
- .catch((error) => {
- log.error("failed to generate title", { error, model: small.id })
+ const title = cleaned.length > 100 ? cleaned.substring(0, 97) + "..." : cleaned
+ draft.title = title
})
}
}
diff --git a/packages/opencode/src/tool/bash.ts b/packages/opencode/src/tool/bash.ts
index 6b0b9d4104..cd04814bfe 100644
--- a/packages/opencode/src/tool/bash.ts
+++ b/packages/opencode/src/tool/bash.ts
@@ -50,35 +50,36 @@ const parser = lazy(async () => {
return p
})
+const getShell = lazy(() => {
+ const s = process.env.SHELL
+ if (s) {
+ const basename = path.basename(s)
+ if (!new Set(["fish", "nu"]).has(basename)) {
+ return s
+ }
+ }
+
+ if (process.platform === "darwin") {
+ return "/bin/zsh"
+ }
+
+ if (process.platform === "win32") {
+ // Let Bun / Node pick COMSPEC (usually cmd.exe)
+ // or explicitly:
+ return process.env.COMSPEC || true
+ }
+
+ const bash = Bun.which("bash")
+ if (bash) {
+ return bash
+ }
+
+ return true
+})
+
// TODO: we may wanna rename this tool so it works better on other shells
-
export const BashTool = Tool.define("bash", async () => {
- const shell = iife(() => {
- const s = process.env.SHELL
- if (s) {
- const basename = path.basename(s)
- if (!new Set(["fish", "nu"]).has(basename)) {
- return s
- }
- }
-
- if (process.platform === "darwin") {
- return "/bin/zsh"
- }
-
- if (process.platform === "win32") {
- // Let Bun / Node pick COMSPEC (usually cmd.exe)
- // or explicitly:
- return process.env.COMSPEC || true
- }
-
- const bash = Bun.which("bash")
- if (bash) {
- return bash
- }
-
- return true
- })
+ const shell = getShell()
log.info("bash tool using shell", { shell })
return {
diff --git a/packages/opencode/src/tool/registry.ts b/packages/opencode/src/tool/registry.ts
index 7e440a78aa..647c742671 100644
--- a/packages/opencode/src/tool/registry.ts
+++ b/packages/opencode/src/tool/registry.ts
@@ -21,8 +21,11 @@ import { Plugin } from "../plugin"
import { WebSearchTool } from "./websearch"
import { CodeSearchTool } from "./codesearch"
import { Flag } from "@/flag/flag"
+import { Log } from "@/util/log"
export namespace ToolRegistry {
+ const log = Log.create({ service: "tool.registry" })
+
export const state = Instance.state(async () => {
const custom = [] as Tool.Info[]
const glob = new Bun.Glob("tool/*.{js,ts}")
@@ -119,10 +122,13 @@ export namespace ToolRegistry {
}
return true
})
- .map(async (t) => ({
- id: t.id,
- ...(await t.init()),
- })),
+ .map(async (t) => {
+ using _ = log.time(t.id)
+ return {
+ id: t.id,
+ ...(await t.init()),
+ }
+ }),
)
return result
}