fix(core): prevent agent loop from stopping after tool calls with OpenAI-compatible providers (#14973)

Co-authored-by: Aiden Cline <63023139+rekram1-node@users.noreply.github.com>
Co-authored-by: Aiden Cline <aidenpcline@gmail.com>
pull/19953/head
Valentin Vivaldi 2026-04-01 23:34:01 -03:00 committed by GitHub
parent 2e8e278441
commit 733a3bd031
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
2 changed files with 40 additions and 2 deletions

View File

@@ -1362,9 +1362,18 @@ NOTE: At any point in time through this workflow you should feel free to ask the
}
if (!lastUser) throw new Error("No user message found in stream. This should never happen.")
const lastAssistantMsg = msgs.findLast(
(msg) => msg.info.role === "assistant" && msg.info.id === lastAssistant?.id,
)
// Some providers return "stop" even when the assistant message contains tool calls.
// Keep the loop running so tool results can be sent back to the model.
const hasToolCalls = lastAssistantMsg?.parts.some((part) => part.type === "tool") ?? false
if (
lastAssistant?.finish &&
!["tool-calls"].includes(lastAssistant.finish) &&
!hasToolCalls &&
lastUser.id < lastAssistant.id
) {
log.info("exiting loop", { sessionID })

View File

@@ -3,7 +3,6 @@ import { expect, spyOn } from "bun:test"
import { Cause, Effect, Exit, Fiber, Layer } from "effect"
import path from "path"
import z from "zod"
import type { Agent } from "../../src/agent/agent"
import { Agent as AgentSvc } from "../../src/agent/agent"
import { Bus } from "../../src/bus"
import { Command } from "../../src/command"
@@ -35,7 +34,7 @@ import { Log } from "../../src/util/log"
import * as CrossSpawnSpawner from "../../src/effect/cross-spawn-spawner"
import { provideTmpdirInstance, provideTmpdirServer } from "../fixture/fixture"
import { testEffect } from "../lib/effect"
import { reply, TestLLMServer } from "../lib/llm-server"
Log.init({ print: false })
@@ -453,6 +452,36 @@ it.live("loop continues when finish is tool-calls", () =>
),
)
// Regression test for this commit's fix: some OpenAI-compatible providers
// report finish === "stop" even when the assistant turn contains tool calls;
// the prompt loop must keep running so the tool result reaches the model
// (exercises the `hasToolCalls` guard added in the source hunk above).
it.live("loop continues when finish is stop but assistant has tool parts", () =>
provideTmpdirServer(
Effect.fnUntraced(function* ({ llm }) {
const prompt = yield* SessionPrompt.Service
const sessions = yield* Session.Service
// Session with a wildcard "allow" permission so the tool call is not blocked.
const session = yield* sessions.create({
title: "Pinned",
permission: [{ permission: "*", pattern: "*", action: "allow" }],
})
// Seed a user message; noReply: true presumably defers the model turn to
// the explicit loop() call below — TODO confirm against prompt.prompt docs.
yield* prompt.prompt({
sessionID: session.id,
agent: "build",
noReply: true,
parts: [{ type: "text", text: "hello" }],
})
// First queued reply: a tool call, but with finish reason "stop" — the
// provider behavior this commit works around.
yield* llm.push(reply().tool("first", { value: "first" }).stop())
// Second queued reply, returned after the tool result is fed back.
yield* llm.text("second")
const result = yield* prompt.loop({ sessionID: session.id })
// Two LLM calls prove the loop did not exit after the first "stop".
expect(yield* llm.calls).toBe(2)
expect(result.info.role).toBe("assistant")
if (result.info.role === "assistant") {
// Final assistant message carries the second turn's text and a
// genuine "stop" finish.
expect(result.parts.some((part) => part.type === "text" && part.text === "second")).toBe(true)
expect(result.info.finish).toBe("stop")
}
}),
{ git: true, config: providerCfg },
),
)
it.live("failed subtask preserves metadata on error tool state", () =>
provideTmpdirServer(
Effect.fnUntraced(function* ({ llm }) {