test: finish HTTP mock processor coverage

test/processor-mock-server
Kit Langton 2026-03-31 20:36:18 -04:00
parent 7f6a5bb2c8
commit 8d2385ad49
2 changed files with 371 additions and 395 deletions

View File

@ -1,39 +1,12 @@
import { NodeHttpServer } from "@effect/platform-node"
import { NodeHttpServer, NodeHttpServerRequest } from "@effect/platform-node"
import * as Http from "node:http"
import { Deferred, Effect, Layer, ServiceMap, Stream } from "effect"
import * as HttpServer from "effect/unstable/http/HttpServer"
import { HttpRouter, HttpServerRequest, HttpServerResponse } from "effect/unstable/http"
type Usage = { input: number; output: number }
export type Usage = { input: number; output: number }
type Step =
| {
type: "text"
text: string
usage?: Usage
}
| {
type: "tool"
tool: string
input: unknown
}
| {
type: "fail"
message: string
}
| {
type: "httpError"
status: number
body: unknown
}
| {
type: "hang"
}
| {
type: "hold"
text: string
wait: PromiseLike<unknown>
}
type Line = Record<string, unknown>
type Hit = {
url: URL
@ -45,163 +18,293 @@ type Wait = {
ready: Deferred.Deferred<void>
}
function sse(lines: unknown[]) {
return HttpServerResponse.stream(
Stream.fromIterable([
[...lines.map((line) => `data: ${JSON.stringify(line)}`), "data: [DONE]"].join("\n\n") + "\n\n",
]).pipe(Stream.encodeText),
{ contentType: "text/event-stream" },
)
type Sse = {
type: "sse"
head: unknown[]
tail: unknown[]
wait?: PromiseLike<unknown>
hang?: boolean
error?: unknown
reset?: boolean
}
function text(step: Extract<Step, { type: "text" }>) {
const finish: Record<string, unknown> = {
type HttpError = {
type: "http-error"
status: number
body: unknown
}
export type Item = Sse | HttpError
// Sentinel marking the terminal SSE frame of a chunk sequence.
const done = Symbol("done")

/** Render one server-sent-event frame; the `done` sentinel becomes the OpenAI-style `[DONE]` terminator. */
function line(input: unknown) {
  return input === done ? "data: [DONE]\n\n" : `data: ${JSON.stringify(input)}\n\n`
}
/** Map internal Usage counters onto the OpenAI streaming `usage` payload shape; absent input yields undefined. */
function tokens(input?: Usage) {
  if (!input) return
  const prompt = input.input
  const completion = input.output
  return {
    prompt_tokens: prompt,
    completion_tokens: completion,
    total_tokens: prompt + completion,
  }
}
function chunk(input: { delta?: Record<string, unknown>; finish?: string; usage?: Usage }) {
return {
id: "chatcmpl-test",
object: "chat.completion.chunk",
choices: [{ delta: {}, finish_reason: "stop" }],
}
if (step.usage) {
finish.usage = {
prompt_tokens: step.usage.input,
completion_tokens: step.usage.output,
total_tokens: step.usage.input + step.usage.output,
}
}
return sse([
{
id: "chatcmpl-test",
object: "chat.completion.chunk",
choices: [{ delta: { role: "assistant" } }],
},
{
id: "chatcmpl-test",
object: "chat.completion.chunk",
choices: [{ delta: { content: step.text } }],
},
finish,
])
choices: [
{
delta: input.delta ?? {},
...(input.finish ? { finish_reason: input.finish } : {}),
},
],
...(input.usage ? { usage: tokens(input.usage) } : {}),
} satisfies Line
}
function httpError(step: Extract<Step, { type: "httpError" }>) {
return HttpServerResponse.text(JSON.stringify(step.body), {
status: step.status,
/** Opening chunk announcing the assistant role. */
function role() {
  const delta = { role: "assistant" }
  return chunk({ delta })
}

/** Content-delta chunk carrying a piece of assistant text. */
function textLine(value: string) {
  const delta = { content: value }
  return chunk({ delta })
}

/** Delta chunk carrying a piece of reasoning text. */
function reasonLine(value: string) {
  const delta = { reasoning_content: value }
  return chunk({ delta })
}

/** Terminal chunk with a finish_reason and optional usage totals. */
function finishLine(reason: string, usage?: Usage) {
  return chunk({ usage, finish: reason })
}
/** First tool-call delta: declares the call id and function name with empty arguments. */
function toolStartLine(id: string, name: string) {
  const call = {
    index: 0,
    id,
    type: "function",
    function: { name, arguments: "" },
  }
  return chunk({ delta: { tool_calls: [call] } })
}
/** Follow-up tool-call delta streaming a slice of the JSON argument string. */
function toolArgsLine(value: string) {
  const call = {
    index: 0,
    function: { arguments: value },
  }
  return chunk({ delta: { tool_calls: [call] } })
}
/** Encode a sequence of SSE payloads into a UTF-8 byte stream, one frame per item. */
function bytes(input: Iterable<unknown>) {
  const frames = Array.from(input, line)
  return Stream.fromIterable(frames).pipe(Stream.encodeText)
}
/**
 * Build the SSE HTTP response for a queued `Sse` item.
 *
 * Frames flow as: head → (optional gate on `item.wait`) → tail → terminator.
 * The `[DONE]` terminator is emitted only for streams that complete normally;
 * hanging or erroring streams intentionally omit it.
 */
function send(item: Sse) {
  const head = bytes(item.head)
  // Append the `done` sentinel only when the stream is meant to finish cleanly.
  const tail = bytes([...item.tail, ...(item.hang || item.error ? [] : [done])])
  const wait = item.wait
  // When a wait gate is present, hold the tail back until the promise settles.
  const body: Stream.Stream<Uint8Array, unknown> = wait
    ? Stream.concat(head, Stream.fromEffect(Effect.promise(() => wait)).pipe(Stream.flatMap(() => tail)))
    : Stream.concat(head, tail)
  // Trailing behavior: fail the stream, never complete, or end normally.
  // (Original concatenated these onto an empty stream, which is redundant.)
  const end: Stream.Stream<Uint8Array, unknown> = item.error
    ? Stream.fail(item.error)
    : item.hang
      ? Stream.never
      : Stream.empty
  return HttpServerResponse.stream(Stream.concat(body, end), { contentType: "text/event-stream" })
}
// Simulate a mid-stream connection reset: write the queued frames straight to the
// raw node:http response (bypassing the Effect response pipeline), then destroy
// the socket with an error so the client observes an abrupt disconnect. The
// handler then suspends forever; the fiber ends only via interruption.
const reset = Effect.fn("TestLLMServer.reset")(function* (item: Sse) {
  const req = yield* HttpServerRequest.HttpServerRequest
  // Reach through to the underlying Node ServerResponse for direct socket control.
  const res = NodeHttpServerRequest.toServerResponse(req)
  yield* Effect.sync(() => {
    res.writeHead(200, { "content-type": "text/event-stream" })
    // Both head and tail frames are flushed, but no [DONE] terminator is
    // written — the reset happens before the stream would complete.
    for (const part of item.head) res.write(line(part))
    for (const part of item.tail) res.write(line(part))
    res.destroy(new Error("connection reset"))
  })
  yield* Effect.never
})
/** Respond with a non-streaming JSON error body, mirroring an upstream HTTP failure. */
function fail(item: HttpError) {
  const payload = JSON.stringify(item.body)
  const options = { status: item.status, contentType: "application/json" }
  return HttpServerResponse.text(payload, options)
}
function tool(step: Extract<Step, { type: "tool" }>, seq: number) {
const id = `call_${seq}`
const args = JSON.stringify(step.input)
return sse([
{
id: "chatcmpl-test",
object: "chat.completion.chunk",
choices: [{ delta: { role: "assistant" } }],
},
{
id: "chatcmpl-test",
object: "chat.completion.chunk",
choices: [
{
delta: {
tool_calls: [
{
index: 0,
id,
type: "function",
function: {
name: step.tool,
arguments: "",
},
},
],
},
},
],
},
{
id: "chatcmpl-test",
object: "chat.completion.chunk",
choices: [
{
delta: {
tool_calls: [
{
index: 0,
function: {
arguments: args,
},
},
],
},
},
],
},
{
id: "chatcmpl-test",
object: "chat.completion.chunk",
choices: [{ delta: {}, finish_reason: "tool_calls" }],
},
])
/**
 * Fluent builder for a scripted OpenAI-style SSE reply.
 *
 * Accumulate deltas with `text`/`reason`/`tool`/`pendingTool`, choose how the
 * stream ends (`stop`, `toolCalls`, `hang`, `streamError`, `reset`), then
 * materialize the queue entry with `item()`.
 */
export class Reply {
  // Every reply opens with the assistant-role chunk.
  #head: unknown[] = [role()]
  #tail: unknown[] = []
  #usage: Usage | undefined
  #finish: string | undefined
  #wait: PromiseLike<unknown> | undefined
  #hang = false
  #error: unknown
  #reset = false
  #seq = 0

  // Deterministic tool-call ids: call_1, call_2, ...
  #id() {
    this.#seq += 1
    return `call_${this.#seq}`
  }

  // The ending modes are mutually exclusive; selecting one clears the others.
  #end(mode: { finish?: string; hang?: boolean; error?: unknown; reset?: boolean }) {
    this.#finish = mode.finish
    this.#hang = mode.hang ?? false
    this.#error = mode.error
    this.#reset = mode.reset ?? false
    return this
  }

  /** Append a content-text delta. */
  text(value: string) {
    this.#tail = [...this.#tail, textLine(value)]
    return this
  }

  /** Append a reasoning delta. */
  reason(value: string) {
    this.#tail = [...this.#tail, reasonLine(value)]
    return this
  }

  /** Record usage totals to attach to the finish chunk. */
  usage(value: Usage) {
    this.#usage = value
    return this
  }

  /** Gate the tail of the stream on an external promise. */
  wait(value: PromiseLike<unknown>) {
    this.#wait = value
    return this
  }

  /** End normally with finish_reason "stop". */
  stop() {
    return this.#end({ finish: "stop" })
  }

  /** End with finish_reason "tool_calls". */
  toolCalls() {
    return this.#end({ finish: "tool_calls" })
  }

  /** Append a complete tool call (start + full arguments) and finish with tool_calls. */
  tool(name: string, input: unknown) {
    const id = this.#id()
    const args = JSON.stringify(input)
    this.#tail = [...this.#tail, toolStartLine(id, name), toolArgsLine(args)]
    return this.toolCalls()
  }

  /** Append a tool call whose arguments are only half-streamed, leaving it incomplete. */
  pendingTool(name: string, input: unknown) {
    const id = this.#id()
    const args = JSON.stringify(input)
    const size = Math.max(1, Math.floor(args.length / 2))
    this.#tail = [...this.#tail, toolStartLine(id, name), toolArgsLine(args.slice(0, size))]
    return this
  }

  /** Never finish: keep the connection open forever after the queued frames. */
  hang() {
    return this.#end({ hang: true })
  }

  /** Fail the stream mid-flight with the given error. */
  streamError(error: unknown = "boom") {
    return this.#end({ error })
  }

  /** Abruptly reset the connection after the queued frames. */
  reset() {
    return this.#end({ reset: true })
  }

  /** Materialize the builder into the queue item consumed by the mock server. */
  item(): Item {
    const tail = this.#finish ? [...this.#tail, finishLine(this.#finish, this.#usage)] : this.#tail
    return {
      type: "sse",
      head: this.#head,
      tail,
      wait: this.#wait,
      hang: this.#hang,
      error: this.#error,
      reset: this.#reset,
    }
  }
}
function fail(step: Extract<Step, { type: "fail" }>) {
return HttpServerResponse.stream(
Stream.fromIterable([
'data: {"id":"chatcmpl-test","object":"chat.completion.chunk","choices":[{"delta":{"role":"assistant"}}]}\n\n',
]).pipe(Stream.encodeText, Stream.concat(Stream.fail(new Error(step.message)))),
{ contentType: "text/event-stream" },
)
/** Convenience factory: start a fresh fluent Reply builder. */
export function reply() {
  const out = new Reply()
  return out
}
function hang() {
return HttpServerResponse.stream(
Stream.fromIterable([
'data: {"id":"chatcmpl-test","object":"chat.completion.chunk","choices":[{"delta":{"role":"assistant"}}]}\n\n',
]).pipe(Stream.encodeText, Stream.concat(Stream.never)),
{ contentType: "text/event-stream" },
)
/** Queue item that makes the mock server answer with a plain HTTP error instead of an SSE stream. */
export function httpError(status: number, body: unknown): Item {
  return { type: "http-error", status, body }
}
function hold(step: Extract<Step, { type: "hold" }>) {
return HttpServerResponse.stream(
Stream.fromIterable([
'data: {"id":"chatcmpl-test","object":"chat.completion.chunk","choices":[{"delta":{"role":"assistant"}}]}\n\n',
]).pipe(
Stream.encodeText,
Stream.concat(
Stream.fromEffect(Effect.promise(() => step.wait)).pipe(
Stream.flatMap(() =>
Stream.fromIterable([
`data: ${JSON.stringify({
id: "chatcmpl-test",
object: "chat.completion.chunk",
choices: [{ delta: { content: step.text } }],
})}\n\n`,
`data: ${JSON.stringify({
id: "chatcmpl-test",
object: "chat.completion.chunk",
choices: [{ delta: {}, finish_reason: "stop" }],
})}\n\n`,
"data: [DONE]\n\n",
]).pipe(Stream.encodeText),
),
),
),
),
{ contentType: "text/event-stream" },
)
/**
 * Escape hatch: build an SSE queue item directly from raw chunk payloads.
 * `chunks` is an alias for `head` and is used only when `head` is absent.
 */
export function raw(input: {
  chunks?: unknown[]
  head?: unknown[]
  tail?: unknown[]
  wait?: PromiseLike<unknown>
  hang?: boolean
  error?: unknown
  reset?: boolean
}): Item {
  const { chunks, head, tail, wait, hang, error, reset } = input
  return {
    type: "sse",
    head: head ?? chunks ?? [],
    tail: tail ?? [],
    wait,
    hang,
    error,
    reset,
  }
}
/** Normalize a queue entry: materialize Reply builders, pass plain Items through. */
function item(input: Item | Reply) {
  if (input instanceof Reply) return input.item()
  return input
}
namespace TestLLMServer {
export interface Service {
readonly url: string
readonly push: (...input: (Item | Reply)[]) => Effect.Effect<void>
readonly text: (value: string, opts?: { usage?: Usage }) => Effect.Effect<void>
readonly tool: (tool: string, input: unknown) => Effect.Effect<void>
readonly fail: (message?: string) => Effect.Effect<void>
readonly tool: (name: string, input: unknown) => Effect.Effect<void>
readonly toolHang: (name: string, input: unknown) => Effect.Effect<void>
readonly reason: (value: string, opts?: { text?: string; usage?: Usage }) => Effect.Effect<void>
readonly fail: (message?: unknown) => Effect.Effect<void>
readonly error: (status: number, body: unknown) => Effect.Effect<void>
readonly hang: Effect.Effect<void>
readonly hold: (text: string, wait: PromiseLike<unknown>) => Effect.Effect<void>
readonly hold: (value: string, wait: PromiseLike<unknown>) => Effect.Effect<void>
readonly hits: Effect.Effect<Hit[]>
readonly calls: Effect.Effect<number>
readonly wait: (count: number) => Effect.Effect<void>
@ -218,12 +321,11 @@ export class TestLLMServer extends ServiceMap.Service<TestLLMServer, TestLLMServ
const router = yield* HttpRouter.HttpRouter
let hits: Hit[] = []
let list: Step[] = []
let seq = 0
let list: Item[] = []
let waits: Wait[] = []
const push = (step: Step) => {
list = [...list, step]
const queue = (...input: (Item | Reply)[]) => {
list = [...list, ...input.map(item)]
}
const notify = Effect.fnUntraced(function* () {
@ -234,11 +336,10 @@ export class TestLLMServer extends ServiceMap.Service<TestLLMServer, TestLLMServ
})
const pull = () => {
const step = list[0]
if (!step) return { step: undefined, seq }
seq += 1
const first = list[0]
if (!first) return
list = list.slice(1)
return { step, seq }
return first
}
yield* router.add(
@ -247,22 +348,22 @@ export class TestLLMServer extends ServiceMap.Service<TestLLMServer, TestLLMServ
Effect.gen(function* () {
const req = yield* HttpServerRequest.HttpServerRequest
const next = pull()
if (!next.step) return HttpServerResponse.text("unexpected request", { status: 500 })
const json = yield* req.json.pipe(Effect.orElseSucceed(() => ({})))
if (!next) return HttpServerResponse.text("unexpected request", { status: 500 })
const body = yield* req.json.pipe(Effect.orElseSucceed(() => ({})))
hits = [
...hits,
{
url: new URL(req.originalUrl, "http://localhost"),
body: json && typeof json === "object" ? (json as Record<string, unknown>) : {},
body: body && typeof body === "object" ? (body as Record<string, unknown>) : {},
},
]
yield* notify()
if (next.step.type === "text") return text(next.step)
if (next.step.type === "tool") return tool(next.step, next.seq)
if (next.step.type === "fail") return fail(next.step)
if (next.step.type === "httpError") return httpError(next.step)
if (next.step.type === "hang") return hang()
return hold(next.step)
if (next.type === "sse" && next.reset) {
yield* reset(next)
return HttpServerResponse.empty()
}
if (next.type === "sse") return send(next)
return fail(next)
}),
)
@ -273,23 +374,37 @@ export class TestLLMServer extends ServiceMap.Service<TestLLMServer, TestLLMServ
server.address._tag === "TcpAddress"
? `http://127.0.0.1:${server.address.port}/v1`
: `unix://${server.address.path}/v1`,
push: Effect.fn("TestLLMServer.push")(function* (...input: (Item | Reply)[]) {
queue(...input)
}),
text: Effect.fn("TestLLMServer.text")(function* (value: string, opts?: { usage?: Usage }) {
push({ type: "text", text: value, usage: opts?.usage })
const out = reply().text(value)
if (opts?.usage) out.usage(opts.usage)
queue(out.stop().item())
}),
tool: Effect.fn("TestLLMServer.tool")(function* (tool: string, input: unknown) {
push({ type: "tool", tool, input })
tool: Effect.fn("TestLLMServer.tool")(function* (name: string, input: unknown) {
queue(reply().tool(name, input).item())
}),
fail: Effect.fn("TestLLMServer.fail")(function* (message = "boom") {
push({ type: "fail", message })
toolHang: Effect.fn("TestLLMServer.toolHang")(function* (name: string, input: unknown) {
queue(reply().pendingTool(name, input).hang().item())
}),
reason: Effect.fn("TestLLMServer.reason")(function* (value: string, opts?: { text?: string; usage?: Usage }) {
const out = reply().reason(value)
if (opts?.text) out.text(opts.text)
if (opts?.usage) out.usage(opts.usage)
queue(out.stop().item())
}),
fail: Effect.fn("TestLLMServer.fail")(function* (message: unknown = "boom") {
queue(reply().streamError(message).item())
}),
error: Effect.fn("TestLLMServer.error")(function* (status: number, body: unknown) {
queue(httpError(status, body))
}),
hang: Effect.gen(function* () {
push({ type: "hang" })
queue(reply().hang().item())
}).pipe(Effect.withSpan("TestLLMServer.hang")),
error: Effect.fn("TestLLMServer.error")(function* (status: number, body: unknown) {
push({ type: "httpError", status, body })
}),
hold: Effect.fn("TestLLMServer.hold")(function* (text: string, wait: PromiseLike<unknown>) {
push({ type: "hold", text, wait })
hold: Effect.fn("TestLLMServer.hold")(function* (value: string, wait: PromiseLike<unknown>) {
queue(reply().wait(wait).text(value).stop().item())
}),
hits: Effect.sync(() => [...hits]),
calls: Effect.sync(() => hits.length),
@ -303,8 +418,5 @@ export class TestLLMServer extends ServiceMap.Service<TestLLMServer, TestLLMServ
pending: Effect.sync(() => list.length),
})
}),
).pipe(
Layer.provide(HttpRouter.layer), //
Layer.provide(NodeHttpServer.layer(() => Http.createServer(), { port: 0 })),
)
).pipe(Layer.provide(HttpRouter.layer), Layer.provide(NodeHttpServer.layer(() => Http.createServer(), { port: 0 })))
}

View File

@ -1,7 +1,6 @@
import { NodeFileSystem } from "@effect/platform-node"
import { expect } from "bun:test"
import { APICallError, jsonSchema, tool } from "ai"
import { Cause, Effect, Exit, Fiber, Layer, ServiceMap, Stream } from "effect"
import { Cause, Effect, Exit, Fiber, Layer } from "effect"
import path from "path"
import type { Agent } from "../../src/agent/agent"
import { Agent as AgentSvc } from "../../src/agent/agent"
@ -20,19 +19,12 @@ import { SessionStatus } from "../../src/session/status"
import { Snapshot } from "../../src/snapshot"
import { Log } from "../../src/util/log"
import * as CrossSpawnSpawner from "../../src/effect/cross-spawn-spawner"
import { provideTmpdirInstance, provideTmpdirServer } from "../fixture/fixture"
import { provideTmpdirServer } from "../fixture/fixture"
import { testEffect } from "../lib/effect"
import { TestLLMServer } from "../lib/llm-server"
import { reply, TestLLMServer } from "../lib/llm-server"
Log.init({ print: false })
const DEBUG = process.env.OPENCODE_TEST_DEBUG === "1"
function trace(label: string, value: unknown) {
if (!DEBUG) return
console.log(label, JSON.stringify(value, null, 2))
}
const ref = {
providerID: ProviderID.make("test"),
modelID: ModelID.make("test-model"),
@ -168,109 +160,6 @@ const env = Layer.mergeAll(TestLLMServer.layer, SessionProcessor.layer.pipe(Laye
const it = testEffect(env)
// ---------------------------------------------------------------------------
// TestLLM kept only for the reasoning test
// TODO: reasoning events not available via OpenAI-compatible SSE
// ---------------------------------------------------------------------------
type Script = Stream.Stream<LLM.Event, unknown> | ((input: LLM.StreamInput) => Stream.Stream<LLM.Event, unknown>)
class TestLLM extends ServiceMap.Service<
TestLLM,
{
readonly push: (stream: Script) => Effect.Effect<void>
readonly reply: (...items: LLM.Event[]) => Effect.Effect<void>
readonly calls: Effect.Effect<number>
readonly inputs: Effect.Effect<LLM.StreamInput[]>
}
>()("@test/SessionProcessorLLM") {}
function reasoningUsage(input = 1, output = 1, total = input + output) {
return {
inputTokens: input,
outputTokens: output,
totalTokens: total,
inputTokenDetails: {
noCacheTokens: undefined,
cacheReadTokens: undefined,
cacheWriteTokens: undefined,
},
outputTokenDetails: {
textTokens: undefined,
reasoningTokens: undefined,
},
}
}
const reasoningLlm = Layer.unwrap(
Effect.gen(function* () {
const queue: Script[] = []
const inputs: LLM.StreamInput[] = []
let calls = 0
const push = Effect.fn("TestLLM.push")((item: Script) => {
queue.push(item)
return Effect.void
})
const reply = Effect.fn("TestLLM.reply")((...items: LLM.Event[]) => push(Stream.make(...items)))
return Layer.mergeAll(
Layer.succeed(
LLM.Service,
LLM.Service.of({
stream: (input) => {
calls += 1
inputs.push(input)
const item = queue.shift() ?? Stream.empty
return typeof item === "function" ? item(input) : item
},
}),
),
Layer.succeed(
TestLLM,
TestLLM.of({
push,
reply,
calls: Effect.sync(() => calls),
inputs: Effect.sync(() => [...inputs]),
}),
),
)
}),
)
const reasoningDeps = Layer.mergeAll(
Session.defaultLayer,
Snapshot.defaultLayer,
AgentSvc.defaultLayer,
Permission.layer,
Plugin.defaultLayer,
Config.defaultLayer,
status,
reasoningLlm,
).pipe(Layer.provideMerge(infra))
const reasoningEnv = SessionProcessor.layer.pipe(Layer.provideMerge(reasoningDeps))
const reasoningIt = testEffect(reasoningEnv)
function reasoningModel(context: number): Provider.Model {
return {
id: "test-model",
providerID: "test",
name: "Test",
limit: { context, output: 10 },
cost: { input: 0, output: 0, cache: { read: 0, write: 0 } },
capabilities: {
toolcall: true,
attachment: false,
reasoning: false,
temperature: true,
input: { text: true, image: false, audio: false, video: false },
output: { text: true, image: false, audio: false, video: false },
},
api: { npm: "@ai-sdk/anthropic" },
options: {},
} as Provider.Model
}
const boot = Effect.fn("test.boot")(function* () {
const processors = yield* SessionProcessor.Service
const session = yield* Session.Service
@ -367,12 +256,6 @@ it.live("session.processor effect tests stop after token overflow requests compa
const parts = yield* Effect.promise(() => MessageV2.parts(msg.id))
trace("overflow", {
value,
parts: parts.map((part) => part.type),
inputs: yield* llm.inputs,
})
expect(value).toBe("compact")
expect(parts.some((part) => part.type === "text" && part.text === "after")).toBe(true)
expect(parts.some((part) => part.type === "step-finish")).toBe(true)
@ -381,57 +264,66 @@ it.live("session.processor effect tests stop after token overflow requests compa
),
)
// TODO: reasoning events not available via OpenAI-compatible SSE
reasoningIt.live("session.processor effect tests reset reasoning state across retries", () =>
provideTmpdirInstance(
(dir) =>
it.live("session.processor effect tests capture reasoning from http mock", () =>
provideTmpdirServer(
({ dir, llm }) =>
Effect.gen(function* () {
const test = yield* TestLLM
const processors = yield* SessionProcessor.Service
const session = yield* Session.Service
const { processors, session, provider } = yield* boot()
yield* test.push(
Stream.fromIterable<LLM.Event>([
{ type: "start" },
{ type: "reasoning-start", id: "r" },
{ type: "reasoning-delta", id: "r", text: "one" },
]).pipe(
Stream.concat(
Stream.fail(
new APICallError({
message: "boom",
url: "https://example.com/v1/chat/completions",
requestBodyValues: {},
statusCode: 503,
responseHeaders: { "retry-after-ms": "0" },
responseBody: '{"error":"boom"}',
isRetryable: true,
}),
),
),
),
)
yield* test.reply(
{ type: "start" },
{ type: "reasoning-start", id: "r" },
{ type: "reasoning-delta", id: "r", text: "two" },
{ type: "reasoning-end", id: "r" },
{
type: "finish-step",
finishReason: "stop",
rawFinishReason: "stop",
response: { id: "res", modelId: "test-model", timestamp: new Date() },
providerMetadata: undefined,
usage: reasoningUsage(),
},
{ type: "finish", finishReason: "stop", rawFinishReason: "stop", totalUsage: reasoningUsage() },
)
yield* llm.push(reply().reason("think").text("done").stop())
const chat = yield* session.create({})
const parent = yield* user(chat.id, "reason")
const msg = yield* assistant(chat.id, parent.id, path.resolve(dir))
const mdl = reasoningModel(100)
const mdl = yield* provider.getModel(ref.providerID, ref.modelID)
const handle = yield* processors.create({
assistantMessage: msg,
sessionID: chat.id,
model: mdl,
})
const value = yield* handle.process({
user: {
id: parent.id,
sessionID: chat.id,
role: "user",
time: parent.time,
agent: parent.agent,
model: { providerID: ref.providerID, modelID: ref.modelID },
} satisfies MessageV2.User,
sessionID: chat.id,
model: mdl,
agent: agent(),
system: [],
messages: [{ role: "user", content: "reason" }],
tools: {},
})
const parts = yield* Effect.promise(() => MessageV2.parts(msg.id))
const reasoning = parts.find((part): part is MessageV2.ReasoningPart => part.type === "reasoning")
const text = parts.find((part): part is MessageV2.TextPart => part.type === "text")
expect(value).toBe("continue")
expect(yield* llm.calls).toBe(1)
expect(reasoning?.text).toBe("think")
expect(text?.text).toBe("done")
}),
{ git: true, config: (url) => providerCfg(url) },
),
)
it.live("session.processor effect tests reset reasoning state across retries", () =>
provideTmpdirServer(
({ dir, llm }) =>
Effect.gen(function* () {
const { processors, session, provider } = yield* boot()
yield* llm.push(reply().reason("one").reset(), reply().reason("two").stop())
const chat = yield* session.create({})
const parent = yield* user(chat.id, "reason")
const msg = yield* assistant(chat.id, parent.id, path.resolve(dir))
const mdl = yield* provider.getModel(ref.providerID, ref.modelID)
const handle = yield* processors.create({
assistantMessage: msg,
sessionID: chat.id,
@ -459,11 +351,11 @@ reasoningIt.live("session.processor effect tests reset reasoning state across re
const reasoning = parts.filter((part): part is MessageV2.ReasoningPart => part.type === "reasoning")
expect(value).toBe("continue")
expect(yield* test.calls).toBe(2)
expect(yield* llm.calls).toBe(2)
expect(reasoning.some((part) => part.text === "two")).toBe(true)
expect(reasoning.some((part) => part.text === "onetwo")).toBe(false)
}),
{ git: true },
{ git: true, config: (url) => providerCfg(url) },
),
)
@ -502,13 +394,6 @@ it.live("session.processor effect tests do not retry unknown json errors", () =>
tools: {},
})
trace("unknown-error", {
value,
calls: yield* llm.calls,
inputs: yield* llm.inputs,
error: handle.message.error,
})
expect(value).toBe("stop")
expect(yield* llm.calls).toBe(1)
expect(handle.message.error?.name).toBe("APIError")
@ -664,9 +549,8 @@ it.live("session.processor effect tests mark pending tools as aborted on cleanup
({ dir, llm }) =>
Effect.gen(function* () {
const { processors, session, provider } = yield* boot()
const wait = new Promise<{ output: string }>(() => {})
yield* llm.tool("bash", { cmd: "pwd" })
yield* llm.toolHang("bash", { cmd: "pwd" })
const chat = yield* session.create({})
const parent = yield* user(chat.id, "tool abort")
@ -693,26 +577,16 @@ it.live("session.processor effect tests mark pending tools as aborted on cleanup
agent: agent(),
system: [],
messages: [{ role: "user", content: "tool abort" }],
tools: {
bash: tool({
description: "Run shell commands",
inputSchema: jsonSchema({
type: "object",
properties: {
cmd: { type: "string" },
},
required: ["cmd"],
}),
execute: async () => wait,
}),
},
tools: {},
})
.pipe(Effect.forkChild)
yield* llm.wait(1)
yield* Effect.promise(async () => {
const end = Date.now() + 500
while (!handle.partFromToolCall("call_1") && Date.now() < end) {
while (Date.now() < end) {
const parts = await MessageV2.parts(msg.id)
if (parts.some((part) => part.type === "tool")) return
await Bun.sleep(10)
}
})
@ -725,16 +599,6 @@ it.live("session.processor effect tests mark pending tools as aborted on cleanup
const parts = yield* Effect.promise(() => MessageV2.parts(msg.id))
const call = parts.find((part): part is MessageV2.ToolPart => part.type === "tool")
trace("tool-abort", {
calls: yield* llm.calls,
inputs: yield* llm.inputs,
pending: handle.partFromToolCall("call_1"),
parts: parts.map((part) => ({
type: part.type,
...(part.type === "tool" ? { state: part.state.status } : {}),
})),
})
expect(Exit.isFailure(exit)).toBe(true)
if (Exit.isFailure(exit)) {
expect(Cause.hasInterruptsOnly(exit.cause)).toBe(true)