From 002bcaebf7159618f83636d7647dd13990f617d3 Mon Sep 17 00:00:00 2001
From: "Jeffrey.Cao"
Date: Wed, 8 Apr 2026 13:20:32 +0800
Subject: [PATCH] feat(copilot): add phase handling to OpenAI responses and implement tests

---
 .../convert-to-openai-responses-input.ts     |  10 +-
 .../responses/openai-responses-api-types.ts  |   1 +
 .../openai-responses-language-model.ts       |  30 ++-
 .../openai-responses-language-model.test.ts  | 186 ++++++++++++++++++
 4 files changed, 225 insertions(+), 2 deletions(-)
 create mode 100644 packages/opencode/test/provider/copilot/openai-responses-language-model.test.ts

diff --git a/packages/opencode/src/provider/sdk/copilot/responses/convert-to-openai-responses-input.ts b/packages/opencode/src/provider/sdk/copilot/responses/convert-to-openai-responses-input.ts
index 83e46015dd..00c5e07d1c 100644
--- a/packages/opencode/src/provider/sdk/copilot/responses/convert-to-openai-responses-input.ts
+++ b/packages/opencode/src/provider/sdk/copilot/responses/convert-to-openai-responses-input.ts
@@ -124,10 +124,18 @@ export async function convertToOpenAIResponsesInput({
     for (const part of content) {
       switch (part.type) {
         case "text": {
+          const opts = part.providerOptions?.openai as
+            | {
+                itemId?: string
+                phase?: "commentary" | "final_answer"
+              }
+            | undefined
+
           input.push({
             role: "assistant",
             content: [{ type: "output_text", text: part.text }],
-            id: (part.providerOptions?.openai?.itemId as string) ?? undefined,
+            id: opts?.itemId,
+            ...(opts?.phase != null && { phase: opts.phase }),
           })
           break
         }
diff --git a/packages/opencode/src/provider/sdk/copilot/responses/openai-responses-api-types.ts b/packages/opencode/src/provider/sdk/copilot/responses/openai-responses-api-types.ts
index dfdd066750..624c84cfd3 100644
--- a/packages/opencode/src/provider/sdk/copilot/responses/openai-responses-api-types.ts
+++ b/packages/opencode/src/provider/sdk/copilot/responses/openai-responses-api-types.ts
@@ -47,6 +47,7 @@ export type OpenAIResponsesAssistantMessage = {
   role: "assistant"
   content: Array<{ type: "output_text"; text: string }>
   id?: string
+  phase?: "commentary" | "final_answer" | null
 }
 
 export type OpenAIResponsesFunctionCall = {
diff --git a/packages/opencode/src/provider/sdk/copilot/responses/openai-responses-language-model.ts b/packages/opencode/src/provider/sdk/copilot/responses/openai-responses-language-model.ts
index 4606af7a15..6cd23b2894 100644
--- a/packages/opencode/src/provider/sdk/copilot/responses/openai-responses-language-model.ts
+++ b/packages/opencode/src/provider/sdk/copilot/responses/openai-responses-language-model.ts
@@ -106,6 +106,8 @@ const imageGenerationCallItem = z.object({
   result: z.string(),
 })
 
+const phaseSchema = z.enum(["commentary", "final_answer"]).nullish()
+
 /**
  * `top_logprobs` request body argument can be set to an integer between
  * 0 and 20 specifying the number of most likely tokens to return at each
@@ -423,6 +425,7 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV3 {
                 type: z.literal("message"),
                 role: z.literal("assistant"),
                 id: z.string(),
+                phase: phaseSchema,
                 content: z.array(
                   z.object({
                     type: z.literal("output_text"),
@@ -584,6 +587,7 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV3 {
             providerMetadata: {
               openai: {
                 itemId: part.id,
+                ...(part.phase != null && { phase: part.phase }),
              },
             },
           })
@@ -850,6 +854,8 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV3 {
 
     // Copilot may change item_id across text deltas; normalize to one id.
     let currentTextId: string | null = null
+    let activeMessagePhase: "commentary" | "final_answer" | undefined
+
     let serviceTier: string | undefined
 
     return {
@@ -949,12 +955,14 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV3 {
             } else if (value.item.type === "message") {
               // Start a stable text part for this assistant message
               currentTextId = value.item.id
+              activeMessagePhase = value.item.phase ?? undefined
               controller.enqueue({
                 type: "text-start",
                 id: value.item.id,
                 providerMetadata: {
                   openai: {
                     itemId: value.item.id,
+                    ...(value.item.phase != null && { phase: value.item.phase }),
                   },
                 },
               })
@@ -1106,10 +1114,18 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV3 {
                 },
               })
             } else if (value.item.type === "message") {
+              const phase = value.item.phase ?? activeMessagePhase
+              activeMessagePhase = undefined
               if (currentTextId) {
                 controller.enqueue({
                   type: "text-end",
                   id: currentTextId,
+                  providerMetadata: {
+                    openai: {
+                      itemId: currentTextId,
+                      ...(phase != null && { phase }),
+                    },
+                  },
                 })
                 currentTextId = null
               }
@@ -1300,8 +1316,18 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV3 {
         flush(controller) {
           // Close any dangling text part
           if (currentTextId) {
-            controller.enqueue({ type: "text-end", id: currentTextId })
+            controller.enqueue({
+              type: "text-end",
+              id: currentTextId,
+              providerMetadata: {
+                openai: {
+                  itemId: currentTextId,
+                  ...(activeMessagePhase != null && { phase: activeMessagePhase }),
+                },
+              },
+            })
             currentTextId = null
+            activeMessagePhase = undefined
           }
 
           const providerMetadata: SharedV3ProviderMetadata = {
@@ -1401,6 +1427,7 @@ const responseOutputItemAddedSchema = z.object({
     z.object({
       type: z.literal("message"),
       id: z.string(),
+      phase: phaseSchema,
     }),
     z.object({
       type: z.literal("reasoning"),
@@ -1463,6 +1490,7 @@ const responseOutputItemDoneSchema = z.object({
     z.object({
       type: z.literal("message"),
       id: z.string(),
+      phase: phaseSchema,
     }),
     z.object({
      type: z.literal("reasoning"),
diff --git a/packages/opencode/test/provider/copilot/openai-responses-language-model.test.ts b/packages/opencode/test/provider/copilot/openai-responses-language-model.test.ts
new file mode 100644
index 0000000000..4487bb412a
--- /dev/null
+++ b/packages/opencode/test/provider/copilot/openai-responses-language-model.test.ts
@@ -0,0 +1,186 @@
+import { OpenAIResponsesLanguageModel } from "@/provider/sdk/copilot/responses/openai-responses-language-model"
+import { describe, expect, mock, test } from "bun:test"
+import type { LanguageModelV3Prompt } from "@ai-sdk/provider"
+
+const prompt: LanguageModelV3Prompt = [{ role: "user", content: [{ type: "text", text: "Hello" }] }]
+
+async function read<T>(stream: ReadableStream<T>) {
+  const reader = stream.getReader()
+  const out: T[] = []
+  while (true) {
+    const chunk = await reader.read()
+    if (chunk.done) return out
+    out.push(chunk.value)
+  }
+}
+
+function model(fetch: ReturnType<typeof mock>) {
+  return new OpenAIResponsesLanguageModel("gpt-5", {
+    provider: "copilot.responses",
+    url: ({ path }) => `https://api.test.com${path}`,
+    headers: () => ({ Authorization: "Bearer test-token" }),
+    fetch: fetch as any,
+  })
+}
+
+describe("OpenAIResponsesLanguageModel", () => {
+  test("accepts null phase in non-stream response", async () => {
+    const fetch = mock(
+      async () =>
+        new Response(
+          JSON.stringify({
+            id: "resp_1",
+            created_at: 1,
+            error: null,
+            model: "gpt-5",
+            output: [
+              {
+                type: "message",
+                role: "assistant",
+                id: "msg_1",
+                phase: null,
+                content: [
+                  {
+                    type: "output_text",
+                    text: "Hello",
+                    logprobs: null,
+                    annotations: [],
+                  },
+                ],
+              },
+            ],
+            service_tier: null,
+            incomplete_details: null,
+            usage: {
+              input_tokens: 1,
+              input_tokens_details: { cached_tokens: null },
+              output_tokens: 1,
+              output_tokens_details: { reasoning_tokens: null },
+            },
+          }),
+          {
+            status: 200,
+            headers: { "Content-Type": "application/json" },
+          },
+        ),
+    )
+
+    const out = await model(fetch).doGenerate({
+      prompt,
+    })
+
+    expect(out.content).toMatchObject([
+      {
+        type: "text",
+        text: "Hello",
+        providerMetadata: {
+          openai: {
+            itemId: "msg_1",
+          },
+        },
+      },
+    ])
+  })
+
+  test("keeps stable itemId on stream text-end when done id changes", async () => {
+    const fetch = mock(async () => {
+      const body = new ReadableStream({
+        start(controller) {
+          const lines = [
+            {
+              type: "response.created",
+              sequence_number: 1,
+              response: {
+                id: "resp_1",
+                created_at: 1,
+                model: "gpt-5",
+                service_tier: null,
+              },
+            },
+            {
+              type: "response.output_item.added",
+              sequence_number: 2,
+              output_index: 0,
+              item: {
+                type: "message",
+                id: "msg_added",
+                phase: "final_answer",
+              },
+            },
+            {
+              type: "response.output_text.delta",
+              sequence_number: 3,
+              item_id: "msg_delta",
+              delta: "Hello",
+              logprobs: null,
+            },
+            {
+              type: "response.output_item.done",
+              sequence_number: 4,
+              output_index: 0,
+              item: {
+                type: "message",
+                id: "msg_done",
+                phase: "final_answer",
+              },
+            },
+            {
+              type: "response.completed",
+              sequence_number: 5,
+              response: {
+                incomplete_details: null,
+                service_tier: null,
+                usage: {
+                  input_tokens: 1,
+                  input_tokens_details: { cached_tokens: null },
+                  output_tokens: 1,
+                  output_tokens_details: { reasoning_tokens: null },
+                },
+              },
+            },
+          ]
+
+          for (const line of lines) {
+            controller.enqueue(new TextEncoder().encode(`data: ${JSON.stringify(line)}\n\n`))
+          }
+          controller.enqueue(new TextEncoder().encode("data: [DONE]\n\n"))
+          controller.close()
+        },
+      })
+
+      return new Response(body, {
+        status: 200,
+        headers: { "Content-Type": "text/event-stream" },
+      })
+    })
+
+    const out = await model(fetch).doStream({
+      prompt,
+      includeRawChunks: false,
+    })
+    const parts = await read(out.stream)
+
+    expect(parts.filter((part) => part.type === "text-start" || part.type === "text-end")).toMatchObject([
+      {
+        type: "text-start",
+        id: "msg_added",
+        providerMetadata: {
+          openai: {
+            itemId: "msg_added",
+            phase: "final_answer",
+          },
+        },
+      },
+      {
+        type: "text-end",
+        id: "msg_added",
+        providerMetadata: {
+          openai: {
+            itemId: "msg_added",
+            phase: "final_answer",
+          },
+        },
+      },
+    ])
+  })
+})