pull/21464/merge
JeffreyCao 2026-04-08 14:00:44 +08:00 committed by GitHub
commit 30afa58019
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
4 changed files with 225 additions and 2 deletions

View File

@ -124,10 +124,18 @@ export async function convertToOpenAIResponsesInput({
for (const part of content) {
switch (part.type) {
case "text": {
const opts = part.providerOptions?.openai as
| {
itemId?: string
phase?: "commentary" | "final_answer"
}
| undefined
input.push({
role: "assistant",
content: [{ type: "output_text", text: part.text }],
id: (part.providerOptions?.openai?.itemId as string) ?? undefined,
id: opts?.itemId,
...(opts?.phase != null && { phase: opts.phase }),
})
break
}

View File

@ -47,6 +47,7 @@ export type OpenAIResponsesAssistantMessage = {
role: "assistant"
content: Array<{ type: "output_text"; text: string }>
id?: string
phase?: "commentary" | "final_answer" | null
}
export type OpenAIResponsesFunctionCall = {

View File

@ -106,6 +106,8 @@ const imageGenerationCallItem = z.object({
result: z.string(),
})
const phaseSchema = z.enum(["commentary", "final_answer"]).nullish()
/**
* `top_logprobs` request body argument can be set to an integer between
* 0 and 20 specifying the number of most likely tokens to return at each
@ -423,6 +425,7 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV3 {
type: z.literal("message"),
role: z.literal("assistant"),
id: z.string(),
phase: phaseSchema,
content: z.array(
z.object({
type: z.literal("output_text"),
@ -584,6 +587,7 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV3 {
providerMetadata: {
openai: {
itemId: part.id,
...(part.phase != null && { phase: part.phase }),
},
},
})
@ -850,6 +854,8 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV3 {
// Copilot may change item_id across text deltas; normalize to one id.
let currentTextId: string | null = null
let activeMessagePhase: "commentary" | "final_answer" | undefined
let serviceTier: string | undefined
return {
@ -949,12 +955,14 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV3 {
} else if (value.item.type === "message") {
// Start a stable text part for this assistant message
currentTextId = value.item.id
activeMessagePhase = value.item.phase ?? undefined
controller.enqueue({
type: "text-start",
id: value.item.id,
providerMetadata: {
openai: {
itemId: value.item.id,
...(value.item.phase != null && { phase: value.item.phase }),
},
},
})
@ -1106,10 +1114,18 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV3 {
},
})
} else if (value.item.type === "message") {
const phase = value.item.phase ?? activeMessagePhase
activeMessagePhase = undefined
if (currentTextId) {
controller.enqueue({
type: "text-end",
id: currentTextId,
providerMetadata: {
openai: {
itemId: currentTextId,
...(phase != null && { phase }),
},
},
})
currentTextId = null
}
@ -1300,8 +1316,18 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV3 {
flush(controller) {
// Close any dangling text part
if (currentTextId) {
controller.enqueue({ type: "text-end", id: currentTextId })
controller.enqueue({
type: "text-end",
id: currentTextId,
providerMetadata: {
openai: {
itemId: currentTextId,
...(activeMessagePhase != null && { phase: activeMessagePhase }),
},
},
})
currentTextId = null
activeMessagePhase = undefined
}
const providerMetadata: SharedV3ProviderMetadata = {
@ -1401,6 +1427,7 @@ const responseOutputItemAddedSchema = z.object({
z.object({
type: z.literal("message"),
id: z.string(),
phase: phaseSchema,
}),
z.object({
type: z.literal("reasoning"),
@ -1463,6 +1490,7 @@ const responseOutputItemDoneSchema = z.object({
z.object({
type: z.literal("message"),
id: z.string(),
phase: phaseSchema,
}),
z.object({
type: z.literal("reasoning"),

View File

@ -0,0 +1,186 @@
import { OpenAIResponsesLanguageModel } from "@/provider/sdk/copilot/responses/openai-responses-language-model"
import { describe, expect, mock, test } from "bun:test"
import type { LanguageModelV3Prompt } from "@ai-sdk/provider"
// Minimal single-turn user prompt shared by every test in this file.
const prompt: LanguageModelV3Prompt = [
  {
    role: "user",
    content: [{ type: "text", text: "Hello" }],
  },
]
/**
 * Drain a ReadableStream into an array of its chunks.
 *
 * Fix over the original: the reader lock is now released in a `finally`
 * block, so a rejecting `read()` no longer leaves the stream permanently
 * locked (the original returned without ever calling `releaseLock()`).
 */
async function read<T>(stream: ReadableStream<T>) {
  const reader = stream.getReader()
  const out: T[] = []
  try {
    for (;;) {
      const { done, value } = await reader.read()
      if (done) return out
      out.push(value)
    }
  } finally {
    // Always unlock, whether we finished cleanly or read() threw.
    reader.releaseLock()
  }
}
/**
 * Build an OpenAIResponsesLanguageModel wired to the supplied mocked fetch,
 * so tests never perform real HTTP.
 */
function model(fetch: ReturnType<typeof mock>) {
  const config = {
    provider: "copilot.responses",
    url: (opts: { path: string }) => `https://api.test.com${opts.path}`,
    headers: () => ({ Authorization: "Bearer test-token" }),
    fetch: fetch as any,
  }
  return new OpenAIResponsesLanguageModel("gpt-5", config)
}
// Tests for the Copilot "phase" field ("commentary" | "final_answer") that the
// OpenAI Responses API can attach to assistant message items.
describe("OpenAIResponsesLanguageModel", () => {
  // Non-streaming path: a `phase: null` on a message output item must be
  // accepted by the response schema (nullish) without breaking parsing.
  test("accepts null phase in non-stream response", async () => {
    const fetch = mock(
      async () =>
        new Response(
          JSON.stringify({
            id: "resp_1",
            created_at: 1,
            error: null,
            model: "gpt-5",
            output: [
              {
                type: "message",
                role: "assistant",
                id: "msg_1",
                // Explicit null exercises the nullish branch of the schema.
                phase: null,
                content: [
                  {
                    type: "output_text",
                    text: "Hello",
                    logprobs: null,
                    annotations: [],
                  },
                ],
              },
            ],
            service_tier: null,
            incomplete_details: null,
            usage: {
              input_tokens: 1,
              input_tokens_details: { cached_tokens: null },
              output_tokens: 1,
              output_tokens_details: { reasoning_tokens: null },
            },
          }),
          {
            status: 200,
            headers: { "Content-Type": "application/json" },
          },
        ),
    )
    const out = await model(fetch).doGenerate({
      prompt,
    })
    // Only itemId is expected in providerMetadata here; no phase key is
    // asserted for the null-phase input (toMatchObject checks a subset).
    expect(out.content).toMatchObject([
      {
        type: "text",
        text: "Hello",
        providerMetadata: {
          openai: {
            itemId: "msg_1",
          },
        },
      },
    ])
  })
  // Streaming path: the SSE fixture deliberately uses three different item
  // ids ("msg_added", "msg_delta", "msg_done"); text-start/text-end must keep
  // the first (stable) id and carry the non-null phase through.
  test("keeps stable itemId on stream text-end when done id changes", async () => {
    const fetch = mock(async () => {
      // Hand-built SSE body replaying a minimal Responses event stream.
      const body = new ReadableStream({
        start(controller) {
          const lines = [
            {
              type: "response.created",
              sequence_number: 1,
              response: {
                id: "resp_1",
                created_at: 1,
                model: "gpt-5",
                service_tier: null,
              },
            },
            {
              type: "response.output_item.added",
              sequence_number: 2,
              output_index: 0,
              item: {
                type: "message",
                id: "msg_added",
                phase: "final_answer",
              },
            },
            {
              // Delta intentionally arrives under a different item_id.
              type: "response.output_text.delta",
              sequence_number: 3,
              item_id: "msg_delta",
              delta: "Hello",
              logprobs: null,
            },
            {
              type: "response.output_item.done",
              sequence_number: 4,
              output_index: 0,
              item: {
                type: "message",
                id: "msg_done",
                phase: "final_answer",
              },
            },
            {
              type: "response.completed",
              sequence_number: 5,
              response: {
                incomplete_details: null,
                service_tier: null,
                usage: {
                  input_tokens: 1,
                  input_tokens_details: { cached_tokens: null },
                  output_tokens: 1,
                  output_tokens_details: { reasoning_tokens: null },
                },
              },
            },
          ]
          for (const line of lines) {
            controller.enqueue(new TextEncoder().encode(`data: ${JSON.stringify(line)}\n\n`))
          }
          controller.enqueue(new TextEncoder().encode("data: [DONE]\n\n"))
          controller.close()
        },
      })
      return new Response(body, {
        status: 200,
        headers: { "Content-Type": "text/event-stream" },
      })
    })
    const out = await model(fetch).doStream({
      prompt,
      includeRawChunks: false,
    })
    const parts = await read(out.stream)
    // Both boundary parts must carry the stable id and the phase metadata.
    expect(parts.filter((part) => part.type === "text-start" || part.type === "text-end")).toMatchObject([
      {
        type: "text-start",
        id: "msg_added",
        providerMetadata: {
          openai: {
            itemId: "msg_added",
            phase: "final_answer",
          },
        },
      },
      {
        type: "text-end",
        id: "msg_added",
        providerMetadata: {
          openai: {
            itemId: "msg_added",
            phase: "final_answer",
          },
        },
      },
    ])
  })
})