Merge branch 'dev' into kit/e2e-golden-path

pull/20593/head
Kit Langton 2026-04-02 12:06:58 -04:00 committed by GitHub
commit 490fd31706
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
4 changed files with 15 additions and 6 deletions

View File

@ -47,6 +47,8 @@ function LimitsGraph(props: { href: string }) {
// Catalog of Go-plan models rendered by the limits graph:
// `req` is the estimated request quota per 5-hour window,
// `d` is the typical per-request latency shown in the UI.
const models = [
  {
    id: "glm",
    name: "GLM-5",
    req: 1150,
    d: "120ms",
  },
  {
    id: "kimi",
    name: "Kimi K2.5",
    req: 1850,
    d: "240ms",
  },
  {
    id: "mimo-v2-pro",
    name: "MiMo-V2-Pro",
    req: 1290,
    d: "150ms",
  },
  {
    id: "mimo-v2-omni",
    name: "MiMo-V2-Omni",
    req: 2150,
    d: "270ms",
  },
  {
    id: "minimax-m2.7",
    name: "MiniMax M2.7",
    req: 14000,
    d: "330ms",
  },
  {
    id: "minimax-m2.5",
    name: "MiniMax M2.5",
    req: 20000,
    d: "360ms",
  },
]

View File

@ -103,7 +103,7 @@ export const oxfmt: Info = {
export const biome: Info = {
name: "biome",
command: ["bun", "x", "@biomejs/biome", "check", "--write", "$FILE"],
command: ["bun", "x", "@biomejs/biome", "format", "--write", "$FILE"],
environment: {
BUN_BE_BUN: "1",
},

View File

@ -112,6 +112,7 @@ export namespace CopilotModels {
): Promise<Record<string, Model>> {
const data = await fetch(`${baseURL}/models`, {
headers,
signal: AbortSignal.timeout(5_000),
}).then(async (res) => {
if (!res.ok) {
throw new Error(`Failed to fetch models: ${res.status}`)

View File

@ -65,6 +65,8 @@ The current list of models includes:
- **GLM-5**
- **Kimi K2.5**
- **MiMo-V2-Pro**
- **MiMo-V2-Omni**
- **MiniMax M2.5**
- **MiniMax M2.7**
@ -84,17 +86,19 @@ Limits are defined in dollar value. This means your actual request count depends
The table below provides an estimated request count based on typical Go usage patterns:
| | GLM-5 | Kimi K2.5 | MiniMax M2.7 | MiniMax M2.5 |
| ------------------- | ----- | --------- | ------------ | ------------ |
| requests per 5 hour | 1,150 | 1,850 | 14,000 | 20,000 |
| requests per week | 2,880 | 4,630 | 35,000 | 50,000 |
| requests per month | 5,750 | 9,250 | 70,000 | 100,000 |
| | GLM-5 | Kimi K2.5 | MiMo-V2-Pro | MiMo-V2-Omni | MiniMax M2.7 | MiniMax M2.5 |
| ------------------- | ----- | --------- | ----------- | ------------ | ------------ | ------------ |
| requests per 5 hour | 1,150 | 1,850 | 1,290 | 2,150 | 14,000 | 20,000 |
| requests per week | 2,880 | 4,630 | 3,225 | 5,450 | 35,000 | 50,000 |
| requests per month | 5,750 | 9,250 | 6,450 | 10,900 | 70,000 | 100,000 |
Estimates are based on observed average request patterns:
- GLM-5 — 700 input, 52,000 cached, 150 output tokens per request
- Kimi K2.5 — 870 input, 55,000 cached, 200 output tokens per request
- MiniMax M2.7/M2.5 — 300 input, 55,000 cached, 125 output tokens per request
- MiMo-V2-Pro — 350 input, 41,000 cached, 250 output tokens per request
- MiMo-V2-Omni — 1,000 input, 60,000 cached, 140 output tokens per request
You can track your current usage in the **<a href={console}>console</a>**.
@ -122,6 +126,8 @@ You can also access Go models through the following API endpoints.
| ------------ | ------------ | ------------------------------------------------ | --------------------------- |
| GLM-5 | glm-5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
| MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
| MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` |
| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` |
| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` |