From 72c117de6b95d03588ed9eb3285179fc648f7f1a Mon Sep 17 00:00:00 2001 From: Tom Beckenham <34339192+tombeckenham@users.noreply.github.com> Date: Mon, 11 May 2026 22:24:57 +1000 Subject: [PATCH 01/49] refactor: migrate ai-groq + ai-openrouter onto @tanstack/openai-base (#543) Adds protected `callChatCompletion`, `callChatCompletionStream`, `extractReasoning`, and `transformStructuredOutput` hooks to `OpenAICompatibleChatCompletionsTextAdapter` so providers with non-OpenAI SDK shapes can reuse the shared stream accumulator, partial-JSON tool-call buffer, RUN_ERROR taxonomy, and lifecycle gates. ai-groq drops `groq-sdk` in favour of the OpenAI SDK pointed at api.groq.com/openai/v1; ai-openrouter keeps `@openrouter/sdk` via hook overrides. ai-ollama remains on BaseTextAdapter (native API has a different wire format). Co-Authored-By: Claude Opus 4.7 (1M context) --- .../migrate-groq-openrouter-to-openai-base.md | 15 + packages/typescript/ai-groq/package.json | 3 +- .../typescript/ai-groq/src/adapters/text.ts | 607 +--------- .../typescript/ai-groq/src/utils/client.ts | 37 +- .../typescript/ai-groq/src/utils/index.ts | 4 +- .../ai-groq/tests/groq-adapter.test.ts | 49 +- .../typescript/ai-openrouter/package.json | 3 +- .../ai-openrouter/src/adapters/text.ts | 1011 ++++++----------- .../tests/openrouter-adapter.test.ts | 9 +- .../src/adapters/chat-completions-text.ts | 181 ++- packages/typescript/openai-base/src/index.ts | 9 + pnpm-lock.yaml | 76 +- 12 files changed, 644 insertions(+), 1360 deletions(-) create mode 100644 .changeset/migrate-groq-openrouter-to-openai-base.md diff --git a/.changeset/migrate-groq-openrouter-to-openai-base.md b/.changeset/migrate-groq-openrouter-to-openai-base.md new file mode 100644 index 000000000..ff5012565 --- /dev/null +++ b/.changeset/migrate-groq-openrouter-to-openai-base.md @@ -0,0 +1,15 @@ +--- +'@tanstack/openai-base': minor +'@tanstack/ai-groq': patch +'@tanstack/ai-openrouter': patch +--- + +Migrate `ai-groq` and `ai-openrouter` onto `OpenAICompatibleChatCompletionsTextAdapter` so they share the stream accumulator, partial-JSON tool-call buffer, RUN_ERROR taxonomy, and lifecycle gates with `ai-openai` / `ai-grok`. Removes ~1k LOC of duplicated stream processing. + +`@tanstack/openai-base` adds three protected hooks on `OpenAICompatibleChatCompletionsTextAdapter` so providers with non-OpenAI SDK shapes can reuse the base: `callChatCompletion` and `callChatCompletionStream` (SDK call sites for non-streaming and streaming Chat Completions), and `extractReasoning` (surface reasoning content from chunk shapes that carry it, e.g. OpenRouter's `delta.reasoningDetails`, into the base's REASONING_* + legacy STEP_STARTED/STEP_FINISHED lifecycle). Also adds `transformStructuredOutput` for subclasses (like OpenRouter) that preserve nulls in structured output instead of converting them to undefined. + +`@tanstack/ai-groq` drops the `groq-sdk` dependency in favour of the OpenAI SDK pointed at `https://api.groq.com/openai/v1` (the same pattern as `ai-grok` against xAI). The Groq-specific quirk where streaming usage arrives under `chunk.x_groq.usage` is preserved via a small `processStreamChunks` wrapper that promotes it to the standard `chunk.usage` slot. + +`@tanstack/ai-openrouter` keeps `@openrouter/sdk` (the source of truth for OpenRouter's typed provider routing, plugins, and metadata) but routes the SDK call through the base via overridden hooks. A small request shape converter (`max_tokens` → `maxCompletionTokens`, etc.) 
and chunk shape adapter (camelCase → snake_case for the base's reader) bridge the SDKs. No public API changes; provider routing, app attribution headers (`httpReferer`, `appTitle`), reasoning variants (`:thinking`), and `RequestAbortedError` handling are preserved. + +`ai-ollama` remains on `BaseTextAdapter` — its native API uses a different wire format from Chat Completions (different chunk shape, request shape, tool-call streaming, and reasoning surface) and doesn't fit the OpenAI base without rebuilding most of the processing it would otherwise inherit. Migrating it remains a separate effort. diff --git a/packages/typescript/ai-groq/package.json b/packages/typescript/ai-groq/package.json index ee3a18a5b..9899b1d46 100644 --- a/packages/typescript/ai-groq/package.json +++ b/packages/typescript/ai-groq/package.json @@ -52,7 +52,6 @@ }, "dependencies": { "@tanstack/ai-utils": "workspace:*", - "@tanstack/openai-base": "workspace:*", - "groq-sdk": "^0.37.0" + "@tanstack/openai-base": "workspace:*" } } diff --git a/packages/typescript/ai-groq/src/adapters/text.ts b/packages/typescript/ai-groq/src/adapters/text.ts index 34f44ba81..4d8ba131b 100644 --- a/packages/typescript/ai-groq/src/adapters/text.ts +++ b/packages/typescript/ai-groq/src/adapters/text.ts @@ -1,72 +1,44 @@ -import { BaseTextAdapter } from '@tanstack/ai/adapters' -import { validateTextProviderOptions } from '../text/text-provider-options' -import { convertToolsToProviderFormat } from '../tools' -import { - createGroqClient, - generateId, - getGroqApiKeyFromEnv, - makeGroqStructuredOutputCompatible, - transformNullsToUndefined, -} from '../utils' +import { OpenAICompatibleChatCompletionsTextAdapter } from '@tanstack/openai-base' +import { getGroqApiKeyFromEnv, withGroqDefaults } from '../utils/client' +import { makeGroqStructuredOutputCompatible } from '../utils/schema-converter' +import type { Modality, TextOptions } from '@tanstack/ai' +import type { ChatCompletionChunk } from '@tanstack/openai-base' import type { GROQ_CHAT_MODELS, GroqChatModelToolCapabilitiesByName, ResolveInputModalities, ResolveProviderOptions, } from '../model-meta' -import type { - StructuredOutputOptions, - StructuredOutputResult, -} from '@tanstack/ai/adapters' -import type { InternalLogger } from '@tanstack/ai/adapter-internals' -import type GROQ_SDK from 'groq-sdk' -import type { ChatCompletionCreateParamsStreaming } from 'groq-sdk/resources/chat/completions' -import type { - ContentPart, - Modality, - ModelMessage, - StreamChunk, - TextOptions, -} from '@tanstack/ai' -import type { - ExternalTextProviderOptions, - InternalTextProviderOptions, -} from '../text/text-provider-options' -import type { - ChatCompletionContentPart, - ChatCompletionMessageParam, - GroqImageMetadata, - GroqMessageMetadataByModality, -} from '../message-types' +import type { GroqMessageMetadataByModality } from '../message-types' import type { GroqClientConfig } from '../utils' -type GroqTextProviderOptions = ExternalTextProviderOptions - type ResolveToolCapabilities = TModel extends keyof GroqChatModelToolCapabilitiesByName ? NonNullable : readonly [] -/** Cast an event object to StreamChunk. Adapters construct events with string - * literal types which are structurally compatible with the EventType enum. 
*/ -const asChunk = (chunk: Record) => - chunk as unknown as StreamChunk - /** * Configuration for Groq text adapter */ export interface GroqTextConfig extends GroqClientConfig {} /** - * Alias for TextProviderOptions for external use + * Re-export of the public provider options type */ export type { ExternalTextProviderOptions as GroqTextProviderOptions } from '../text/text-provider-options' /** * Groq Text (Chat) Adapter * - * Tree-shakeable adapter for Groq chat/text completion functionality. - * Uses the Groq SDK which provides an OpenAI-compatible Chat Completions API. + * Tree-shakeable adapter for Groq chat/text completion. Groq exposes an + * OpenAI-compatible Chat Completions endpoint at `/openai/v1`, so we drive + * it with the OpenAI SDK via a `baseURL` override (the same pattern as + * `ai-grok`). + * + * Quirk: when usage is present on a stream, Groq historically delivered it + * under `chunk.x_groq.usage` rather than `chunk.usage`. The override below + * promotes it to the standard location so the base's RUN_FINISHED usage + * accounting works unchanged. */ export class GroqTextAdapter< TModel extends (typeof GROQ_CHAT_MODELS)[number], @@ -75,7 +47,7 @@ export class GroqTextAdapter< ResolveInputModalities, TToolCapabilities extends ReadonlyArray = ResolveToolCapabilities, -> extends BaseTextAdapter< +> extends OpenAICompatibleChatCompletionsTextAdapter< TModel, TProviderOptions, TInputModalities, @@ -85,526 +57,61 @@ export class GroqTextAdapter< readonly kind = 'text' as const readonly name = 'groq' as const - private client: GROQ_SDK - constructor(config: GroqTextConfig, model: TModel) { - super({}, model) - this.client = createGroqClient(config) + super(withGroqDefaults(config), model, 'groq') } - async *chatStream( - options: TextOptions, - ): AsyncIterable { - const requestParams = this.mapTextOptionsToGroq(options) - const timestamp = Date.now() - const { logger } = options - - const aguiState = { - runId: options.runId ?? generateId(this.name), - threadId: options.threadId ?? generateId(this.name), - messageId: generateId(this.name), - timestamp, - hasEmittedRunStarted: false, - } - - try { - logger.request( - `activity=chat provider=groq model=${this.model} messages=${options.messages.length} tools=${options.tools?.length ?? 0} stream=true`, - { provider: 'groq', model: this.model }, - ) - const stream = await this.client.chat.completions.create({ - ...requestParams, - stream: true, - }) - - yield* this.processGroqStreamChunks(stream, options, aguiState, logger) - } catch (error: unknown) { - const err = error as Error & { code?: string } - - if (!aguiState.hasEmittedRunStarted) { - aguiState.hasEmittedRunStarted = true - yield asChunk({ - type: 'RUN_STARTED', - runId: aguiState.runId, - threadId: aguiState.threadId, - model: options.model, - timestamp, - }) - } - - yield asChunk({ - type: 'RUN_ERROR', - runId: aguiState.runId, - model: options.model, - timestamp, - message: err.message || 'Unknown error', - code: err.code, - error: { - message: err.message || 'Unknown error', - code: err.code, - }, - }) - - logger.errors('groq.chatStream fatal', { - error, - source: 'groq.chatStream', - }) - } + protected override makeStructuredOutputCompatible( + schema: Record, + originalRequired?: Array, + ): Record { + return makeGroqStructuredOutputCompatible(schema, originalRequired) } - /** - * Generate structured output using Groq's JSON Schema response format. - * Uses stream: false to get the complete response in one call. 
- * - * Groq has strict requirements for structured output: - * - All properties must be in the `required` array - * - Optional fields should have null added to their type union - * - additionalProperties must be false for all objects - * - * The outputSchema is already JSON Schema (converted in the ai layer). - * We apply Groq-specific transformations for structured output compatibility. - */ - async structuredOutput( - options: StructuredOutputOptions, - ): Promise> { - const { chatOptions, outputSchema } = options - const requestParams = this.mapTextOptionsToGroq(chatOptions) - const { logger } = chatOptions - - const jsonSchema = makeGroqStructuredOutputCompatible( - outputSchema, - outputSchema.required || [], - ) - - try { - logger.request( - `activity=chat provider=groq model=${this.model} messages=${chatOptions.messages.length} tools=${chatOptions.tools?.length ?? 0} stream=false`, - { provider: 'groq', model: this.model }, - ) - const response = await this.client.chat.completions.create({ - ...requestParams, - stream: false, - response_format: { - type: 'json_schema', - json_schema: { - name: 'structured_output', - schema: jsonSchema, - strict: true, - }, - }, - }) - - const rawText = response.choices[0]?.message.content || '' - - let parsed: unknown - try { - parsed = JSON.parse(rawText) - } catch { - throw new Error( - `Failed to parse structured output as JSON. Content: ${rawText.slice(0, 200)}${rawText.length > 200 ? '...' : ''}`, - ) - } - - const transformed = transformNullsToUndefined(parsed) - - return { - data: transformed, - rawText, - } - } catch (error: unknown) { - logger.errors('groq.structuredOutput fatal', { - error, - source: 'groq.structuredOutput', - }) - throw error - } - } - - /** - * Processes streaming chunks from the Groq API and yields AG-UI stream events. - * Handles text content deltas, tool call assembly, and lifecycle events. 
- */ - private async *processGroqStreamChunks( - stream: AsyncIterable, + protected override async *processStreamChunks( + stream: AsyncIterable, options: TextOptions, aguiState: { runId: string - threadId: string messageId: string timestamp: number hasEmittedRunStarted: boolean }, - logger: InternalLogger, - ): AsyncIterable { - let accumulatedContent = '' - const timestamp = aguiState.timestamp - let hasEmittedTextMessageStart = false - - const toolCallsInProgress = new Map< - number, - { - id: string - name: string - arguments: string - started: boolean - } - >() - - try { - for await (const chunk of stream) { - logger.provider(`provider=groq`, { chunk }) - const choice = chunk.choices[0] - - if (!choice) continue - - if (!aguiState.hasEmittedRunStarted) { - aguiState.hasEmittedRunStarted = true - yield asChunk({ - type: 'RUN_STARTED', - runId: aguiState.runId, - threadId: aguiState.threadId, - model: chunk.model || options.model, - timestamp, - }) - } - - const delta = choice.delta - const deltaContent = delta.content - const deltaToolCalls = delta.tool_calls - - if (deltaContent) { - if (!hasEmittedTextMessageStart) { - hasEmittedTextMessageStart = true - yield asChunk({ - type: 'TEXT_MESSAGE_START', - messageId: aguiState.messageId, - model: chunk.model || options.model, - timestamp, - role: 'assistant', - }) - } - - accumulatedContent += deltaContent - - yield asChunk({ - type: 'TEXT_MESSAGE_CONTENT', - messageId: aguiState.messageId, - model: chunk.model || options.model, - timestamp, - delta: deltaContent, - content: accumulatedContent, - }) - } - - if (deltaToolCalls) { - for (const toolCallDelta of deltaToolCalls) { - const index = toolCallDelta.index - - if (!toolCallsInProgress.has(index)) { - toolCallsInProgress.set(index, { - id: toolCallDelta.id || '', - name: toolCallDelta.function?.name || '', - arguments: '', - started: false, - }) - } - - const toolCall = toolCallsInProgress.get(index)! - - if (toolCallDelta.id) { - toolCall.id = toolCallDelta.id - } - if (toolCallDelta.function?.name) { - toolCall.name = toolCallDelta.function.name - } - if (toolCallDelta.function?.arguments) { - toolCall.arguments += toolCallDelta.function.arguments - } - - if (toolCall.id && toolCall.name && !toolCall.started) { - toolCall.started = true - yield asChunk({ - type: 'TOOL_CALL_START', - toolCallId: toolCall.id, - toolCallName: toolCall.name, - toolName: toolCall.name, - model: chunk.model || options.model, - timestamp, - index, - }) - } - - if (toolCallDelta.function?.arguments && toolCall.started) { - yield asChunk({ - type: 'TOOL_CALL_ARGS', - toolCallId: toolCall.id, - model: chunk.model || options.model, - timestamp, - delta: toolCallDelta.function.arguments, - }) - } - } - } - - if (choice.finish_reason) { - if ( - choice.finish_reason === 'tool_calls' || - toolCallsInProgress.size > 0 - ) { - for (const [, toolCall] of toolCallsInProgress) { - if (!toolCall.started || !toolCall.id || !toolCall.name) { - continue - } - - let parsedInput: unknown = {} - try { - parsedInput = toolCall.arguments - ? JSON.parse(toolCall.arguments) - : {} - } catch { - parsedInput = {} - } - - yield asChunk({ - type: 'TOOL_CALL_END', - toolCallId: toolCall.id, - toolCallName: toolCall.name, - toolName: toolCall.name, - model: chunk.model || options.model, - timestamp, - input: parsedInput, - }) - } - } - - const computedFinishReason = - choice.finish_reason === 'tool_calls' || - toolCallsInProgress.size > 0 - ? 'tool_calls' - : choice.finish_reason === 'length' - ? 
'length' - : 'stop' - - if (hasEmittedTextMessageStart) { - yield asChunk({ - type: 'TEXT_MESSAGE_END', - messageId: aguiState.messageId, - model: chunk.model || options.model, - timestamp, - }) - } - - const groqUsage = chunk.x_groq?.usage - - yield asChunk({ - type: 'RUN_FINISHED', - runId: aguiState.runId, - threadId: aguiState.threadId, - model: chunk.model || options.model, - timestamp, - usage: groqUsage - ? { - promptTokens: groqUsage.prompt_tokens || 0, - completionTokens: groqUsage.completion_tokens || 0, - totalTokens: groqUsage.total_tokens || 0, - } - : undefined, - finishReason: computedFinishReason, - }) - } - } - } catch (error: unknown) { - const err = error as Error & { code?: string } - logger.errors('groq stream ended with error', { - error, - source: 'groq.processGroqStreamChunks', - }) - - yield asChunk({ - type: 'RUN_ERROR', - runId: aguiState.runId, - model: options.model, - timestamp, - message: err.message || 'Unknown error occurred', - code: err.code, - error: { - message: err.message || 'Unknown error occurred', - code: err.code, - }, - }) - } - } - - /** - * Maps common TextOptions to Groq-specific Chat Completions request parameters. - */ - private mapTextOptionsToGroq( - options: TextOptions, - ): ChatCompletionCreateParamsStreaming { - const modelOptions = options.modelOptions as - | Omit< - InternalTextProviderOptions, - 'max_tokens' | 'tools' | 'temperature' | 'input' | 'top_p' - > - | undefined - - if (modelOptions) { - validateTextProviderOptions({ - ...modelOptions, - model: options.model, - }) - } - - const tools = options.tools - ? convertToolsToProviderFormat(options.tools) - : undefined - - const messages: Array = [] - - if (options.systemPrompts && options.systemPrompts.length > 0) { - messages.push({ - role: 'system', - content: options.systemPrompts.join('\n'), - }) - } - - for (const message of options.messages) { - messages.push(this.convertMessageToGroq(message)) - } - - return { - model: options.model, - messages, - temperature: options.temperature, - max_tokens: options.maxTokens, - top_p: options.topP, - tools, - stream: true, - } - } - - /** - * Converts a TanStack AI ModelMessage to a Groq ChatCompletionMessageParam. - * Handles tool, assistant, and user messages including multimodal content. - */ - private convertMessageToGroq( - message: ModelMessage, - ): ChatCompletionMessageParam { - if (message.role === 'tool') { - return { - role: 'tool', - tool_call_id: message.toolCallId || '', - content: - typeof message.content === 'string' - ? message.content - : JSON.stringify(message.content), - } - } - - if (message.role === 'assistant') { - const toolCalls = message.toolCalls?.map((tc) => ({ - id: tc.id, - type: 'function' as const, - function: { - name: tc.function.name, - arguments: - typeof tc.function.arguments === 'string' - ? tc.function.arguments - : JSON.stringify(tc.function.arguments), - }, - })) - - return { - role: 'assistant', - content: this.extractTextContent(message.content), - ...(toolCalls && toolCalls.length > 0 ? 
{ tool_calls: toolCalls } : {}), - } - } - - const contentParts = this.normalizeContent(message.content) - - if (contentParts.length === 1 && contentParts[0]?.type === 'text') { - return { - role: 'user', - content: contentParts[0].content, - } - } - - const parts: Array = [] - for (const part of contentParts) { - if (part.type === 'text') { - parts.push({ type: 'text', text: part.content }) - } else if (part.type === 'image') { - const imageMetadata = part.metadata as GroqImageMetadata | undefined - const imageValue = part.source.value - const imageUrl = - part.source.type === 'data' && !imageValue.startsWith('data:') - ? `data:${part.source.mimeType};base64,${imageValue}` - : imageValue - parts.push({ - type: 'image_url', - image_url: { - url: imageUrl, - detail: imageMetadata?.detail || 'auto', - }, - }) - } - } - - return { - role: 'user', - content: parts.length > 0 ? parts : '', - } - } - - /** - * Normalizes message content to an array of ContentPart. - * Handles backward compatibility with string content. - */ - private normalizeContent( - content: string | null | Array, - ): Array { - if (content === null) { - return [] - } - if (typeof content === 'string') { - return [{ type: 'text', content: content }] - } - return content + ) { + yield* super.processStreamChunks( + promoteGroqUsage(stream), + options, + aguiState, + ) } +} - /** - * Extracts text content from a content value that may be string, null, or ContentPart array. - */ - private extractTextContent( - content: string | null | Array, - ): string { - if (content === null) { - return '' - } - if (typeof content === 'string') { - return content +/** + * Promotes Groq's non-standard `x_groq.usage` to the standard `chunk.usage` + * slot the base reads. Pass-through for chunks that already carry usage at + * the documented location. + */ +async function* promoteGroqUsage( + stream: AsyncIterable, +): AsyncIterable { + for await (const chunk of stream) { + const groqChunk = chunk as typeof chunk & { + x_groq?: { usage?: ChatCompletionChunk['usage'] } + } + if (!chunk.usage && groqChunk.x_groq?.usage) { + yield { ...chunk, usage: groqChunk.x_groq.usage } + } else { + yield chunk } - return content - .filter((p) => p.type === 'text') - .map((p) => p.content) - .join('') } } /** * Creates a Groq text adapter with explicit API key. - * Type resolution happens here at the call site. - * - * @param model - The model name (e.g., 'llama-3.3-70b-versatile', 'openai/gpt-oss-120b') - * @param apiKey - Your Groq API key - * @param config - Optional additional configuration - * @returns Configured Groq text adapter instance with resolved types * * @example * ```typescript * const adapter = createGroqText('llama-3.3-70b-versatile', "gsk_..."); - * // adapter has type-safe providerOptions for llama-3.3-70b-versatile * ``` */ export function createGroqText< @@ -618,27 +125,11 @@ export function createGroqText< } /** - * Creates a Groq text adapter with automatic API key detection from environment variables. - * Type resolution happens here at the call site. - * - * Looks for `GROQ_API_KEY` in: - * - `process.env` (Node.js) - * - `window.env` (Browser with injected env) - * - * @param model - The model name (e.g., 'llama-3.3-70b-versatile', 'openai/gpt-oss-120b') - * @param config - Optional configuration (excluding apiKey which is auto-detected) - * @returns Configured Groq text adapter instance with resolved types - * @throws Error if GROQ_API_KEY is not found in environment + * Creates a Groq text adapter with API key from `GROQ_API_KEY`. 
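+ * Looks for `GROQ_API_KEY` in `process.env` (Node.js) or `window.env`
+ * (browser with injected env) and throws if the key is not found.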
* * @example * ```typescript - * // Automatically uses GROQ_API_KEY from environment * const adapter = groqText('llama-3.3-70b-versatile'); - * - * const stream = chat({ - * adapter, - * messages: [{ role: "user", content: "Hello!" }] - * }); * ``` */ export function groqText( diff --git a/packages/typescript/ai-groq/src/utils/client.ts b/packages/typescript/ai-groq/src/utils/client.ts index 4e4f64580..082e347e0 100644 --- a/packages/typescript/ai-groq/src/utils/client.ts +++ b/packages/typescript/ai-groq/src/utils/client.ts @@ -1,29 +1,32 @@ -import { generateId as _generateId, getApiKeyFromEnv } from '@tanstack/ai-utils' -import Groq_SDK from 'groq-sdk' -import type { ClientOptions } from 'groq-sdk' +import { getApiKeyFromEnv } from '@tanstack/ai-utils' +import type { OpenAICompatibleClientConfig } from '@tanstack/openai-base' -export interface GroqClientConfig extends ClientOptions { - apiKey: string -} - -/** - * Creates a Groq SDK client instance - */ -export function createGroqClient(config: GroqClientConfig): Groq_SDK { - return new Groq_SDK(config) -} +export interface GroqClientConfig extends OpenAICompatibleClientConfig {} /** * Gets Groq API key from environment variables * @throws Error if GROQ_API_KEY is not found */ export function getGroqApiKeyFromEnv(): string { - return getApiKeyFromEnv('GROQ_API_KEY') + try { + return getApiKeyFromEnv('GROQ_API_KEY') + } catch { + throw new Error( + 'GROQ_API_KEY is required. Please set it in your environment variables or use the factory function with an explicit API key.', + ) + } } /** - * Generates a unique ID with a prefix + * Returns a Groq client config with Groq's OpenAI-compatible base URL + * applied when not already set. The Groq endpoint accepts the OpenAI SDK + * verbatim, so the base adapter can drive it without a separate SDK. */ -export function generateId(prefix: string): string { - return _generateId(prefix) +export function withGroqDefaults( + config: GroqClientConfig, +): OpenAICompatibleClientConfig { + return { + ...config, + baseURL: config.baseURL || 'https://api.groq.com/openai/v1', + } } diff --git a/packages/typescript/ai-groq/src/utils/index.ts b/packages/typescript/ai-groq/src/utils/index.ts index 17899f56a..ad3497219 100644 --- a/packages/typescript/ai-groq/src/utils/index.ts +++ b/packages/typescript/ai-groq/src/utils/index.ts @@ -1,9 +1,9 @@ export { - createGroqClient, getGroqApiKeyFromEnv, - generateId, + withGroqDefaults, type GroqClientConfig, } from './client' +export { generateId } from '@tanstack/ai-utils' export { makeGroqStructuredOutputCompatible, transformNullsToUndefined, diff --git a/packages/typescript/ai-groq/tests/groq-adapter.test.ts b/packages/typescript/ai-groq/tests/groq-adapter.test.ts index a053aeea8..2f34fe368 100644 --- a/packages/typescript/ai-groq/tests/groq-adapter.test.ts +++ b/packages/typescript/ai-groq/tests/groq-adapter.test.ts @@ -8,22 +8,26 @@ import { type Mock, } from 'vitest' import { resolveDebugOption } from '@tanstack/ai/adapter-internals' -import { createGroqText, groqText } from '../src/adapters/text' +import { + createGroqText as _realCreateGroqText, + groqText as _realGroqText, +} from '../src/adapters/text' import type { StreamChunk, Tool } from '@tanstack/ai' // Test helper: a silent logger for test chatStream calls. 
const testLogger = resolveDebugOption(false) -// Declare mockCreate at module level -let mockCreate: Mock<(...args: Array) => unknown> - -// Mock the Groq SDK -vi.mock('groq-sdk', () => { +// Stub the OpenAI SDK so adapter construction doesn't open a real network +// handle. The per-test mock client is injected post-construction via +// `setupMockSdkClient` (mirrors the ai-grok pattern). We avoid relying on +// vi.mock to intercept transitive openai imports — the built openai-base +// dist resolves `openai` independently and is unaffected by vi.mock here. +vi.mock('openai', () => { return { default: class { chat = { completions: { - create: (...args: Array) => mockCreate(...args), + create: vi.fn(), }, } }, @@ -47,18 +51,41 @@ function createAsyncIterable(chunks: Array): AsyncIterable { } } -// Helper to setup the mock SDK client for streaming responses +// Sets up a mock client on the most recently created adapter. Tests use the +// existing call order: `setupMockSdkClient(chunks)` first, then `const adapter +// = createGroqText(...)`. The wrapped factories below apply the pending +// mock to the returned adapter so it intercepts subsequent chatStream/ +// structuredOutput calls. +let pendingMockCreate: + | Mock<(...args: Array) => unknown> + | undefined + function setupMockSdkClient( streamChunks: Array>, nonStreamResponse?: Record, -) { - mockCreate = vi.fn().mockImplementation((params) => { +): Mock<(...args: Array) => unknown> { + pendingMockCreate = vi.fn().mockImplementation((params) => { if (params.stream) { return Promise.resolve(createAsyncIterable(streamChunks)) } return Promise.resolve(nonStreamResponse) }) + return pendingMockCreate +} + +function applyPendingMock(adapter: T): T { + if (pendingMockCreate) { + ;(adapter as any).client = { + chat: { completions: { create: pendingMockCreate } }, + } + pendingMockCreate = undefined + } + return adapter } +const createGroqText: typeof _realCreateGroqText = (model, apiKey, config) => + applyPendingMock(_realCreateGroqText(model, apiKey, config)) +const groqText: typeof _realGroqText = (model, config) => + applyPendingMock(_realGroqText(model, config)) const weatherTool: Tool = { name: 'lookup_weather', @@ -422,7 +449,7 @@ describe('Groq AG-UI event emission', () => { }, } - mockCreate = vi.fn().mockResolvedValue(errorIterable) + pendingMockCreate = vi.fn().mockResolvedValue(errorIterable) const adapter = createGroqText('llama-3.3-70b-versatile', 'test-api-key') const chunks: Array = [] diff --git a/packages/typescript/ai-openrouter/package.json b/packages/typescript/ai-openrouter/package.json index a8c82d4ec..6ead29809 100644 --- a/packages/typescript/ai-openrouter/package.json +++ b/packages/typescript/ai-openrouter/package.json @@ -44,7 +44,8 @@ ], "dependencies": { "@openrouter/sdk": "0.12.14", - "@tanstack/ai-utils": "workspace:*" + "@tanstack/ai-utils": "workspace:*", + "@tanstack/openai-base": "workspace:*" }, "devDependencies": { "@tanstack/ai": "workspace:*", diff --git a/packages/typescript/ai-openrouter/src/adapters/text.ts b/packages/typescript/ai-openrouter/src/adapters/text.ts index 29427171c..a43d60620 100644 --- a/packages/typescript/ai-openrouter/src/adapters/text.ts +++ b/packages/typescript/ai-openrouter/src/adapters/text.ts @@ -1,46 +1,33 @@ import { OpenRouter } from '@openrouter/sdk' -import { RequestAbortedError } from '@openrouter/sdk/models/errors' -import { convertSchemaToJsonSchema } from '@tanstack/ai' -import { BaseTextAdapter } from '@tanstack/ai/adapters' +import { 
OpenAICompatibleChatCompletionsTextAdapter } from '@tanstack/openai-base' import { convertToolsToProviderFormat } from '../tools' -import { - getOpenRouterApiKeyFromEnv, - generateId as utilGenerateId, -} from '../utils' +import { getOpenRouterApiKeyFromEnv } from '../utils' import type { SDKOptions } from '@openrouter/sdk' +import type { + ChatCompletion, + ChatCompletionChunk, + ChatCompletionCreateParamsNonStreaming, + ChatCompletionCreateParamsStreaming, +} from '@tanstack/openai-base' +import type { + ChatContentItems, + ChatMessages, + ChatRequest, + ChatStreamChoice, + ChatStreamChunk, +} from '@openrouter/sdk/models' import type { OPENROUTER_CHAT_MODELS, OpenRouterChatModelToolCapabilitiesByName, OpenRouterModelInputModalitiesByName, OpenRouterModelOptionsByName, } from '../model-meta' -import type { - StructuredOutputOptions, - StructuredOutputResult, -} from '@tanstack/ai/adapters' -import type { - ContentPart, - ModelMessage, - StreamChunk, - TextOptions, -} from '@tanstack/ai' +import type { ContentPart, ModelMessage, TextOptions } from '@tanstack/ai' import type { ExternalTextProviderOptions } from '../text/text-provider-options' import type { OpenRouterImageMetadata, OpenRouterMessageMetadataByModality, } from '../message-types' -import type { - ChatContentItems, - ChatMessages, - ChatRequest, - ChatStreamChoice, - ChatUsage, -} from '@openrouter/sdk/models' - -/** Cast an event object to StreamChunk. Adapters construct events with string - * literal types which are structurally compatible with the EventType enum. */ -const asChunk = (chunk: Record) => - chunk as unknown as StreamChunk export interface OpenRouterConfig extends SDKOptions {} export type OpenRouterTextModels = (typeof OPENROUTER_CHAT_MODELS)[number] @@ -62,38 +49,37 @@ type ResolveToolCapabilities = ? NonNullable : readonly [] -// Internal buffer for accumulating streamed tool calls -interface ToolCallBuffer { - id: string - name: string - arguments: string - started: boolean // Track if TOOL_CALL_START has been emitted -} - -// AG-UI lifecycle state tracking -interface AGUIState { - runId: string - threadId: string - messageId: string - stepId: string | null - reasoningMessageId: string | null - hasClosedReasoning: boolean - hasEmittedRunStarted: boolean - hasEmittedTextMessageStart: boolean - hasEmittedTextMessageEnd: boolean - hasEmittedRunFinished: boolean - hasEmittedStepStarted: boolean - deferredUsage: - | { promptTokens: number; completionTokens: number; totalTokens: number } - | undefined - computedFinishReason: string | undefined -} - +/** + * OpenRouter Text (Chat) Adapter. + * + * Extends the OpenAI Chat Completions base so it shares the stream + * accumulator, partial-JSON tool-call buffer, RUN_ERROR taxonomy, and + * lifecycle gates with the rest of the OpenAI-compatible providers. + * + * The wire format is identical to OpenAI's Chat Completions, but the + * `@openrouter/sdk` SDK exposes a different call shape — `client.chat.send + * ({ chatRequest })` with camelCase fields. We override the two SDK-call + * hooks (`callChatCompletion` / `callChatCompletionStream`) to bridge that, + * plus a small chunk-shape adapter on the way back, and `extractReasoning` + * to surface OpenRouter's reasoning deltas through the base's REASONING_* + * lifecycle. + * + * Behaviour preserved from the pre-migration implementation: + * - Provider routing surface (`provider`, `models`, `plugins`, `variant`, + * `transforms`) passes through `modelOptions`. 
+ * - App attribution headers (`httpReferer`, `appTitle`) and base URL + * overrides flow through the SDK `SDKOptions` constructor. + * - `RequestAbortedError` from the SDK propagates up — the base's + * `chatStream` wraps unknown errors into a single RUN_ERROR event via + * `toRunErrorPayload`, so the abort lifecycle is unchanged. + * - Model variant suffixing (e.g. `:thinking`, `:free`) via + * `modelOptions.variant`. + */ export class OpenRouterTextAdapter< TModel extends OpenRouterTextModels, TToolCapabilities extends ReadonlyArray = ResolveToolCapabilities, -> extends BaseTextAdapter< +> extends OpenAICompatibleChatCompletionsTextAdapter< TModel, ResolveProviderOptions, ResolveInputModalities, @@ -103,690 +89,335 @@ export class OpenRouterTextAdapter< readonly kind = 'text' as const readonly name = 'openrouter' as const - private client: OpenRouter + /** OpenRouter SDK client. The base's `this.client` (an OpenAI client) is + * unused because we override the SDK-call hooks below. */ + protected orClient: OpenRouter constructor(config: OpenRouterConfig, model: TModel) { - super({}, model) - this.client = new OpenRouter(config) + // The base needs an OpenAICompatibleClientConfig to construct an OpenAI + // client we never use. The OpenRouter SDK supports a Promise-returning + // apiKey getter; the OpenAI SDK's constructor here is a no-op for our + // purposes, so any string suffices. + const apiKey = + typeof config.apiKey === 'string' ? config.apiKey : 'unused' + super( + { apiKey, baseURL: 'https://openrouter.ai/api/v1' }, + model, + 'openrouter', + ) + this.orClient = new OpenRouter(config) } - async *chatStream( - options: TextOptions>, - ): AsyncIterable { - const timestamp = Date.now() - const toolCallBuffers = new Map() - let accumulatedReasoning = '' - let accumulatedContent = '' - let responseId: string | null = null - let currentModel = options.model - const { logger } = options - // AG-UI lifecycle tracking - const aguiState: AGUIState = { - runId: options.runId ?? this.generateId(), - threadId: options.threadId ?? this.generateId(), - messageId: this.generateId(), - stepId: null, - reasoningMessageId: null, - hasClosedReasoning: false, - hasEmittedRunStarted: false, - hasEmittedTextMessageStart: false, - hasEmittedTextMessageEnd: false, - hasEmittedRunFinished: false, - hasEmittedStepStarted: false, - deferredUsage: undefined, - computedFinishReason: undefined, - } - - try { - const requestParams = this.mapTextOptionsToSDK(options) - logger.request( - `activity=chat provider=openrouter model=${this.model} messages=${options.messages.length} tools=${options.tools?.length ?? 
0} stream=true`, - { provider: 'openrouter', model: this.model }, - ) - const stream = await this.client.chat.send( - { chatRequest: { ...requestParams, stream: true } }, - { signal: options.request?.signal }, - ) - - for await (const chunk of stream) { - logger.provider(`provider=openrouter`, { chunk }) - if (chunk.id) responseId = chunk.id - if (chunk.model) currentModel = chunk.model - - // Emit RUN_STARTED on first chunk - if (!aguiState.hasEmittedRunStarted) { - aguiState.hasEmittedRunStarted = true - yield asChunk({ - type: 'RUN_STARTED', - runId: aguiState.runId, - threadId: aguiState.threadId, - model: currentModel || options.model, - timestamp, - }) - } - - if (chunk.error) { - // Emit AG-UI RUN_ERROR - yield asChunk({ - type: 'RUN_ERROR', - runId: aguiState.runId, - model: currentModel || options.model, - timestamp, - message: chunk.error.message || 'Unknown error', - code: String(chunk.error.code), - error: { - message: chunk.error.message || 'Unknown error', - code: String(chunk.error.code), - }, - }) - continue - } - - for (const choice of chunk.choices) { - yield* this.processChoice( - choice, - toolCallBuffers, - { - id: responseId || this.generateId(), - model: currentModel, - timestamp, - }, - { reasoning: accumulatedReasoning, content: accumulatedContent }, - (r, c) => { - accumulatedReasoning = r - accumulatedContent = c - }, - chunk.usage, - aguiState, - ) - } - } - - // Emit RUN_FINISHED after the stream ends so we capture usage from - // any chunk (some SDKs send usage on a separate trailing chunk). - if (aguiState.hasEmittedRunFinished && aguiState.computedFinishReason) { - yield asChunk({ - type: 'RUN_FINISHED', - runId: aguiState.runId, - threadId: aguiState.threadId, - model: currentModel || options.model, - timestamp, - usage: aguiState.deferredUsage, - finishReason: aguiState.computedFinishReason, - }) - } - } catch (error) { - logger.errors('openrouter.chatStream fatal', { - error, - source: 'openrouter.chatStream', - }) - // Emit RUN_STARTED if not yet emitted (error on first call) - if (!aguiState.hasEmittedRunStarted) { - aguiState.hasEmittedRunStarted = true - yield asChunk({ - type: 'RUN_STARTED', - runId: aguiState.runId, - threadId: aguiState.threadId, - model: options.model, - timestamp, - }) - } - - if (error instanceof RequestAbortedError) { - // Emit AG-UI RUN_ERROR - yield asChunk({ - type: 'RUN_ERROR', - runId: aguiState.runId, - model: options.model, - timestamp, - message: 'Request aborted', - code: 'aborted', - error: { - message: 'Request aborted', - code: 'aborted', - }, - }) - return - } - - // Emit AG-UI RUN_ERROR - yield asChunk({ - type: 'RUN_ERROR', - runId: aguiState.runId, - model: options.model, - timestamp, - message: (error as Error).message || 'Unknown error', - error: { - message: (error as Error).message || 'Unknown error', - }, - }) - } + // ──────────────────────────────────────────────────────────────────────── + // SDK call hooks — adapt OpenAI snake_case params to OpenRouter camelCase + // and adapt the returned shape back to the OpenAI structural contract the + // base's processStreamChunks reads. 
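+  // A minimal sketch of the mapping, with illustrative values: the base
+  // writes
+  //   { max_tokens: 1024, top_p: 0.9, stream_options: { ... } }
+  // and the converter sends
+  //   { maxCompletionTokens: 1024, topP: 0.9, streamOptions: { ... } }.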
+ // ──────────────────────────────────────────────────────────────────────── + + protected override async callChatCompletionStream( + params: ChatCompletionCreateParamsStreaming, + requestOptions: { signal?: AbortSignal | null; headers?: HeadersInit }, + ): Promise> { + const chatRequest = toOpenRouterRequest(params, true) + const stream = (await this.orClient.chat.send( + { chatRequest: { ...chatRequest, stream: true } }, + { signal: requestOptions.signal ?? undefined }, + )) as AsyncIterable + return adaptOpenRouterStreamChunks(stream) } - async structuredOutput( - options: StructuredOutputOptions>, - ): Promise> { - const { chatOptions, outputSchema } = options - const { logger } = chatOptions - - const requestParams = this.mapTextOptionsToSDK(chatOptions) + protected override async callChatCompletion( + params: ChatCompletionCreateParamsNonStreaming, + requestOptions: { signal?: AbortSignal | null; headers?: HeadersInit }, + ): Promise { + const chatRequest = toOpenRouterRequest(params, false) + const response = await this.orClient.chat.send( + { chatRequest: { ...chatRequest, stream: false } }, + { signal: requestOptions.signal ?? undefined }, + ) + // The base only reads `response.choices[0]?.message.content`. The SDK's + // non-streaming response carries that under the same path. + return response as unknown as ChatCompletion + } - // OpenRouter uses OpenAI-style strict JSON schema. Upstream providers - // (OpenAI especially) reject schemas that aren't strict-compatible — all - // properties required, additionalProperties: false, optional fields - // nullable. Apply that transformation before sending. - const strictSchema = convertSchemaToJsonSchema(outputSchema, { - forStructuredOutput: true, - }) + // ──────────────────────────────────────────────────────────────────────── + // Reasoning hook — surface OpenRouter's `delta.reasoningDetails` through + // the base's REASONING_* lifecycle. + // ──────────────────────────────────────────────────────────────────────── - try { - logger.request( - `activity=chat provider=openrouter model=${this.model} messages=${chatOptions.messages.length} tools=${chatOptions.tools?.length ?? 0} stream=false`, - { provider: 'openrouter', model: this.model }, - ) - const result = await this.client.chat.send( - { - chatRequest: { - ...requestParams, - stream: false, - responseFormat: { - type: 'json_schema', - jsonSchema: { - name: 'structured_output', - schema: strictSchema, - strict: true, - }, - }, - }, - }, - { signal: chatOptions.request?.signal }, - ) - const content = result.choices[0]?.message.content - const rawText = typeof content === 'string' ? content : '' - if (!rawText) { - throw new Error('Structured output response contained no content') - } - const parsed = JSON.parse(rawText) - return { data: parsed, rawText } - } catch (error: unknown) { - logger.errors('openrouter.structuredOutput fatal', { - error, - source: 'openrouter.structuredOutput', - }) - if (error instanceof RequestAbortedError) { - throw new Error('Structured output generation aborted') - } - if (error instanceof SyntaxError) { - throw new Error( - `Failed to parse structured output as JSON: ${error.message}`, - ) - } - const err = error as Error - throw new Error( - `Structured output generation failed: ${err.message || 'Unknown error occurred'}`, - ) - } + /** OpenRouter historically returns nulls in structured-output results as + * literal nulls rather than absent fields; preserve that behaviour. 
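+   * For example, a result of `{ "nickname": null }` reaches the caller with
+   * `nickname` still `null`, where the base default would rewrite it to
+   * `undefined` via `transformNullsToUndefined` (`nickname` is just an
+   * illustrative field).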
*/ + protected override transformStructuredOutput(parsed: unknown): unknown { + return parsed } - protected override generateId(): string { - return utilGenerateId(this.name) + protected override extractReasoning( + chunk: ChatCompletionChunk, + ): { text: string } | undefined { + // The chunk-adapter stashes the raw reasoning deltas on a non-standard + // field so we don't need to round-trip them through camelCase ↔ + // snake_case on the OpenAI Chat Completions chunk schema. + const reasoning = (chunk as unknown as { _reasoningText?: string }) + ._reasoningText + return reasoning ? { text: reasoning } : undefined } - private *processChoice( - choice: ChatStreamChoice, - toolCallBuffers: Map, - meta: { id: string; model: string; timestamp: number }, - accumulated: { reasoning: string; content: string }, - updateAccumulated: (reasoning: string, content: string) => void, - usage: ChatUsage | undefined, - aguiState: AGUIState, - ): Iterable { - const delta = choice.delta - const finishReason = choice.finishReason - - if (delta.reasoningDetails) { - for (const detail of delta.reasoningDetails) { - if (detail.type === 'reasoning.text') { - const text = detail.text || '' - - // Emit STEP_STARTED and REASONING events on first reasoning content - if (!aguiState.hasEmittedStepStarted) { - aguiState.hasEmittedStepStarted = true - aguiState.stepId = this.generateId() - aguiState.reasoningMessageId = this.generateId() - - // Spec REASONING events - yield asChunk({ - type: 'REASONING_START', - messageId: aguiState.reasoningMessageId, - model: meta.model, - timestamp: meta.timestamp, - }) - yield asChunk({ - type: 'REASONING_MESSAGE_START', - messageId: aguiState.reasoningMessageId, - role: 'reasoning' as const, - model: meta.model, - timestamp: meta.timestamp, - }) - - // Legacy STEP events (kept during transition) - yield asChunk({ - type: 'STEP_STARTED', - stepName: aguiState.stepId, - stepId: aguiState.stepId, - model: meta.model, - timestamp: meta.timestamp, - stepType: 'thinking', - }) - } - - accumulated.reasoning += text - updateAccumulated(accumulated.reasoning, accumulated.content) - - // Spec REASONING content event - yield asChunk({ - type: 'REASONING_MESSAGE_CONTENT', - messageId: aguiState.reasoningMessageId!, - delta: text, - model: meta.model, - timestamp: meta.timestamp, - }) - continue - } - if (detail.type === 'reasoning.summary') { - const text = detail.summary || '' - - // Emit STEP_STARTED and REASONING events on first reasoning content - if (!aguiState.hasEmittedStepStarted) { - aguiState.hasEmittedStepStarted = true - aguiState.stepId = this.generateId() - aguiState.reasoningMessageId = this.generateId() - - // Spec REASONING events - yield asChunk({ - type: 'REASONING_START', - messageId: aguiState.reasoningMessageId, - model: meta.model, - timestamp: meta.timestamp, - }) - yield asChunk({ - type: 'REASONING_MESSAGE_START', - messageId: aguiState.reasoningMessageId, - role: 'reasoning' as const, - model: meta.model, - timestamp: meta.timestamp, - }) - - // Legacy STEP events (kept during transition) - yield asChunk({ - type: 'STEP_STARTED', - stepName: aguiState.stepId, - stepId: aguiState.stepId, - model: meta.model, - timestamp: meta.timestamp, - stepType: 'thinking', - }) - } + // ──────────────────────────────────────────────────────────────────────── + // Message conversion — OpenRouter uses camelCase (`toolCallId`, + // `toolCalls`, `imageUrl`, `inputAudio`, `videoUrl`). 
We override + // `convertMessage` and `convertContentPart` so the base's + // `mapOptionsToRequest` flows through to the SDK without a second pass. + // ──────────────────────────────────────────────────────────────────────── - accumulated.reasoning += text - updateAccumulated(accumulated.reasoning, accumulated.content) - - // Spec REASONING content event - yield asChunk({ - type: 'REASONING_MESSAGE_CONTENT', - messageId: aguiState.reasoningMessageId!, - delta: text, - model: meta.model, - timestamp: meta.timestamp, - }) - continue - } - } + protected override convertMessage(message: ModelMessage): any { + if (message.role === 'tool') { + return { + role: 'tool', + content: + typeof message.content === 'string' + ? message.content + : JSON.stringify(message.content), + toolCallId: message.toolCallId || '', + } satisfies ChatMessages } - if (delta.content) { - // Close reasoning before text starts - if (aguiState.reasoningMessageId && !aguiState.hasClosedReasoning) { - aguiState.hasClosedReasoning = true - yield asChunk({ - type: 'REASONING_MESSAGE_END', - messageId: aguiState.reasoningMessageId, - model: meta.model, - timestamp: meta.timestamp, - }) - yield asChunk({ - type: 'REASONING_END', - messageId: aguiState.reasoningMessageId, - model: meta.model, - timestamp: meta.timestamp, - }) - - // Legacy: single STEP_FINISHED to close the STEP_STARTED - if (aguiState.stepId) { - yield asChunk({ - type: 'STEP_FINISHED', - stepName: aguiState.stepId, - stepId: aguiState.stepId, - model: meta.model, - timestamp: meta.timestamp, - content: accumulated.reasoning, - }) - } - } - - // Emit TEXT_MESSAGE_START on first text content - if (!aguiState.hasEmittedTextMessageStart) { - aguiState.hasEmittedTextMessageStart = true - yield asChunk({ - type: 'TEXT_MESSAGE_START', - messageId: aguiState.messageId, - model: meta.model, - timestamp: meta.timestamp, - role: 'assistant', - }) - } - - accumulated.content += delta.content - updateAccumulated(accumulated.reasoning, accumulated.content) - - // Emit AG-UI TEXT_MESSAGE_CONTENT - yield asChunk({ - type: 'TEXT_MESSAGE_CONTENT', - messageId: aguiState.messageId, - model: meta.model, - timestamp: meta.timestamp, - delta: delta.content, - content: accumulated.content, - }) + if (message.role === 'assistant') { + return { + role: 'assistant', + content: + typeof message.content === 'string' + ? message.content + : message.content + ? JSON.stringify(message.content) + : undefined, + toolCalls: message.toolCalls, + } satisfies ChatMessages } - if (delta.toolCalls) { - for (const tc of delta.toolCalls) { - const existing = toolCallBuffers.get(tc.index) - if (!existing) { - if (!tc.id) { - continue - } - toolCallBuffers.set(tc.index, { - id: tc.id, - name: tc.function?.name ?? '', - arguments: tc.function?.arguments ?? '', - started: false, - }) - } else { - if (tc.function?.name) existing.name = tc.function.name - if (tc.function?.arguments) - existing.arguments += tc.function.arguments - } - - // Get the current buffer (existing or newly created) - const buffer = toolCallBuffers.get(tc.index)! 
- - // Emit TOOL_CALL_START when we have id and name - if (buffer.id && buffer.name && !buffer.started) { - buffer.started = true - yield asChunk({ - type: 'TOOL_CALL_START', - toolCallId: buffer.id, - toolCallName: buffer.name, - toolName: buffer.name, - model: meta.model, - timestamp: meta.timestamp, - index: tc.index, - }) - } - - // Emit TOOL_CALL_ARGS for argument deltas - if (tc.function?.arguments && buffer.started) { - yield asChunk({ - type: 'TOOL_CALL_ARGS', - toolCallId: buffer.id, - model: meta.model, - timestamp: meta.timestamp, - delta: tc.function.arguments, - }) - } - } + // user + const contentParts = this.normalizeContent(message.content) + if (contentParts.length === 1 && contentParts[0]?.type === 'text') { + return { + role: 'user', + content: contentParts[0].content, + } satisfies ChatMessages } - if (delta.refusal) { - // Emit AG-UI RUN_ERROR for refusal - yield asChunk({ - type: 'RUN_ERROR', - runId: aguiState.runId, - model: meta.model, - timestamp: meta.timestamp, - message: delta.refusal, - code: 'refusal', - error: { message: delta.refusal, code: 'refusal' }, - }) + const parts: Array = [] + for (const part of contentParts) { + const converted = this.convertContentPartToOpenRouter(part) + if (converted) parts.push(converted) } + return { + role: 'user', + content: parts.length ? parts : [{ type: 'text', text: '' }], + } satisfies ChatMessages + } - if (finishReason) { - // Capture usage from whichever chunk provides it (may arrive on a - // later duplicate finishReason chunk from the SDK). - if (usage) { - aguiState.deferredUsage = { - promptTokens: usage.promptTokens || 0, - completionTokens: usage.completionTokens || 0, - totalTokens: usage.totalTokens || 0, + /** OpenRouter content-part converter (camelCase imageUrl/inputAudio/videoUrl). */ + private convertContentPartToOpenRouter( + part: ContentPart, + ): ChatContentItems | null { + switch (part.type) { + case 'text': + return { type: 'text', text: part.content } + case 'image': { + const meta = part.metadata as OpenRouterImageMetadata | undefined + const value = part.source.value + const url = + part.source.type === 'data' && !value.startsWith('data:') + ? `data:${part.source.mimeType};base64,${value}` + : value + return { + type: 'image_url', + imageUrl: { url, detail: meta?.detail || 'auto' }, } } - - // Guard: only emit finish events once. OpenAI-compatible APIs often - // send two chunks with finishReason (one for the finish, one carrying - // usage data). Without this guard TEXT_MESSAGE_END and RUN_FINISHED - // would be emitted twice. - if (!aguiState.hasEmittedRunFinished) { - aguiState.hasEmittedRunFinished = true - - // Emit all completed tool calls when finish reason indicates tool usage - if (finishReason === 'tool_calls' || toolCallBuffers.size > 0) { - for (const [, tc] of toolCallBuffers.entries()) { - // Parse arguments for TOOL_CALL_END - let parsedInput: unknown = {} - try { - parsedInput = tc.arguments ? JSON.parse(tc.arguments) : {} - } catch { - parsedInput = {} - } - - // Emit AG-UI TOOL_CALL_END - yield asChunk({ - type: 'TOOL_CALL_END', - toolCallId: tc.id, - toolCallName: tc.name, - toolName: tc.name, - model: meta.model, - timestamp: meta.timestamp, - input: parsedInput, - }) - } - - toolCallBuffers.clear() - } - - aguiState.computedFinishReason = - finishReason === 'tool_calls' - ? 'tool_calls' - : finishReason === 'length' - ? 
'length' - : 'stop' - - // Close reasoning events if still open - if (aguiState.reasoningMessageId && !aguiState.hasClosedReasoning) { - aguiState.hasClosedReasoning = true - yield asChunk({ - type: 'REASONING_MESSAGE_END', - messageId: aguiState.reasoningMessageId, - model: meta.model, - timestamp: meta.timestamp, - }) - yield asChunk({ - type: 'REASONING_END', - messageId: aguiState.reasoningMessageId, - model: meta.model, - timestamp: meta.timestamp, - }) - - // Legacy: single STEP_FINISHED to close the STEP_STARTED - if (aguiState.stepId) { - yield asChunk({ - type: 'STEP_FINISHED', - stepName: aguiState.stepId, - stepId: aguiState.stepId, - model: meta.model, - timestamp: meta.timestamp, - content: accumulated.reasoning, - }) - } - } - - // Emit TEXT_MESSAGE_END if we had text content - if (aguiState.hasEmittedTextMessageStart) { - aguiState.hasEmittedTextMessageEnd = true - yield asChunk({ - type: 'TEXT_MESSAGE_END', - messageId: aguiState.messageId, - model: meta.model, - timestamp: meta.timestamp, - }) + case 'audio': + return { + type: 'input_audio', + inputAudio: { data: part.source.value, format: 'mp3' }, } - } + case 'video': + return { type: 'video_url', videoUrl: { url: part.source.value } } + case 'document': + // SDK doesn't have a document_url type — surface as text so the + // model at least sees the URL rather than dropping the part. + return { type: 'text', text: `[Document: ${part.source.value}]` } + default: + return null } } - private mapTextOptionsToSDK( - options: TextOptions>, - ): ChatRequest { - const modelOptions = options.modelOptions - - const messages = this.convertMessages(options.messages) - + /** Override request mapping to apply OpenRouter's `:variant` model suffix + * and route tools through OpenRouter's converter (function tools + + * branded web_search tool). The base writes snake_case fields here; the + * SDK-call hooks convert them just before sending. */ + protected override mapOptionsToRequest( + options: TextOptions, + ): ChatCompletionCreateParamsStreaming { + const modelOptions = options.modelOptions as + | (Record & { variant?: string }) + | undefined + const variantSuffix = modelOptions?.variant ? `:${modelOptions.variant}` : '' + + const messages: Array = [] if (options.systemPrompts?.length) { - messages.unshift({ + messages.push({ role: 'system', content: options.systemPrompts.join('\n'), }) } + for (const m of options.messages) { + messages.push(this.convertMessage(m)) + } - // Spread modelOptions first, then conditionally override with explicit - // top-level options so undefined values don't clobber modelOptions. Fixes - // #310, where the reverse order silently dropped user-set values. - const request: ChatRequest = { - ...modelOptions, - model: - options.model + - (modelOptions?.variant ? `:${modelOptions.variant}` : ''), + const tools = options.tools + ? convertToolsToProviderFormat(options.tools) + : undefined + + // Keep modelOptions first so explicit top-level options (set below) win + // when defined but `undefined` doesn't clobber values the caller set in + // modelOptions. Fixes the same merge-order regression openai/grok handle. + return { + ...(modelOptions as Record), + model: options.model + variantSuffix, messages, ...(options.temperature !== undefined && { temperature: options.temperature, }), ...(options.maxTokens !== undefined && { - maxCompletionTokens: options.maxTokens, + max_tokens: options.maxTokens, }), - ...(options.topP !== undefined && { topP: options.topP }), - tools: options.tools - ? 
convertToolsToProviderFormat(options.tools) - : undefined, - } + ...(options.topP !== undefined && { top_p: options.topP }), + ...(tools && tools.length > 0 && { tools }), + stream: true, + } as ChatCompletionCreateParamsStreaming + } +} - return request +// ────────────────────────────────────────────────────────────────────────── +// Helpers: convert OpenAI Chat Completions params ↔ OpenRouter ChatRequest +// ────────────────────────────────────────────────────────────────────────── + +/** + * Convert the base's snake_case params shape to the OpenRouter SDK's + * camelCase ChatRequest. Only the fields the base actually writes need + * mapping — modelOptions already flows through in OpenRouter (camelCase) + * shape because the public option types derive from `ChatRequest`. + */ +function toOpenRouterRequest( + params: + | ChatCompletionCreateParamsStreaming + | ChatCompletionCreateParamsNonStreaming, + isStreaming: boolean, +): ChatRequest { + const p = params as Record + const out: Record = { ...p } + + // The base injects these snake_case fields. Rewrite to camelCase. + if ('max_tokens' in p) { + out.maxCompletionTokens = p.max_tokens + delete out.max_tokens + } + if ('top_p' in p) { + out.topP = p.top_p + delete out.top_p + } + if ('stream_options' in p) { + out.streamOptions = p.stream_options + delete out.stream_options + } + if ('response_format' in p && p.response_format) { + const rf = p.response_format + out.responseFormat = + rf.type === 'json_schema' && rf.json_schema + ? { + type: 'json_schema', + jsonSchema: rf.json_schema, + } + : rf + delete out.response_format } - private convertMessages(messages: Array): Array { - return messages.map((msg) => { - if (msg.role === 'tool') { - return { - role: 'tool' as const, - content: - typeof msg.content === 'string' - ? msg.content - : JSON.stringify(msg.content), - toolCallId: msg.toolCallId || '', - } - } + // Streaming flag is set per-call by the SDK call hook, not here. + delete out.stream + if (!isStreaming) delete out.streamOptions - if (msg.role === 'user') { - const content = this.convertContentParts(msg.content) - return { - role: 'user' as const, - content: - content.length === 1 && content[0]?.type === 'text' - ? (content[0] as { type: 'text'; text: string }).text - : content, + return out as ChatRequest +} + +/** + * Adapt OpenRouter's stream chunks (camelCase, with `reasoningDetails`) into + * the OpenAI Chat Completions chunk shape the base's `processStreamChunks` + * reads. Reasoning text is stashed on `_reasoningText` for the + * `extractReasoning` override to consume. + */ +async function* adaptOpenRouterStreamChunks( + stream: AsyncIterable, +): AsyncIterable { + for await (const chunk of stream) { + // Flatten any reasoning deltas in the chunk into a single string. + let reasoningText = '' + const adaptedChoices = chunk.choices.map((c: ChatStreamChoice) => { + const delta = c.delta as Record + if (Array.isArray(delta.reasoningDetails)) { + for (const d of delta.reasoningDetails) { + if (d?.type === 'reasoning.text' && typeof d.text === 'string') { + reasoningText += d.text + } else if ( + d?.type === 'reasoning.summary' && + typeof d.summary === 'string' + ) { + reasoningText += d.summary + } } } - - // assistant role return { - role: 'assistant' as const, - content: - typeof msg.content === 'string' - ? msg.content - : msg.content - ? JSON.stringify(msg.content) - : undefined, - toolCalls: msg.toolCalls, + index: (c as { index?: number }).index ?? 
0, + delta: { + content: delta.content, + tool_calls: delta.toolCalls?.map((tc: any) => ({ + index: tc.index, + id: tc.id, + type: tc.type ?? 'function', + function: tc.function, + })), + refusal: delta.refusal, + role: delta.role, + }, + finish_reason: c.finishReason, } }) - } - private convertContentParts( - content: string | null | Array, - ): Array { - if (!content) return [{ type: 'text', text: '' }] - if (typeof content === 'string') return [{ type: 'text', text: content }] + const usage = (chunk as any).usage + const adapted: any = { + id: chunk.id || '', + object: 'chat.completion.chunk', + created: 0, + model: chunk.model || '', + choices: adaptedChoices, + ...(usage && { + usage: { + prompt_tokens: usage.promptTokens || 0, + completion_tokens: usage.completionTokens || 0, + total_tokens: usage.totalTokens || 0, + }, + }), + // Stash reasoning text for the extractReasoning hook. The base only + // reads documented Chat Completions fields, so an additional field is + // safe to pass alongside. + _reasoningText: reasoningText || undefined, + } - const parts: Array = [] - for (const part of content) { - switch (part.type) { - case 'text': - parts.push({ type: 'text', text: part.content }) - break - case 'image': { - const meta = part.metadata as OpenRouterImageMetadata | undefined - // For base64 data, construct a data URI using the mimeType from source - const imageValue = part.source.value - const imageUrl = - part.source.type === 'data' && !imageValue.startsWith('data:') - ? `data:${part.source.mimeType};base64,${imageValue}` - : imageValue - parts.push({ - type: 'image_url', - imageUrl: { - url: imageUrl, - detail: meta?.detail || 'auto', - }, - }) - break - } - case 'audio': - parts.push({ - type: 'input_audio', - inputAudio: { - data: part.source.value, - format: 'mp3', - }, - }) - break - case 'video': - parts.push({ - type: 'video_url', - videoUrl: { url: part.source.value }, - }) - break - case 'document': - // SDK doesn't have document_url type, pass as custom - parts.push({ - type: 'text', - text: `[Document: ${part.source.value}]`, - }) - break - } + // Surface upstream errors so the base can route them to RUN_ERROR. + if ((chunk as any).error) { + throw Object.assign(new Error((chunk as any).error.message || 'OpenRouter stream error'), { + code: (chunk as any).error.code, + }) } - return parts.length ? parts : [{ type: 'text', text: '' }] + + yield adapted as ChatCompletionChunk } } diff --git a/packages/typescript/ai-openrouter/tests/openrouter-adapter.test.ts b/packages/typescript/ai-openrouter/tests/openrouter-adapter.test.ts index 206d16525..9ca4978f2 100644 --- a/packages/typescript/ai-openrouter/tests/openrouter-adapter.test.ts +++ b/packages/typescript/ai-openrouter/tests/openrouter-adapter.test.ts @@ -1151,6 +1151,9 @@ describe('OpenRouter structured output', () => { const adapter = createAdapter() + // The shared base re-throws the underlying error rather than wrapping it + // with a "Structured output generation failed:" prefix — the prefix only + // existed in the pre-migration OpenRouter adapter. 
await expect( adapter.structuredOutput({ chatOptions: { @@ -1160,7 +1163,7 @@ describe('OpenRouter structured output', () => { }, outputSchema: { type: 'object' }, }), - ).rejects.toThrow('Structured output generation failed: Server error') + ).rejects.toThrow('Server error') }) it('handles empty content gracefully', async () => { @@ -1177,6 +1180,8 @@ describe('OpenRouter structured output', () => { setupMockSdkClient([], nonStreamResponse) const adapter = createAdapter() + // The shared base surfaces the JSON parse failure rather than a separate + // "no content" error — empty content fails JSON.parse() in the base. await expect( adapter.structuredOutput({ chatOptions: { @@ -1186,7 +1191,7 @@ describe('OpenRouter structured output', () => { }, outputSchema: { type: 'object' }, }), - ).rejects.toThrow('Structured output response contained no content') + ).rejects.toThrow('Failed to parse structured output as JSON') }) }) diff --git a/packages/typescript/openai-base/src/adapters/chat-completions-text.ts b/packages/typescript/openai-base/src/adapters/chat-completions-text.ts index ac014f619..3da9bfd28 100644 --- a/packages/typescript/openai-base/src/adapters/chat-completions-text.ts +++ b/packages/typescript/openai-base/src/adapters/chat-completions-text.ts @@ -86,7 +86,7 @@ export class OpenAICompatibleChatCompletionsTextAdapter< `activity=chat provider=${this.name} model=${this.model} messages=${options.messages.length} tools=${options.tools?.length ?? 0} stream=true`, { provider: this.name, model: this.model }, ) - const stream = await this.client.chat.completions.create( + const stream = await this.callChatCompletionStream( { ...requestParams, stream: true, @@ -165,7 +165,7 @@ export class OpenAICompatibleChatCompletionsTextAdapter< `activity=structuredOutput provider=${this.name} model=${this.model} messages=${chatOptions.messages.length}`, { provider: this.name, model: this.model }, ) - const response = await this.client.chat.completions.create( + const response = await this.callChatCompletion( { ...cleanParams, stream: false, @@ -195,8 +195,10 @@ export class OpenAICompatibleChatCompletionsTextAdapter< } // Transform null values to undefined to match original Zod schema expectations - // Provider returns null for optional fields we made nullable in the schema - const transformed = transformNullsToUndefined(parsed) + // Provider returns null for optional fields we made nullable in the schema. + // Subclasses can override `transformStructuredOutput` to skip this — e.g. + // OpenRouter historically passed nulls through unchanged. + const transformed = this.transformStructuredOutput(parsed) return { data: transformed, @@ -224,6 +226,67 @@ export class OpenAICompatibleChatCompletionsTextAdapter< return makeStructuredOutputCompatible(schema, originalRequired) } + /** + * Performs the non-streaming Chat Completions network call. The default + * uses the OpenAI SDK (`client.chat.completions.create`), which covers any + * provider whose endpoint accepts the OpenAI SDK verbatim (e.g. xAI/Grok, + * Groq with a `baseURL` override, DeepSeek, Together, Fireworks). + * + * Override in subclasses whose SDK has a different call shape — for + * example `@openrouter/sdk` exposes `client.chat.send({ chatRequest })` + * with camelCase fields. The override is responsible for converting the + * params shape on the way in and returning an object structurally + * compatible with `ChatCompletion` (the base only reads documented fields + * like `response.choices[0].message.content`). 
+ */ + protected async callChatCompletion( + params: OpenAI_SDK.Chat.Completions.ChatCompletionCreateParamsNonStreaming, + requestOptions: ReturnType, + ): Promise { + return this.client.chat.completions.create(params, requestOptions) + } + + /** + * Performs the streaming Chat Completions network call. Same pattern as + * {@link callChatCompletion} — default uses the OpenAI SDK; override for + * providers whose SDK exposes a different streaming entry point. Returns + * an `AsyncIterable` because the base's + * {@link processStreamChunks} only needs structural iteration over chunks. + */ + protected async callChatCompletionStream( + params: OpenAI_SDK.Chat.Completions.ChatCompletionCreateParamsStreaming, + requestOptions: ReturnType, + ): Promise< + AsyncIterable + > { + return this.client.chat.completions.create(params, requestOptions) + } + + /** + * Extract reasoning content from a stream chunk. Default returns + * `undefined` because OpenAI Chat Completions doesn't carry reasoning in + * the chunk format. Providers that DO carry reasoning on this wire (e.g. + * OpenRouter's `delta.reasoningDetails`) override this to yield reasoning + * text — the base then folds it into a single REASONING_* lifecycle + * without each subclass duplicating `processStreamChunks`. + */ + protected extractReasoning( + _chunk: OpenAI_SDK.Chat.Completions.ChatCompletionChunk, + ): { text: string } | undefined { + return undefined + } + + /** + * Final shaping pass applied to parsed structured-output JSON before it is + * returned to the caller. Default converts `null` values to `undefined` so + * the result aligns with the original Zod schema's optional-field + * semantics. Subclasses with different conventions (OpenRouter historically + * preserves nulls) can override. + */ + protected transformStructuredOutput(parsed: unknown): unknown { + return transformNullsToUndefined(parsed) + } + /** * Processes streamed chunks from the Chat Completions API and yields AG-UI events. * Override this in subclasses to handle provider-specific stream behavior. @@ -265,6 +328,17 @@ export class OpenAICompatibleChatCompletionsTextAdapter< started: boolean // Track if TOOL_CALL_START has been emitted } >() + + // Reasoning lifecycle (driven by extractReasoning() hook — see method + // docs). The base wire format (OpenAI Chat Completions) has no reasoning, + // so these stay unused for openai/grok/groq. OpenRouter etc. opt in. + let reasoningMessageId: string | undefined + let hasClosedReasoning = false + // Legacy STEP_STARTED/STEP_FINISHED pair emitted alongside REASONING_* + // for back-compat with consumers (UI, devtools) that haven't migrated + // to the spec REASONING_* events yet. + let stepId: string | undefined + let accumulatedReasoning = '' // Track whether ANY tool call lifecycle was actually completed across the // entire stream. Lets us downgrade a `tool_calls` finish_reason to `stop` // when the upstream signalled tool calls but never produced a complete @@ -306,6 +380,49 @@ export class OpenAICompatibleChatCompletionsTextAdapter< }) } + // Reasoning content (extractReasoning() hook). Run before reading + // choice/delta so reasoning-only chunks (no `choices`) still drive + // the REASONING_* lifecycle on providers that send reasoning out of + // band. The base default returns undefined. 
+ const reasoning = this.extractReasoning(chunk) + if (reasoning && reasoning.text) { + if (!reasoningMessageId) { + reasoningMessageId = generateId(this.name) + stepId = generateId(this.name) + yield asChunk({ + type: 'REASONING_START', + messageId: reasoningMessageId, + model: chunk.model || options.model, + timestamp, + }) + yield asChunk({ + type: 'REASONING_MESSAGE_START', + messageId: reasoningMessageId, + role: 'reasoning' as const, + model: chunk.model || options.model, + timestamp, + }) + // Legacy STEP_STARTED (single emission, paired with the + // STEP_FINISHED below when reasoning closes). + yield asChunk({ + type: 'STEP_STARTED', + stepName: stepId, + stepId, + model: chunk.model || options.model, + timestamp, + stepType: 'thinking', + }) + } + accumulatedReasoning += reasoning.text + yield asChunk({ + type: 'REASONING_MESSAGE_CONTENT', + messageId: reasoningMessageId, + delta: reasoning.text, + model: chunk.model || options.model, + timestamp, + }) + } + const choice = chunk.choices[0] if (!choice) continue @@ -316,6 +433,34 @@ export class OpenAICompatibleChatCompletionsTextAdapter< // Handle content delta if (deltaContent) { + // Close reasoning before text starts so consumers see a clean + // REASONING_END before any TEXT_MESSAGE_START. + if (reasoningMessageId && !hasClosedReasoning) { + hasClosedReasoning = true + yield asChunk({ + type: 'REASONING_MESSAGE_END', + messageId: reasoningMessageId, + model: chunk.model || options.model, + timestamp, + }) + yield asChunk({ + type: 'REASONING_END', + messageId: reasoningMessageId, + model: chunk.model || options.model, + timestamp, + }) + if (stepId) { + yield asChunk({ + type: 'STEP_FINISHED', + stepName: stepId, + stepId, + model: chunk.model || options.model, + timestamp, + content: accumulatedReasoning, + }) + } + } + // Emit TEXT_MESSAGE_START on first text content if (!hasEmittedTextMessageStart) { hasEmittedTextMessageStart = true @@ -526,6 +671,34 @@ export class OpenAICompatibleChatCompletionsTextAdapter< }) } + // Close any reasoning lifecycle that text never closed (no text + // content arrived, or the stream cut off before text started). + if (reasoningMessageId && !hasClosedReasoning) { + hasClosedReasoning = true + yield asChunk({ + type: 'REASONING_MESSAGE_END', + messageId: reasoningMessageId, + model: lastModel || options.model, + timestamp, + }) + yield asChunk({ + type: 'REASONING_END', + messageId: reasoningMessageId, + model: lastModel || options.model, + timestamp, + }) + if (stepId) { + yield asChunk({ + type: 'STEP_FINISHED', + stepName: stepId, + stepId, + model: lastModel || options.model, + timestamp, + content: accumulatedReasoning, + }) + } + } + // Map upstream finish_reason to AG-UI's narrower vocabulary while // preserving the upstream value when it falls outside the AG-UI set. 
// Collapsing length / content_filter to 'stop' would hide why the diff --git a/packages/typescript/openai-base/src/index.ts b/packages/typescript/openai-base/src/index.ts index ab15140ea..df6491e18 100644 --- a/packages/typescript/openai-base/src/index.ts +++ b/packages/typescript/openai-base/src/index.ts @@ -3,6 +3,15 @@ export { createOpenAICompatibleClient } from './utils/client' export type { OpenAICompatibleClientConfig } from './types/config' export * from './tools/index' export { OpenAICompatibleChatCompletionsTextAdapter } from './adapters/chat-completions-text' +// Re-export the OpenAI SDK types subclasses need when overriding the +// `callChatCompletion*` / `processStreamChunks` hooks, so they don't need +// to declare `openai` as a direct dependency. +export type { + ChatCompletion, + ChatCompletionChunk, + ChatCompletionCreateParamsNonStreaming, + ChatCompletionCreateParamsStreaming, +} from 'openai/resources/chat/completions' export { convertFunctionToolToChatCompletionsFormat, convertToolsToChatCompletionsFormat, diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index cc34515d4..48c977659 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -1235,9 +1235,6 @@ importers: '@tanstack/openai-base': specifier: workspace:* version: link:../openai-base - groq-sdk: - specifier: ^0.37.0 - version: 0.37.0 zod: specifier: ^4.0.0 version: 4.3.6 @@ -1346,6 +1343,9 @@ importers: '@tanstack/ai-utils': specifier: workspace:* version: link:../ai-utils + '@tanstack/openai-base': + specifier: workspace:* + version: link:../openai-base devDependencies: '@tanstack/ai': specifier: workspace:* @@ -6511,15 +6511,9 @@ packages: '@types/ms@2.1.0': resolution: {integrity: sha512-GsCCIZDE/p3i96vtEqx+7dBUGXrc7zeSK3wwPHIaRThS+9OhWIXRqzs4d6k1SVU8g91DrNRWxWUGhp5KXQb2VA==} - '@types/node-fetch@2.6.13': - resolution: {integrity: sha512-QGpRVpzSaUs30JBSGPjOg4Uveu384erbHBoT1zeONvyCfwQxIkUshLAOqN/k9EjGviPRmWTTe6aH2qySWKTVSw==} - '@types/node@12.20.55': resolution: {integrity: sha512-J8xLz7q2OFulZ2cyGTLE1TbbZcjpno7FaN6zdJNrgAdrJ+DZzh/uFR6YrTb4C+nXakvud8Q4+rbhoIWlYQbUFQ==} - '@types/node@18.19.130': - resolution: {integrity: sha512-GRaXQx6jGfL8sKfaIDD6OupbIHBr9jv7Jnaml9tB7l4v068PAOXqfcujMMo5PhbIs6ggR1XODELqahT2R8v0fg==} - '@types/node@20.19.26': resolution: {integrity: sha512-0l6cjgF0XnihUpndDhk+nyD3exio3iKaYROSgvh/qSevPXax3L8p5DBRFjbvalnwatGgHEQn2R88y2fA3g4irg==} @@ -6945,10 +6939,6 @@ packages: resolution: {integrity: sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ==} engines: {node: '>= 14'} - agentkeepalive@4.6.0: - resolution: {integrity: sha512-kja8j7PjmncONqaTsB8fQ+wE2mSU2DJ9D4XKoJ5PFWIdRMa6SLSN1ff4mOr4jCbfRSsxR4keIiySJU0N9T5hIQ==} - engines: {node: '>= 8.0.0'} - ajv-draft-04@1.0.0: resolution: {integrity: sha512-mv00Te6nmYbRp5DCwclxtt7yV/joXJPGS7nM+97GdxvuttCOfgI3K4U25zboyeX0O+myI8ERluxQe5wljMmVIw==} peerDependencies: @@ -8306,9 +8296,6 @@ packages: resolution: {integrity: sha512-gIXjKqtFuWEgzFRJA9WCQeSJLZDjgJUOMCMzxtvFq/37KojM1BFGufqsCy0r4qSQmYLsZYMeyRqzIWOMup03sw==} engines: {node: '>=14'} - form-data-encoder@1.7.2: - resolution: {integrity: sha512-qfqtYan3rxrnCk1VYaA4H+Ms9xdpPqvLZa6xmMgFvhO32x7/3J/ExcTd6qpxM0vH2GdMI+poehyBZvqfMTto8A==} - form-data@4.0.5: resolution: {integrity: sha512-8RipRLol37bNs2bhoV67fiTEvdTrbMUYcFTiy3+wuuOnUog2QBHCZWXDRijWQfAkhBj2Uf5UnVaiWwA5vdd82w==} engines: {node: '>= 6'} @@ -8318,10 +8305,6 @@ packages: engines: {node: '>=18.3.0'} hasBin: true - formdata-node@4.4.1: - resolution: {integrity: 
sha512-0iirZp3uVDjVGt9p49aTaqjk84TrglENEDuqfdlZQ1roC9CWlPk6Avf8EEnZNcAqPonwkG35x4n3ww/1THYAeQ==} - engines: {node: '>= 12.20'} - formdata-polyfill@4.0.10: resolution: {integrity: sha512-buewHzMvYL29jdeQTVILecSaZKnt/RJWjoZCF5OW60Z67/GmSLBkOFM7qh1PI3zFNtJbaZL5eQu1vLfazOwj4g==} engines: {node: '>=12.20.0'} @@ -8503,9 +8486,6 @@ packages: graceful-fs@4.2.11: resolution: {integrity: sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==} - groq-sdk@0.37.0: - resolution: {integrity: sha512-lT72pcT8b/X5XrzdKf+rWVzUGW1OQSKESmL8fFN5cTbsf02gq6oFam4SVeNtzELt9cYE2Pt3pdGgSImuTbHFDg==} - gtoken@8.0.0: resolution: {integrity: sha512-+CqsMbHPiSTdtSO14O51eMNlrp9N79gmeqmXeouJOhfucAedHw9noVe/n5uJk3tbKE6a+6ZCQg3RPhVhHByAIw==} engines: {node: '>=18'} @@ -8684,9 +8664,6 @@ packages: resolution: {integrity: sha512-AXcZb6vzzrFAUE61HnN4mpLqd/cSIwNQjtNWR0euPm6y0iqx3G4gOXaIDdtdDwZmhwe82LA6+zinmW4UBWVePQ==} engines: {node: '>=16.17.0'} - humanize-ms@1.2.1: - resolution: {integrity: sha512-Fl70vYtsAFb/C06PTS9dZBo7ihau+Tu/DNCk/OyHhea07S+aeMWpFFkUaXRa8fI+ScZbEI8dfSxwY7gxZ9SAVQ==} - iconv-lite@0.6.3: resolution: {integrity: sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==} engines: {node: '>=0.10.0'} @@ -11305,9 +11282,6 @@ packages: unctx@2.5.0: resolution: {integrity: sha512-p+Rz9x0R7X+CYDkT+Xg8/GhpcShTlU8n+cf9OtOEf7zEQsNcCZO1dPKNRDqvUTaq+P32PMMkxWHwfrxkqfqAYg==} - undici-types@5.26.5: - resolution: {integrity: sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==} - undici-types@5.28.4: resolution: {integrity: sha512-3OeMF5Lyowe8VW0skf5qaIE7Or3yS9LS7fvMUI0gg4YxpIBVg0L8BxCmROw2CcYhSkpR68Epz7CGc8MPj94Uww==} @@ -11979,10 +11953,6 @@ packages: resolution: {integrity: sha512-d2JWLCivmZYTSIoge9MsgFCZrt571BikcWGYkjC1khllbTeDlGqZ2D8vD8E/lJa8WGWbb7Plm8/XJYV7IJHZZw==} engines: {node: '>= 8'} - web-streams-polyfill@4.0.0-beta.3: - resolution: {integrity: sha512-QW95TCTaHmsYfHDybGMwO5IJIM93I/6vTRk+daHTWFPhwh+C8Cg7j7XyKrwrj8Ib6vYXe0ocYNrmzY4xAAN6ug==} - engines: {node: '>= 14'} - web-vitals@5.1.0: resolution: {integrity: sha512-ArI3kx5jI0atlTtmV0fWU3fjpLmq/nD3Zr1iFFlJLaqa5wLBkUSzINwBPySCX/8jRyjlmy1Volw1kz1g9XE4Jg==} @@ -17022,17 +16992,8 @@ snapshots: '@types/ms@2.1.0': {} - '@types/node-fetch@2.6.13': - dependencies: - '@types/node': 24.10.3 - form-data: 4.0.5 - '@types/node@12.20.55': {} - '@types/node@18.19.130': - dependencies: - undici-types: 5.26.5 - '@types/node@20.19.26': dependencies: undici-types: 6.21.0 @@ -17606,10 +17567,6 @@ snapshots: agent-base@7.1.4: {} - agentkeepalive@4.6.0: - dependencies: - humanize-ms: 1.2.1 - ajv-draft-04@1.0.0(ajv@8.13.0): optionalDependencies: ajv: 8.13.0 @@ -19152,8 +19109,6 @@ snapshots: cross-spawn: 7.0.6 signal-exit: 4.1.0 - form-data-encoder@1.7.2: {} - form-data@4.0.5: dependencies: asynckit: 0.4.0 @@ -19166,11 +19121,6 @@ snapshots: dependencies: fd-package-json: 2.0.0 - formdata-node@4.4.1: - dependencies: - node-domexception: 1.0.0 - web-streams-polyfill: 4.0.0-beta.3 - formdata-polyfill@4.0.10: dependencies: fetch-blob: 3.2.0 @@ -19372,18 +19322,6 @@ snapshots: graceful-fs@4.2.11: {} - groq-sdk@0.37.0: - dependencies: - '@types/node': 18.19.130 - '@types/node-fetch': 2.6.13 - abort-controller: 3.0.0 - agentkeepalive: 4.6.0 - form-data-encoder: 1.7.2 - formdata-node: 4.4.1 - node-fetch: 2.7.0 - transitivePeerDependencies: - - encoding - gtoken@8.0.0: dependencies: gaxios: 7.1.3 @@ -19645,10 +19583,6 @@ snapshots: human-signals@5.0.0: {} - 
humanize-ms@1.2.1: - dependencies: - ms: 2.1.3 - iconv-lite@0.6.3: dependencies: safer-buffer: 2.1.2 @@ -23048,8 +22982,6 @@ snapshots: magic-string: 0.30.21 unplugin: 2.3.11 - undici-types@5.26.5: {} - undici-types@5.28.4: {} undici-types@6.21.0: {} @@ -23715,8 +23647,6 @@ snapshots: web-streams-polyfill@3.3.3: {} - web-streams-polyfill@4.0.0-beta.3: {} - web-vitals@5.1.0: {} webdriver-bidi-protocol@0.4.1: {} From 1c8e1f48d7edf45f6333eba8e0e580d060cde2d0 Mon Sep 17 00:00:00 2001 From: "autofix-ci[bot]" <114827586+autofix-ci[bot]@users.noreply.github.com> Date: Mon, 11 May 2026 12:26:06 +0000 Subject: [PATCH 02/49] ci: apply automated fixes --- .../migrate-groq-openrouter-to-openai-base.md | 2 +- .../ai-groq/tests/groq-adapter.test.ts | 4 +--- .../ai-openrouter/src/adapters/text.ts | 16 ++++++++++------ .../src/adapters/chat-completions-text.ts | 4 +--- 4 files changed, 13 insertions(+), 13 deletions(-) diff --git a/.changeset/migrate-groq-openrouter-to-openai-base.md b/.changeset/migrate-groq-openrouter-to-openai-base.md index ff5012565..0cbd390e4 100644 --- a/.changeset/migrate-groq-openrouter-to-openai-base.md +++ b/.changeset/migrate-groq-openrouter-to-openai-base.md @@ -6,7 +6,7 @@ Migrate `ai-groq` and `ai-openrouter` onto `OpenAICompatibleChatCompletionsTextAdapter` so they share the stream accumulator, partial-JSON tool-call buffer, RUN_ERROR taxonomy, and lifecycle gates with `ai-openai` / `ai-grok`. Removes ~1k LOC of duplicated stream processing. -`@tanstack/openai-base` adds three protected hooks on `OpenAICompatibleChatCompletionsTextAdapter` so providers with non-OpenAI SDK shapes can reuse the base: `callChatCompletion` and `callChatCompletionStream` (SDK call sites for non-streaming and streaming Chat Completions), and `extractReasoning` (surface reasoning content from chunk shapes that carry it, e.g. OpenRouter's `delta.reasoningDetails`, into the base's REASONING_* + legacy STEP_STARTED/STEP_FINISHED lifecycle). Also adds `transformStructuredOutput` for subclasses (like OpenRouter) that preserve nulls in structured output instead of converting them to undefined. +`@tanstack/openai-base` adds three protected hooks on `OpenAICompatibleChatCompletionsTextAdapter` so providers with non-OpenAI SDK shapes can reuse the base: `callChatCompletion` and `callChatCompletionStream` (SDK call sites for non-streaming and streaming Chat Completions), and `extractReasoning` (surface reasoning content from chunk shapes that carry it, e.g. OpenRouter's `delta.reasoningDetails`, into the base's REASONING\_\* + legacy STEP_STARTED/STEP_FINISHED lifecycle). Also adds `transformStructuredOutput` for subclasses (like OpenRouter) that preserve nulls in structured output instead of converting them to undefined. `@tanstack/ai-groq` drops the `groq-sdk` dependency in favour of the OpenAI SDK pointed at `https://api.groq.com/openai/v1` (the same pattern as `ai-grok` against xAI). The Groq-specific quirk where streaming usage arrives under `chunk.x_groq.usage` is preserved via a small `processStreamChunks` wrapper that promotes it to the standard `chunk.usage` slot. diff --git a/packages/typescript/ai-groq/tests/groq-adapter.test.ts b/packages/typescript/ai-groq/tests/groq-adapter.test.ts index 2f34fe368..def98d8da 100644 --- a/packages/typescript/ai-groq/tests/groq-adapter.test.ts +++ b/packages/typescript/ai-groq/tests/groq-adapter.test.ts @@ -56,9 +56,7 @@ function createAsyncIterable(chunks: Array): AsyncIterable { // = createGroqText(...)`. 
The wrapped factories below apply the pending // mock to the returned adapter so it intercepts subsequent chatStream/ // structuredOutput calls. -let pendingMockCreate: - | Mock<(...args: Array) => unknown> - | undefined +let pendingMockCreate: Mock<(...args: Array) => unknown> | undefined function setupMockSdkClient( streamChunks: Array>, diff --git a/packages/typescript/ai-openrouter/src/adapters/text.ts b/packages/typescript/ai-openrouter/src/adapters/text.ts index a43d60620..bb34aeb00 100644 --- a/packages/typescript/ai-openrouter/src/adapters/text.ts +++ b/packages/typescript/ai-openrouter/src/adapters/text.ts @@ -98,8 +98,7 @@ export class OpenRouterTextAdapter< // client we never use. The OpenRouter SDK supports a Promise-returning // apiKey getter; the OpenAI SDK's constructor here is a no-op for our // purposes, so any string suffices. - const apiKey = - typeof config.apiKey === 'string' ? config.apiKey : 'unused' + const apiKey = typeof config.apiKey === 'string' ? config.apiKey : 'unused' super( { apiKey, baseURL: 'https://openrouter.ai/api/v1' }, model, @@ -259,7 +258,9 @@ export class OpenRouterTextAdapter< const modelOptions = options.modelOptions as | (Record & { variant?: string }) | undefined - const variantSuffix = modelOptions?.variant ? `:${modelOptions.variant}` : '' + const variantSuffix = modelOptions?.variant + ? `:${modelOptions.variant}` + : '' const messages: Array = [] if (options.systemPrompts?.length) { @@ -412,9 +413,12 @@ async function* adaptOpenRouterStreamChunks( // Surface upstream errors so the base can route them to RUN_ERROR. if ((chunk as any).error) { - throw Object.assign(new Error((chunk as any).error.message || 'OpenRouter stream error'), { - code: (chunk as any).error.code, - }) + throw Object.assign( + new Error((chunk as any).error.message || 'OpenRouter stream error'), + { + code: (chunk as any).error.code, + }, + ) } yield adapted as ChatCompletionChunk diff --git a/packages/typescript/openai-base/src/adapters/chat-completions-text.ts b/packages/typescript/openai-base/src/adapters/chat-completions-text.ts index 3da9bfd28..02a93c64b 100644 --- a/packages/typescript/openai-base/src/adapters/chat-completions-text.ts +++ b/packages/typescript/openai-base/src/adapters/chat-completions-text.ts @@ -256,9 +256,7 @@ export class OpenAICompatibleChatCompletionsTextAdapter< protected async callChatCompletionStream( params: OpenAI_SDK.Chat.Completions.ChatCompletionCreateParamsStreaming, requestOptions: ReturnType, - ): Promise< - AsyncIterable - > { + ): Promise> { return this.client.chat.completions.create(params, requestOptions) } From b320df54ce8856bc67c81ec5d34bfad8c1ec9437 Mon Sep 17 00:00:00 2001 From: Tom Beckenham <34339192+tombeckenham@users.noreply.github.com> Date: Tue, 12 May 2026 09:21:40 +1000 Subject: [PATCH 03/49] fix(openai-base, ai-openrouter, ai): silent failures in chat-completions migration Addresses regressions and pre-existing silent failures surfaced by reviewing #545: - `@tanstack/ai`: `toRunErrorPayload` normalizes `AbortError` / `APIUserAbortError` / `RequestAbortedError` to `{ code: 'aborted' }` so consumers can discriminate user-initiated cancellation without matching provider-specific message strings. - `@tanstack/openai-base`: `structuredOutput` throws a distinct "response contained no content" error instead of cascading into a misleading JSON-parse error on an empty string; the post-loop tool-args drain now logs malformed JSON via `logger.errors` so truncated streams don't silently invoke tools with `{}`. 
- `@tanstack/ai-openrouter`: `stream_options.include_usage` is camelCased to `includeUsage` (Zod was silently stripping it, leaving `RUN_FINISHED.usage` always undefined on streaming); mid-stream `chunk.error.code` is stringified so provider codes (401/429/500) survive `toRunErrorPayload`; assistant `toolCalls[].function.arguments` is stringified to match the SDK's `string` contract; `convertMessage` now mirrors the base's fail-loud guards (empty user content, unsupported content parts). Co-Authored-By: Claude Opus 4.7 (1M context) --- .../migrate-groq-openrouter-to-openai-base.md | 7 +- .../ai-openrouter/src/adapters/text.ts | 72 +++++- .../tests/openrouter-adapter.test.ts | 229 +++++++++++++++++- .../ai/src/activities/error-payload.ts | 18 ++ .../typescript/ai/tests/error-payload.test.ts | 45 ++++ .../src/adapters/chat-completions-text.ts | 31 ++- .../tests/chat-completions-text.test.ts | 109 +++++++++ 7 files changed, 495 insertions(+), 16 deletions(-) diff --git a/.changeset/migrate-groq-openrouter-to-openai-base.md b/.changeset/migrate-groq-openrouter-to-openai-base.md index 0cbd390e4..dd48aba33 100644 --- a/.changeset/migrate-groq-openrouter-to-openai-base.md +++ b/.changeset/migrate-groq-openrouter-to-openai-base.md @@ -2,14 +2,19 @@ '@tanstack/openai-base': minor '@tanstack/ai-groq': patch '@tanstack/ai-openrouter': patch +'@tanstack/ai': patch --- Migrate `ai-groq` and `ai-openrouter` onto `OpenAICompatibleChatCompletionsTextAdapter` so they share the stream accumulator, partial-JSON tool-call buffer, RUN_ERROR taxonomy, and lifecycle gates with `ai-openai` / `ai-grok`. Removes ~1k LOC of duplicated stream processing. `@tanstack/openai-base` adds three protected hooks on `OpenAICompatibleChatCompletionsTextAdapter` so providers with non-OpenAI SDK shapes can reuse the base: `callChatCompletion` and `callChatCompletionStream` (SDK call sites for non-streaming and streaming Chat Completions), and `extractReasoning` (surface reasoning content from chunk shapes that carry it, e.g. OpenRouter's `delta.reasoningDetails`, into the base's REASONING\_\* + legacy STEP_STARTED/STEP_FINISHED lifecycle). Also adds `transformStructuredOutput` for subclasses (like OpenRouter) that preserve nulls in structured output instead of converting them to undefined. +`@tanstack/openai-base` fixes two error-handling regressions in the shared base: `structuredOutput` now throws a distinct `"response contained no content"` error rather than letting empty content cascade into a misleading JSON-parse error, and the post-loop tool-args drain block now logs malformed JSON via `logger.errors` (matching the in-loop finish_reason path) so truncated streams emitting partial tool args are debuggable instead of silently invoking the tool with `{}`. + +`@tanstack/ai` normalizes abort-shaped errors (`AbortError`, `APIUserAbortError`, `RequestAbortedError`) to a stable `{ message: 'Request aborted', code: 'aborted' }` payload in `toRunErrorPayload`, so consumers can discriminate user-initiated cancellation from other failures without matching on provider-specific message strings. + `@tanstack/ai-groq` drops the `groq-sdk` dependency in favour of the OpenAI SDK pointed at `https://api.groq.com/openai/v1` (the same pattern as `ai-grok` against xAI). The Groq-specific quirk where streaming usage arrives under `chunk.x_groq.usage` is preserved via a small `processStreamChunks` wrapper that promotes it to the standard `chunk.usage` slot. 
-`@tanstack/ai-openrouter` keeps `@openrouter/sdk` (the source of truth for OpenRouter's typed provider routing, plugins, and metadata) but routes the SDK call through the base via overridden hooks. A small request shape converter (`max_tokens` → `maxCompletionTokens`, etc.) and chunk shape adapter (camelCase → snake_case for the base's reader) bridge the SDKs. No public API changes; provider routing, app attribution headers (`httpReferer`, `appTitle`), reasoning variants (`:thinking`), and `RequestAbortedError` handling are preserved. +`@tanstack/ai-openrouter` keeps `@openrouter/sdk` (the source of truth for OpenRouter's typed provider routing, plugins, and metadata) but routes the SDK call through the base via overridden hooks. A small request shape converter (`max_tokens` → `maxCompletionTokens`, etc.) and chunk shape adapter (camelCase → snake_case for the base's reader) bridge the SDKs. No public API changes; provider routing, app attribution headers (`httpReferer`, `appTitle`), reasoning variants (`:thinking`), and `RequestAbortedError` handling are preserved. Fixes: `stream_options.include_usage` is now correctly camelCased to `includeUsage` so streaming `RUN_FINISHED.usage` is populated (previously silently dropped by the SDK Zod schema); mid-stream `chunk.error.code` is stringified so provider error codes (401, 429, 500, …) survive the `toRunErrorPayload` narrow; assistant `toolCalls[].function.arguments` is stringified to match the SDK's `string` contract; and `convertMessage` now mirrors the base's fail-loud guards (throws on empty user content and unsupported content parts) instead of silently sending empty paid requests. `ai-ollama` remains on `BaseTextAdapter` — its native API uses a different wire format from Chat Completions (different chunk shape, request shape, tool-call streaming, and reasoning surface) and doesn't fit the OpenAI base without rebuilding most of the processing it would otherwise inherit. Migrating it remains a separate effort. diff --git a/packages/typescript/ai-openrouter/src/adapters/text.ts b/packages/typescript/ai-openrouter/src/adapters/text.ts index bb34aeb00..db7677cab 100644 --- a/packages/typescript/ai-openrouter/src/adapters/text.ts +++ b/packages/typescript/ai-openrouter/src/adapters/text.ts @@ -181,6 +181,21 @@ export class OpenRouterTextAdapter< } if (message.role === 'assistant') { + // Stringify object-shaped tool-call arguments to match the SDK's + // `ChatToolCall.function.arguments: string` contract. Without this an + // assistant message that carries already-parsed args (common after a + // multi-turn run) would either serialise as `[object Object]` or be + // rejected by the SDK's Zod schema with an opaque validation error. + const toolCalls = message.toolCalls?.map((tc) => ({ + ...tc, + function: { + name: tc.function.name, + arguments: + typeof tc.function.arguments === 'string' + ? tc.function.arguments + : JSON.stringify(tc.function.arguments), + }, + })) return { role: 'assistant', content: @@ -189,27 +204,51 @@ export class OpenRouterTextAdapter< : message.content ? JSON.stringify(message.content) : undefined, - toolCalls: message.toolCalls, + toolCalls, } satisfies ChatMessages } - // user + // user — mirror the base's fail-loud behaviour on empty and unsupported + // content. Silently sending an empty string would mask a real caller bug + // and produce a paid request with no input. 
const contentParts = this.normalizeContent(message.content) if (contentParts.length === 1 && contentParts[0]?.type === 'text') { + const text = contentParts[0].content + if (text.length === 0) { + throw new Error( + `User message for ${this.name} has empty text content. ` + + `Empty user messages would produce a paid request with no input; ` + + `provide non-empty content or omit the message.`, + ) + } return { role: 'user', - content: contentParts[0].content, + content: text, } satisfies ChatMessages } const parts: Array = [] for (const part of contentParts) { const converted = this.convertContentPartToOpenRouter(part) - if (converted) parts.push(converted) + if (!converted) { + throw new Error( + `Unsupported content part type for ${this.name}: ${part.type}. ` + + `Override convertContentPartToOpenRouter to handle this type, ` + + `or remove it from the message.`, + ) + } + parts.push(converted) + } + if (parts.length === 0) { + throw new Error( + `User message for ${this.name} has no content parts. ` + + `Empty user messages would produce a paid request with no input; ` + + `provide at least one text/image/audio part or omit the message.`, + ) } return { role: 'user', - content: parts.length ? parts : [{ type: 'text', text: '' }], + content: parts, } satisfies ChatMessages } @@ -326,7 +365,20 @@ function toOpenRouterRequest( delete out.top_p } if ('stream_options' in p) { - out.streamOptions = p.stream_options + const so = p.stream_options as Record | undefined + if (so && typeof so === 'object') { + // The SDK's ChatStreamOptions schema uses camelCase keys and Zod + // strips unknowns at parse time — without this rename the base's + // include_usage flag would be silently dropped and RUN_FINISHED.usage + // would always be undefined for streaming OpenRouter calls. + const { include_usage, ...rest } = so + out.streamOptions = { + ...rest, + ...(include_usage !== undefined && { includeUsage: include_usage }), + } + } else { + out.streamOptions = so + } delete out.stream_options } if ('response_format' in p && p.response_format) { @@ -412,11 +464,15 @@ async function* adaptOpenRouterStreamChunks( } // Surface upstream errors so the base can route them to RUN_ERROR. + // Stringify code: OpenRouter's chunk error.code is numeric (401, 429, + // 500, …) but `toRunErrorPayload` drops non-string codes, which would + // silently lose provider error codes from the RUN_ERROR payload. if ((chunk as any).error) { + const errObj = (chunk as any).error throw Object.assign( - new Error((chunk as any).error.message || 'OpenRouter stream error'), + new Error(errObj.message || 'OpenRouter stream error'), { - code: (chunk as any).error.code, + code: errObj.code != null ? String(errObj.code) : undefined, }, ) } diff --git a/packages/typescript/ai-openrouter/tests/openrouter-adapter.test.ts b/packages/typescript/ai-openrouter/tests/openrouter-adapter.test.ts index 9ca4978f2..d4dc528de 100644 --- a/packages/typescript/ai-openrouter/tests/openrouter-adapter.test.ts +++ b/packages/typescript/ai-openrouter/tests/openrouter-adapter.test.ts @@ -10,12 +10,19 @@ import type { StreamChunk, Tool } from '@tanstack/ai' const testLogger = resolveDebugOption(false) // Declare mockSend at module level let mockSend: any +// Captures the most recent OpenRouter SDK constructor config so tests can +// assert that app-attribution headers (httpReferer, appTitle, etc.) actually +// reach the SDK rather than being silently dropped by the adapter. 
+let lastOpenRouterConfig: any // Mock the SDK with a class defined inline // eslint-disable-next-line @typescript-eslint/require-await vi.mock('@openrouter/sdk', async () => { return { OpenRouter: class { + constructor(config?: unknown) { + lastOpenRouterConfig = config + } chat = { send: (...args: Array) => mockSend(...args), } @@ -789,6 +796,10 @@ describe('OpenRouter AG-UI event emission', () => { expect(runErrorChunk).toBeDefined() if (runErrorChunk?.type === 'RUN_ERROR') { expect(runErrorChunk.error?.message).toBe('Rate limit exceeded') + // Provider error codes arrive as numbers (429, 500, etc.) but + // toRunErrorPayload only retains string codes — the chunk adapter + // must stringify before throwing. + expect(runErrorChunk.error?.code).toBe('429') } }) @@ -1166,7 +1177,7 @@ describe('OpenRouter structured output', () => { ).rejects.toThrow('Server error') }) - it('handles empty content gracefully', async () => { + it('throws a clear "no content" error when the response is empty', async () => { const nonStreamResponse = { choices: [ { @@ -1180,8 +1191,9 @@ describe('OpenRouter structured output', () => { setupMockSdkClient([], nonStreamResponse) const adapter = createAdapter() - // The shared base surfaces the JSON parse failure rather than a separate - // "no content" error — empty content fails JSON.parse() in the base. + // Empty content must surface as a distinct error so the actual failure + // mode (the model returned no content) is visible in logs rather than + // being masked by a misleading JSON-parse error on an empty string. await expect( adapter.structuredOutput({ chatOptions: { @@ -1191,7 +1203,7 @@ describe('OpenRouter structured output', () => { }, outputSchema: { type: 'object' }, }), - ).rejects.toThrow('Failed to parse structured output as JSON') + ).rejects.toThrow('response contained no content') }) }) @@ -1669,3 +1681,212 @@ describe('OpenRouter STEP event consistency', () => { expect(stepFinished).toHaveLength(1) }) }) + +describe('OpenRouter SDK constructor wiring', () => { + beforeEach(() => { + vi.clearAllMocks() + lastOpenRouterConfig = undefined + }) + + it('forwards app-attribution headers (httpReferer, appTitle) to the SDK constructor', () => { + void createOpenRouterText('openai/gpt-4o-mini', 'test-key', { + httpReferer: 'https://app.example.com', + appTitle: 'TestApp', + } as any) + expect(lastOpenRouterConfig).toBeDefined() + expect(lastOpenRouterConfig.apiKey).toBe('test-key') + expect(lastOpenRouterConfig.httpReferer).toBe('https://app.example.com') + expect(lastOpenRouterConfig.appTitle).toBe('TestApp') + }) + + it('forwards serverURL overrides to the SDK constructor', () => { + void createOpenRouterText('openai/gpt-4o-mini', 'test-key', { + serverURL: 'https://custom.example.com/api/v1', + } as any) + expect(lastOpenRouterConfig.serverURL).toBe( + 'https://custom.example.com/api/v1', + ) + }) +}) + +describe('OpenRouter stream_options conversion', () => { + beforeEach(() => { + vi.clearAllMocks() + }) + + it('converts include_usage to includeUsage so the SDK preserves it', async () => { + const streamChunks = [ + { + id: 'x', + model: 'openai/gpt-4o-mini', + choices: [{ delta: { content: 'hi' }, finishReason: 'stop' }], + usage: { promptTokens: 1, completionTokens: 1, totalTokens: 2 }, + }, + ] + setupMockSdkClient(streamChunks) + const adapter = createAdapter() + + for await (const _ of adapter.chatStream({ + model: 'openai/gpt-4o-mini', + messages: [{ role: 'user', content: 'hi' }], + logger: testLogger, + })) { + // consume + } + + const 
[rawParams] = mockSend.mock.calls[0]! + const params = rawParams.chatRequest + // The SDK's outbound Zod schema strips unknown keys. Without the + // include_usage → includeUsage rename, the camelCase key would survive + // here but the wire-format serialisation would drop it entirely. + expect(params.streamOptions).toBeDefined() + expect(params.streamOptions.includeUsage).toBe(true) + expect(params.streamOptions).not.toHaveProperty('include_usage') + + const serialized = ChatRequest$outboundSchema.parse(params) + expect((serialized as any).stream_options).toEqual({ include_usage: true }) + }) + + it('propagates the abort signal to the SDK call', async () => { + setupMockSdkClient([ + { + id: 'x', + model: 'openai/gpt-4o-mini', + choices: [{ delta: { content: 'hi' }, finishReason: 'stop' }], + }, + ]) + const adapter = createAdapter() + const controller = new AbortController() + + for await (const _ of adapter.chatStream({ + model: 'openai/gpt-4o-mini', + messages: [{ role: 'user', content: 'hi' }], + logger: testLogger, + request: { signal: controller.signal } as any, + })) { + // consume + } + + // The second argument to the SDK call must carry the signal so + // user-initiated aborts actually reach the SDK rather than letting the + // request continue burning tokens silently. + const [, options] = mockSend.mock.calls[0]! + expect(options).toEqual({ signal: controller.signal }) + }) + + it('maps RequestAbortedError from the SDK to RUN_ERROR with code: aborted', async () => { + const abortErr = Object.assign(new Error('Request aborted by client'), { + name: 'RequestAbortedError', + }) + mockSend = vi.fn().mockRejectedValueOnce(abortErr) + const adapter = createAdapter() + const chunks: Array = [] + + for await (const chunk of adapter.chatStream({ + model: 'openai/gpt-4o-mini', + messages: [{ role: 'user', content: 'hi' }], + logger: testLogger, + })) { + chunks.push(chunk) + } + + const runErr = chunks.find((c) => c.type === 'RUN_ERROR') + expect(runErr).toBeDefined() + if (runErr?.type === 'RUN_ERROR') { + expect(runErr.error?.code).toBe('aborted') + expect(runErr.error?.message).toBe('Request aborted') + } + }) +}) + +describe('OpenRouter convertMessage fail-loud guards', () => { + beforeEach(() => { + vi.clearAllMocks() + }) + + it('throws when a user message has empty text content', async () => { + setupMockSdkClient([]) + const adapter = createAdapter() + + // mapOptionsToRequest runs before chatStream's try block, so the + // fail-loud guard surfaces as a synchronous iterator throw — verifies + // we never made a paid request with an empty user message. 
+ await expect(async () => { + for await (const _ of adapter.chatStream({ + model: 'openai/gpt-4o-mini', + messages: [{ role: 'user', content: '' }], + logger: testLogger, + })) { + // consume + } + }).rejects.toThrow(/empty text content/i) + expect(mockSend).not.toHaveBeenCalled() + }) + + it('throws on unsupported content-part types instead of dropping them', async () => { + setupMockSdkClient([]) + const adapter = createAdapter() + + await expect(async () => { + for await (const _ of adapter.chatStream({ + model: 'openai/gpt-4o-mini', + messages: [ + { + role: 'user', + content: [{ type: 'mystery-type' as any, content: 'x' } as any], + }, + ], + logger: testLogger, + })) { + // consume + } + }).rejects.toThrow(/unsupported content part/i) + expect(mockSend).not.toHaveBeenCalled() + }) + + it('stringifies object-shaped assistant toolCalls.function.arguments', async () => { + setupMockSdkClient([ + { + id: 'x', + model: 'openai/gpt-4o-mini', + choices: [{ delta: { content: 'ok' }, finishReason: 'stop' }], + }, + ]) + const adapter = createAdapter() + + for await (const _ of adapter.chatStream({ + model: 'openai/gpt-4o-mini', + messages: [ + { role: 'user', content: 'hi' }, + { + role: 'assistant', + content: null, + toolCalls: [ + { + id: 'call_1', + type: 'function', + function: { + name: 'lookup_weather', + // Object args from a prior parsed turn — SDK expects string. + arguments: { location: 'Berlin' } as any, + }, + }, + ], + }, + { role: 'tool', toolCallId: 'call_1', content: '{"temp":72}' }, + ], + logger: testLogger, + })) { + // consume + } + + const [rawParams] = mockSend.mock.calls[0]! + const assistantMsg = rawParams.chatRequest.messages.find( + (m: any) => m.role === 'assistant', + ) + expect(assistantMsg).toBeDefined() + const args = assistantMsg.toolCalls[0].function.arguments + expect(typeof args).toBe('string') + expect(JSON.parse(args)).toEqual({ location: 'Berlin' }) + }) +}) diff --git a/packages/typescript/ai/src/activities/error-payload.ts b/packages/typescript/ai/src/activities/error-payload.ts index 396c5573d..33a6b8157 100644 --- a/packages/typescript/ai/src/activities/error-payload.ts +++ b/packages/typescript/ai/src/activities/error-payload.ts @@ -5,11 +5,29 @@ * Accepts Error instances, objects with string-ish `message`/`code`, or bare * strings; always returns a shape safe to serialize. Never leaks the full * error object (which may carry request/response state from an SDK). + * + * Abort-shaped errors (DOM `AbortError`, OpenAI `APIUserAbortError`, + * OpenRouter `RequestAbortedError`) are normalized to a stable + * `{ message: 'Request aborted', code: 'aborted' }` shape so callers can + * discriminate user-initiated cancellation from other failures without + * matching on provider-specific message strings. 
*/ +const ABORT_ERROR_NAMES = new Set([ + 'AbortError', + 'APIUserAbortError', + 'RequestAbortedError', +]) + export function toRunErrorPayload( error: unknown, fallbackMessage = 'Unknown error occurred', ): { message: string; code: string | undefined } { + if (error && typeof error === 'object') { + const name = (error as { name?: unknown }).name + if (typeof name === 'string' && ABORT_ERROR_NAMES.has(name)) { + return { message: 'Request aborted', code: 'aborted' } + } + } if (error instanceof Error) { const codeField = (error as Error & { code?: unknown }).code return { diff --git a/packages/typescript/ai/tests/error-payload.test.ts b/packages/typescript/ai/tests/error-payload.test.ts index 1add82fc7..784a4901c 100644 --- a/packages/typescript/ai/tests/error-payload.test.ts +++ b/packages/typescript/ai/tests/error-payload.test.ts @@ -73,4 +73,49 @@ describe('toRunErrorPayload', () => { expect(payload).toEqual({ message: 'leaky', code: undefined }) expect(payload).not.toHaveProperty('request') }) + + describe('abort normalization', () => { + it('normalizes DOM AbortError to code: aborted', () => { + const err = new Error('The operation was aborted') + err.name = 'AbortError' + expect(toRunErrorPayload(err)).toEqual({ + message: 'Request aborted', + code: 'aborted', + }) + }) + + it('normalizes OpenAI APIUserAbortError', () => { + const err = new Error('Request was aborted.') + err.name = 'APIUserAbortError' + expect(toRunErrorPayload(err)).toEqual({ + message: 'Request aborted', + code: 'aborted', + }) + }) + + it('normalizes OpenRouter RequestAbortedError', () => { + const err = new Error('Request aborted by client: AbortError: ...') + err.name = 'RequestAbortedError' + expect(toRunErrorPayload(err)).toEqual({ + message: 'Request aborted', + code: 'aborted', + }) + }) + + it('normalizes abort-named plain objects (non-Error throws)', () => { + const obj = { name: 'AbortError', message: 'whatever' } + expect(toRunErrorPayload(obj)).toEqual({ + message: 'Request aborted', + code: 'aborted', + }) + }) + + it('does not normalize errors with similar-looking names', () => { + const err = Object.assign(new Error('hi'), { name: 'NotAbortError' }) + expect(toRunErrorPayload(err)).toEqual({ + message: 'hi', + code: undefined, + }) + }) + }) }) diff --git a/packages/typescript/openai-base/src/adapters/chat-completions-text.ts b/packages/typescript/openai-base/src/adapters/chat-completions-text.ts index 02a93c64b..b15b2d8df 100644 --- a/packages/typescript/openai-base/src/adapters/chat-completions-text.ts +++ b/packages/typescript/openai-base/src/adapters/chat-completions-text.ts @@ -181,8 +181,16 @@ export class OpenAICompatibleChatCompletionsTextAdapter< extractRequestOptions(chatOptions.request), ) - // Extract text content from the response - const rawText = response.choices[0]?.message.content || '' + // Extract text content from the response. Fail loud on empty content + // rather than letting it cascade into a JSON-parse error on '' — the + // root cause (the model returned no content for the structured request) + // is then visible in logs. + const rawText = response.choices[0]?.message.content + if (typeof rawText !== 'string' || rawText.length === 0) { + throw new Error( + `${this.name}.structuredOutput: response contained no content`, + ) + } // Parse the JSON response let parsed: unknown @@ -640,7 +648,24 @@ export class OpenAICompatibleChatCompletionsTextAdapter< try { const parsed: unknown = JSON.parse(toolCall.arguments) parsedInput = parsed && typeof parsed === 'object' ? 
parsed : {} - } catch { + } catch (parseError) { + // Mirror the finish_reason path's logger call — a truncated + // stream emitting malformed tool-call JSON would otherwise + // silently invoke the tool with `{}`, the exact failure the + // finish_reason logger was added to prevent. + options.logger.errors( + `${this.name}.processStreamChunks tool-args JSON parse failed (drain)`, + { + error: toRunErrorPayload( + parseError, + `tool ${toolCall.name} (${toolCall.id}) returned malformed JSON arguments`, + ), + source: `${this.name}.processStreamChunks`, + toolCallId: toolCall.id, + toolName: toolCall.name, + rawArguments: toolCall.arguments, + }, + ) parsedInput = {} } } diff --git a/packages/typescript/openai-base/tests/chat-completions-text.test.ts b/packages/typescript/openai-base/tests/chat-completions-text.test.ts index a4bca2114..7eac9c634 100644 --- a/packages/typescript/openai-base/tests/chat-completions-text.test.ts +++ b/packages/typescript/openai-base/tests/chat-completions-text.test.ts @@ -738,6 +738,115 @@ describe('OpenAICompatibleChatCompletionsTextAdapter', () => { }), ).rejects.toThrow('Failed to parse structured output as JSON') }) + + it('throws a clear "no content" error when content is empty', async () => { + const nonStreamResponse = { + choices: [{ message: { content: '' } }], + } + setupMockSdkClient([], nonStreamResponse) + + const adapter = new OpenAICompatibleChatCompletionsTextAdapter( + testConfig, + 'test-model', + ) + + // Empty content must surface as a distinct error rather than masquerade + // as a JSON-parse failure on an empty string. + await expect( + adapter.structuredOutput({ + chatOptions: { + logger: testLogger, + model: 'test-model', + messages: [{ role: 'user', content: 'Give me data' }], + }, + outputSchema: { type: 'object' }, + }), + ).rejects.toThrow('response contained no content') + }) + + it('throws a clear "no content" error when content is missing', async () => { + const nonStreamResponse = { + choices: [{ message: {} }], + } + setupMockSdkClient([], nonStreamResponse) + + const adapter = new OpenAICompatibleChatCompletionsTextAdapter( + testConfig, + 'test-model', + ) + + await expect( + adapter.structuredOutput({ + chatOptions: { + logger: testLogger, + model: 'test-model', + messages: [{ role: 'user', content: 'Give me data' }], + }, + outputSchema: { type: 'object' }, + }), + ).rejects.toThrow('response contained no content') + }) + }) + + describe('drain-path tool args error handling', () => { + it('logs malformed JSON tool args via the logger when the stream ends without finish_reason', async () => { + // Simulates a truncated stream: tool call starts and accumulates + // malformed JSON, but no finish_reason chunk ever arrives. The drain + // block must still surface the parse failure rather than swallowing it. + const streamChunks = [ + { + id: 'chatcmpl-drain', + model: 'test-model', + choices: [ + { + delta: { + tool_calls: [ + { + index: 0, + id: 'call_drain', + type: 'function', + function: { + name: 'lookup_weather', + arguments: '{"location":', // truncated — invalid JSON + }, + }, + ], + }, + finish_reason: null, + }, + ], + }, + ] + + setupMockSdkClient(streamChunks) + const errorsSpy = vi.spyOn(testLogger, 'errors') + const adapter = new OpenAICompatibleChatCompletionsTextAdapter( + testConfig, + 'test-model', + ) + + try { + for await (const _ of adapter.chatStream({ + logger: testLogger, + model: 'test-model', + messages: [{ role: 'user', content: 'Weather?' 
}], + tools: [weatherTool], + })) { + // consume + } + + const drainCall = errorsSpy.mock.calls.find((c) => + String(c[0]).includes('(drain)'), + ) + expect(drainCall).toBeDefined() + const ctx = drainCall![1] as Record + expect(ctx.toolCallId).toBe('call_drain') + expect(ctx.toolName).toBe('lookup_weather') + expect(ctx.rawArguments).toBe('{"location":') + } finally { + errorsSpy.mockRestore() + } + }) }) describe('subclassing', () => { From d9a74c47533226f464f2ab970ee5b9061990ebf4 Mon Sep 17 00:00:00 2001 From: Tom Beckenham <34339192+tombeckenham@users.noreply.github.com> Date: Tue, 12 May 2026 12:40:52 +1000 Subject: [PATCH 04/49] feat(ai-openrouter, openai-base): OpenRouter Responses (beta) adapter Adds OpenRouterResponsesTextAdapter on top of @tanstack/openai-base's responses-text base, mirroring the chat-completions migration in #543. - openai-base: protected `callResponse` / `callResponseStream` hooks on OpenAICompatibleResponsesTextAdapter parallel to the existing `callChatCompletion*` hooks, so providers whose SDK has a different call shape can override without forking processStreamChunks. Re-exports the OpenAI Responses SDK types subclasses need. - ai-openrouter: new OpenRouterResponsesTextAdapter routing through `client.beta.responses.send({ responsesRequest })`. Emits the SDK's camelCase TS shape directly via overrides of convertMessagesToInput / convertContentPartToInput / mapOptionsToRequest, annotated with `Pick` so future SDK field renames break the build instead of silently producing Zod-stripped wire payloads. Bridges inbound stream events camel -> snake so the base's processStreamChunks reads documented fields unchanged. - Function tools only in v1; webSearchTool() throws with a clear error pointing at the chat-completions adapter. - Folds in the silent-failure lessons from 0171b18e (stringified error codes, stringified tool-call arguments, fail-loud on empty user content). - E2E: new `openrouter-responses` provider slot in feature-support / test-matrix / providers / types / api.summarize, reusing aimock's native `/v1/responses` handler. - 10 new unit tests covering request mapping (snake -> camel for top-level fields, function-call camelCasing in input[], variant suffix), stream-event bridge (text deltas, function-call lifecycle, response.failed, top-level error code stringification), webSearchTool() rejection, and SDK constructor wiring. 
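
Illustrative sketch of the hook surface this commit adds (not code from the
diff below — generic parameters are elided, and `providerClient` is a
hypothetical stand-in for any SDK whose streaming entry point differs from
the OpenAI SDK's `client.responses.create`):

```ts
import { OpenAICompatibleResponsesTextAdapter } from '@tanstack/openai-base'
import type {
  ResponseCreateParamsStreaming,
  ResponseStreamEvent,
} from '@tanstack/openai-base'

// Hypothetical provider client — assumed for this sketch only.
declare const providerClient: {
  streamResponses: (
    req: ResponseCreateParamsStreaming,
    opts: { signal?: AbortSignal },
  ) => Promise<AsyncIterable<ResponseStreamEvent>>
}

// Generic parameters (model union, provider options, modalities, metadata,
// tool capabilities) omitted for brevity; see the adapter below for the
// fully-typed version.
class ExampleResponsesAdapter extends OpenAICompatibleResponsesTextAdapter {
  protected override async callResponseStream(
    params: ResponseCreateParamsStreaming,
    requestOptions: { signal?: AbortSignal | null; headers?: HeadersInit },
  ): Promise<AsyncIterable<ResponseStreamEvent>> {
    // Convert `params` to the provider's request shape on the way in and
    // adapt the provider's events back to the base's documented chunk
    // shape on the way out; the base's processStreamChunks is unchanged.
    return providerClient.streamResponses(params, {
      signal: requestOptions.signal ?? undefined,
    })
  }
}
```

The non-streaming `callResponse` hook follows the same pattern with a
`ResponsesResponse`-shaped return value.
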
Co-Authored-By: Claude Opus 4.7 (1M context) --- .../src/adapters/responses-text.ts | 596 ++++++++++++++++++ .../typescript/ai-openrouter/src/index.ts | 9 + .../src/text/responses-provider-options.ts | 55 ++ .../openrouter-responses-adapter.test.ts | 451 +++++++++++++ .../src/adapters/responses-text.ts | 37 +- packages/typescript/openai-base/src/index.ts | 12 + testing/e2e/src/lib/feature-support.ts | 13 + testing/e2e/src/lib/providers.ts | 26 +- testing/e2e/src/lib/types.ts | 2 + testing/e2e/src/routes/api.summarize.ts | 2 + testing/e2e/tests/test-matrix.ts | 1 + 11 files changed, 1201 insertions(+), 3 deletions(-) create mode 100644 packages/typescript/ai-openrouter/src/adapters/responses-text.ts create mode 100644 packages/typescript/ai-openrouter/src/text/responses-provider-options.ts create mode 100644 packages/typescript/ai-openrouter/tests/openrouter-responses-adapter.test.ts diff --git a/packages/typescript/ai-openrouter/src/adapters/responses-text.ts b/packages/typescript/ai-openrouter/src/adapters/responses-text.ts new file mode 100644 index 000000000..900a909fa --- /dev/null +++ b/packages/typescript/ai-openrouter/src/adapters/responses-text.ts @@ -0,0 +1,596 @@ +import { OpenRouter } from '@openrouter/sdk' +import { + OpenAICompatibleResponsesTextAdapter, + convertFunctionToolToResponsesFormat, +} from '@tanstack/openai-base' +import { isWebSearchTool } from '../tools/web-search-tool' +import { getOpenRouterApiKeyFromEnv } from '../utils' +import type { SDKOptions } from '@openrouter/sdk' +import type { + InputsUnion, + ResponsesRequest, + StreamEvents, +} from '@openrouter/sdk/models' +import type { + ResponseCreateParams, + ResponseCreateParamsNonStreaming, + ResponseCreateParamsStreaming, + ResponseInputContent, + ResponseStreamEvent, + ResponsesFunctionTool, + ResponsesResponse, +} from '@tanstack/openai-base' +import type { ContentPart, ModelMessage, TextOptions, Tool } from '@tanstack/ai' +import type { ExternalResponsesProviderOptions } from '../text/responses-provider-options' +import type { + OPENROUTER_CHAT_MODELS, + OpenRouterChatModelToolCapabilitiesByName, + OpenRouterModelInputModalitiesByName, +} from '../model-meta' +import type { OpenRouterMessageMetadataByModality } from '../message-types' + +/** Element type of `ResponsesRequest.input` when it's the array form (the + * SDK union also allows a bare string). Pinning to the array element lets + * the convertMessagesToInput override narrow to the per-item discriminated + * union so a TS rename surfaces here. */ +type InputsItem = Extract>[number] + +export interface OpenRouterResponsesConfig extends SDKOptions {} +export type OpenRouterResponsesTextModels = + (typeof OPENROUTER_CHAT_MODELS)[number] +export type OpenRouterResponsesTextProviderOptions = + ExternalResponsesProviderOptions + +type ResolveInputModalities = + TModel extends keyof OpenRouterModelInputModalitiesByName + ? OpenRouterModelInputModalitiesByName[TModel] + : readonly ['text', 'image'] + +type ResolveToolCapabilities = + TModel extends keyof OpenRouterChatModelToolCapabilitiesByName + ? NonNullable + : readonly [] + +/** + * OpenRouter Responses (beta) Adapter. + * + * Extends the OpenAI Responses base so the streaming event lifecycle, + * structured-output flow, tool-call accumulator, and RUN_ERROR taxonomy are + * shared with the rest of the OpenAI-Responses-compatible providers (OpenAI, + * Azure, …). 
+ *
+ * The wire format is OpenAI-Responses-compatible, but the `@openrouter/sdk`
+ * package exposes a different call shape — `client.beta.responses.send
+ * ({ responsesRequest })` with camelCase fields. We override the two
+ * SDK-call hooks (`callResponse` / `callResponseStream`) to bridge that,
+ * plus chunk and result shape adapters on the way back.
+ *
+ * Behaviour preserved from the chat-completions migration:
+ * - Provider routing surface (`provider`, `models`, `plugins`,
+ *   `variant`) passes through `modelOptions`.
+ * - App attribution headers (`httpReferer`, `appTitle`) and base URL
+ *   overrides flow through the SDK `SDKOptions` constructor.
+ * - Model variant suffixing (e.g. `:thinking`, `:free`) via
+ *   `modelOptions.variant`.
+ *
+ * v1 routes function tools only. Passing a `webSearchTool()` brand throws
+ * — OpenRouter's Responses API exposes richer server-tool variants
+ * (WebSearchServerToolOpenRouter / Preview20250311WebSearchServerTool /
+ * …) that will land in a follow-up.
+ */
+export class OpenRouterResponsesTextAdapter<
+  TModel extends OpenRouterResponsesTextModels,
+  TToolCapabilities extends ReadonlyArray<string> = ResolveToolCapabilities<TModel>,
+> extends OpenAICompatibleResponsesTextAdapter<
+  TModel,
+  OpenRouterResponsesTextProviderOptions,
+  ResolveInputModalities<TModel>,
+  OpenRouterMessageMetadataByModality,
+  TToolCapabilities
+> {
+  readonly kind = 'text' as const
+  readonly name = 'openrouter-responses' as const
+
+  /** OpenRouter SDK client. The base's `this.client` (an OpenAI client) is
+   * unused because we override the SDK-call hooks below. */
+  protected orClient: OpenRouter
+
+  constructor(config: OpenRouterResponsesConfig, model: TModel) {
+    // The base needs an OpenAICompatibleClientConfig to construct an OpenAI
+    // client we never use. The OpenRouter SDK supports a Promise-returning
+    // apiKey getter; the OpenAI SDK's constructor here is a no-op for our
+    // purposes, so any string suffices.
+    const apiKey = typeof config.apiKey === 'string' ? config.apiKey : 'unused'
+    super(
+      { apiKey, baseURL: 'https://openrouter.ai/api/v1' },
+      model,
+      'openrouter-responses',
+    )
+    this.orClient = new OpenRouter(config)
+  }
+
+  // ────────────────────────────────────────────────────────────────────────
+  // SDK call hooks — the params we get here were built by our overridden
+  // mapOptionsToRequest / convertMessagesToInput / convertContentPartToInput
+  // already in OpenRouter's camelCase TS shape, so only a type cast bridges
+  // the base's static snake_case signature. The inbound result/stream still
+  // needs camel → snake reshaping because the base's processStreamChunks /
+  // extractTextFromResponse read documented snake_case fields like
+  // `response.usage.input_tokens` and `chunk.item_id`.
+  // ────────────────────────────────────────────────────────────────────────
+
+  protected override async callResponseStream(
+    params: ResponseCreateParamsStreaming,
+    requestOptions: { signal?: AbortSignal | null; headers?: HeadersInit },
+  ): Promise<AsyncIterable<ResponseStreamEvent>> {
+    const responsesRequest = params as unknown as Omit<
+      ResponsesRequest,
+      'stream'
+    >
+    // The SDK's EventStream is an AsyncIterable; treat it
+    // structurally so we don't need to depend on the SDK's class export.
+    const stream = (await this.orClient.beta.responses.send(
+      { responsesRequest: { ...responsesRequest, stream: true } },
+      { signal: requestOptions.signal ?? undefined },
+    )) as unknown as AsyncIterable<StreamEvents>
+    return adaptOpenRouterResponsesStreamEvents(stream)
+  }
+
+  protected override async callResponse(
+    params: ResponseCreateParamsNonStreaming,
+    requestOptions: { signal?: AbortSignal | null; headers?: HeadersInit },
+  ): Promise<ResponsesResponse> {
+    const responsesRequest = params as unknown as Omit<
+      ResponsesRequest,
+      'stream'
+    >
+    const result = await this.orClient.beta.responses.send(
+      { responsesRequest: { ...responsesRequest, stream: false } },
+      { signal: requestOptions.signal ?? undefined },
+    )
+    return adaptOpenRouterResponsesResult(result)
+  }
+
+  // ────────────────────────────────────────────────────────────────────────
+  // Request construction — emit OpenRouter's camelCase TS shape directly so
+  // a `Pick<ResponsesRequest, …>` annotation catches any field-name drift at
+  // compile time. Returned via `unknown as Omit<ResponseCreateParams, 'stream'>`
+  // because the base's signature is the OpenAI snake_case type; the SDK call
+  // hooks above just pass the value through.
+  // ────────────────────────────────────────────────────────────────────────
+
+  protected override mapOptionsToRequest(
+    options: TextOptions,
+  ): Omit<ResponseCreateParams, 'stream'> {
+    // Fail loud on webSearchTool() — v1 only routes function tools.
+    if (options.tools) {
+      for (const tool of options.tools) {
+        if (isWebSearchTool(tool as Tool)) {
+          throw new Error(
+            `OpenRouterResponsesTextAdapter does not yet support webSearchTool(). ` +
+              `Use the chat-completions adapter (openRouterText) for web search ` +
+              `tools, or pass function tools only to this adapter.`,
+          )
+        }
+      }
+    }
+
+    // Apply the same modelOptions/variant precedence as the chat adapter.
+    const modelOptions = options.modelOptions as
+      | (Partial<ResponsesRequest> & { variant?: string })
+      | undefined
+    const variantSuffix = modelOptions?.variant
+      ? `:${modelOptions.variant}`
+      : ''
+
+    // The override below returns Array<InputsItem> — re-cast through the
+    // base's documented shape so this local has the type a Pick<…> expects.
+    const input = this.convertMessagesToInput(options.messages) as unknown as
+      | ResponsesRequest['input']
+      | undefined
+
+    // Reuse the openai-base function-tool converter. ResponsesFunctionTool
+    // already matches OpenRouter's ResponsesRequestToolFunction shape:
+    // `{ type:'function', name, parameters, description, strict }`.
+    const tools: Array<ResponsesFunctionTool> | undefined = options.tools
+      ? options.tools.map((tool) =>
+          convertFunctionToolToResponsesFormat(
+            tool,
+            this.makeStructuredOutputCompatible.bind(this),
+          ),
+        )
+      : undefined
+
+    // `Pick<ResponsesRequest, …>` is the static gate — if the SDK renames any
+    // of these keys in a future version this annotation breaks the build
+    // instead of silently producing a request the wire schema drops.
+    const built: Pick<
+      ResponsesRequest,
+      | 'model'
+      | 'input'
+      | 'instructions'
+      | 'metadata'
+      | 'temperature'
+      | 'topP'
+      | 'maxOutputTokens'
+      | 'tools'
+      | 'toolChoice'
+      | 'parallelToolCalls'
+    > = {
+      ...modelOptions,
+      model: options.model + variantSuffix,
+      ...(options.temperature !== undefined && {
+        temperature: options.temperature,
+      }),
+      ...(options.maxTokens !== undefined && {
+        maxOutputTokens: options.maxTokens,
+      }),
+      ...(options.topP !== undefined && { topP: options.topP }),
+      ...(options.metadata !== undefined && { metadata: options.metadata }),
+      ...(options.systemPrompts &&
+        options.systemPrompts.length > 0 && {
+          instructions: options.systemPrompts.join('\n'),
+        }),
+      input,
+      ...(tools &&
+        tools.length > 0 && {
+          tools: tools as unknown as ResponsesRequest['tools'],
+        }),
+    }
+
+    return built as unknown as Omit<ResponseCreateParams, 'stream'>
+  }
+
+  // ────────────────────────────────────────────────────────────────────────
+  // Message + content converters — emit OpenRouter's camelCase TS shape
+  // (`callId`, `imageUrl`, `inputAudio`, `videoUrl`, `fileData`, `fileUrl`)
+  // directly. The return-type cast through `unknown` bridges to the base's
+  // signature without giving up the OpenRouter-shape return inside.
+  // ────────────────────────────────────────────────────────────────────────
+
+  protected override convertMessagesToInput(
+    messages: Array<ModelMessage>,
+  ): ReturnType<
+    OpenAICompatibleResponsesTextAdapter['convertMessagesToInput']
+  > {
+    const result: Array<InputsItem> = []
+
+    for (const message of messages) {
+      if (message.role === 'tool') {
+        result.push({
+          type: 'function_call_output',
+          callId: message.toolCallId || '',
+          output:
+            typeof message.content === 'string'
+              ? message.content
+              : JSON.stringify(message.content),
+        } as unknown as InputsItem)
+        continue
+      }
+
+      if (message.role === 'assistant') {
+        if (message.toolCalls && message.toolCalls.length > 0) {
+          for (const toolCall of message.toolCalls) {
+            // Stringify object-shaped args to match the SDK's `arguments:
+            // string` contract — mirrors the chat adapter's fix (see
+            // commit 0171b18e).
+            const argumentsString =
+              typeof toolCall.function.arguments === 'string'
+                ? toolCall.function.arguments
+                : JSON.stringify(toolCall.function.arguments)
+            result.push({
+              type: 'function_call',
+              callId: toolCall.id,
+              id: toolCall.id,
+              name: toolCall.function.name,
+              arguments: argumentsString,
+            } as unknown as InputsItem)
+          }
+        }
+
+        if (message.content) {
+          const contentStr = this.extractTextContent(message.content)
+          if (contentStr) {
+            result.push({
+              type: 'message',
+              role: 'assistant',
+              content: contentStr,
+            } as unknown as InputsItem)
+          }
+        }
+        continue
+      }
+
+      // user — fail loud on empty / unsupported content (mirrors the base).
+      const contentParts = this.normalizeContent(message.content)
+      const inputContent: Array<ResponseInputContent> = []
+      for (const part of contentParts) {
+        inputContent.push(this.convertContentPartToInput(part))
+      }
+      if (inputContent.length === 0) {
+        throw new Error(
+          `User message for ${this.name} has no content parts. 
` + + `Empty user messages would produce a paid request with no input; ` + + `provide at least one text/image/audio part or omit the message.`, + ) + } + result.push({ + type: 'message', + role: 'user', + content: inputContent, + } as unknown as InputsItem) + } + + return result as unknown as ReturnType< + OpenAICompatibleResponsesTextAdapter['convertMessagesToInput'] + > + } + + protected override convertContentPartToInput( + part: ContentPart, + ): ResponseInputContent { + switch (part.type) { + case 'text': + return { + type: 'input_text', + text: part.content, + } as ResponseInputContent + case 'image': { + const meta = part.metadata as + | { detail?: 'auto' | 'low' | 'high' } + | undefined + const value = part.source.value + const imageUrl = + part.source.type === 'data' && !value.startsWith('data:') + ? `data:${part.source.mimeType || 'application/octet-stream'};base64,${value}` + : value + return { + type: 'input_image', + imageUrl, + detail: meta?.detail || 'auto', + } as unknown as ResponseInputContent + } + case 'audio': { + if (part.source.type === 'url') { + // OpenRouter's `input_audio` carries `{ data, format }` not a URL — + // fall back to `input_file` for URLs so we don't silently drop the + // audio reference. + return { + type: 'input_file', + fileUrl: part.source.value, + } as unknown as ResponseInputContent + } + return { + type: 'input_audio', + inputAudio: { data: part.source.value, format: 'mp3' }, + } as unknown as ResponseInputContent + } + case 'video': + return { + type: 'input_video', + videoUrl: part.source.value, + } as unknown as ResponseInputContent + case 'document': { + if (part.source.type === 'url') { + return { + type: 'input_file', + fileUrl: part.source.value, + } as unknown as ResponseInputContent + } + const mime = part.source.mimeType || 'application/octet-stream' + const data = part.source.value.startsWith('data:') + ? part.source.value + : `data:${mime};base64,${part.source.value}` + return { + type: 'input_file', + fileData: data, + } as unknown as ResponseInputContent + } + default: + throw new Error( + `Unsupported content part type for ${this.name}: ${(part as { type: string }).type}`, + ) + } + } +} + +// ────────────────────────────────────────────────────────────────────────── +// Inbound stream-event bridge: OpenRouter SDK camelCase → OpenAI snake_case +// so the base's `processStreamChunks` reads documented fields unchanged. +// (Outbound conversion is no longer needed — the adapter overrides above +// emit OpenRouter camelCase directly.) +// ────────────────────────────────────────────────────────────────────────── + +/** + * Adapt OpenRouter's streaming events (camelCase, with extended event types) + * into the OpenAI Responses event shape the base's `processStreamChunks` + * reads. Reshapes the nested `response` payload for terminal events + * (`response.completed`, `response.failed`, `response.incomplete`, + * `response.created`) into snake_case so reads like + * `chunk.response.incomplete_details?.reason` and + * `chunk.response.usage.input_tokens` work unchanged. 
+ */ +async function* adaptOpenRouterResponsesStreamEvents( + stream: AsyncIterable, +): AsyncIterable { + for await (const event of stream) { + const e = event as Record + switch (e.type) { + case 'response.created': + case 'response.in_progress': + case 'response.completed': + case 'response.failed': + case 'response.incomplete': { + yield { + type: e.type, + response: toSnakeResponseResult(e.response), + sequence_number: e.sequenceNumber, + } as unknown as ResponseStreamEvent + break + } + case 'response.output_text.delta': + case 'response.output_text.done': + case 'response.reasoning_text.delta': + case 'response.reasoning_text.done': + case 'response.reasoning_summary_text.delta': + case 'response.reasoning_summary_text.done': { + yield { + type: e.type, + item_id: e.itemId, + output_index: e.outputIndex, + content_index: e.contentIndex, + delta: e.delta, + text: e.text, + sequence_number: e.sequenceNumber, + } as unknown as ResponseStreamEvent + break + } + case 'response.content_part.added': + case 'response.content_part.done': { + yield { + type: e.type, + item_id: e.itemId, + output_index: e.outputIndex, + content_index: e.contentIndex, + part: toSnakeContentPart(e.part), + sequence_number: e.sequenceNumber, + } as unknown as ResponseStreamEvent + break + } + case 'response.output_item.added': + case 'response.output_item.done': { + yield { + type: e.type, + item: toSnakeOutputItem(e.item), + output_index: e.outputIndex, + sequence_number: e.sequenceNumber, + } as unknown as ResponseStreamEvent + break + } + case 'response.function_call_arguments.delta': + case 'response.function_call_arguments.done': { + yield { + type: e.type, + item_id: e.itemId, + output_index: e.outputIndex, + delta: e.delta, + arguments: e.arguments, + sequence_number: e.sequenceNumber, + } as unknown as ResponseStreamEvent + break + } + case 'error': { + // Stringify code so provider codes (401/429/500/…) survive + // `toRunErrorPayload`, mirroring the chat-completions fix in + // commit 0171b18e. + yield { + type: 'error', + message: e.message, + code: e.code != null ? String(e.code) : undefined, + param: e.param, + sequence_number: e.sequenceNumber, + } as unknown as ResponseStreamEvent + break + } + default: { + // Pass through unknown event types with sequenceNumber renamed so + // the base's debug logging still sees a usable `type`. Forwarding + // verbatim is safer than dropping silently — a new event type + // OpenRouter ships shouldn't be discarded by us. + const { sequenceNumber, ...rest } = e + yield { + ...rest, + ...(sequenceNumber !== undefined && { + sequence_number: sequenceNumber, + }), + } as unknown as ResponseStreamEvent + } + } + } +} + +/** Convert a non-streaming `OpenResponsesResult` so the base's + * `extractTextFromResponse` (which iterates `response.output[].content` for + * `type === 'output_text'`) reads it unchanged. */ +function adaptOpenRouterResponsesResult(result: unknown): ResponsesResponse { + return toSnakeResponseResult(result) as ResponsesResponse +} + +function toSnakeResponseResult(r: any): Record { + if (!r || typeof r !== 'object') return r + return { + ...r, + model: r.model, + incomplete_details: r.incompleteDetails ?? null, + ...(r.usage && { + usage: { + input_tokens: r.usage.inputTokens ?? 0, + output_tokens: r.usage.outputTokens ?? 0, + total_tokens: r.usage.totalTokens ?? 
+        ...(r.usage.inputTokensDetails && {
+          input_tokens_details: r.usage.inputTokensDetails,
+        }),
+        ...(r.usage.outputTokensDetails && {
+          output_tokens_details: r.usage.outputTokensDetails,
+        }),
+      },
+    }),
+    output: Array.isArray(r.output)
+      ? r.output.map((it: any) => toSnakeOutputItem(it))
+      : r.output,
+    ...(r.error && {
+      error: { message: r.error.message, code: r.error.code },
+    }),
+  }
+}
+
+function toSnakeOutputItem(item: any): any {
+  if (!item || typeof item !== 'object') return item
+  switch (item.type) {
+    case 'function_call':
+      return {
+        type: 'function_call',
+        id: item.id,
+        call_id: item.callId,
+        name: item.name,
+        arguments: item.arguments,
+        ...(item.status !== undefined && { status: item.status }),
+      }
+    case 'message':
+      return {
+        ...item,
+        // content parts already use { type:'output_text', text } — no rename
+        // needed; refusal has `refusal` either way.
+      }
+    default:
+      return item
+  }
+}
+
+function toSnakeContentPart(part: any): any {
+  if (!part || typeof part !== 'object') return part
+  // Both output_text and refusal already share the same key names across
+  // SDKs (`text`, `refusal`, `type`). Pass through.
+  return part
+}
+
+export function createOpenRouterResponsesText<
+  TModel extends OpenRouterResponsesTextModels,
+>(
+  model: TModel,
+  apiKey: string,
+  config?: Omit<OpenRouterResponsesConfig, 'apiKey'>,
+): OpenRouterResponsesTextAdapter<TModel, ResolveToolCapabilities<TModel>> {
+  return new OpenRouterResponsesTextAdapter({ apiKey, ...config }, model)
+}
+
+export function openRouterResponsesText<
+  TModel extends OpenRouterResponsesTextModels,
+>(
+  model: TModel,
+  config?: Omit<OpenRouterResponsesConfig, 'apiKey'>,
+): OpenRouterResponsesTextAdapter<TModel, ResolveToolCapabilities<TModel>> {
+  const apiKey = getOpenRouterApiKeyFromEnv()
+  return createOpenRouterResponsesText(model, apiKey, config)
+}
diff --git a/packages/typescript/ai-openrouter/src/index.ts b/packages/typescript/ai-openrouter/src/index.ts
index e17844743..0ff7e1432 100644
--- a/packages/typescript/ai-openrouter/src/index.ts
+++ b/packages/typescript/ai-openrouter/src/index.ts
@@ -11,6 +11,15 @@ export {
   type OpenRouterTextModelOptions,
 } from './adapters/text'
 
+// Responses (beta) adapter - for the OpenRouter beta Responses API
+export {
+  OpenRouterResponsesTextAdapter,
+  createOpenRouterResponsesText,
+  openRouterResponsesText,
+  type OpenRouterResponsesConfig,
+  type OpenRouterResponsesTextProviderOptions,
+} from './adapters/responses-text'
+
 // Summarize adapter - for text summarization
 export {
   OpenRouterSummarizeAdapter,
diff --git a/packages/typescript/ai-openrouter/src/text/responses-provider-options.ts b/packages/typescript/ai-openrouter/src/text/responses-provider-options.ts
new file mode 100644
index 000000000..685233f78
--- /dev/null
+++ b/packages/typescript/ai-openrouter/src/text/responses-provider-options.ts
@@ -0,0 +1,55 @@
+import type { ResponsesRequest } from '@openrouter/sdk/models'
+import type { OPENROUTER_CHAT_MODELS } from '../model-meta'
+
+type OpenRouterResponsesModel = (typeof OPENROUTER_CHAT_MODELS)[number]
+
+// ---------------------------------------------------------------------------
+// Composite option types for the OpenRouter Responses adapter.
+// Derived from the SDK's `ResponsesRequest` so future SDK additions surface
+// here without manual fan-out (mirrors `text-provider-options.ts`).
+// ---------------------------------------------------------------------------
+
+export type OpenRouterResponsesCommonOptions = Pick<
+  ResponsesRequest,
+  | 'provider'
+  | 'plugins'
+  | 'user'
+  | 'sessionId'
+  | 'metadata'
+  | 'trace'
+  | 'parallelToolCalls'
+  | 'modalities'
+  | 'serviceTier'
+  | 'safetyIdentifier'
+  | 'promptCacheKey'
+  | 'previousResponseId'
+  | 'imageConfig'
+  | 'include'
+  | 'maxToolCalls'
+  | 'truncation'
> & {
+  /** A list of model IDs to use as fallbacks if the primary model is unavailable. */
+  models?: Array<OpenRouterResponsesModel>
+  /** The model variant to use, if supported by the model. Appended to the model ID. */
+  variant?: 'free' | 'nitro' | 'online' | 'exacto' | 'extended' | 'thinking'
+}
+
+export type OpenRouterResponsesBaseOptions = Pick<
+  ResponsesRequest,
+  | 'maxOutputTokens'
+  | 'temperature'
+  | 'topP'
+  | 'topK'
+  | 'topLogprobs'
+  | 'frequencyPenalty'
+  | 'presencePenalty'
+  | 'reasoning'
+  | 'toolChoice'
+  | 'parallelToolCalls'
+  | 'text'
+  | 'background'
+  | 'prompt'
+>
+
+export type ExternalResponsesProviderOptions =
+  OpenRouterResponsesCommonOptions & OpenRouterResponsesBaseOptions
diff --git a/packages/typescript/ai-openrouter/tests/openrouter-responses-adapter.test.ts b/packages/typescript/ai-openrouter/tests/openrouter-responses-adapter.test.ts
new file mode 100644
index 000000000..33b4fedc0
--- /dev/null
+++ b/packages/typescript/ai-openrouter/tests/openrouter-responses-adapter.test.ts
@@ -0,0 +1,451 @@
+import { beforeEach, describe, expect, it, vi } from 'vitest'
+import { chat } from '@tanstack/ai'
+import { resolveDebugOption } from '@tanstack/ai/adapter-internals'
+import { ResponsesRequest$outboundSchema } from '@openrouter/sdk/models'
+import { createOpenRouterResponsesText } from '../src/adapters/responses-text'
+import { webSearchTool } from '../src/tools/web-search-tool'
+import type { StreamChunk, Tool } from '@tanstack/ai'
+
+const testLogger = resolveDebugOption(false)
+let mockSend: any
+let lastOpenRouterConfig: any
+
+vi.mock('@openrouter/sdk', async () => {
+  return {
+    OpenRouter: class {
+      constructor(config?: unknown) {
+        lastOpenRouterConfig = config
+      }
+      beta = {
+        responses: {
+          send: (...args: Array<any>) => mockSend(...args),
+        },
+      }
+    },
+  }
+})
+
+const createAdapter = () =>
+  createOpenRouterResponsesText('openai/gpt-4o-mini', 'test-key')
+
+const weatherTool: Tool = {
+  name: 'lookup_weather',
+  description: 'Return the forecast for a location',
+}
+
+function createAsyncIterable<T>(chunks: Array<T>): AsyncIterable<T> {
+  return {
+    [Symbol.asyncIterator]() {
+      let index = 0
+      return {
+        // eslint-disable-next-line @typescript-eslint/require-await
+        async next() {
+          if (index < chunks.length) {
+            return { value: chunks[index++]!, done: false }
+          }
+          return { value: undefined as T, done: true }
+        },
+      }
+    },
+  }
+}
+
+function setupMockSdkClient(
+  streamEvents: Array<Record<string, unknown>>,
+  nonStreamResult?: Record<string, unknown>,
+) {
+  mockSend = vi.fn().mockImplementation((params) => {
+    if (params.responsesRequest?.stream) {
+      return Promise.resolve(createAsyncIterable(streamEvents))
+    }
+    return Promise.resolve(nonStreamResult)
+  })
+}
+
+describe('OpenRouter responses adapter — request shape', () => {
+  beforeEach(() => {
+    vi.clearAllMocks()
+  })
+
+  it('maps options into the Responses API payload (snake → camel)', async () => {
+    setupMockSdkClient([
+      {
+        type: 'response.completed',
+        sequenceNumber: 1,
+        response: {
+          model: 'openai/gpt-4o-mini',
+          output: [],
+          usage: { inputTokens: 5, outputTokens: 2, totalTokens: 7 },
+        },
+      },
+    ])
+
+    const adapter =
createAdapter() + + for await (const _ of chat({ + adapter, + systemPrompts: ['Stay concise'], + messages: [{ role: 'user', content: 'How is the weather?' }], + tools: [weatherTool], + temperature: 0.25, + topP: 0.6, + maxTokens: 1024, + modelOptions: { toolChoice: 'auto' as any }, + })) { + // consume + } + + expect(mockSend).toHaveBeenCalledTimes(1) + const [rawParams] = mockSend.mock.calls[0]! + const params = rawParams.responsesRequest + + // Top-level camelCase keys reach the SDK. + expect(params.model).toBe('openai/gpt-4o-mini') + expect(params.temperature).toBe(0.25) + expect(params.topP).toBe(0.6) + expect(params.maxOutputTokens).toBe(1024) + expect(params.toolChoice).toBe('auto') + expect(params.instructions).toBe('Stay concise') + expect(params.stream).toBe(true) + + // Tools land in OpenRouter's flat Responses function-tool shape. + expect(Array.isArray(params.tools)).toBe(true) + expect(params.tools[0]).toMatchObject({ + type: 'function', + name: 'lookup_weather', + }) + + // The wire-format outboundSchema must accept the params — if camelCase + // keys are still snake_case (silently stripped by Zod), this throws. + const serialized = ResponsesRequest$outboundSchema.parse(params) + expect(serialized).toHaveProperty('model', 'openai/gpt-4o-mini') + expect(serialized).toHaveProperty('temperature', 0.25) + expect(serialized).toHaveProperty('top_p', 0.6) + expect(serialized).toHaveProperty('max_output_tokens', 1024) + expect(serialized).toHaveProperty('tool_choice', 'auto') + }) + + it('walks input[] camel-casing call_id and image_url so Zod does not strip them', async () => { + setupMockSdkClient([ + { + type: 'response.completed', + sequenceNumber: 1, + response: { + model: 'openai/gpt-4o-mini', + output: [], + usage: { inputTokens: 0, outputTokens: 0, totalTokens: 0 }, + }, + }, + ]) + + const adapter = createAdapter() + for await (const _ of chat({ + adapter, + messages: [ + { + role: 'assistant', + content: null, + toolCalls: [ + { + id: 'call_abc', + type: 'function', + function: { name: 'lookup_weather', arguments: '{"x":1}' }, + }, + ], + }, + { role: 'tool', toolCallId: 'call_abc', content: '{"temp":72}' }, + ], + })) { + // consume + } + + const params = mockSend.mock.calls[0]![0].responsesRequest + const fcOutput = params.input.find( + (i: any) => i.type === 'function_call_output', + ) + // call_id was snake_case from the base; we must hand the SDK camelCase + // or Zod silently strips it and the tool result detaches from its call. 
+    expect(fcOutput).toBeDefined()
+    expect(fcOutput.callId).toBe('call_abc')
+    expect(fcOutput).not.toHaveProperty('call_id')
+  })
+
+  it('applies modelOptions.variant as a `:suffix` to the model id', async () => {
+    setupMockSdkClient([
+      {
+        type: 'response.completed',
+        sequenceNumber: 1,
+        response: {
+          model: 'openai/gpt-4o-mini:thinking',
+          output: [],
+          usage: { inputTokens: 0, outputTokens: 0, totalTokens: 0 },
+        },
+      },
+    ])
+    const adapter = createAdapter()
+    for await (const _ of chat({
+      adapter,
+      messages: [{ role: 'user', content: 'hi' }],
+      modelOptions: { variant: 'thinking' as any },
+    })) {
+      // consume
+    }
+    const params = mockSend.mock.calls[0]![0].responsesRequest
+    expect(params.model).toBe('openai/gpt-4o-mini:thinking')
+  })
+
+  it('rejects webSearchTool() with a clear error pointing at the chat adapter', async () => {
+    const adapter = createAdapter()
+    const ws = webSearchTool() as unknown as Tool
+    await expect(async () => {
+      for await (const _ of adapter.chatStream({
+        model: 'openai/gpt-4o-mini' as any,
+        messages: [{ role: 'user', content: 'hi' }],
+        tools: [ws],
+        logger: testLogger,
+      })) {
+        // consume
+      }
+    }).rejects.toThrow(/openRouterText/)
+  })
+})
+
+describe('OpenRouter responses adapter — stream event bridge', () => {
+  beforeEach(() => {
+    vi.clearAllMocks()
+  })
+
+  it('routes text deltas through TEXT_MESSAGE_* lifecycle', async () => {
+    setupMockSdkClient([
+      {
+        type: 'response.created',
+        sequenceNumber: 0,
+        response: { model: 'm', output: [] },
+      },
+      {
+        type: 'response.output_text.delta',
+        sequenceNumber: 1,
+        itemId: 'msg_1',
+        outputIndex: 0,
+        contentIndex: 0,
+        delta: 'Hello ',
+      },
+      {
+        type: 'response.output_text.delta',
+        sequenceNumber: 2,
+        itemId: 'msg_1',
+        outputIndex: 0,
+        contentIndex: 0,
+        delta: 'world',
+      },
+      {
+        type: 'response.completed',
+        sequenceNumber: 3,
+        response: {
+          model: 'm',
+          output: [],
+          usage: { inputTokens: 1, outputTokens: 2, totalTokens: 3 },
+        },
+      },
+    ])
+    const adapter = createAdapter()
+    const chunks: Array<StreamChunk> = []
+    for await (const c of chat({
+      adapter,
+      messages: [{ role: 'user', content: 'hi' }],
+    })) {
+      chunks.push(c)
+    }
+
+    const text = chunks.filter((c) => c.type === 'TEXT_MESSAGE_CONTENT')
+    expect(text.map((c: any) => c.delta)).toEqual(['Hello ', 'world'])
+
+    const finished = chunks.find((c) => c.type === 'RUN_FINISHED') as any
+    expect(finished).toBeDefined()
+    // Usage shape is mapped from camel to snake before the base reads it.
+    expect(finished.usage).toEqual({
+      promptTokens: 1,
+      completionTokens: 2,
+      totalTokens: 3,
+    })
+  })
+
+  it('routes function-call args through TOOL_CALL_START/ARGS/END', async () => {
+    setupMockSdkClient([
+      {
+        type: 'response.created',
+        sequenceNumber: 0,
+        response: { model: 'm', output: [] },
+      },
+      {
+        type: 'response.output_item.added',
+        sequenceNumber: 1,
+        outputIndex: 0,
+        item: {
+          type: 'function_call',
+          id: 'item_1',
+          callId: 'call_abc',
+          name: 'lookup_weather',
+          arguments: '',
+        },
+      },
+      {
+        type: 'response.function_call_arguments.delta',
+        sequenceNumber: 2,
+        itemId: 'item_1',
+        outputIndex: 0,
+        delta: '{"location":"Berlin"}',
+      },
+      {
+        type: 'response.function_call_arguments.done',
+        sequenceNumber: 3,
+        itemId: 'item_1',
+        outputIndex: 0,
+        arguments: '{"location":"Berlin"}',
+      },
+      {
+        type: 'response.completed',
+        sequenceNumber: 4,
+        response: {
+          model: 'm',
+          output: [{ type: 'function_call' }],
+          usage: { inputTokens: 0, outputTokens: 0, totalTokens: 0 },
+        },
+      },
+    ])
+
+    const adapter = createAdapter()
+    const chunks: Array<StreamChunk> = []
+    for await (const c of chat({
+      adapter,
+      messages: [{ role: 'user', content: 'hi' }],
+      tools: [weatherTool],
+    })) {
+      chunks.push(c)
+    }
+
+    const start = chunks.find((c) => c.type === 'TOOL_CALL_START') as any
+    expect(start).toMatchObject({
+      type: 'TOOL_CALL_START',
+      toolCallId: 'item_1',
+      toolCallName: 'lookup_weather',
+    })
+
+    const args = chunks.filter((c) => c.type === 'TOOL_CALL_ARGS') as any[]
+    expect(args.length).toBe(1)
+    expect(args[0]!.delta).toBe('{"location":"Berlin"}')
+
+    const end = chunks.find((c) => c.type === 'TOOL_CALL_END') as any
+    expect(end.input).toEqual({ location: 'Berlin' })
+
+    const finished = chunks.find((c) => c.type === 'RUN_FINISHED') as any
+    expect(finished.finishReason).toBe('tool_calls')
+  })
+
+  it('surfaces response.failed with a RUN_ERROR carrying the error message + code', async () => {
+    setupMockSdkClient([
+      {
+        type: 'response.created',
+        sequenceNumber: 0,
+        response: { model: 'm', output: [] },
+      },
+      {
+        type: 'response.failed',
+        sequenceNumber: 1,
+        response: {
+          model: 'm',
+          output: [],
+          error: { message: 'kaboom', code: 'server_error' },
+        },
+      },
+    ])
+    const adapter = createAdapter()
+    const chunks: Array<StreamChunk> = []
+    for await (const c of adapter.chatStream({
+      model: 'openai/gpt-4o-mini' as any,
+      messages: [{ role: 'user', content: 'hi' }],
+      logger: testLogger,
+    })) {
+      chunks.push(c)
+    }
+    const err = chunks.find((c) => c.type === 'RUN_ERROR') as any
+    expect(err).toBeDefined()
+    expect(err.error.message).toBe('kaboom')
+    expect(err.error.code).toBe('server_error')
+    // RUN_ERROR is terminal — no synthetic RUN_FINISHED should follow.
+    expect(chunks.find((c) => c.type === 'RUN_FINISHED')).toBeUndefined()
+  })
+
+  it('stringifies non-string error.code on top-level error events', async () => {
+    setupMockSdkClient([
+      {
+        type: 'response.created',
+        sequenceNumber: 0,
+        response: { model: 'm', output: [] },
+      },
+      {
+        type: 'error',
+        sequenceNumber: 1,
+        message: 'rate limit',
+        code: 429,
+        param: null,
+      },
+    ])
+    const adapter = createAdapter()
+    const chunks: Array<StreamChunk> = []
+    for await (const c of adapter.chatStream({
+      model: 'openai/gpt-4o-mini' as any,
+      messages: [{ role: 'user', content: 'hi' }],
+      logger: testLogger,
+    })) {
+      chunks.push(c)
+    }
+    const err = chunks.find((c) => c.type === 'RUN_ERROR') as any
+    expect(err).toBeDefined()
+    expect(err.error.code).toBe('429')
+  })
+})
+
+describe('OpenRouter responses adapter — SDK constructor wiring', () => {
+  beforeEach(() => {
+    vi.clearAllMocks()
+    lastOpenRouterConfig = undefined
+  })
+
+  it('forwards app-attribution headers (httpReferer, appTitle) to the SDK constructor', () => {
+    void createOpenRouterResponsesText('openai/gpt-4o-mini', 'test-key', {
+      httpReferer: 'https://app.example.com',
+      appTitle: 'TestApp',
+    } as any)
+    expect(lastOpenRouterConfig).toBeDefined()
+    expect(lastOpenRouterConfig.apiKey).toBe('test-key')
+    expect(lastOpenRouterConfig.httpReferer).toBe('https://app.example.com')
+    expect(lastOpenRouterConfig.appTitle).toBe('TestApp')
+  })
+
+  it('propagates the abort signal to the SDK call', async () => {
+    setupMockSdkClient([
+      {
+        type: 'response.completed',
+        sequenceNumber: 1,
+        response: {
+          model: 'm',
+          output: [],
+          usage: { inputTokens: 0, outputTokens: 0, totalTokens: 0 },
+        },
+      },
+    ])
+    const adapter = createAdapter()
+    const controller = new AbortController()
+    for await (const _ of adapter.chatStream({
+      model: 'openai/gpt-4o-mini' as any,
+      messages: [{ role: 'user', content: 'hi' }],
+      logger: testLogger,
+      request: { signal: controller.signal } as any,
+    })) {
+      // consume
+    }
+    const [, options] = mockSend.mock.calls[0]!
+    expect(options).toEqual({ signal: controller.signal })
+  })
+})
diff --git a/packages/typescript/openai-base/src/adapters/responses-text.ts b/packages/typescript/openai-base/src/adapters/responses-text.ts
index 48faadd21..8c7ca283d 100644
--- a/packages/typescript/openai-base/src/adapters/responses-text.ts
+++ b/packages/typescript/openai-base/src/adapters/responses-text.ts
@@ -105,7 +105,7 @@ export class OpenAICompatibleResponsesTextAdapter<
       `activity=chat provider=${this.name} model=${this.model} messages=${options.messages.length} tools=${options.tools?.length ?? 0} stream=true`,
       { provider: this.name, model: this.model },
     )
-    const response = await this.client.responses.create(
+    const response = await this.callResponseStream(
       {
         ...requestParams,
         stream: true,
@@ -193,7 +193,7 @@ export class OpenAICompatibleResponsesTextAdapter<
       `activity=structuredOutput provider=${this.name} model=${this.model} messages=${chatOptions.messages.length}`,
       { provider: this.name, model: this.model },
     )
-    const response = await this.client.responses.create(
+    const response = await this.callResponse(
       {
         ...(cleanParams as Omit<
           OpenAI_SDK.Responses.ResponseCreateParams,
@@ -261,6 +261,39 @@ export class OpenAICompatibleResponsesTextAdapter<
     return makeStructuredOutputCompatible(schema, originalRequired)
   }
 
+  /**
+   * Performs the non-streaming Responses API network call. The default uses
+   * the OpenAI SDK (`client.responses.create`), which covers any provider
+   * whose endpoint accepts the OpenAI SDK verbatim.
+ * + * Override in subclasses whose SDK has a different call shape — for + * example `@openrouter/sdk` exposes `client.beta.responses.send + * ({ responsesRequest })` with camelCase fields. The override is + * responsible for converting the params shape on the way in and returning + * an object structurally compatible with `OpenAI_SDK.Responses.Response` + * (the base only reads documented fields like `response.output[…]`). + */ + protected async callResponse( + params: OpenAI_SDK.Responses.ResponseCreateParamsNonStreaming, + requestOptions: ReturnType, + ): Promise { + return this.client.responses.create(params, requestOptions) + } + + /** + * Performs the streaming Responses API network call. Same pattern as + * {@link callResponse} — default uses the OpenAI SDK; override for + * providers whose SDK exposes a different streaming entry point. Returns + * an `AsyncIterable` because the base's + * {@link processStreamChunks} only needs structural iteration over events. + */ + protected async callResponseStream( + params: OpenAI_SDK.Responses.ResponseCreateParamsStreaming, + requestOptions: ReturnType, + ): Promise> { + return this.client.responses.create(params, requestOptions) + } + /** * Extract text content from a non-streaming Responses API response. * Override this in subclasses for provider-specific response shapes. diff --git a/packages/typescript/openai-base/src/index.ts b/packages/typescript/openai-base/src/index.ts index df6491e18..22b3f429a 100644 --- a/packages/typescript/openai-base/src/index.ts +++ b/packages/typescript/openai-base/src/index.ts @@ -18,6 +18,18 @@ export { type ChatCompletionFunctionTool, } from './adapters/chat-completions-tool-converter' export { OpenAICompatibleResponsesTextAdapter } from './adapters/responses-text' +// Re-export the OpenAI Responses SDK types subclasses need when overriding +// the `callResponse*` / `processStreamChunks` / `extractTextFromResponse` +// hooks, so subclass packages don't need to declare `openai` as a direct +// dependency. 
+export type { + Response as ResponsesResponse, + ResponseCreateParams, + ResponseCreateParamsNonStreaming, + ResponseCreateParamsStreaming, + ResponseInputContent, + ResponseStreamEvent, +} from 'openai/resources/responses/responses' export { convertFunctionToolToResponsesFormat, convertToolsToResponsesFormat, diff --git a/testing/e2e/src/lib/feature-support.ts b/testing/e2e/src/lib/feature-support.ts index 25a8b4d43..7a8dbb8c8 100644 --- a/testing/e2e/src/lib/feature-support.ts +++ b/testing/e2e/src/lib/feature-support.ts @@ -16,6 +16,7 @@ export const matrix: Record> = { 'groq', 'grok', 'openrouter', + 'openrouter-responses', ]), 'one-shot-text': new Set([ 'openai', @@ -25,6 +26,7 @@ export const matrix: Record> = { 'groq', 'grok', 'openrouter', + 'openrouter-responses', ]), reasoning: new Set(['openai', 'anthropic', 'gemini']), 'multi-turn-reasoning': new Set(['anthropic']), @@ -36,6 +38,7 @@ export const matrix: Record> = { 'groq', 'grok', 'openrouter', + 'openrouter-responses', ]), 'tool-calling': new Set([ 'openai', @@ -45,6 +48,7 @@ export const matrix: Record> = { 'groq', 'grok', 'openrouter', + 'openrouter-responses', ]), 'parallel-tool-calls': new Set([ 'openai', @@ -53,6 +57,7 @@ export const matrix: Record> = { 'groq', 'grok', 'openrouter', + 'openrouter-responses', ]), // Gemini excluded: approval flow timing issues with Gemini's streaming format 'tool-approval': new Set([ @@ -62,6 +67,7 @@ export const matrix: Record> = { 'groq', 'grok', 'openrouter', + 'openrouter-responses', ]), // Ollama excluded: aimock doesn't support content+toolCalls for /api/chat format 'text-tool-text': new Set([ @@ -71,6 +77,7 @@ export const matrix: Record> = { 'groq', 'grok', 'openrouter', + 'openrouter-responses', ]), 'structured-output': new Set([ 'openai', @@ -80,6 +87,7 @@ export const matrix: Record> = { 'groq', 'grok', 'openrouter', + 'openrouter-responses', ]), 'agentic-structured': new Set([ 'openai', @@ -89,6 +97,7 @@ export const matrix: Record> = { 'groq', 'grok', 'openrouter', + 'openrouter-responses', ]), 'multimodal-image': new Set([ 'openai', @@ -96,6 +105,7 @@ export const matrix: Record> = { 'gemini', 'grok', 'openrouter', + 'openrouter-responses', ]), 'multimodal-structured': new Set([ 'openai', @@ -103,6 +113,7 @@ export const matrix: Record> = { 'gemini', 'grok', 'openrouter', + 'openrouter-responses', ]), summarize: new Set([ 'openai', @@ -111,6 +122,7 @@ export const matrix: Record> = { 'ollama', 'grok', 'openrouter', + 'openrouter-responses', ]), 'summarize-stream': new Set([ 'openai', @@ -119,6 +131,7 @@ export const matrix: Record> = { 'ollama', 'grok', 'openrouter', + 'openrouter-responses', ]), // Gemini excluded: aimock doesn't mock Gemini's Imagen predict endpoint format 'image-gen': new Set(['openai', 'grok']), diff --git a/testing/e2e/src/lib/providers.ts b/testing/e2e/src/lib/providers.ts index ca2d00c4d..fd11a01eb 100644 --- a/testing/e2e/src/lib/providers.ts +++ b/testing/e2e/src/lib/providers.ts @@ -6,7 +6,10 @@ import { createGeminiChat } from '@tanstack/ai-gemini' import { createOllamaChat } from '@tanstack/ai-ollama' import { createGroqText } from '@tanstack/ai-groq' import { createGrokText } from '@tanstack/ai-grok' -import { createOpenRouterText } from '@tanstack/ai-openrouter' +import { + createOpenRouterResponsesText, + createOpenRouterText, +} from '@tanstack/ai-openrouter' import { HTTPClient } from '@openrouter/sdk' import type { Provider } from '@/lib/types' @@ -21,6 +24,7 @@ const defaultModels: Record = { groq: 'llama-3.3-70b-versatile', grok: 
'grok-3', openrouter: 'openai/gpt-4o', + 'openrouter-responses': 'openai/gpt-4o', // ElevenLabs has no chat/text model — the support matrix already filters // it out of text features, but we still need an entry to satisfy the // Record constraint. @@ -110,6 +114,26 @@ export function createTextAdapter( }), }) }, + 'openrouter-responses': () => { + // Same X-Test-Id injection rationale as the chat-completions factory + // above. The beta Responses endpoint uses the same SDK base URL + + // HTTPClient surface. + const httpClient = new HTTPClient() + if (testId) { + httpClient.addHook('beforeRequest', (req) => { + const next = new Request(req) + next.headers.set('X-Test-Id', testId) + return next + }) + } + return createChatOptions({ + adapter: createOpenRouterResponsesText( + model as 'openai/gpt-4o', + DUMMY_KEY, + { serverURL: openaiUrl, httpClient }, + ), + }) + }, elevenlabs: () => { throw new Error( 'ElevenLabs has no text/chat adapter — use createTTSAdapter or createTranscriptionAdapter.', diff --git a/testing/e2e/src/lib/types.ts b/testing/e2e/src/lib/types.ts index eafe588fc..2f5cd634c 100644 --- a/testing/e2e/src/lib/types.ts +++ b/testing/e2e/src/lib/types.ts @@ -8,6 +8,7 @@ export type Provider = | 'grok' | 'groq' | 'openrouter' + | 'openrouter-responses' | 'elevenlabs' export type Feature = @@ -41,6 +42,7 @@ export const ALL_PROVIDERS: Provider[] = [ 'grok', 'groq', 'openrouter', + 'openrouter-responses', 'elevenlabs', ] diff --git a/testing/e2e/src/routes/api.summarize.ts b/testing/e2e/src/routes/api.summarize.ts index e5912edf9..2c34c99d9 100644 --- a/testing/e2e/src/routes/api.summarize.ts +++ b/testing/e2e/src/routes/api.summarize.ts @@ -28,6 +28,8 @@ function createSummarizeAdapter(provider: Provider) { createGrokSummarize('grok-3', DUMMY_KEY, { baseURL: LLMOCK_OPENAI }), openrouter: () => createOpenaiSummarize('gpt-4o', DUMMY_KEY, { baseURL: LLMOCK_OPENAI }), + 'openrouter-responses': () => + createOpenaiSummarize('gpt-4o', DUMMY_KEY, { baseURL: LLMOCK_OPENAI }), } return factories[provider]?.() } diff --git a/testing/e2e/tests/test-matrix.ts b/testing/e2e/tests/test-matrix.ts index fea85dc59..f48dcebc0 100644 --- a/testing/e2e/tests/test-matrix.ts +++ b/testing/e2e/tests/test-matrix.ts @@ -21,6 +21,7 @@ export const providers: Provider[] = [ 'groq', 'grok', 'openrouter', + 'openrouter-responses', 'elevenlabs', ] From d741f2f87cc581834a97a12c9209cc82653e57c2 Mon Sep 17 00:00:00 2001 From: Tom Beckenham <34339192+tombeckenham@users.noreply.github.com> Date: Tue, 12 May 2026 13:09:34 +1000 Subject: [PATCH 05/49] chore(ai-groq): remove dead unused message-param types Removes `validateTextProviderOptions` (no-op stub never called) and the chain of `ChatCompletion*MessageParam` / `ChatCompletionContentPart*` / `ChatCompletionMessageToolCall` types that were only referenced by it. Unblocks the root `test:knip` CI check. None of the removed exports are re-exported from the package's public `src/index.ts`, so this is internal-only cleanup. 
Co-Authored-By: Claude Opus 4.7 (1M context) --- .../typescript/ai-groq/src/message-types.ts | 126 ------------------ .../ai-groq/src/text/text-provider-options.ts | 36 ----- 2 files changed, 162 deletions(-) diff --git a/packages/typescript/ai-groq/src/message-types.ts b/packages/typescript/ai-groq/src/message-types.ts index 42c218189..dfe55126b 100644 --- a/packages/typescript/ai-groq/src/message-types.ts +++ b/packages/typescript/ai-groq/src/message-types.ts @@ -7,62 +7,6 @@ * @see https://console.groq.com/docs/api-reference#chat */ -export interface ChatCompletionContentPartText { - /** The text content. */ - text: string - - /** The type of the content part. */ - type: 'text' -} - -export interface ChatCompletionContentPartImage { - image_url: { - /** Either a URL of the image or the base64 encoded image data. */ - url: string - - /** Specifies the detail level of the image. */ - detail?: 'auto' | 'low' | 'high' - } - - /** The type of the content part. */ - type: 'image_url' -} - -export interface ChatCompletionMessageToolCall { - /** The ID of the tool call. */ - id: string - - /** The function that the model called. */ - function: { - /** - * The arguments to call the function with, as generated by the model in JSON - * format. Note that the model does not always generate valid JSON, and may - * hallucinate parameters not defined by your function schema. Validate the - * arguments in your code before calling your function. - */ - arguments: string - - /** The name of the function to call. */ - name: string - } - - /** The type of the tool. Currently, only `function` is supported. */ - type: 'function' -} - -export interface ChatCompletionRequestMessageContentPartDocument { - document: { - /** The JSON document data. */ - data: { [key: string]: unknown } - - /** Optional unique identifier for the document. */ - id?: string | null - } - - /** The type of the content part. */ - type: 'document' -} - export type FunctionParameters = { [key: string]: unknown } export interface ChatCompletionNamedToolChoice { @@ -113,34 +57,6 @@ export type ChatCompletionToolChoiceOption = | 'required' | ChatCompletionNamedToolChoice -export type ChatCompletionContentPart = - | ChatCompletionContentPartText - | ChatCompletionContentPartImage - | ChatCompletionRequestMessageContentPartDocument - -export interface ChatCompletionAssistantMessageParam { - /** The role of the messages author, in this case `assistant`. */ - role: 'assistant' - - /** - * The contents of the assistant message. Required unless `tool_calls` or - * `function_call` is specified. - */ - content?: string | Array | null - - /** An optional name for the participant. */ - name?: string - - /** - * The reasoning output by the assistant if reasoning_format was set to 'parsed'. - * This field is only useable with qwen3 models. - */ - reasoning?: string | null - - /** The tool calls generated by the model, such as function calls. */ - tool_calls?: Array -} - export interface ChatCompletionTool { /** * The type of the tool. `function`, `browser_search`, and `code_interpreter` are @@ -151,48 +67,6 @@ export interface ChatCompletionTool { function?: FunctionDefinition } -export interface ChatCompletionToolMessageParam { - /** The contents of the tool message. */ - content: string | Array - - /** The role of the messages author, in this case `tool`. */ - role: 'tool' - - /** Tool call that this message is responding to. */ - tool_call_id: string -} - -export interface ChatCompletionSystemMessageParam { - /** The contents of the system message. 
*/ - content: string | Array - - /** The role of the messages author, in this case `system`. */ - role: 'system' | 'developer' - - /** An optional name for the participant. */ - name?: string -} - -export interface ChatCompletionUserMessageParam { - /** The contents of the user message. */ - content: string | Array - - /** The role of the messages author, in this case `user`. */ - role: 'user' - - /** An optional name for the participant. */ - name?: string -} - -/** - * Union of all supported chat completion message params. - */ -export type ChatCompletionMessageParam = - | ChatCompletionSystemMessageParam - | ChatCompletionUserMessageParam - | ChatCompletionAssistantMessageParam - | ChatCompletionToolMessageParam - export interface CompoundCustomModels { /** Custom model to use for answering. */ answering_model?: string | null diff --git a/packages/typescript/ai-groq/src/text/text-provider-options.ts b/packages/typescript/ai-groq/src/text/text-provider-options.ts index c3ee2309e..5fc9fc226 100644 --- a/packages/typescript/ai-groq/src/text/text-provider-options.ts +++ b/packages/typescript/ai-groq/src/text/text-provider-options.ts @@ -1,6 +1,4 @@ import type { - ChatCompletionMessageParam, - ChatCompletionTool, ChatCompletionToolChoiceOption, CompoundCustom, Document, @@ -185,41 +183,7 @@ export interface GroqTextProviderOptions { user?: string | null } -/** - * Internal options interface used for validation within the adapter. - * Extends provider options with required fields for API requests. - */ -export interface InternalTextProviderOptions extends GroqTextProviderOptions { - /** An array of messages comprising the conversation. */ - messages: Array - - /** - * The model name (e.g. "llama-3.3-70b-versatile", "openai/gpt-oss-120b"). - * @see https://console.groq.com/docs/models - */ - model: string - - /** Whether to stream partial message deltas as server-sent events. */ - stream?: boolean | null - - /** - * Tools the model may call (functions, code_interpreter, etc). - * @see https://console.groq.com/docs/tool-use - */ - tools?: Array -} - /** * External provider options (what users pass in) */ export type ExternalTextProviderOptions = GroqTextProviderOptions - -/** - * Validates text provider options. - * Basic validation stub — Groq API handles detailed validation. - */ -export function validateTextProviderOptions( - _options: InternalTextProviderOptions, -): void { - // Groq API handles detailed validation -} From f66f82f6281d73bb513054ffff791fb3c8f2b631 Mon Sep 17 00:00:00 2001 From: Tom Beckenham <34339192+tombeckenham@users.noreply.github.com> Date: Tue, 12 May 2026 13:14:14 +1000 Subject: [PATCH 06/49] fix(ai-openrouter): pass UNKNOWN-fallback events through verbatim MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The OpenRouter SDK's stream-event schema is built with Speakeasy's discriminated-union helper, which on a per-variant parse failure falls back to `{ raw, type: 'UNKNOWN', isUnknown: true }` rather than throwing. This happens whenever an upstream omits an "optional-looking" required field — notably `sequence_number` and `logprobs` on text/reasoning delta events, which aimock-served fixtures don't include. Before this fix the adapter's switch hit the default branch for UNKNOWN events and emitted them with no usable `type`, so the base's processStreamChunks ignored them silently — the run terminated as `RUN_FINISHED { finishReason: 'stop' }` with no content. 
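
The shape of the fix, up front (a sketch matching the diff below; the
fallback envelope `{ raw, type: 'UNKNOWN', isUnknown: true }` is the
Speakeasy behaviour described above, and `ResponseStreamEvent` is the
OpenAI type re-exported from `@tanstack/openai-base`):

```ts
// Inside the adapter's event loop: when Speakeasy's per-variant parse
// failed, the untouched snake_case wire event is still available under
// `raw` — re-emit that instead of an un-typed UNKNOWN envelope.
if (e.isUnknown && e.raw && typeof e.raw === 'object') {
  yield e.raw as ResponseStreamEvent
  continue
}
```
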
The `raw` payload preserved on the fallback is the original wire-shape
event in snake_case, which is exactly what processStreamChunks reads.
Re-emit it verbatim. Real-OpenRouter responses still flow through the
existing camel -> snake bridge because their events include the required
fields and parse cleanly.

Unblocks the openrouter-responses E2E suite: 11 affected tests now pass
locally against aimock; before this commit they all timed out with empty
output.

Co-Authored-By: Claude Opus 4.7 (1M context)
---
 .../ai-openrouter/src/adapters/responses-text.ts     | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/packages/typescript/ai-openrouter/src/adapters/responses-text.ts b/packages/typescript/ai-openrouter/src/adapters/responses-text.ts
index 900a909fa..3f23d8419 100644
--- a/packages/typescript/ai-openrouter/src/adapters/responses-text.ts
+++ b/packages/typescript/ai-openrouter/src/adapters/responses-text.ts
@@ -416,6 +416,18 @@ async function* adaptOpenRouterResponsesStreamEvents(
 ): AsyncIterable<ResponseStreamEvent> {
   for await (const event of stream) {
     const e = event as Record<string, any>
+
+    // Speakeasy's discriminated-union parser falls back to `{ raw, type:
+    // 'UNKNOWN', isUnknown: true }` when an event's strict per-variant schema
+    // rejects (missing optional-ish fields like `sequence_number`/`logprobs`
+    // that some upstreams — including aimock — omit). The `raw` payload is
+    // the original wire-shape event in snake_case, which is exactly what the
+    // base's `processStreamChunks` reads. Re-emit it verbatim.
+    if (e.isUnknown && e.raw && typeof e.raw === 'object') {
+      yield e.raw as ResponseStreamEvent
+      continue
+    }
+
     switch (e.type) {
       case 'response.created':
       case 'response.in_progress':

From d5e492d190938f20bfa5ad84ecbd9faf77f55f03 Mon Sep 17 00:00:00 2001
From: Tom Beckenham <34339192+tombeckenham@users.noreply.github.com>
Date: Tue, 12 May 2026 18:53:51 +1000
Subject: [PATCH 07/49] refactor(adapters): remove asChunk casts, enforce
 satisfies StreamChunk
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Replaces ~200 sites of `asChunk({ type: 'X', ... })` (a
`Record<string, unknown> as unknown as StreamChunk` cast) with
`({ type: EventType.X, ... }) satisfies StreamChunk` so the type system
validates AG-UI event shape at every emission.

The cast was bypassing TypeScript's string-enum nominal typing and
masking a cluster of spec deviations now fixed:

- RUN_STARTED / RUN_FINISHED in openai-base (chat-completions +
  responses) and all three summarize adapters were missing the
  AG-UI-required `threadId`. Threading
  `options.threadId ?? generateId(this.name)` through `aguiState`
  (matching the existing Gemini/Anthropic pattern) fixes it.
- RUN_ERROR emissions carried a non-existent `runId` field and the
  deprecated nested `error: { message, code }` form instead of AG-UI's
  top-level `message`/`code`. Both forms now coexist (deprecated kept
  for back-compat) and `runId` is dropped — verified no consumer reads
  it (chat-client.ts:404 only reads runId on RUN_FINISHED).
- STEP_STARTED / STEP_FINISHED in responses-text.ts were passing only
  the deprecated `stepId` alias; AG-UI requires `stepName`. Now passes
  both.
- `finishReason` in chat-completions-text.ts was typed as `string`,
  dropping below the AG-UI vocabulary. Widened
  `RunFinishedEvent.finishReason` in `@tanstack/ai` to include OpenAI's
  `'function_call'` so it narrows cleanly. responses-text.ts maps
  Responses-API `'max_output_tokens'` → `'length'` and passes
  `'content_filter'` through.
- Per-event timestamps.
AG-UI spec: "Optional timestamp indicating when the event was created." Previously a single `const timestamp = Date.now()` was captured at run start and reused on every emission across the eight adapters; each chunk now uses `Date.now()` inline. `@tanstack/ai/tests/test-utils.ts` `ev.*` builders are typed to return precise event members via `satisfies StreamChunk`; the loose `chunk(type, fields)` factory is preserved as a documented escape hatch for tests that deliberately construct off-spec fixtures. ai-client tests no longer declare a local `asChunk`. ai-groq's `processStreamChunks` override signature is updated to include the new `threadId` field on `aguiState`. Out of scope, flagged for follow-up: - Framework tests (ai-react / ai-svelte / ai-vue) with inline string-literal chunk arrays — their test directories aren't currently type-checked, so they compile despite being off-spec. - Summarize adapters omit TEXT_MESSAGE_START / TEXT_MESSAGE_END around content emissions (separate AG-UI lifecycle gap). Verified: pnpm -r test:types, test:lib, test:eslint, test:build all green. Co-Authored-By: Claude Opus 4.7 (1M context) --- .../ai-anthropic/src/adapters/summarize.ts | 27 +- .../ai-anthropic/src/adapters/text.ts | 215 +++++++------- .../ai-client/tests/chat-client-abort.test.ts | 81 +++--- .../ai-client/tests/chat-client.test.ts | 145 ++++++---- .../tests/connection-adapters.test.ts | 48 ++-- .../ai-client/tests/generation-client.test.ts | 197 ++++++++----- .../tests/video-generation-client.test.ts | 208 ++++++++------ .../ai-gemini/src/adapters/summarize.ts | 27 +- .../typescript/ai-gemini/src/adapters/text.ts | 192 ++++++------- .../typescript/ai-groq/src/adapters/text.ts | 1 + .../ai-ollama/src/adapters/summarize.ts | 27 +- .../typescript/ai-ollama/src/adapters/text.ts | 177 ++++++------ .../activities/stream-generation-result.ts | 10 +- packages/typescript/ai/src/types.ts | 8 +- packages/typescript/ai/tests/test-utils.ts | 82 ++++-- .../src/adapters/chat-completions-text.ts | 207 +++++++------ .../src/adapters/responses-text.ts | 272 ++++++++++-------- 17 files changed, 1057 insertions(+), 867 deletions(-) diff --git a/packages/typescript/ai-anthropic/src/adapters/summarize.ts b/packages/typescript/ai-anthropic/src/adapters/summarize.ts index cdd9fe66f..60effa555 100644 --- a/packages/typescript/ai-anthropic/src/adapters/summarize.ts +++ b/packages/typescript/ai-anthropic/src/adapters/summarize.ts @@ -1,3 +1,4 @@ +import { EventType } from '@tanstack/ai' import { BaseSummarizeAdapter } from '@tanstack/ai/adapters' import { createAnthropicClient, @@ -12,10 +13,6 @@ import type { } from '@tanstack/ai' import type { AnthropicClientConfig } from '../utils' -/** Cast an event object to StreamChunk. 
*/ -const asChunk = (chunk: Record) => - chunk as unknown as StreamChunk - /** * Configuration for Anthropic summarize adapter */ @@ -102,6 +99,7 @@ export class AnthropicSummarizeAdapter< const { logger } = options const systemPrompt = this.buildSummarizationPrompt(options) const id = generateId(this.name) + const threadId = generateId('thread') const model = options.model let accumulatedContent = '' let inputTokens = 0 @@ -114,6 +112,14 @@ export class AnthropicSummarizeAdapter< }) try { + yield { + type: EventType.RUN_STARTED, + runId: id, + threadId, + model, + timestamp: Date.now(), + } satisfies StreamChunk + const stream = await this.client.messages.create({ model: options.model, messages: [{ role: 'user', content: options.text }], @@ -134,20 +140,21 @@ export class AnthropicSummarizeAdapter< if (event.delta.type === 'text_delta') { const delta = event.delta.text accumulatedContent += delta - yield asChunk({ - type: 'TEXT_MESSAGE_CONTENT', + yield { + type: EventType.TEXT_MESSAGE_CONTENT, messageId: id, model, timestamp: Date.now(), delta, content: accumulatedContent, - }) + } satisfies StreamChunk } } else if (event.type === 'message_delta') { outputTokens = event.usage.output_tokens - yield asChunk({ - type: 'RUN_FINISHED', + yield { + type: EventType.RUN_FINISHED, runId: id, + threadId, model, timestamp: Date.now(), finishReason: event.delta.stop_reason as @@ -160,7 +167,7 @@ export class AnthropicSummarizeAdapter< completionTokens: outputTokens, totalTokens: inputTokens + outputTokens, }, - }) + } satisfies StreamChunk } } } catch (error) { diff --git a/packages/typescript/ai-anthropic/src/adapters/text.ts b/packages/typescript/ai-anthropic/src/adapters/text.ts index f057a18a4..92d19b8f0 100644 --- a/packages/typescript/ai-anthropic/src/adapters/text.ts +++ b/packages/typescript/ai-anthropic/src/adapters/text.ts @@ -1,3 +1,4 @@ +import { EventType } from '@tanstack/ai' import { BaseTextAdapter } from '@tanstack/ai/adapters' import { convertToolsToProviderFormat } from '../tools/tool-converter' import { validateTextProviderOptions } from '../text/text-provider-options' @@ -48,11 +49,6 @@ import type { } from '../message-types' import type { AnthropicClientConfig } from '../utils' -/** Cast an event object to StreamChunk. Adapters construct events with string - * literal types which are structurally compatible with the EventType enum. 
*/ -const asChunk = (chunk: Record) => - chunk as unknown as StreamChunk - /** * Configuration for Anthropic text adapter */ @@ -177,8 +173,8 @@ export class AnthropicTextAdapter< error, source: 'anthropic.chatStream', }) - yield asChunk({ - type: 'RUN_ERROR', + yield { + type: EventType.RUN_ERROR, model: options.model, timestamp: Date.now(), message: err.message || 'Unknown error occurred', @@ -187,7 +183,7 @@ export class AnthropicTextAdapter< message: err.message || 'Unknown error occurred', code: err.code || String(err.status), }, - }) + } satisfies StreamChunk } } @@ -629,7 +625,6 @@ export class AnthropicTextAdapter< let accumulatedContent = '' let accumulatedThinking = '' let accumulatedSignature = '' - const timestamp = Date.now() const toolCallsMap = new Map< number, { id: string; name: string; input: string; started: boolean } @@ -657,13 +652,13 @@ export class AnthropicTextAdapter< // Emit RUN_STARTED on first event if (!hasEmittedRunStarted) { hasEmittedRunStarted = true - yield asChunk({ - type: 'RUN_STARTED', + yield { + type: EventType.RUN_STARTED, runId, threadId, model, - timestamp, - }) + timestamp: Date.now(), + } satisfies StreamChunk } if (event.type === 'content_block_start') { @@ -684,94 +679,94 @@ export class AnthropicTextAdapter< reasoningMessageId = genId() // Spec REASONING events - yield asChunk({ - type: 'REASONING_START', + yield { + type: EventType.REASONING_START, messageId: reasoningMessageId, model, - timestamp, - }) - yield asChunk({ - type: 'REASONING_MESSAGE_START', + timestamp: Date.now(), + } satisfies StreamChunk + yield { + type: EventType.REASONING_MESSAGE_START, messageId: reasoningMessageId, role: 'reasoning' as const, model, - timestamp, - }) + timestamp: Date.now(), + } satisfies StreamChunk // Legacy STEP events (kept during transition) - yield asChunk({ - type: 'STEP_STARTED', + yield { + type: EventType.STEP_STARTED, stepName: stepId, stepId, model, - timestamp, + timestamp: Date.now(), stepType: 'thinking', - }) + } satisfies StreamChunk } } else if (event.type === 'content_block_delta') { if (event.delta.type === 'text_delta') { // Close reasoning before text starts if (reasoningMessageId && !hasClosedReasoning) { hasClosedReasoning = true - yield asChunk({ - type: 'REASONING_MESSAGE_END', + yield { + type: EventType.REASONING_MESSAGE_END, messageId: reasoningMessageId, model, - timestamp, - }) - yield asChunk({ - type: 'REASONING_END', + timestamp: Date.now(), + } satisfies StreamChunk + yield { + type: EventType.REASONING_END, messageId: reasoningMessageId, model, - timestamp, - }) + timestamp: Date.now(), + } satisfies StreamChunk } // Emit TEXT_MESSAGE_START on first text content if (!hasEmittedTextMessageStart) { hasEmittedTextMessageStart = true - yield asChunk({ - type: 'TEXT_MESSAGE_START', + yield { + type: EventType.TEXT_MESSAGE_START, messageId, model, - timestamp, + timestamp: Date.now(), role: 'assistant', - }) + } satisfies StreamChunk } const delta = event.delta.text accumulatedContent += delta - yield asChunk({ - type: 'TEXT_MESSAGE_CONTENT', + yield { + type: EventType.TEXT_MESSAGE_CONTENT, messageId, model, - timestamp, + timestamp: Date.now(), delta, content: accumulatedContent, - }) + } satisfies StreamChunk } else if (event.delta.type === 'thinking_delta') { const delta = event.delta.thinking accumulatedThinking += delta // Spec REASONING content event - yield asChunk({ - type: 'REASONING_MESSAGE_CONTENT', + yield { + type: EventType.REASONING_MESSAGE_CONTENT, messageId: reasoningMessageId!, delta, model, - timestamp, 
- }) + timestamp: Date.now(), + } satisfies StreamChunk // Legacy STEP event - yield asChunk({ - type: 'STEP_FINISHED', + yield { + type: EventType.STEP_FINISHED, stepName: stepId || genId(), stepId: stepId || genId(), model, - timestamp, + timestamp: Date.now(), delta, content: accumulatedThinking, - }) + } satisfies StreamChunk } else if ( (event.delta as { type: string }).type === 'signature_delta' ) { @@ -783,43 +778,43 @@ export class AnthropicTextAdapter< // Emit TOOL_CALL_START on first args delta if (!existing.started) { existing.started = true - yield asChunk({ - type: 'TOOL_CALL_START', + yield { + type: EventType.TOOL_CALL_START, toolCallId: existing.id, toolCallName: existing.name, toolName: existing.name, model, - timestamp, + timestamp: Date.now(), index: currentToolIndex, - }) + } satisfies StreamChunk } existing.input += event.delta.partial_json - yield asChunk({ - type: 'TOOL_CALL_ARGS', + yield { + type: EventType.TOOL_CALL_ARGS, toolCallId: existing.id, model, - timestamp, + timestamp: Date.now(), delta: event.delta.partial_json, args: existing.input, - }) + } satisfies StreamChunk } } } else if (event.type === 'content_block_stop') { if (currentBlockType === 'thinking') { // Emit signature so it can be replayed in multi-turn context if (accumulatedSignature && stepId) { - yield asChunk({ - type: 'STEP_FINISHED', + yield { + type: EventType.STEP_FINISHED, stepName: stepId, stepId, model, - timestamp, + timestamp: Date.now(), delta: '', content: accumulatedThinking, signature: accumulatedSignature, - }) + } satisfies StreamChunk } } else if (currentBlockType === 'tool_use') { const existing = toolCallsMap.get(currentToolIndex) @@ -827,15 +822,15 @@ export class AnthropicTextAdapter< // If tool call wasn't started yet (no args), start it now if (!existing.started) { existing.started = true - yield asChunk({ - type: 'TOOL_CALL_START', + yield { + type: EventType.TOOL_CALL_START, toolCallId: existing.id, toolCallName: existing.name, toolName: existing.name, model, - timestamp, + timestamp: Date.now(), index: currentToolIndex, - }) + } satisfies StreamChunk } // Emit TOOL_CALL_END @@ -847,15 +842,15 @@ export class AnthropicTextAdapter< parsedInput = {} } - yield asChunk({ - type: 'TOOL_CALL_END', + yield { + type: EventType.TOOL_CALL_END, toolCallId: existing.id, toolCallName: existing.name, toolName: existing.name, model, - timestamp, + timestamp: Date.now(), input: parsedInput, - }) + } satisfies StreamChunk // Reset so a new TEXT_MESSAGE_START is emitted if text follows tool calls hasEmittedTextMessageStart = false @@ -863,12 +858,12 @@ export class AnthropicTextAdapter< } else { // Emit TEXT_MESSAGE_END only for text blocks (not tool_use blocks) if (hasEmittedTextMessageStart && accumulatedContent) { - yield asChunk({ - type: 'TEXT_MESSAGE_END', + yield { + type: EventType.TEXT_MESSAGE_END, messageId, model, - timestamp, - }) + timestamp: Date.now(), + } satisfies StreamChunk } } currentBlockType = null @@ -876,32 +871,32 @@ export class AnthropicTextAdapter< // Close reasoning events if still open if (reasoningMessageId && !hasClosedReasoning) { hasClosedReasoning = true - yield asChunk({ - type: 'REASONING_MESSAGE_END', + yield { + type: EventType.REASONING_MESSAGE_END, messageId: reasoningMessageId, model, - timestamp, - }) - yield asChunk({ - type: 'REASONING_END', + timestamp: Date.now(), + } satisfies StreamChunk + yield { + type: EventType.REASONING_END, messageId: reasoningMessageId, model, - timestamp, - }) + timestamp: Date.now(), + } satisfies StreamChunk } // 
Only emit RUN_FINISHED from message_stop if message_delta didn't already emit one. // message_delta carries the real stop_reason (tool_use, end_turn, etc.), // while message_stop is just a completion signal. if (!hasEmittedRunFinished) { - yield asChunk({ - type: 'RUN_FINISHED', + yield { + type: EventType.RUN_FINISHED, runId, threadId, model, - timestamp, + timestamp: Date.now(), finishReason: 'stop', - }) + } satisfies StreamChunk } } else if (event.type === 'message_delta') { if (event.delta.stop_reason) { @@ -910,28 +905,28 @@ export class AnthropicTextAdapter< // Close reasoning events if still open if (reasoningMessageId && !hasClosedReasoning) { hasClosedReasoning = true - yield asChunk({ - type: 'REASONING_MESSAGE_END', + yield { + type: EventType.REASONING_MESSAGE_END, messageId: reasoningMessageId, model, - timestamp, - }) - yield asChunk({ - type: 'REASONING_END', + timestamp: Date.now(), + } satisfies StreamChunk + yield { + type: EventType.REASONING_END, messageId: reasoningMessageId, model, - timestamp, - }) + timestamp: Date.now(), + } satisfies StreamChunk } switch (event.delta.stop_reason) { case 'tool_use': { - yield asChunk({ - type: 'RUN_FINISHED', + yield { + type: EventType.RUN_FINISHED, runId, threadId, model, - timestamp, + timestamp: Date.now(), finishReason: 'tool_calls', usage: { promptTokens: event.usage.input_tokens || 0, @@ -940,15 +935,14 @@ export class AnthropicTextAdapter< (event.usage.input_tokens || 0) + (event.usage.output_tokens || 0), }, - }) + } satisfies StreamChunk break } case 'max_tokens': { - yield asChunk({ - type: 'RUN_ERROR', - runId, + yield { + type: EventType.RUN_ERROR, model, - timestamp, + timestamp: Date.now(), message: 'The response was cut off because the maximum token limit was reached.', code: 'max_tokens', @@ -957,16 +951,16 @@ export class AnthropicTextAdapter< 'The response was cut off because the maximum token limit was reached.', code: 'max_tokens', }, - }) + } satisfies StreamChunk break } default: { - yield asChunk({ - type: 'RUN_FINISHED', + yield { + type: EventType.RUN_FINISHED, runId, threadId, model, - timestamp, + timestamp: Date.now(), finishReason: 'stop', usage: { promptTokens: event.usage.input_tokens || 0, @@ -975,7 +969,7 @@ export class AnthropicTextAdapter< (event.usage.input_tokens || 0) + (event.usage.output_tokens || 0), }, - }) + } satisfies StreamChunk } } } @@ -988,18 +982,17 @@ export class AnthropicTextAdapter< error, source: 'anthropic.processAnthropicStream', }) - yield asChunk({ - type: 'RUN_ERROR', - runId, + yield { + type: EventType.RUN_ERROR, model, - timestamp, + timestamp: Date.now(), message: err.message || 'Unknown error occurred', code: err.code || String(err.status), error: { message: err.message || 'Unknown error occurred', code: err.code || String(err.status), }, - }) + } satisfies StreamChunk } } } diff --git a/packages/typescript/ai-client/tests/chat-client-abort.test.ts b/packages/typescript/ai-client/tests/chat-client-abort.test.ts index 71bf71522..882d6d471 100644 --- a/packages/typescript/ai-client/tests/chat-client-abort.test.ts +++ b/packages/typescript/ai-client/tests/chat-client-abort.test.ts @@ -1,12 +1,9 @@ import { beforeEach, describe, expect, it, vi } from 'vitest' +import { EventType } from '@tanstack/ai' import { ChatClient } from '../src/chat-client' import type { ConnectionAdapter } from '../src/connection-adapters' import type { StreamChunk } from '@tanstack/ai' -/** Cast an event object to StreamChunk for type compatibility with EventType enum. 
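The timestamp mechanics in the hunks above repeat across all eight adapters. Stripped of provider detail, the change is just per-emission stamping; a sketch with an illustrative generator shape:

    import { EventType } from '@tanstack/ai'
    import type { StreamChunk } from '@tanstack/ai'

    // Each yield stamps itself. The removed code hoisted a single
    // `const timestamp = Date.now()` above the loop, so every event in a
    // long-running stream reported the run's start time instead of its own.
    async function* stampDeltas(
      deltas: AsyncIterable<string>,
      messageId: string,
      model: string,
    ): AsyncGenerator<StreamChunk> {
      let content = ''
      for await (const delta of deltas) {
        content += delta
        yield {
          type: EventType.TEXT_MESSAGE_CONTENT,
          messageId,
          model,
          timestamp: Date.now(), // creation time of this event
          delta,
          content,
        } satisfies StreamChunk
      }
    }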
*/ -const asChunk = (chunk: Record) => - chunk as unknown as StreamChunk - describe('ChatClient - Abort Signal Handling', () => { let mockAdapter: ConnectionAdapter let receivedAbortSignal: AbortSignal | undefined @@ -20,29 +17,30 @@ describe('ChatClient - Abort Signal Handling', () => { receivedAbortSignal = abortSignal // Simulate streaming chunks (AG-UI format) - yield asChunk({ - type: 'TEXT_MESSAGE_CONTENT', + yield { + type: EventType.TEXT_MESSAGE_CONTENT, messageId: '1', model: 'test', timestamp: Date.now(), delta: 'Hello', content: 'Hello', - }) - yield asChunk({ - type: 'TEXT_MESSAGE_CONTENT', + } satisfies StreamChunk + yield { + type: EventType.TEXT_MESSAGE_CONTENT, messageId: '1', model: 'test', timestamp: Date.now(), delta: ' World', content: 'Hello World', - }) - yield asChunk({ - type: 'RUN_FINISHED', + } satisfies StreamChunk + yield { + type: EventType.RUN_FINISHED, runId: 'run-1', + threadId: 'thread-1', model: 'test', timestamp: Date.now(), finishReason: 'stop', - }) + } satisfies StreamChunk }, } }) @@ -82,24 +80,24 @@ describe('ChatClient - Abort Signal Handling', () => { } try { - yield asChunk({ - type: 'TEXT_MESSAGE_CONTENT', + yield { + type: EventType.TEXT_MESSAGE_CONTENT, messageId: '1', model: 'test', timestamp: Date.now(), delta: 'Hello', content: 'Hello', - }) + } satisfies StreamChunk // Simulate long-running stream await new Promise((resolve) => setTimeout(resolve, 100)) - yield asChunk({ - type: 'TEXT_MESSAGE_CONTENT', + yield { + type: EventType.TEXT_MESSAGE_CONTENT, messageId: '1', model: 'test', timestamp: Date.now(), delta: ' World', content: 'Hello World', - }) + } satisfies StreamChunk } catch (err) { // Abort errors are expected if (err instanceof Error && err.name === 'AbortError') { @@ -137,28 +135,28 @@ describe('ChatClient - Abort Signal Handling', () => { const adapterWithPartial: ConnectionAdapter = { // eslint-disable-next-line @typescript-eslint/require-await async *connect(_messages, _data, abortSignal) { - yield asChunk({ - type: 'TEXT_MESSAGE_CONTENT', + yield { + type: EventType.TEXT_MESSAGE_CONTENT, messageId: '1', model: 'test', timestamp: Date.now(), delta: 'Hello', content: 'Hello', - }) + } satisfies StreamChunk yieldedChunks++ if (abortSignal?.aborted) { return } - yield asChunk({ - type: 'TEXT_MESSAGE_CONTENT', + yield { + type: EventType.TEXT_MESSAGE_CONTENT, messageId: '1', model: 'test', timestamp: Date.now(), delta: ' World', content: 'Hello World', - }) + } satisfies StreamChunk yieldedChunks++ }, } @@ -194,14 +192,14 @@ describe('ChatClient - Abort Signal Handling', () => { const adapterWithAbort: ConnectionAdapter = { // eslint-disable-next-line @typescript-eslint/require-await async *connect(_messages, _data, abortSignal) { - yield asChunk({ - type: 'TEXT_MESSAGE_CONTENT', + yield { + type: EventType.TEXT_MESSAGE_CONTENT, messageId: '1', model: 'test', timestamp: Date.now(), delta: 'Hello', content: 'Hello', - }) + } satisfies StreamChunk if (abortSignal?.aborted) { return @@ -234,14 +232,14 @@ describe('ChatClient - Abort Signal Handling', () => { it('should set isLoading to false after abort', async () => { const adapterWithAbort: ConnectionAdapter = { async *connect(_messages, _data, _abortSignal) { - yield asChunk({ - type: 'TEXT_MESSAGE_CONTENT', + yield { + type: EventType.TEXT_MESSAGE_CONTENT, messageId: '1', model: 'test', timestamp: Date.now(), delta: 'Hello', content: 'Hello', - }) + } satisfies StreamChunk await new Promise((resolve) => setTimeout(resolve, 50)) }, } @@ -276,13 +274,14 @@ describe('ChatClient - 
Abort Signal Handling', () => { if (abortSignal) { abortSignals.push(abortSignal) } - yield asChunk({ - type: 'RUN_FINISHED', + yield { + type: EventType.RUN_FINISHED, runId: 'run-1', + threadId: 'thread-1', model: 'test', timestamp: Date.now(), finishReason: 'stop', - }) + } satisfies StreamChunk }, } @@ -319,13 +318,14 @@ describe('ChatClient - Abort Signal Handling', () => { // eslint-disable-next-line @typescript-eslint/require-await async *connect(_messages, _data, _abortSignal) { connectCalled = true - yield asChunk({ - type: 'RUN_FINISHED', + yield { + type: EventType.RUN_FINISHED, runId: 'run-1', + threadId: 'thread-1', model: 'test', timestamp: Date.now(), finishReason: 'stop', - }) + } satisfies StreamChunk }, } @@ -367,13 +367,14 @@ describe('ChatClient - Abort Signal Handling', () => { if (abortSignal) { signalsPassedToConnect.push(abortSignal) } - yield asChunk({ - type: 'RUN_FINISHED', + yield { + type: EventType.RUN_FINISHED, runId: 'run-1', + threadId: 'thread-1', model: 'test', timestamp: Date.now(), finishReason: 'stop', - }) + } satisfies StreamChunk }, } diff --git a/packages/typescript/ai-client/tests/chat-client.test.ts b/packages/typescript/ai-client/tests/chat-client.test.ts index ec997c868..ff933bd01 100644 --- a/packages/typescript/ai-client/tests/chat-client.test.ts +++ b/packages/typescript/ai-client/tests/chat-client.test.ts @@ -1,4 +1,5 @@ import { describe, expect, it, vi } from 'vitest' +import { EventType } from '@tanstack/ai' import { ChatClient } from '../src/chat-client' import { createMockConnectionAdapter, @@ -15,10 +16,6 @@ import type { import type { StreamChunk } from '@tanstack/ai' import type { UIMessage } from '../src/types' -/** Cast an event object to StreamChunk for type compatibility with EventType enum. 
*/ -const asChunk = (chunk: Record) => - chunk as unknown as StreamChunk - describe('ChatClient', () => { describe('constructor', () => { it('should create a client with default options', () => { @@ -154,8 +151,9 @@ describe('ChatClient', () => { it('stop should not unsubscribe an active subscription', async () => { const adapter = createSubscribeAdapter([ { - type: 'RUN_FINISHED', + type: EventType.RUN_FINISHED, runId: 'run-1', + threadId: 'thread-1', model: 'test', timestamp: Date.now(), finishReason: 'stop', @@ -259,7 +257,7 @@ describe('ChatClient', () => { it('unsubscribe should abort in-flight requests and disconnect', async () => { const adapter = createSubscribeAdapter([ { - type: 'TEXT_MESSAGE_CONTENT', + type: EventType.TEXT_MESSAGE_CONTENT, messageId: 'msg-1', model: 'test', timestamp: Date.now(), @@ -318,7 +316,7 @@ describe('ChatClient', () => { it('should remain pending without terminal run events', async () => { const adapter = createSubscribeAdapter([ { - type: 'TEXT_MESSAGE_CONTENT', + type: EventType.TEXT_MESSAGE_CONTENT, messageId: 'msg-1', model: 'test', timestamp: Date.now(), @@ -354,13 +352,14 @@ describe('ChatClient', () => { it('should flip to true on RUN_STARTED and false on RUN_FINISHED', async () => { const chunks: Array = [ { - type: 'RUN_STARTED', + type: EventType.RUN_STARTED, runId: 'run-1', + threadId: 'thread-1', model: 'test', timestamp: Date.now(), } as unknown as StreamChunk, { - type: 'TEXT_MESSAGE_CONTENT', + type: EventType.TEXT_MESSAGE_CONTENT, messageId: 'msg-1', model: 'test', timestamp: Date.now(), @@ -368,8 +367,9 @@ describe('ChatClient', () => { content: 'Hi', } as unknown as StreamChunk, { - type: 'RUN_FINISHED', + type: EventType.RUN_FINISHED, runId: 'run-1', + threadId: 'thread-1', model: 'test', timestamp: Date.now(), finishReason: 'stop', @@ -393,13 +393,15 @@ describe('ChatClient', () => { it('should flip to false on RUN_ERROR', async () => { const chunks: Array = [ { - type: 'RUN_STARTED', + type: EventType.RUN_STARTED, runId: 'run-1', + threadId: 'thread-1', model: 'test', timestamp: Date.now(), } as unknown as StreamChunk, { - type: 'RUN_ERROR', + type: EventType.RUN_ERROR, + message: 'something went wrong', runId: 'run-1', model: 'test', timestamp: Date.now(), @@ -424,13 +426,14 @@ describe('ChatClient', () => { it('should remain correct through subscribe/unsubscribe cycles', async () => { const chunks: Array = [ { - type: 'RUN_STARTED', + type: EventType.RUN_STARTED, runId: 'run-1', + threadId: 'thread-1', model: 'test', timestamp: Date.now(), } as unknown as StreamChunk, { - type: 'TEXT_MESSAGE_CONTENT', + type: EventType.TEXT_MESSAGE_CONTENT, messageId: 'msg-1', model: 'test', timestamp: Date.now(), @@ -438,8 +441,9 @@ describe('ChatClient', () => { content: 'Hi', } as unknown as StreamChunk, { - type: 'RUN_FINISHED', + type: EventType.RUN_FINISHED, runId: 'run-1', + threadId: 'thread-1', model: 'test', timestamp: Date.now(), finishReason: 'stop', @@ -464,12 +468,13 @@ describe('ChatClient', () => { while (!signal?.aborted) { if (!yieldedStart) { yieldedStart = true - yield asChunk({ - type: 'RUN_STARTED' as const, + yield { + type: EventType.RUN_STARTED as const, runId: 'run-1', + threadId: 'thread-1', model: 'test', timestamp: Date.now(), - }) + } satisfies StreamChunk } await new Promise((resolve) => { const onAbort = () => resolve() @@ -507,12 +512,13 @@ describe('ChatClient', () => { while (!signal?.aborted) { if (!yieldedStart) { yieldedStart = true - yield asChunk({ - type: 'RUN_STARTED' as const, + yield { + type: 
EventType.RUN_STARTED as const, runId: 'run-1', + threadId: 'thread-1', model: 'test', timestamp: Date.now(), - }) + } satisfies StreamChunk } await new Promise((resolve) => { const onAbort = () => resolve() @@ -546,19 +552,21 @@ describe('ChatClient', () => { it('should not emit duplicate callbacks on repeated same-state events', async () => { const chunks: Array = [ { - type: 'RUN_STARTED', + type: EventType.RUN_STARTED, runId: 'run-1', + threadId: 'thread-1', model: 'test', timestamp: Date.now(), } as unknown as StreamChunk, { - type: 'RUN_STARTED', + type: EventType.RUN_STARTED, runId: 'run-1', + threadId: 'thread-1', model: 'test', timestamp: Date.now(), } as unknown as StreamChunk, { - type: 'TEXT_MESSAGE_CONTENT', + type: EventType.TEXT_MESSAGE_CONTENT, messageId: 'msg-1', model: 'test', timestamp: Date.now(), @@ -566,15 +574,17 @@ describe('ChatClient', () => { content: 'Hi', } as unknown as StreamChunk, { - type: 'RUN_FINISHED', + type: EventType.RUN_FINISHED, runId: 'run-1', + threadId: 'thread-1', model: 'test', timestamp: Date.now(), finishReason: 'stop', } as unknown as StreamChunk, { - type: 'RUN_FINISHED', + type: EventType.RUN_FINISHED, runId: 'run-1', + threadId: 'thread-1', model: 'test', timestamp: Date.now(), finishReason: 'stop', @@ -597,13 +607,14 @@ describe('ChatClient', () => { it('should handle interleaved multi-run events from durable subscription', async () => { const chunks: Array = [ { - type: 'RUN_STARTED', + type: EventType.RUN_STARTED, runId: 'run-1', + threadId: 'thread-1', model: 'test', timestamp: Date.now(), } as unknown as StreamChunk, { - type: 'TEXT_MESSAGE_CONTENT', + type: EventType.TEXT_MESSAGE_CONTENT, messageId: 'msg-1', model: 'test', timestamp: Date.now(), @@ -611,8 +622,9 @@ describe('ChatClient', () => { content: 'A', } as unknown as StreamChunk, { - type: 'RUN_FINISHED', + type: EventType.RUN_FINISHED, runId: 'run-1', + threadId: 'thread-1', model: 'test', timestamp: Date.now(), finishReason: 'stop', @@ -671,14 +683,16 @@ describe('ChatClient', () => { // Simulate two concurrent runs starting chunks.push( { - type: 'RUN_STARTED', + type: EventType.RUN_STARTED, runId: 'run-1', + threadId: 'thread-1', model: 'test', timestamp: Date.now(), } as unknown as StreamChunk, { - type: 'RUN_STARTED', + type: EventType.RUN_STARTED, runId: 'run-2', + threadId: 'thread-1', model: 'test', timestamp: Date.now(), } as unknown as StreamChunk, @@ -690,8 +704,9 @@ describe('ChatClient', () => { // First run finishes — should still be generating because run-2 is active chunks.push({ - type: 'RUN_FINISHED', + type: EventType.RUN_FINISHED, runId: 'run-1', + threadId: 'thread-1', model: 'test', timestamp: Date.now(), finishReason: 'stop', @@ -703,8 +718,9 @@ describe('ChatClient', () => { // Second run finishes — now should be false chunks.push({ - type: 'RUN_FINISHED', + type: EventType.RUN_FINISHED, runId: 'run-2', + threadId: 'thread-1', model: 'test', timestamp: Date.now(), finishReason: 'stop', @@ -756,14 +772,16 @@ describe('ChatClient', () => { // Two runs active chunks.push( { - type: 'RUN_STARTED', + type: EventType.RUN_STARTED, runId: 'run-1', + threadId: 'thread-1', model: 'test', timestamp: Date.now(), } as unknown as StreamChunk, { - type: 'RUN_STARTED', + type: EventType.RUN_STARTED, runId: 'run-2', + threadId: 'thread-1', model: 'test', timestamp: Date.now(), } as unknown as StreamChunk, @@ -775,7 +793,8 @@ describe('ChatClient', () => { // Session-level error without runId clears everything chunks.push({ - type: 'RUN_ERROR', + type: 
EventType.RUN_ERROR, + message: 'session crashed', model: 'test', timestamp: Date.now(), error: { message: 'session crashed' }, @@ -795,12 +814,13 @@ describe('ChatClient', () => { subscribe: async function* (_signal?: AbortSignal) { if (!yieldedStart) { yieldedStart = true - yield asChunk({ - type: 'RUN_STARTED' as const, + yield { + type: EventType.RUN_STARTED as const, runId: 'run-1', + threadId: 'thread-1', model: 'test', timestamp: Date.now(), - }) + } satisfies StreamChunk await new Promise((resolve) => setTimeout(resolve, 10)) } throw new Error('subscription failed') @@ -1349,7 +1369,7 @@ describe('ChatClient', () => { const noTerminalAdapter = createMockConnectionAdapter({ chunks: [ { - type: 'TEXT_MESSAGE_CONTENT', + type: EventType.TEXT_MESSAGE_CONTENT, messageId: 'msg-1', model: 'test', timestamp: Date.now(), @@ -1991,7 +2011,7 @@ describe('ChatClient', () => { // Yield the tool call and approval request const preChunks: Array = [ { - type: 'TOOL_CALL_START', + type: EventType.TOOL_CALL_START, toolCallId: 'tc-2', toolName: 'dangerous_tool_2', model: 'test', @@ -1999,21 +2019,21 @@ describe('ChatClient', () => { index: 0, } as unknown as StreamChunk, { - type: 'TOOL_CALL_ARGS', + type: EventType.TOOL_CALL_ARGS, toolCallId: 'tc-2', model: 'test', timestamp: Date.now(), delta: '{}', } as unknown as StreamChunk, { - type: 'TOOL_CALL_END', + type: EventType.TOOL_CALL_END, toolCallId: 'tc-2', toolName: 'dangerous_tool_2', model: 'test', timestamp: Date.now(), } as unknown as StreamChunk, { - type: 'CUSTOM', + type: EventType.CUSTOM, model: 'test', timestamp: Date.now(), name: 'approval-requested', @@ -2032,13 +2052,14 @@ describe('ChatClient', () => { resolveStreamPause = resolve }) - yield asChunk({ - type: 'RUN_FINISHED' as const, + yield { + type: EventType.RUN_FINISHED as const, runId: 'run-2', + threadId: 'thread-1', model: 'test', timestamp: Date.now(), finishReason: 'tool_calls' as const, - }) + } satisfies StreamChunk } else if (streamCount === 3) { // Third stream (after second approval): final text response const chunks = createTextChunks('All done!') @@ -2134,20 +2155,21 @@ describe('ChatClient', () => { // Run A starts with text message chunks.push( { - type: 'RUN_STARTED', + type: EventType.RUN_STARTED, runId: 'run-a', + threadId: 'thread-1', model: 'test', timestamp: Date.now(), } as unknown as StreamChunk, { - type: 'TEXT_MESSAGE_START', + type: EventType.TEXT_MESSAGE_START, messageId: 'msg-a', role: 'assistant', model: 'test', timestamp: Date.now(), } as StreamChunk, { - type: 'TEXT_MESSAGE_CONTENT', + type: EventType.TEXT_MESSAGE_CONTENT, messageId: 'msg-a', model: 'test', timestamp: Date.now(), @@ -2160,20 +2182,21 @@ describe('ChatClient', () => { // Run B starts concurrently chunks.push( { - type: 'RUN_STARTED', + type: EventType.RUN_STARTED, runId: 'run-b', + threadId: 'thread-1', model: 'test', timestamp: Date.now(), } as unknown as StreamChunk, { - type: 'TEXT_MESSAGE_START', + type: EventType.TEXT_MESSAGE_START, messageId: 'msg-b', role: 'assistant', model: 'test', timestamp: Date.now(), } as StreamChunk, { - type: 'TEXT_MESSAGE_CONTENT', + type: EventType.TEXT_MESSAGE_CONTENT, messageId: 'msg-b', model: 'test', timestamp: Date.now(), @@ -2185,8 +2208,9 @@ describe('ChatClient', () => { // Run B finishes — Run A should still be active chunks.push({ - type: 'RUN_FINISHED', + type: EventType.RUN_FINISHED, runId: 'run-b', + threadId: 'thread-1', model: 'test', timestamp: Date.now(), finishReason: 'stop', @@ -2196,7 +2220,7 @@ describe('ChatClient', () => { // Run 
A continues streaming chunks.push({ - type: 'TEXT_MESSAGE_CONTENT', + type: EventType.TEXT_MESSAGE_CONTENT, messageId: 'msg-a', model: 'test', timestamp: Date.now(), @@ -2224,8 +2248,9 @@ describe('ChatClient', () => { // Finish run A chunks.push({ - type: 'RUN_FINISHED', + type: EventType.RUN_FINISHED, runId: 'run-a', + threadId: 'thread-1', model: 'test', timestamp: Date.now(), finishReason: 'stop', @@ -2289,21 +2314,23 @@ describe('ChatClient', () => { // Resumed content for in-progress message (no TEXT_MESSAGE_START) chunks.push( { - type: 'RUN_STARTED', + type: EventType.RUN_STARTED, runId: 'run-1', + threadId: 'thread-1', model: 'test', timestamp: Date.now(), } as unknown as StreamChunk, { - type: 'TEXT_MESSAGE_CONTENT', + type: EventType.TEXT_MESSAGE_CONTENT, messageId: 'asst-1', model: 'test', timestamp: Date.now(), delta: 'time...', } as StreamChunk, { - type: 'RUN_FINISHED', + type: EventType.RUN_FINISHED, runId: 'run-1', + threadId: 'thread-1', model: 'test', timestamp: Date.now(), finishReason: 'stop', diff --git a/packages/typescript/ai-client/tests/connection-adapters.test.ts b/packages/typescript/ai-client/tests/connection-adapters.test.ts index 60c36763a..263f3600d 100644 --- a/packages/typescript/ai-client/tests/connection-adapters.test.ts +++ b/packages/typescript/ai-client/tests/connection-adapters.test.ts @@ -1,4 +1,5 @@ import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest' +import { EventType } from '@tanstack/ai' import { fetchHttpStream, fetchServerSentEvents, @@ -8,10 +9,6 @@ import { } from '../src/connection-adapters' import type { StreamChunk } from '@tanstack/ai' -/** Cast an event object to StreamChunk for type compatibility with EventType enum. */ -const asChunk = (chunk: Record) => - chunk as unknown as StreamChunk - describe('connection-adapters', () => { let originalFetch: typeof fetch let fetchMock: ReturnType @@ -63,7 +60,7 @@ describe('connection-adapters', () => { expect(chunks).toHaveLength(1) expect(chunks[0]).toMatchObject({ - type: 'TEXT_MESSAGE_CONTENT', + type: EventType.TEXT_MESSAGE_CONTENT, messageId: 'msg-1', delta: 'Hello', }) @@ -789,14 +786,14 @@ describe('connection-adapters', () => { describe('stream', () => { it('should delegate to stream factory', async () => { const streamFactory = vi.fn().mockImplementation(function* () { - yield asChunk({ - type: 'TEXT_MESSAGE_CONTENT', + yield { + type: EventType.TEXT_MESSAGE_CONTENT, messageId: 'msg-1', model: 'test', timestamp: Date.now(), delta: 'Hello', content: 'Hello', - }) + } satisfies StreamChunk }) const adapter = stream(streamFactory) @@ -814,13 +811,14 @@ describe('connection-adapters', () => { it('should pass data to stream factory', async () => { const streamFactory = vi.fn().mockImplementation(function* () { - yield asChunk({ - type: 'RUN_FINISHED', + yield { + type: EventType.RUN_FINISHED, runId: 'run-1', + threadId: 'thread-1', model: 'test', timestamp: Date.now(), finishReason: 'stop', - }) + } satisfies StreamChunk }) const adapter = stream(streamFactory) @@ -874,14 +872,14 @@ describe('connection-adapters', () => { it('should synthesize RUN_FINISHED when wrapped connect stream has no terminal event', async () => { const base = stream(async function* () { - yield asChunk({ - type: 'TEXT_MESSAGE_CONTENT', + yield { + type: EventType.TEXT_MESSAGE_CONTENT, messageId: 'msg-1', model: 'test', timestamp: Date.now(), delta: 'Hi', content: 'Hi', - }) + } satisfies StreamChunk }) const adapter = normalizeConnectionAdapter(base) @@ -933,13 +931,14 @@ 
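The multi-run fixtures above pin down the client-side invariant: loading state only clears once every active run has terminated, and a RUN_ERROR without a `runId` is treated as session-level. A sketch of that bookkeeping (this mirrors the tests' expectations, not the actual `ChatClient` internals):

    import { EventType } from '@tanstack/ai'
    import type { StreamChunk } from '@tanstack/ai'

    class RunTracker {
      private active = new Set<string>()

      /** Apply a chunk and return the resulting isLoading value. */
      handle(chunk: StreamChunk): boolean {
        if (chunk.type === EventType.RUN_STARTED) {
          this.active.add(chunk.runId)
        } else if (chunk.type === EventType.RUN_FINISHED) {
          this.active.delete(chunk.runId)
        } else if (chunk.type === EventType.RUN_ERROR) {
          if (chunk.runId) this.active.delete(chunk.runId)
          else this.active.clear() // session-level error clears all runs
        }
        return this.active.size > 0
      }
    }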
describe('connection-adapters', () => { it('should not synthesize duplicate RUN_ERROR when stream already emitted one before throwing', async () => { const base = stream(async function* () { - yield asChunk({ - type: 'RUN_ERROR', + yield { + type: EventType.RUN_ERROR, + message: 'already failed', timestamp: Date.now(), error: { message: 'already failed', }, - }) + } satisfies StreamChunk throw new Error('connect exploded') }) @@ -972,14 +971,14 @@ describe('connection-adapters', () => { describe('rpcStream', () => { it('should delegate to RPC call', async () => { const rpcCall = vi.fn().mockImplementation(function* () { - yield asChunk({ - type: 'TEXT_MESSAGE_CONTENT', + yield { + type: EventType.TEXT_MESSAGE_CONTENT, messageId: 'msg-1', model: 'test', timestamp: Date.now(), delta: 'Hello', content: 'Hello', - }) + } satisfies StreamChunk }) const adapter = rpcStream(rpcCall) @@ -994,20 +993,21 @@ describe('connection-adapters', () => { expect(rpcCall).toHaveBeenCalled() expect(chunks).toHaveLength(1) expect(chunks[0]).toMatchObject({ - type: 'TEXT_MESSAGE_CONTENT', + type: EventType.TEXT_MESSAGE_CONTENT, delta: 'Hello', }) }) it('should pass messages and data to RPC call', async () => { const rpcCall = vi.fn().mockImplementation(function* () { - yield asChunk({ - type: 'RUN_FINISHED', + yield { + type: EventType.RUN_FINISHED, runId: 'run-1', + threadId: 'thread-1', model: 'test', timestamp: Date.now(), finishReason: 'stop', - }) + } satisfies StreamChunk }) const adapter = rpcStream(rpcCall) diff --git a/packages/typescript/ai-client/tests/generation-client.test.ts b/packages/typescript/ai-client/tests/generation-client.test.ts index e17a09f23..4ce69353c 100644 --- a/packages/typescript/ai-client/tests/generation-client.test.ts +++ b/packages/typescript/ai-client/tests/generation-client.test.ts @@ -1,12 +1,9 @@ import { describe, it, expect, vi } from 'vitest' +import { EventType } from '@tanstack/ai' import { GenerationClient } from '../src/generation-client' import type { StreamChunk } from '@tanstack/ai' import type { ConnectConnectionAdapter } from '../src/connection-adapters' -/** Cast an event object to StreamChunk for type compatibility with EventType enum. 
*/ -const asChunk = (chunk: Record) => - chunk as unknown as StreamChunk - // Helper to create a mock connect-based adapter from StreamChunks function createMockConnection( chunks: Array, @@ -133,19 +130,25 @@ describe('GenerationClient', () => { const onResult = vi.fn() const connection = createMockConnection([ - asChunk({ type: 'RUN_STARTED', runId: 'run-1', timestamp: Date.now() }), - asChunk({ - type: 'CUSTOM', + { + type: EventType.RUN_STARTED, + runId: 'run-1', + threadId: 'thread-1', + timestamp: Date.now(), + } satisfies StreamChunk, + { + type: EventType.CUSTOM, name: 'generation:result', value: mockResult, timestamp: Date.now(), - }), - asChunk({ - type: 'RUN_FINISHED', + } satisfies StreamChunk, + { + type: EventType.RUN_FINISHED, runId: 'run-1', + threadId: 'thread-1', finishReason: 'stop', timestamp: Date.now(), - }), + } satisfies StreamChunk, ]) const client = new GenerationClient({ @@ -164,13 +167,19 @@ describe('GenerationClient', () => { const onError = vi.fn() const connection = createMockConnection([ - asChunk({ type: 'RUN_STARTED', runId: 'run-1', timestamp: Date.now() }), - asChunk({ - type: 'RUN_ERROR', + { + type: EventType.RUN_STARTED, + runId: 'run-1', + threadId: 'thread-1', + timestamp: Date.now(), + } satisfies StreamChunk, + { + type: EventType.RUN_ERROR, + message: 'Generation failed', runId: 'run-1', error: { message: 'Generation failed' }, timestamp: Date.now(), - }), + } satisfies StreamChunk, ]) const client = new GenerationClient({ @@ -189,25 +198,31 @@ describe('GenerationClient', () => { const onProgress = vi.fn() const connection = createMockConnection([ - asChunk({ type: 'RUN_STARTED', runId: 'run-1', timestamp: Date.now() }), - asChunk({ - type: 'CUSTOM', + { + type: EventType.RUN_STARTED, + runId: 'run-1', + threadId: 'thread-1', + timestamp: Date.now(), + } satisfies StreamChunk, + { + type: EventType.CUSTOM, name: 'generation:progress', value: { progress: 50, message: 'Halfway' }, timestamp: Date.now(), - }), - asChunk({ - type: 'CUSTOM', + } satisfies StreamChunk, + { + type: EventType.CUSTOM, name: 'generation:result', value: { id: '1' }, timestamp: Date.now(), - }), - asChunk({ - type: 'RUN_FINISHED', + } satisfies StreamChunk, + { + type: EventType.RUN_FINISHED, runId: 'run-1', + threadId: 'thread-1', finishReason: 'stop', timestamp: Date.now(), - }), + } satisfies StreamChunk, ]) const client = new GenerationClient({ @@ -225,19 +240,21 @@ describe('GenerationClient', () => { const chunks: Array = [ { - type: 'RUN_STARTED', + type: EventType.RUN_STARTED, runId: 'run-1', + threadId: 'thread-1', timestamp: Date.now(), } as unknown as StreamChunk, { - type: 'CUSTOM', + type: EventType.CUSTOM, name: 'generation:result', value: { id: '1' }, timestamp: Date.now(), } as unknown as StreamChunk, { - type: 'RUN_FINISHED', + type: EventType.RUN_FINISHED, runId: 'run-1', + threadId: 'thread-1', finishReason: 'stop', timestamp: Date.now(), } as unknown as StreamChunk, @@ -257,18 +274,19 @@ describe('GenerationClient', () => { it('should pass body and input as data to connection', async () => { const connectSpy = vi.fn(async function* () { - yield asChunk({ - type: 'CUSTOM' as const, + yield { + type: EventType.CUSTOM as const, name: 'generation:result', value: { id: '1' }, timestamp: Date.now(), - }) - yield asChunk({ - type: 'RUN_FINISHED' as const, + } satisfies StreamChunk + yield { + type: EventType.RUN_FINISHED as const, runId: 'run-1', + threadId: 'thread-1', finishReason: 'stop' as const, timestamp: Date.now(), - }) + } satisfies StreamChunk }) 
const connection: ConnectConnectionAdapter = { @@ -334,12 +352,13 @@ describe('GenerationClient', () => { describe('updateOptions()', () => { it('should update body without recreating client', async () => { const connectSpy = vi.fn(async function* () { - yield asChunk({ - type: 'RUN_FINISHED' as const, + yield { + type: EventType.RUN_FINISHED as const, runId: 'run-1', + threadId: 'thread-1', finishReason: 'stop' as const, timestamp: Date.now(), - }) + } satisfies StreamChunk }) const connection: ConnectConnectionAdapter = { connect: connectSpy } @@ -366,23 +385,24 @@ describe('GenerationClient', () => { const connection: ConnectConnectionAdapter = { async *connect(_msgs, _data, signal) { - yield asChunk({ - type: 'RUN_STARTED' as const, + yield { + type: EventType.RUN_STARTED as const, runId: 'run-1', + threadId: 'thread-1', timestamp: Date.now(), - }) + } satisfies StreamChunk // Wait until abort is triggered await new Promise((resolve) => { signal?.addEventListener('abort', () => resolve()) }) // Adapter honors abort signal and stops yielding if (signal?.aborted) return - yield asChunk({ - type: 'CUSTOM' as const, + yield { + type: EventType.CUSTOM as const, name: 'generation:result', value: { id: '1' }, timestamp: Date.now(), - }) + } satisfies StreamChunk }, } @@ -464,13 +484,19 @@ describe('GenerationClient', () => { const onResult = vi.fn() const connection = createMockConnection([ - asChunk({ type: 'RUN_STARTED', runId: 'run-1', timestamp: Date.now() }), - asChunk({ - type: 'RUN_FINISHED', + { + type: EventType.RUN_STARTED, + runId: 'run-1', + threadId: 'thread-1', + timestamp: Date.now(), + } satisfies StreamChunk, + { + type: EventType.RUN_FINISHED, runId: 'run-1', + threadId: 'thread-1', finishReason: 'stop', timestamp: Date.now(), - }), + } satisfies StreamChunk, ]) const client = new GenerationClient({ @@ -489,19 +515,25 @@ describe('GenerationClient', () => { const onChunk = vi.fn() const connection = createMockConnection([ - asChunk({ type: 'RUN_STARTED', runId: 'run-1', timestamp: Date.now() }), - asChunk({ - type: 'CUSTOM', + { + type: EventType.RUN_STARTED, + runId: 'run-1', + threadId: 'thread-1', + timestamp: Date.now(), + } satisfies StreamChunk, + { + type: EventType.CUSTOM, name: 'unknown:event', value: { foo: 'bar' }, timestamp: Date.now(), - }), - asChunk({ - type: 'RUN_FINISHED', + } satisfies StreamChunk, + { + type: EventType.RUN_FINISHED, runId: 'run-1', + threadId: 'thread-1', finishReason: 'stop', timestamp: Date.now(), - }), + } satisfies StreamChunk, ]) const client = new GenerationClient({ @@ -594,19 +626,25 @@ describe('GenerationClient', () => { it('should transform result from stream CUSTOM event', async () => { const connection = createMockConnection([ - asChunk({ type: 'RUN_STARTED', runId: 'run-1', timestamp: Date.now() }), - asChunk({ - type: 'CUSTOM', + { + type: EventType.RUN_STARTED, + runId: 'run-1', + threadId: 'thread-1', + timestamp: Date.now(), + } satisfies StreamChunk, + { + type: EventType.CUSTOM, name: 'generation:result', value: { id: '1', images: [] }, timestamp: Date.now(), - }), - asChunk({ - type: 'RUN_FINISHED', + } satisfies StreamChunk, + { + type: EventType.RUN_FINISHED, runId: 'run-1', + threadId: 'thread-1', finishReason: 'stop', timestamp: Date.now(), - }), + } satisfies StreamChunk, ]) const client = new GenerationClient< @@ -697,19 +735,21 @@ describe('GenerationClient', () => { const response = createSSEResponse([ JSON.stringify({ - type: 'RUN_STARTED', + type: EventType.RUN_STARTED, runId: 'run-1', + threadId: 'thread-1', 
timestamp: 100, }), JSON.stringify({ - type: 'CUSTOM', + type: EventType.CUSTOM, name: 'generation:result', value: mockResult, timestamp: 200, }), JSON.stringify({ - type: 'RUN_FINISHED', + type: EventType.RUN_FINISHED, runId: 'run-1', + threadId: 'thread-1', finishReason: 'stop', timestamp: 300, }), @@ -732,12 +772,14 @@ describe('GenerationClient', () => { const response = createSSEResponse([ JSON.stringify({ - type: 'RUN_STARTED', + type: EventType.RUN_STARTED, runId: 'run-1', + threadId: 'thread-1', timestamp: 100, }), JSON.stringify({ - type: 'RUN_ERROR', + type: EventType.RUN_ERROR, + message: 'Generation failed', runId: 'run-1', error: { message: 'Generation failed' }, timestamp: 200, @@ -761,19 +803,21 @@ describe('GenerationClient', () => { const response = createSSEResponse([ JSON.stringify({ - type: 'RUN_STARTED', + type: EventType.RUN_STARTED, runId: 'run-1', + threadId: 'thread-1', timestamp: 100, }), JSON.stringify({ - type: 'CUSTOM', + type: EventType.CUSTOM, name: 'generation:result', value: { id: '1' }, timestamp: 200, }), JSON.stringify({ - type: 'RUN_FINISHED', + type: EventType.RUN_FINISHED, runId: 'run-1', + threadId: 'thread-1', finishReason: 'stop', timestamp: 300, }), @@ -794,25 +838,27 @@ describe('GenerationClient', () => { const response = createSSEResponse([ JSON.stringify({ - type: 'RUN_STARTED', + type: EventType.RUN_STARTED, runId: 'run-1', + threadId: 'thread-1', timestamp: 100, }), JSON.stringify({ - type: 'CUSTOM', + type: EventType.CUSTOM, name: 'generation:progress', value: { progress: 50, message: 'Halfway' }, timestamp: 200, }), JSON.stringify({ - type: 'CUSTOM', + type: EventType.CUSTOM, name: 'generation:result', value: { id: '1' }, timestamp: 300, }), JSON.stringify({ - type: 'RUN_FINISHED', + type: EventType.RUN_FINISHED, runId: 'run-1', + threadId: 'thread-1', finishReason: 'stop', timestamp: 400, }), @@ -852,14 +898,15 @@ describe('GenerationClient', () => { const fetcherSpy = vi.fn(async (_input: { prompt: string }) => { return createSSEResponse([ JSON.stringify({ - type: 'CUSTOM', + type: EventType.CUSTOM, name: 'generation:result', value: { id: '1' }, timestamp: 100, }), JSON.stringify({ - type: 'RUN_FINISHED', + type: EventType.RUN_FINISHED, runId: 'run-1', + threadId: 'thread-1', finishReason: 'stop', timestamp: 200, }), diff --git a/packages/typescript/ai-client/tests/video-generation-client.test.ts b/packages/typescript/ai-client/tests/video-generation-client.test.ts index 7118dbf1b..a0aa267f0 100644 --- a/packages/typescript/ai-client/tests/video-generation-client.test.ts +++ b/packages/typescript/ai-client/tests/video-generation-client.test.ts @@ -1,12 +1,9 @@ import { describe, it, expect, vi } from 'vitest' +import { EventType } from '@tanstack/ai' import { VideoGenerationClient } from '../src/video-generation-client' import type { StreamChunk } from '@tanstack/ai' import type { ConnectConnectionAdapter } from '../src/connection-adapters' -/** Cast an event object to StreamChunk for type compatibility with EventType enum. 
*/ -const asChunk = (chunk: Record) => - chunk as unknown as StreamChunk - // Helper to create a mock connect-based adapter from StreamChunks function createMockConnection( chunks: Array, @@ -145,15 +142,20 @@ describe('VideoGenerationClient', () => { const onStatusUpdate = vi.fn() const connection = createMockConnection([ - asChunk({ type: 'RUN_STARTED', runId: 'run-1', timestamp: Date.now() }), - asChunk({ - type: 'CUSTOM', + { + type: EventType.RUN_STARTED, + runId: 'run-1', + threadId: 'thread-1', + timestamp: Date.now(), + } satisfies StreamChunk, + { + type: EventType.CUSTOM, name: 'video:job:created', value: { jobId: 'job-123' }, timestamp: Date.now(), - }), - asChunk({ - type: 'CUSTOM', + } satisfies StreamChunk, + { + type: EventType.CUSTOM, name: 'video:status', value: { jobId: 'job-123', @@ -161,9 +163,9 @@ describe('VideoGenerationClient', () => { progress: 50, }, timestamp: Date.now(), - }), - asChunk({ - type: 'CUSTOM', + } satisfies StreamChunk, + { + type: EventType.CUSTOM, name: 'video:status', value: { jobId: 'job-123', @@ -171,9 +173,9 @@ describe('VideoGenerationClient', () => { progress: 100, }, timestamp: Date.now(), - }), - asChunk({ - type: 'CUSTOM', + } satisfies StreamChunk, + { + type: EventType.CUSTOM, name: 'generation:result', value: { jobId: 'job-123', @@ -181,13 +183,14 @@ describe('VideoGenerationClient', () => { url: 'https://example.com/video.mp4', }, timestamp: Date.now(), - }), - asChunk({ - type: 'RUN_FINISHED', + } satisfies StreamChunk, + { + type: EventType.RUN_FINISHED, runId: 'run-1', + threadId: 'thread-1', finishReason: 'stop', timestamp: Date.now(), - }), + } satisfies StreamChunk, ]) const client = new VideoGenerationClient({ @@ -215,9 +218,14 @@ describe('VideoGenerationClient', () => { const onVideoStatusChange = vi.fn() const connection = createMockConnection([ - asChunk({ type: 'RUN_STARTED', runId: 'run-1', timestamp: Date.now() }), - asChunk({ - type: 'CUSTOM', + { + type: EventType.RUN_STARTED, + runId: 'run-1', + threadId: 'thread-1', + timestamp: Date.now(), + } satisfies StreamChunk, + { + type: EventType.CUSTOM, name: 'video:status', value: { jobId: 'job-1', @@ -225,9 +233,9 @@ describe('VideoGenerationClient', () => { progress: 25, }, timestamp: Date.now(), - }), - asChunk({ - type: 'CUSTOM', + } satisfies StreamChunk, + { + type: EventType.CUSTOM, name: 'generation:result', value: { jobId: 'job-1', @@ -235,13 +243,14 @@ describe('VideoGenerationClient', () => { url: 'https://example.com/video.mp4', }, timestamp: Date.now(), - }), - asChunk({ - type: 'RUN_FINISHED', + } satisfies StreamChunk, + { + type: EventType.RUN_FINISHED, runId: 'run-1', + threadId: 'thread-1', finishReason: 'stop', timestamp: Date.now(), - }), + } satisfies StreamChunk, ]) const client = new VideoGenerationClient({ @@ -268,13 +277,19 @@ describe('VideoGenerationClient', () => { const onError = vi.fn() const connection = createMockConnection([ - asChunk({ type: 'RUN_STARTED', runId: 'run-1', timestamp: Date.now() }), - asChunk({ - type: 'RUN_ERROR', + { + type: EventType.RUN_STARTED, + runId: 'run-1', + threadId: 'thread-1', + timestamp: Date.now(), + } satisfies StreamChunk, + { + type: EventType.RUN_ERROR, + message: 'Video generation failed', runId: 'run-1', error: { message: 'Video generation failed' }, timestamp: Date.now(), - }), + } satisfies StreamChunk, ]) const client = new VideoGenerationClient({ @@ -293,9 +308,14 @@ describe('VideoGenerationClient', () => { const onProgress = vi.fn() const connection = createMockConnection([ - asChunk({ type: 
'RUN_STARTED', runId: 'run-1', timestamp: Date.now() }), - asChunk({ - type: 'CUSTOM', + { + type: EventType.RUN_STARTED, + runId: 'run-1', + threadId: 'thread-1', + timestamp: Date.now(), + } satisfies StreamChunk, + { + type: EventType.CUSTOM, name: 'video:status', value: { jobId: 'job-1', @@ -303,13 +323,14 @@ describe('VideoGenerationClient', () => { progress: 50, }, timestamp: Date.now(), - }), - asChunk({ - type: 'RUN_FINISHED', + } satisfies StreamChunk, + { + type: EventType.RUN_FINISHED, runId: 'run-1', + threadId: 'thread-1', finishReason: 'stop', timestamp: Date.now(), - }), + } satisfies StreamChunk, ]) const client = new VideoGenerationClient({ @@ -326,19 +347,25 @@ describe('VideoGenerationClient', () => { const onProgress = vi.fn() const connection = createMockConnection([ - asChunk({ type: 'RUN_STARTED', runId: 'run-1', timestamp: Date.now() }), - asChunk({ - type: 'CUSTOM', + { + type: EventType.RUN_STARTED, + runId: 'run-1', + threadId: 'thread-1', + timestamp: Date.now(), + } satisfies StreamChunk, + { + type: EventType.CUSTOM, name: 'generation:progress', value: { progress: 75, message: 'Almost done' }, timestamp: Date.now(), - }), - asChunk({ - type: 'RUN_FINISHED', + } satisfies StreamChunk, + { + type: EventType.RUN_FINISHED, runId: 'run-1', + threadId: 'thread-1', finishReason: 'stop', timestamp: Date.now(), - }), + } satisfies StreamChunk, ]) const client = new VideoGenerationClient({ @@ -355,9 +382,14 @@ describe('VideoGenerationClient', () => { const onChunk = vi.fn() const connection = createMockConnection([ - asChunk({ type: 'RUN_STARTED', runId: 'run-1', timestamp: Date.now() }), - asChunk({ - type: 'CUSTOM', + { + type: EventType.RUN_STARTED, + runId: 'run-1', + threadId: 'thread-1', + timestamp: Date.now(), + } satisfies StreamChunk, + { + type: EventType.CUSTOM, name: 'generation:result', value: { jobId: 'job-1', @@ -365,13 +397,14 @@ describe('VideoGenerationClient', () => { url: 'https://example.com/video.mp4', }, timestamp: Date.now(), - }), - asChunk({ - type: 'RUN_FINISHED', + } satisfies StreamChunk, + { + type: EventType.RUN_FINISHED, runId: 'run-1', + threadId: 'thread-1', finishReason: 'stop', timestamp: Date.now(), - }), + } satisfies StreamChunk, ]) const client = new VideoGenerationClient({ @@ -386,12 +419,13 @@ describe('VideoGenerationClient', () => { it('should pass body and input as data to connection', async () => { const connectSpy = vi.fn(async function* () { - yield asChunk({ - type: 'RUN_FINISHED' as const, + yield { + type: EventType.RUN_FINISHED as const, runId: 'run-1', + threadId: 'thread-1', finishReason: 'stop' as const, timestamp: Date.now(), - }) + } satisfies StreamChunk }) const connection: ConnectConnectionAdapter = { connect: connectSpy } @@ -445,15 +479,20 @@ describe('VideoGenerationClient', () => { const onVideoStatusChange = vi.fn() const connection = createMockConnection([ - asChunk({ type: 'RUN_STARTED', runId: 'run-1', timestamp: Date.now() }), - asChunk({ - type: 'CUSTOM', + { + type: EventType.RUN_STARTED, + runId: 'run-1', + threadId: 'thread-1', + timestamp: Date.now(), + } satisfies StreamChunk, + { + type: EventType.CUSTOM, name: 'video:job:created', value: { jobId: 'job-123' }, timestamp: Date.now(), - }), - asChunk({ - type: 'CUSTOM', + } satisfies StreamChunk, + { + type: EventType.CUSTOM, name: 'video:status', value: { jobId: 'job-123', @@ -461,9 +500,9 @@ describe('VideoGenerationClient', () => { progress: 50, }, timestamp: Date.now(), - }), - asChunk({ - type: 'CUSTOM', + } satisfies StreamChunk, + { + 
type: EventType.CUSTOM, name: 'generation:result', value: { jobId: 'job-123', @@ -471,13 +510,14 @@ describe('VideoGenerationClient', () => { url: 'https://example.com/video.mp4', }, timestamp: Date.now(), - }), - asChunk({ - type: 'RUN_FINISHED', + } satisfies StreamChunk, + { + type: EventType.RUN_FINISHED, runId: 'run-1', + threadId: 'thread-1', finishReason: 'stop', timestamp: Date.now(), - }), + } satisfies StreamChunk, ]) const client = new VideoGenerationClient({ @@ -503,12 +543,13 @@ describe('VideoGenerationClient', () => { describe('updateOptions()', () => { it('should update body without recreating client', async () => { const connectSpy = vi.fn(async function* () { - yield asChunk({ - type: 'RUN_FINISHED' as const, + yield { + type: EventType.RUN_FINISHED as const, runId: 'run-1', + threadId: 'thread-1', finishReason: 'stop' as const, timestamp: Date.now(), - }) + } satisfies StreamChunk }) const connection: ConnectConnectionAdapter = { connect: connectSpy } @@ -536,25 +577,26 @@ describe('VideoGenerationClient', () => { const connection: ConnectConnectionAdapter = { async *connect(_msgs, _data, signal) { - yield asChunk({ - type: 'RUN_STARTED' as const, + yield { + type: EventType.RUN_STARTED as const, runId: 'run-1', + threadId: 'thread-1', timestamp: Date.now(), - }) - yield asChunk({ - type: 'CUSTOM' as const, + } satisfies StreamChunk + yield { + type: EventType.CUSTOM as const, name: 'video:job:created', value: { jobId: 'job-123' }, timestamp: Date.now(), - }) + } satisfies StreamChunk // Wait until abort is triggered await new Promise((resolve) => { signal?.addEventListener('abort', () => resolve()) }) // Adapter honors abort signal and stops yielding if (signal?.aborted) return - yield asChunk({ - type: 'CUSTOM' as const, + yield { + type: EventType.CUSTOM as const, name: 'generation:result', value: { jobId: 'job-123', @@ -562,7 +604,7 @@ describe('VideoGenerationClient', () => { url: 'https://example.com/video.mp4', }, timestamp: Date.now(), - }) + } satisfies StreamChunk }, } diff --git a/packages/typescript/ai-gemini/src/adapters/summarize.ts b/packages/typescript/ai-gemini/src/adapters/summarize.ts index e5b3330b5..e82b3ec29 100644 --- a/packages/typescript/ai-gemini/src/adapters/summarize.ts +++ b/packages/typescript/ai-gemini/src/adapters/summarize.ts @@ -1,4 +1,5 @@ import { FinishReason } from '@google/genai' +import { EventType } from '@tanstack/ai' import { createGeminiClient, generateId, @@ -13,10 +14,6 @@ import type { SummarizationResult, } from '@tanstack/ai' -/** Cast an event object to StreamChunk. 
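As with the Anthropic summarize adapter earlier and the Gemini and Ollama ones below, summarize streams now open with RUN_STARTED (fresh `runId` plus `threadId`) and close with RUN_FINISHED, while the TEXT_MESSAGE_START/END framing stays out of scope per the follow-up note above. The lifecycle, sketched end to end with prompt and client details elided:

    import { EventType } from '@tanstack/ai'
    import type { StreamChunk } from '@tanstack/ai'

    // Emits: RUN_STARTED, TEXT_MESSAGE_CONTENT*, RUN_FINISHED.
    // (No TEXT_MESSAGE_START/END yet; that is the flagged AG-UI gap.)
    async function* summarizeLifecycle(
      parts: AsyncIterable<string>,
      ids: { runId: string; threadId: string },
      model: string,
    ): AsyncGenerator<StreamChunk> {
      yield {
        type: EventType.RUN_STARTED,
        runId: ids.runId,
        threadId: ids.threadId,
        model,
        timestamp: Date.now(),
      } satisfies StreamChunk

      let content = ''
      for await (const delta of parts) {
        content += delta
        yield {
          type: EventType.TEXT_MESSAGE_CONTENT,
          messageId: ids.runId, // summarize adapters reuse the run id
          model,
          timestamp: Date.now(),
          delta,
          content,
        } satisfies StreamChunk
      }

      yield {
        type: EventType.RUN_FINISHED,
        runId: ids.runId,
        threadId: ids.threadId,
        model,
        timestamp: Date.now(),
        finishReason: 'stop',
      } satisfies StreamChunk
    }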
*/ -const asChunk = (chunk: Record) => - chunk as unknown as StreamChunk - /** * Configuration for Gemini summarize adapter */ @@ -142,6 +139,7 @@ export class GeminiSummarizeAdapter< const { logger } = options const model = options.model const id = generateId('sum') + const threadId = generateId('thread') let accumulatedContent = '' let inputTokens = 0 let outputTokens = 0 @@ -161,6 +159,14 @@ export class GeminiSummarizeAdapter< }) try { + yield { + type: EventType.RUN_STARTED, + runId: id, + threadId, + model, + timestamp: Date.now(), + } satisfies StreamChunk + const result = await this.client.models.generateContentStream({ model, contents: [ @@ -189,14 +195,14 @@ export class GeminiSummarizeAdapter< for (const part of chunk.candidates[0].content.parts) { if (part.text) { accumulatedContent += part.text - yield asChunk({ - type: 'TEXT_MESSAGE_CONTENT', + yield { + type: EventType.TEXT_MESSAGE_CONTENT, messageId: id, model, timestamp: Date.now(), delta: part.text, content: accumulatedContent, - }) + } satisfies StreamChunk } } } @@ -208,9 +214,10 @@ export class GeminiSummarizeAdapter< finishReason === FinishReason.MAX_TOKENS || finishReason === FinishReason.SAFETY ) { - yield asChunk({ - type: 'RUN_FINISHED', + yield { + type: EventType.RUN_FINISHED, runId: id, + threadId, model, timestamp: Date.now(), finishReason: @@ -224,7 +231,7 @@ export class GeminiSummarizeAdapter< completionTokens: outputTokens, totalTokens: inputTokens + outputTokens, }, - }) + } satisfies StreamChunk } } } catch (error) { diff --git a/packages/typescript/ai-gemini/src/adapters/text.ts b/packages/typescript/ai-gemini/src/adapters/text.ts index ea744f456..e52a74859 100644 --- a/packages/typescript/ai-gemini/src/adapters/text.ts +++ b/packages/typescript/ai-gemini/src/adapters/text.ts @@ -1,4 +1,5 @@ import { FinishReason } from '@google/genai' +import { EventType } from '@tanstack/ai' import { BaseTextAdapter } from '@tanstack/ai/adapters' import { convertToolsToProviderFormat } from '../tools/tool-converter' import { @@ -39,11 +40,6 @@ import type { } from '../message-types' import type { GeminiClientConfig } from '../utils' -/** Cast an event object to StreamChunk. Adapters construct events with string - * literal types which are structurally compatible with the EventType enum. */ -const asChunk = (chunk: Record) => - chunk as unknown as StreamChunk - /** * Configuration for Gemini text adapter */ @@ -136,15 +132,14 @@ export class GeminiTextAdapter< yield* this.processStreamChunks(result, options, logger) } catch (error) { - const timestamp = Date.now() logger.errors('gemini.chatStream fatal', { error, source: 'gemini.chatStream', }) - yield asChunk({ - type: 'RUN_ERROR', + yield { + type: EventType.RUN_ERROR, model: options.model, - timestamp, + timestamp: Date.now(), message: error instanceof Error ? error.message @@ -155,7 +150,7 @@ export class GeminiTextAdapter< ? 
error.message : 'An unknown error occurred during the chat stream.', }, - }) + } satisfies StreamChunk } } @@ -240,7 +235,6 @@ export class GeminiTextAdapter< logger: InternalLogger, ): AsyncIterable { const model = options.model - const timestamp = Date.now() let accumulatedContent = '' let accumulatedThinking = '' const toolCallMap = new Map< @@ -271,13 +265,13 @@ export class GeminiTextAdapter< // Emit RUN_STARTED on first chunk if (!hasEmittedRunStarted) { hasEmittedRunStarted = true - yield asChunk({ - type: 'RUN_STARTED', + yield { + type: EventType.RUN_STARTED, runId, threadId, model, - timestamp, - }) + timestamp: Date.now(), + } satisfies StreamChunk } if (chunk.candidates?.[0]?.content?.parts) { @@ -293,92 +287,92 @@ export class GeminiTextAdapter< reasoningMessageId = generateId(this.name) // Spec REASONING events - yield asChunk({ - type: 'REASONING_START', + yield { + type: EventType.REASONING_START, messageId: reasoningMessageId, model, - timestamp, - }) - yield asChunk({ - type: 'REASONING_MESSAGE_START', + timestamp: Date.now(), + } satisfies StreamChunk + yield { + type: EventType.REASONING_MESSAGE_START, messageId: reasoningMessageId, role: 'reasoning' as const, model, - timestamp, - }) + timestamp: Date.now(), + } satisfies StreamChunk // Legacy STEP events (kept during transition) - yield asChunk({ - type: 'STEP_STARTED', + yield { + type: EventType.STEP_STARTED, stepName: stepId, stepId, model, - timestamp, + timestamp: Date.now(), stepType: 'thinking', - }) + } satisfies StreamChunk } accumulatedThinking += part.text // Spec REASONING content event - yield asChunk({ - type: 'REASONING_MESSAGE_CONTENT', + yield { + type: EventType.REASONING_MESSAGE_CONTENT, messageId: reasoningMessageId!, delta: part.text, model, - timestamp, - }) + timestamp: Date.now(), + } satisfies StreamChunk // Legacy STEP event - yield asChunk({ - type: 'STEP_FINISHED', + yield { + type: EventType.STEP_FINISHED, stepName: stepId || generateId(this.name), stepId: stepId || generateId(this.name), model, - timestamp, + timestamp: Date.now(), delta: part.text, content: accumulatedThinking, - }) + } satisfies StreamChunk } else if (part.text.trim()) { // Close reasoning before text starts if (reasoningMessageId && !hasClosedReasoning) { hasClosedReasoning = true - yield asChunk({ - type: 'REASONING_MESSAGE_END', + yield { + type: EventType.REASONING_MESSAGE_END, messageId: reasoningMessageId, model, - timestamp, - }) - yield asChunk({ - type: 'REASONING_END', + timestamp: Date.now(), + } satisfies StreamChunk + yield { + type: EventType.REASONING_END, messageId: reasoningMessageId, model, - timestamp, - }) + timestamp: Date.now(), + } satisfies StreamChunk } // Skip whitespace-only text parts (e.g. 
"\n" during auto-continuation) // Emit TEXT_MESSAGE_START on first text content if (!hasEmittedTextMessageStart) { hasEmittedTextMessageStart = true - yield asChunk({ - type: 'TEXT_MESSAGE_START', + yield { + type: EventType.TEXT_MESSAGE_START, messageId, model, - timestamp, + timestamp: Date.now(), role: 'assistant', - }) + } satisfies StreamChunk } accumulatedContent += part.text - yield asChunk({ - type: 'TEXT_MESSAGE_CONTENT', + yield { + type: EventType.TEXT_MESSAGE_CONTENT, messageId, model, - timestamp, + timestamp: Date.now(), delta: part.text, content: accumulatedContent, - }) + } satisfies StreamChunk } } @@ -430,31 +424,31 @@ export class GeminiTextAdapter< // Emit TOOL_CALL_START if not already started if (!toolCallData.started) { toolCallData.started = true - yield asChunk({ - type: 'TOOL_CALL_START', + yield { + type: EventType.TOOL_CALL_START, toolCallId, toolCallName: toolCallData.name, toolName: toolCallData.name, model, - timestamp, + timestamp: Date.now(), index: toolCallData.index, ...(toolCallData.thoughtSignature && { metadata: { thoughtSignature: toolCallData.thoughtSignature, } satisfies GeminiToolCallMetadata, }), - }) + } satisfies StreamChunk } // Emit TOOL_CALL_ARGS - yield asChunk({ - type: 'TOOL_CALL_ARGS', + yield { + type: EventType.TOOL_CALL_ARGS, toolCallId, model, - timestamp, + timestamp: Date.now(), delta: toolCallData.args, args: toolCallData.args, - }) + } satisfies StreamChunk } } } else if (chunk.data && chunk.data.trim()) { @@ -462,24 +456,24 @@ export class GeminiTextAdapter< // Emit TEXT_MESSAGE_START on first text content if (!hasEmittedTextMessageStart) { hasEmittedTextMessageStart = true - yield asChunk({ - type: 'TEXT_MESSAGE_START', + yield { + type: EventType.TEXT_MESSAGE_START, messageId, model, - timestamp, + timestamp: Date.now(), role: 'assistant', - }) + } satisfies StreamChunk } accumulatedContent += chunk.data - yield asChunk({ - type: 'TEXT_MESSAGE_CONTENT', + yield { + type: EventType.TEXT_MESSAGE_CONTENT, messageId, model, - timestamp, + timestamp: Date.now(), delta: chunk.data, content: accumulatedContent, - }) + } satisfies StreamChunk } if (chunk.candidates?.[0]?.finishReason) { @@ -508,15 +502,15 @@ export class GeminiTextAdapter< }) // Emit TOOL_CALL_START - yield asChunk({ - type: 'TOOL_CALL_START', + yield { + type: EventType.TOOL_CALL_START, toolCallId, toolCallName: functionCall.name || '', toolName: functionCall.name || '', model, - timestamp, + timestamp: Date.now(), index: nextToolIndex - 1, - }) + } satisfies StreamChunk // Emit TOOL_CALL_END with parsed input let parsedInput: unknown = {} @@ -531,15 +525,15 @@ export class GeminiTextAdapter< parsedInput = {} } - yield asChunk({ - type: 'TOOL_CALL_END', + yield { + type: EventType.TOOL_CALL_END, toolCallId, toolCallName: functionCall.name || '', toolName: functionCall.name || '', model, - timestamp, + timestamp: Date.now(), input: parsedInput, - }) + } satisfies StreamChunk } } } @@ -555,15 +549,15 @@ export class GeminiTextAdapter< parsedInput = {} } - yield asChunk({ - type: 'TOOL_CALL_END', + yield { + type: EventType.TOOL_CALL_END, toolCallId, toolCallName: toolCallData.name, toolName: toolCallData.name, model, - timestamp, + timestamp: Date.now(), input: parsedInput, - }) + } satisfies StreamChunk } // Reset so a new TEXT_MESSAGE_START is emitted if text follows tool calls @@ -572,11 +566,11 @@ export class GeminiTextAdapter< } if (finishReason === FinishReason.MAX_TOKENS) { - yield asChunk({ - type: 'RUN_ERROR', + yield { + type: EventType.RUN_ERROR, runId, 
model, - timestamp, + timestamp: Date.now(), message: 'The response was cut off because the maximum token limit was reached.', code: 'max_tokens', @@ -585,42 +579,42 @@ export class GeminiTextAdapter< 'The response was cut off because the maximum token limit was reached.', code: 'max_tokens', }, - }) + } satisfies StreamChunk } // Close reasoning events if still open if (reasoningMessageId && !hasClosedReasoning) { hasClosedReasoning = true - yield asChunk({ - type: 'REASONING_MESSAGE_END', + yield { + type: EventType.REASONING_MESSAGE_END, messageId: reasoningMessageId, model, - timestamp, - }) - yield asChunk({ - type: 'REASONING_END', + timestamp: Date.now(), + } satisfies StreamChunk + yield { + type: EventType.REASONING_END, messageId: reasoningMessageId, model, - timestamp, - }) + timestamp: Date.now(), + } satisfies StreamChunk } // Emit TEXT_MESSAGE_END if we had text content if (hasEmittedTextMessageStart) { - yield asChunk({ - type: 'TEXT_MESSAGE_END', + yield { + type: EventType.TEXT_MESSAGE_END, messageId, model, - timestamp, - }) + timestamp: Date.now(), + } satisfies StreamChunk } - yield asChunk({ - type: 'RUN_FINISHED', + yield { + type: EventType.RUN_FINISHED, runId, threadId, model, - timestamp, + timestamp: Date.now(), finishReason: toolCallMap.size > 0 ? 'tool_calls' : 'stop', usage: chunk.usageMetadata ? { @@ -629,7 +623,7 @@ export class GeminiTextAdapter< totalTokens: chunk.usageMetadata.totalTokenCount ?? 0, } : undefined, - }) + } satisfies StreamChunk } } } diff --git a/packages/typescript/ai-groq/src/adapters/text.ts b/packages/typescript/ai-groq/src/adapters/text.ts index 4d8ba131b..6f8cec127 100644 --- a/packages/typescript/ai-groq/src/adapters/text.ts +++ b/packages/typescript/ai-groq/src/adapters/text.ts @@ -73,6 +73,7 @@ export class GroqTextAdapter< options: TextOptions, aguiState: { runId: string + threadId: string messageId: string timestamp: number hasEmittedRunStarted: boolean diff --git a/packages/typescript/ai-ollama/src/adapters/summarize.ts b/packages/typescript/ai-ollama/src/adapters/summarize.ts index 0b8407e4b..b0729c662 100644 --- a/packages/typescript/ai-ollama/src/adapters/summarize.ts +++ b/packages/typescript/ai-ollama/src/adapters/summarize.ts @@ -1,3 +1,4 @@ +import { EventType } from '@tanstack/ai' import { createOllamaClient, estimateTokens, @@ -14,10 +15,6 @@ import type { SummarizationResult, } from '@tanstack/ai' -/** Cast an event object to StreamChunk. 
*/ -const asChunk = (chunk: Record) => - chunk as unknown as StreamChunk - export type OllamaSummarizeModel = | (typeof OllamaSummarizeModels)[number] | (string & {}) @@ -128,6 +125,7 @@ export class OllamaSummarizeAdapter< const { logger } = options const model = options.model const id = generateId('sum') + const threadId = generateId('thread') const prompt = this.buildSummarizationPrompt(options) let accumulatedContent = '' @@ -138,6 +136,14 @@ export class OllamaSummarizeAdapter< }) try { + yield { + type: EventType.RUN_STARTED, + runId: id, + threadId, + model, + timestamp: Date.now(), + } satisfies StreamChunk + const stream = await this.client.generate({ model, prompt, @@ -153,22 +159,23 @@ export class OllamaSummarizeAdapter< if (chunk.response) { accumulatedContent += chunk.response - yield asChunk({ - type: 'TEXT_MESSAGE_CONTENT', + yield { + type: EventType.TEXT_MESSAGE_CONTENT, messageId: id, model: chunk.model, timestamp: Date.now(), delta: chunk.response, content: accumulatedContent, - }) + } satisfies StreamChunk } if (chunk.done) { const promptTokens = estimateTokens(prompt) const completionTokens = estimateTokens(accumulatedContent) - yield asChunk({ - type: 'RUN_FINISHED', + yield { + type: EventType.RUN_FINISHED, runId: id, + threadId, model: chunk.model, timestamp: Date.now(), finishReason: 'stop', @@ -177,7 +184,7 @@ export class OllamaSummarizeAdapter< completionTokens, totalTokens: promptTokens + completionTokens, }, - }) + } satisfies StreamChunk } } } catch (error) { diff --git a/packages/typescript/ai-ollama/src/adapters/text.ts b/packages/typescript/ai-ollama/src/adapters/text.ts index 209951569..907418330 100644 --- a/packages/typescript/ai-ollama/src/adapters/text.ts +++ b/packages/typescript/ai-ollama/src/adapters/text.ts @@ -1,3 +1,4 @@ +import { EventType } from '@tanstack/ai' import { BaseTextAdapter } from '@tanstack/ai/adapters' import { createOllamaClient, generateId, getOllamaHostFromEnv } from '../utils' @@ -24,11 +25,6 @@ import type { } from 'ollama' import type { StreamChunk, TextOptions, Tool } from '@tanstack/ai' -/** Cast an event object to StreamChunk. Adapters construct events with string - * literal types which are structurally compatible with the EventType enum. 
*/ -const asChunk = (chunk: Record) => - chunk as unknown as StreamChunk - export type OllamaTextModel = | (typeof OLLAMA_TEXT_MODELS)[number] | (string & {}) @@ -227,7 +223,6 @@ export class OllamaTextAdapter extends BaseTextAdapter< logger: InternalLogger, ): AsyncIterable { let accumulatedContent = '' - const timestamp = Date.now() let accumulatedReasoning = '' const toolCallsEmitted = new Set() @@ -247,13 +242,13 @@ export class OllamaTextAdapter extends BaseTextAdapter< // Emit RUN_STARTED on first chunk if (!hasEmittedRunStarted) { hasEmittedRunStarted = true - yield asChunk({ - type: 'RUN_STARTED', + yield { + type: EventType.RUN_STARTED, runId, threadId, model: chunk.model, - timestamp, - }) + timestamp: Date.now(), + } satisfies StreamChunk } const handleToolCall = (toolCall: ToolCall): Array => { @@ -268,17 +263,15 @@ export class OllamaTextAdapter extends BaseTextAdapter< // Emit TOOL_CALL_START if not already emitted for this tool call if (!toolCallsEmitted.has(toolCallId)) { toolCallsEmitted.add(toolCallId) - events.push( - asChunk({ - type: 'TOOL_CALL_START', - toolCallId, - toolCallName: actualToolCall.function.name || '', - toolName: actualToolCall.function.name || '', - model: chunk.model, - timestamp, - index: actualToolCall.function.index, - }), - ) + events.push({ + type: EventType.TOOL_CALL_START, + toolCallId, + toolCallName: actualToolCall.function.name || '', + toolName: actualToolCall.function.name || '', + model: chunk.model, + timestamp: Date.now(), + index: actualToolCall.function.index, + } satisfies StreamChunk) } // Serialize arguments to a string for the TOOL_CALL_ARGS event @@ -295,29 +288,25 @@ export class OllamaTextAdapter extends BaseTextAdapter< } // Emit TOOL_CALL_ARGS with full args (Ollama doesn't stream args incrementally) - events.push( - asChunk({ - type: 'TOOL_CALL_ARGS', - toolCallId, - model: chunk.model, - timestamp, - delta: argsStr, - args: argsStr, - }), - ) + events.push({ + type: EventType.TOOL_CALL_ARGS, + toolCallId, + model: chunk.model, + timestamp: Date.now(), + delta: argsStr, + args: argsStr, + } satisfies StreamChunk) // Emit TOOL_CALL_END - events.push( - asChunk({ - type: 'TOOL_CALL_END', - toolCallId, - toolCallName: actualToolCall.function.name || '', - toolName: actualToolCall.function.name || '', - model: chunk.model, - timestamp, - input: parsedInput, - }), - ) + events.push({ + type: EventType.TOOL_CALL_END, + toolCallId, + toolCallName: actualToolCall.function.name || '', + toolName: actualToolCall.function.name || '', + model: chunk.model, + timestamp: Date.now(), + input: parsedInput, + } satisfies StreamChunk) return events } @@ -335,36 +324,36 @@ export class OllamaTextAdapter extends BaseTextAdapter< // Close reasoning events if still open if (reasoningMessageId && !hasClosedReasoning) { hasClosedReasoning = true - yield asChunk({ - type: 'REASONING_MESSAGE_END', + yield { + type: EventType.REASONING_MESSAGE_END, messageId: reasoningMessageId, model: chunk.model, - timestamp, - }) - yield asChunk({ - type: 'REASONING_END', + timestamp: Date.now(), + } satisfies StreamChunk + yield { + type: EventType.REASONING_END, messageId: reasoningMessageId, model: chunk.model, - timestamp, - }) + timestamp: Date.now(), + } satisfies StreamChunk } // Emit TEXT_MESSAGE_END if we had text content if (hasEmittedTextMessageStart) { - yield asChunk({ - type: 'TEXT_MESSAGE_END', + yield { + type: EventType.TEXT_MESSAGE_END, messageId, model: chunk.model, - timestamp, - }) + timestamp: Date.now(), + } satisfies StreamChunk } - yield 
asChunk({ - type: 'RUN_FINISHED', + yield { + type: EventType.RUN_FINISHED, runId, threadId, model: chunk.model, - timestamp, + timestamp: Date.now(), finishReason: toolCallsEmitted.size > 0 ? 'tool_calls' : 'stop', usage: { promptTokens: chunk.prompt_eval_count || 0, @@ -372,7 +361,7 @@ export class OllamaTextAdapter extends BaseTextAdapter< totalTokens: (chunk.prompt_eval_count || 0) + (chunk.eval_count || 0), }, - }) + } satisfies StreamChunk continue } @@ -380,41 +369,41 @@ export class OllamaTextAdapter extends BaseTextAdapter< // Close reasoning before text starts if (reasoningMessageId && !hasClosedReasoning) { hasClosedReasoning = true - yield asChunk({ - type: 'REASONING_MESSAGE_END', + yield { + type: EventType.REASONING_MESSAGE_END, messageId: reasoningMessageId, model: chunk.model, - timestamp, - }) - yield asChunk({ - type: 'REASONING_END', + timestamp: Date.now(), + } satisfies StreamChunk + yield { + type: EventType.REASONING_END, messageId: reasoningMessageId, model: chunk.model, - timestamp, - }) + timestamp: Date.now(), + } satisfies StreamChunk } // Emit TEXT_MESSAGE_START on first text content if (!hasEmittedTextMessageStart) { hasEmittedTextMessageStart = true - yield asChunk({ - type: 'TEXT_MESSAGE_START', + yield { + type: EventType.TEXT_MESSAGE_START, messageId, model: chunk.model, - timestamp, + timestamp: Date.now(), role: 'assistant', - }) + } satisfies StreamChunk } accumulatedContent += chunk.message.content - yield asChunk({ - type: 'TEXT_MESSAGE_CONTENT', + yield { + type: EventType.TEXT_MESSAGE_CONTENT, messageId, model: chunk.model, - timestamp, + timestamp: Date.now(), delta: chunk.message.content, content: accumulatedContent, - }) + } satisfies StreamChunk } if (chunk.message.tool_calls && chunk.message.tool_calls.length > 0) { @@ -434,52 +423,52 @@ export class OllamaTextAdapter extends BaseTextAdapter< reasoningMessageId = generateId('msg') // Spec REASONING events - yield asChunk({ - type: 'REASONING_START', + yield { + type: EventType.REASONING_START, messageId: reasoningMessageId, model: chunk.model, - timestamp, - }) - yield asChunk({ - type: 'REASONING_MESSAGE_START', + timestamp: Date.now(), + } satisfies StreamChunk + yield { + type: EventType.REASONING_MESSAGE_START, messageId: reasoningMessageId, role: 'reasoning' as const, model: chunk.model, - timestamp, - }) + timestamp: Date.now(), + } satisfies StreamChunk // Legacy STEP events (kept during transition) - yield asChunk({ - type: 'STEP_STARTED', + yield { + type: EventType.STEP_STARTED, stepName: stepId, stepId, model: chunk.model, - timestamp, + timestamp: Date.now(), stepType: 'thinking', - }) + } satisfies StreamChunk } accumulatedReasoning += chunk.message.thinking // Spec REASONING content event - yield asChunk({ - type: 'REASONING_MESSAGE_CONTENT', + yield { + type: EventType.REASONING_MESSAGE_CONTENT, messageId: reasoningMessageId!, delta: chunk.message.thinking, model: chunk.model, - timestamp, - }) + timestamp: Date.now(), + } satisfies StreamChunk // Legacy STEP event - yield asChunk({ - type: 'STEP_FINISHED', + yield { + type: EventType.STEP_FINISHED, stepName: stepId || generateId('step'), stepId: stepId || generateId('step'), model: chunk.model, - timestamp, + timestamp: Date.now(), delta: chunk.message.thinking, content: accumulatedReasoning, - }) + } satisfies StreamChunk } } } diff --git a/packages/typescript/ai/src/activities/stream-generation-result.ts b/packages/typescript/ai/src/activities/stream-generation-result.ts index 2a2274cbd..deeacda8f 100644 --- 
a/packages/typescript/ai/src/activities/stream-generation-result.ts +++ b/packages/typescript/ai/src/activities/stream-generation-result.ts @@ -34,7 +34,7 @@ export async function* streamGenerationResult( runId, threadId, timestamp: Date.now(), - } as StreamChunk + } satisfies StreamChunk try { const result = await generator() @@ -44,7 +44,7 @@ export async function* streamGenerationResult( name: 'generation:result', value: result as unknown, timestamp: Date.now(), - } as StreamChunk + } satisfies StreamChunk yield { type: EventType.RUN_FINISHED, @@ -52,18 +52,16 @@ export async function* streamGenerationResult( threadId, finishReason: 'stop', timestamp: Date.now(), - } as StreamChunk + } satisfies StreamChunk } catch (error: unknown) { const payload = toRunErrorPayload(error, 'Generation failed') yield { type: EventType.RUN_ERROR, - runId, - threadId, message: payload.message, code: payload.code, // Deprecated nested form for backward compatibility error: payload, timestamp: Date.now(), - } as StreamChunk + } satisfies StreamChunk } } diff --git a/packages/typescript/ai/src/types.ts b/packages/typescript/ai/src/types.ts index ec87c3259..7c34446b4 100644 --- a/packages/typescript/ai/src/types.ts +++ b/packages/typescript/ai/src/types.ts @@ -830,7 +830,13 @@ export interface RunFinishedEvent extends AGUIRunFinishedEvent { /** Model identifier for multi-model support */ model?: string /** Why the generation stopped */ - finishReason?: 'stop' | 'length' | 'content_filter' | 'tool_calls' | null + finishReason?: + | 'stop' + | 'length' + | 'content_filter' + | 'tool_calls' + | 'function_call' + | null /** Token usage statistics */ usage?: { promptTokens: number diff --git a/packages/typescript/ai/tests/test-utils.ts b/packages/typescript/ai/tests/test-utils.ts index 73b239b93..365d4f80c 100644 --- a/packages/typescript/ai/tests/test-utils.ts +++ b/packages/typescript/ai/tests/test-utils.ts @@ -1,3 +1,4 @@ +import { EventType } from '../src/types' import type { AnyTextAdapter } from '../src/activities/chat/adapter' import type { StreamChunk, TextMessageContentEvent, Tool } from '../src/types' @@ -5,7 +6,9 @@ import type { StreamChunk, TextMessageContentEvent, Tool } from '../src/types' // Chunk factory // ============================================================================ -/** Create a typed StreamChunk with minimal boilerplate. */ +/** Escape hatch for tests that deliberately construct off-spec chunks (e.g. + * to exercise deprecated-field handling or malformed input). Prefer the + * strictly-typed `ev.*` builders below for normal cases. */ export function chunk( type: string, fields: Record = {}, @@ -20,32 +23,61 @@ export function chunk( /** Shorthand chunk factories for common AG-UI events. 
*/ export const ev = { runStarted: (runId = 'run-1', threadId = 'thread-1') => - chunk('RUN_STARTED', { runId, threadId }), + ({ + type: EventType.RUN_STARTED, + runId, + threadId, + timestamp: Date.now(), + }) satisfies StreamChunk, textStart: (messageId = 'msg-1') => - chunk('TEXT_MESSAGE_START', { messageId, role: 'assistant' as const }), + ({ + type: EventType.TEXT_MESSAGE_START, + messageId, + role: 'assistant', + timestamp: Date.now(), + }) satisfies StreamChunk, textContent: (delta: string, messageId = 'msg-1') => - chunk('TEXT_MESSAGE_CONTENT', { messageId, delta }), - textEnd: (messageId = 'msg-1') => chunk('TEXT_MESSAGE_END', { messageId }), + ({ + type: EventType.TEXT_MESSAGE_CONTENT, + messageId, + delta, + timestamp: Date.now(), + }) satisfies StreamChunk, + textEnd: (messageId = 'msg-1') => + ({ + type: EventType.TEXT_MESSAGE_END, + messageId, + timestamp: Date.now(), + }) satisfies StreamChunk, toolStart: (toolCallId: string, toolCallName: string, index?: number) => - chunk('TOOL_CALL_START', { + ({ + type: EventType.TOOL_CALL_START, toolCallId, toolCallName, toolName: toolCallName, + timestamp: Date.now(), ...(index !== undefined ? { index } : {}), - }), + }) satisfies StreamChunk, toolArgs: (toolCallId: string, delta: string) => - chunk('TOOL_CALL_ARGS', { toolCallId, delta }), + ({ + type: EventType.TOOL_CALL_ARGS, + toolCallId, + delta, + timestamp: Date.now(), + }) satisfies StreamChunk, toolEnd: ( toolCallId: string, toolCallName: string, opts?: { input?: unknown; result?: string }, ) => - chunk('TOOL_CALL_END', { + ({ + type: EventType.TOOL_CALL_END, toolCallId, toolCallName, toolName: toolCallName, + timestamp: Date.now(), ...opts, - }), + }) satisfies StreamChunk, runFinished: ( finishReason: | 'stop' @@ -61,17 +93,35 @@ export const ev = { }, threadId = 'thread-1', ) => - chunk('RUN_FINISHED', { + ({ + type: EventType.RUN_FINISHED, runId, threadId, finishReason, + timestamp: Date.now(), ...(usage ? 
{ usage } : {}), - }), - runError: (message: string, runId = 'run-1') => - chunk('RUN_ERROR', { message, runId, error: { message } }), - stepStarted: (stepName = 'step-1') => chunk('STEP_STARTED', { stepName }), + }) satisfies StreamChunk, + runError: (message: string) => + ({ + type: EventType.RUN_ERROR, + message, + timestamp: Date.now(), + error: { message }, + }) satisfies StreamChunk, + stepStarted: (stepName = 'step-1') => + ({ + type: EventType.STEP_STARTED, + stepName, + timestamp: Date.now(), + }) satisfies StreamChunk, stepFinished: (delta: string, stepName = 'step-1') => - chunk('STEP_FINISHED', { stepName, stepId: stepName, delta }), + ({ + type: EventType.STEP_FINISHED, + stepName, + stepId: stepName, + delta, + timestamp: Date.now(), + }) satisfies StreamChunk, } // ============================================================================ diff --git a/packages/typescript/openai-base/src/adapters/chat-completions-text.ts b/packages/typescript/openai-base/src/adapters/chat-completions-text.ts index b15b2d8df..7ea5fffa8 100644 --- a/packages/typescript/openai-base/src/adapters/chat-completions-text.ts +++ b/packages/typescript/openai-base/src/adapters/chat-completions-text.ts @@ -1,3 +1,4 @@ +import { EventType } from '@tanstack/ai' import { BaseTextAdapter } from '@tanstack/ai/adapters' import { toRunErrorPayload } from '@tanstack/ai/adapter-internals' import { generateId, transformNullsToUndefined } from '@tanstack/ai-utils' @@ -20,11 +21,6 @@ import type { } from '@tanstack/ai' import type { OpenAICompatibleClientConfig } from '../types/config' -/** Cast an event object to StreamChunk. Adapters construct events with string - * literal types which are structurally compatible with the EventType enum. */ -const asChunk = (chunk: Record) => - chunk as unknown as StreamChunk - /** * OpenAI-compatible Chat Completions Text Adapter * @@ -71,13 +67,12 @@ export class OpenAICompatibleChatCompletionsTextAdapter< options: TextOptions, ): AsyncIterable { const requestParams = this.mapOptionsToRequest(options) - const timestamp = Date.now() // AG-UI lifecycle tracking (mutable state object for ESLint compatibility) const aguiState = { runId: generateId(this.name), + threadId: options.threadId ?? 
generateId(this.name), messageId: generateId(this.name), - timestamp, hasEmittedRunStarted: false, } @@ -107,22 +102,24 @@ export class OpenAICompatibleChatCompletionsTextAdapter< // Emit RUN_STARTED if not yet emitted if (!aguiState.hasEmittedRunStarted) { aguiState.hasEmittedRunStarted = true - yield asChunk({ - type: 'RUN_STARTED', + yield { + type: EventType.RUN_STARTED, runId: aguiState.runId, + threadId: aguiState.threadId, model: options.model, - timestamp, - }) + timestamp: Date.now(), + } satisfies StreamChunk } // Emit AG-UI RUN_ERROR - yield asChunk({ - type: 'RUN_ERROR', - runId: aguiState.runId, + yield { + type: EventType.RUN_ERROR, model: options.model, - timestamp, + timestamp: Date.now(), + message: errorPayload.message, + code: errorPayload.code, error: errorPayload, - }) + } satisfies StreamChunk options.logger.errors(`${this.name}.chatStream fatal`, { error: errorPayload, @@ -302,13 +299,12 @@ export class OpenAICompatibleChatCompletionsTextAdapter< options: TextOptions, aguiState: { runId: string + threadId: string messageId: string - timestamp: number hasEmittedRunStarted: boolean }, ): AsyncIterable { let accumulatedContent = '' - const timestamp = aguiState.timestamp let hasEmittedTextMessageStart = false let lastModel: string | undefined // Track usage from any chunk that carries it. With @@ -378,12 +374,13 @@ export class OpenAICompatibleChatCompletionsTextAdapter< // `hasEmittedRunStarted`). if (!aguiState.hasEmittedRunStarted) { aguiState.hasEmittedRunStarted = true - yield asChunk({ - type: 'RUN_STARTED', + yield { + type: EventType.RUN_STARTED, runId: aguiState.runId, + threadId: aguiState.threadId, model: chunk.model || options.model, - timestamp, - }) + timestamp: Date.now(), + } satisfies StreamChunk } // Reasoning content (extractReasoning() hook). Run before reading @@ -395,38 +392,38 @@ export class OpenAICompatibleChatCompletionsTextAdapter< if (!reasoningMessageId) { reasoningMessageId = generateId(this.name) stepId = generateId(this.name) - yield asChunk({ - type: 'REASONING_START', + yield { + type: EventType.REASONING_START, messageId: reasoningMessageId, model: chunk.model || options.model, - timestamp, - }) - yield asChunk({ - type: 'REASONING_MESSAGE_START', + timestamp: Date.now(), + } satisfies StreamChunk + yield { + type: EventType.REASONING_MESSAGE_START, messageId: reasoningMessageId, role: 'reasoning' as const, model: chunk.model || options.model, - timestamp, - }) + timestamp: Date.now(), + } satisfies StreamChunk // Legacy STEP_STARTED (single emission, paired with the // STEP_FINISHED below when reasoning closes). - yield asChunk({ - type: 'STEP_STARTED', + yield { + type: EventType.STEP_STARTED, stepName: stepId, stepId, model: chunk.model || options.model, - timestamp, + timestamp: Date.now(), stepType: 'thinking', - }) + } satisfies StreamChunk } accumulatedReasoning += reasoning.text - yield asChunk({ - type: 'REASONING_MESSAGE_CONTENT', + yield { + type: EventType.REASONING_MESSAGE_CONTENT, messageId: reasoningMessageId, delta: reasoning.text, model: chunk.model || options.model, - timestamp, - }) + timestamp: Date.now(), + } satisfies StreamChunk } const choice = chunk.choices[0] @@ -443,53 +440,53 @@ export class OpenAICompatibleChatCompletionsTextAdapter< // REASONING_END before any TEXT_MESSAGE_START. 
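          // The resulting event order for a reasoning-then-text turn is: REASONING_MESSAGE_CONTENT* → REASONING_MESSAGE_END → REASONING_END → STEP_FINISHED (legacy) → TEXT_MESSAGE_START → TEXT_MESSAGE_CONTENT*.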
if (reasoningMessageId && !hasClosedReasoning) { hasClosedReasoning = true - yield asChunk({ - type: 'REASONING_MESSAGE_END', + yield { + type: EventType.REASONING_MESSAGE_END, messageId: reasoningMessageId, model: chunk.model || options.model, - timestamp, - }) - yield asChunk({ - type: 'REASONING_END', + timestamp: Date.now(), + } satisfies StreamChunk + yield { + type: EventType.REASONING_END, messageId: reasoningMessageId, model: chunk.model || options.model, - timestamp, - }) + timestamp: Date.now(), + } satisfies StreamChunk if (stepId) { - yield asChunk({ - type: 'STEP_FINISHED', + yield { + type: EventType.STEP_FINISHED, stepName: stepId, stepId, model: chunk.model || options.model, - timestamp, + timestamp: Date.now(), content: accumulatedReasoning, - }) + } satisfies StreamChunk } } // Emit TEXT_MESSAGE_START on first text content if (!hasEmittedTextMessageStart) { hasEmittedTextMessageStart = true - yield asChunk({ - type: 'TEXT_MESSAGE_START', + yield { + type: EventType.TEXT_MESSAGE_START, messageId: aguiState.messageId, model: chunk.model || options.model, - timestamp, + timestamp: Date.now(), role: 'assistant', - }) + } satisfies StreamChunk } accumulatedContent += deltaContent // Emit AG-UI TEXT_MESSAGE_CONTENT - yield asChunk({ - type: 'TEXT_MESSAGE_CONTENT', + yield { + type: EventType.TEXT_MESSAGE_CONTENT, messageId: aguiState.messageId, model: chunk.model || options.model, - timestamp, + timestamp: Date.now(), delta: deltaContent, content: accumulatedContent, - }) + } satisfies StreamChunk } // Handle tool calls - they come in as deltas @@ -523,26 +520,26 @@ export class OpenAICompatibleChatCompletionsTextAdapter< // Emit TOOL_CALL_START when we have id and name if (toolCall.id && toolCall.name && !toolCall.started) { toolCall.started = true - yield asChunk({ - type: 'TOOL_CALL_START', + yield { + type: EventType.TOOL_CALL_START, toolCallId: toolCall.id, toolCallName: toolCall.name, toolName: toolCall.name, model: chunk.model || options.model, - timestamp, + timestamp: Date.now(), index, - }) + } satisfies StreamChunk } // Emit TOOL_CALL_ARGS for argument deltas if (toolCallDelta.function?.arguments && toolCall.started) { - yield asChunk({ - type: 'TOOL_CALL_ARGS', + yield { + type: EventType.TOOL_CALL_ARGS, toolCallId: toolCall.id, model: chunk.model || options.model, - timestamp, + timestamp: Date.now(), delta: toolCallDelta.function.arguments, - }) + } satisfies StreamChunk } } } @@ -596,15 +593,15 @@ export class OpenAICompatibleChatCompletionsTextAdapter< } // Emit AG-UI TOOL_CALL_END - yield asChunk({ - type: 'TOOL_CALL_END', + yield { + type: EventType.TOOL_CALL_END, toolCallId: toolCall.id, toolCallName: toolCall.name, toolName: toolCall.name, model: chunk.model || options.model, - timestamp, + timestamp: Date.now(), input: parsedInput, - }) + } satisfies StreamChunk emittedAnyToolCallEnd = true } // Clear tool-call state after emission so a subsequent @@ -615,12 +612,12 @@ export class OpenAICompatibleChatCompletionsTextAdapter< // Emit TEXT_MESSAGE_END if we had text content if (hasEmittedTextMessageStart) { - yield asChunk({ - type: 'TEXT_MESSAGE_END', + yield { + type: EventType.TEXT_MESSAGE_END, messageId: aguiState.messageId, model: chunk.model || options.model, - timestamp, - }) + timestamp: Date.now(), + } satisfies StreamChunk hasEmittedTextMessageStart = false } @@ -669,15 +666,15 @@ export class OpenAICompatibleChatCompletionsTextAdapter< parsedInput = {} } } - yield asChunk({ - type: 'TOOL_CALL_END', + yield { + type: EventType.TOOL_CALL_END, 
toolCallId: toolCall.id, toolCallName: toolCall.name, toolName: toolCall.name, model: lastModel || options.model, - timestamp, + timestamp: Date.now(), input: parsedInput, - }) + } satisfies StreamChunk pendingToolCount += 1 emittedAnyToolCallEnd = true } @@ -686,39 +683,39 @@ export class OpenAICompatibleChatCompletionsTextAdapter< // Make sure the text message lifecycle is closed even on early // termination paths where finish_reason never arrives. if (hasEmittedTextMessageStart) { - yield asChunk({ - type: 'TEXT_MESSAGE_END', + yield { + type: EventType.TEXT_MESSAGE_END, messageId: aguiState.messageId, model: lastModel || options.model, - timestamp, - }) + timestamp: Date.now(), + } satisfies StreamChunk } // Close any reasoning lifecycle that text never closed (no text // content arrived, or the stream cut off before text started). if (reasoningMessageId && !hasClosedReasoning) { hasClosedReasoning = true - yield asChunk({ - type: 'REASONING_MESSAGE_END', + yield { + type: EventType.REASONING_MESSAGE_END, messageId: reasoningMessageId, model: lastModel || options.model, - timestamp, - }) - yield asChunk({ - type: 'REASONING_END', + timestamp: Date.now(), + } satisfies StreamChunk + yield { + type: EventType.REASONING_END, messageId: reasoningMessageId, model: lastModel || options.model, - timestamp, - }) + timestamp: Date.now(), + } satisfies StreamChunk if (stepId) { - yield asChunk({ - type: 'STEP_FINISHED', + yield { + type: EventType.STEP_FINISHED, stepName: stepId, stepId, model: lastModel || options.model, - timestamp, + timestamp: Date.now(), content: accumulatedReasoning, - }) + } satisfies StreamChunk } } @@ -730,17 +727,18 @@ export class OpenAICompatibleChatCompletionsTextAdapter< // `tool_calls` but never produced a started/ended pair must NOT // surface `tool_calls` here, since downstream consumers wait for // tool results that would never arrive. - const finishReason: string = emittedAnyToolCallEnd + const finishReason = emittedAnyToolCallEnd ? 'tool_calls' : pendingFinishReason === 'tool_calls' ? 'stop' : (pendingFinishReason ?? 'stop') - yield asChunk({ - type: 'RUN_FINISHED', + yield { + type: EventType.RUN_FINISHED, runId: aguiState.runId, + threadId: aguiState.threadId, model: lastModel || options.model, - timestamp, + timestamp: Date.now(), usage: lastUsage ? 
{ promptTokens: lastUsage.prompt_tokens || 0, @@ -749,7 +747,7 @@ export class OpenAICompatibleChatCompletionsTextAdapter< } : undefined, finishReason, - }) + } satisfies StreamChunk } } catch (error: unknown) { // Narrow before logging: raw SDK errors can carry request metadata @@ -764,13 +762,14 @@ export class OpenAICompatibleChatCompletionsTextAdapter< }) // Emit AG-UI RUN_ERROR - yield asChunk({ - type: 'RUN_ERROR', - runId: aguiState.runId, + yield { + type: EventType.RUN_ERROR, model: options.model, - timestamp, + timestamp: Date.now(), + message: errorPayload.message, + code: errorPayload.code, error: errorPayload, - }) + } satisfies StreamChunk } } diff --git a/packages/typescript/openai-base/src/adapters/responses-text.ts b/packages/typescript/openai-base/src/adapters/responses-text.ts index 8c7ca283d..4f057b6cf 100644 --- a/packages/typescript/openai-base/src/adapters/responses-text.ts +++ b/packages/typescript/openai-base/src/adapters/responses-text.ts @@ -1,3 +1,4 @@ +import { EventType } from '@tanstack/ai' import { BaseTextAdapter } from '@tanstack/ai/adapters' import { toRunErrorPayload } from '@tanstack/ai/adapter-internals' import { generateId, transformNullsToUndefined } from '@tanstack/ai-utils' @@ -21,11 +22,6 @@ import type { } from '@tanstack/ai' import type { OpenAICompatibleClientConfig } from '../types/config' -/** Cast an event object to StreamChunk. Adapters construct events with string - * literal types which are structurally compatible with the EventType enum. */ -const asChunk = (chunk: Record) => - chunk as unknown as StreamChunk - /** * OpenAI-compatible Responses API Text Adapter * @@ -90,13 +86,12 @@ export class OpenAICompatibleResponsesTextAdapter< { index: number; name: string; started: boolean } >() const requestParams = this.mapOptionsToRequest(options) - const timestamp = Date.now() // AG-UI lifecycle tracking const aguiState = { runId: generateId(this.name), + threadId: options.threadId ?? 
generateId(this.name), messageId: generateId(this.name), - timestamp, hasEmittedRunStarted: false, } @@ -130,22 +125,24 @@ export class OpenAICompatibleResponsesTextAdapter< // Emit RUN_STARTED if not yet emitted if (!aguiState.hasEmittedRunStarted) { aguiState.hasEmittedRunStarted = true - yield asChunk({ - type: 'RUN_STARTED', + yield { + type: EventType.RUN_STARTED, runId: aguiState.runId, + threadId: aguiState.threadId, model: options.model, - timestamp, - }) + timestamp: Date.now(), + } satisfies StreamChunk } // Emit AG-UI RUN_ERROR - yield asChunk({ - type: 'RUN_ERROR', - runId: aguiState.runId, + yield { + type: EventType.RUN_ERROR, model: options.model, - timestamp, + timestamp: Date.now(), + message: errorPayload.message, + code: errorPayload.code, error: errorPayload, - }) + } satisfies StreamChunk options.logger.errors(`${this.name}.chatStream fatal`, { error: errorPayload, @@ -355,14 +352,13 @@ export class OpenAICompatibleResponsesTextAdapter< options: TextOptions, aguiState: { runId: string + threadId: string messageId: string - timestamp: number hasEmittedRunStarted: boolean }, ): AsyncIterable { let accumulatedContent = '' let accumulatedReasoning = '' - const timestamp = aguiState.timestamp // Track if we've been streaming deltas to avoid duplicating content from done events let hasStreamedContentDeltas = false @@ -390,12 +386,13 @@ export class OpenAICompatibleResponsesTextAdapter< // Emit RUN_STARTED on first chunk if (!aguiState.hasEmittedRunStarted) { aguiState.hasEmittedRunStarted = true - yield asChunk({ - type: 'RUN_STARTED', + yield { + type: EventType.RUN_STARTED, runId: aguiState.runId, + threadId: aguiState.threadId, model: model || options.model, - timestamp, - }) + timestamp: Date.now(), + } satisfies StreamChunk } const handleContentPart = (contentPart: { @@ -405,14 +402,14 @@ export class OpenAICompatibleResponsesTextAdapter< }): StreamChunk => { if (contentPart.type === 'output_text') { accumulatedContent += contentPart.text || '' - return asChunk({ - type: 'TEXT_MESSAGE_CONTENT', + return { + type: EventType.TEXT_MESSAGE_CONTENT, messageId: aguiState.messageId, model: model || options.model, - timestamp, + timestamp: Date.now(), delta: contentPart.text || '', content: accumulatedContent, - }) + } satisfies StreamChunk } if (contentPart.type === 'reasoning_text') { @@ -424,14 +421,15 @@ export class OpenAICompatibleResponsesTextAdapter< if (!stepId) { stepId = generateId(this.name) } - return asChunk({ - type: 'STEP_FINISHED', + return { + type: EventType.STEP_FINISHED, + stepName: stepId, stepId, model: model || options.model, - timestamp, + timestamp: Date.now(), delta: contentPart.text || '', content: accumulatedReasoning, - }) + } satisfies StreamChunk } // Either a real refusal or an unknown content_part type. Surface // the part type in the error so unknown parts are debuggable @@ -440,16 +438,15 @@ export class OpenAICompatibleResponsesTextAdapter< const message = isRefusal ? contentPart.refusal || 'Refused without explanation' : `Unsupported response content_part type: ${contentPart.type}` - return asChunk({ - type: 'RUN_ERROR', - runId: aguiState.runId, + const code = isRefusal ? 'refusal' : contentPart.type + return { + type: EventType.RUN_ERROR, model: model || options.model, - timestamp, - error: { - message, - code: isRefusal ? 
'refusal' : contentPart.type, - }, - }) + timestamp: Date.now(), + message, + code, + error: { message, code }, + } satisfies StreamChunk } // Capture model metadata from any of these events (created starts @@ -484,12 +481,12 @@ export class OpenAICompatibleResponsesTextAdapter< chunk.type === 'response.incomplete' ) { if (hasEmittedTextMessageStart) { - yield asChunk({ - type: 'TEXT_MESSAGE_END', + yield { + type: EventType.TEXT_MESSAGE_END, messageId: aguiState.messageId, model: chunk.response.model, - timestamp, - }) + timestamp: Date.now(), + } satisfies StreamChunk hasEmittedTextMessageStart = false } // Coalesce error + incomplete_details into a single RUN_ERROR @@ -502,23 +499,25 @@ export class OpenAICompatibleResponsesTextAdapter< ? 'Response failed' : 'Response ended incomplete') const errorCode = - chunk.response.error?.code || - (chunk.response.incomplete_details ? 'incomplete' : undefined) + chunk.response.error?.code ?? + (chunk.response.incomplete_details ? 'incomplete' : undefined) ?? + undefined // Always emit RUN_ERROR for terminal failure events, even when the // upstream omitted both `error` and `incomplete_details`. Skipping // emission on a `response.incomplete` with no detail would let the // post-loop synthetic block silently coerce the run to a clean // `RUN_FINISHED { finishReason: 'stop' }` — masking the failure. - yield asChunk({ - type: 'RUN_ERROR', - runId: aguiState.runId, + yield { + type: EventType.RUN_ERROR, model: chunk.response.model, - timestamp, + timestamp: Date.now(), + message: errorMessage, + ...(errorCode !== undefined && { code: errorCode }), error: { message: errorMessage, ...(errorCode !== undefined && { code: errorCode }), }, - }) + } satisfies StreamChunk // RUN_ERROR is the terminal event for this run; stop processing // any further chunks the iterator might still deliver. 
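          // (`runFinishedEmitted` doubles as a general terminal-event latch: setting it after this RUN_ERROR also keeps the post-loop synthetic RUN_FINISHED block from firing for an already-failed run.)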
runFinishedEmitted = true @@ -539,25 +538,25 @@ export class OpenAICompatibleResponsesTextAdapter< // Emit TEXT_MESSAGE_START on first text content if (!hasEmittedTextMessageStart) { hasEmittedTextMessageStart = true - yield asChunk({ - type: 'TEXT_MESSAGE_START', + yield { + type: EventType.TEXT_MESSAGE_START, messageId: aguiState.messageId, model: model || options.model, - timestamp, + timestamp: Date.now(), role: 'assistant', - }) + } satisfies StreamChunk } accumulatedContent += textDelta hasStreamedContentDeltas = true - yield asChunk({ - type: 'TEXT_MESSAGE_CONTENT', + yield { + type: EventType.TEXT_MESSAGE_CONTENT, messageId: aguiState.messageId, model: model || options.model, - timestamp, + timestamp: Date.now(), delta: textDelta, content: accumulatedContent, - }) + } satisfies StreamChunk } } @@ -576,25 +575,28 @@ export class OpenAICompatibleResponsesTextAdapter< if (!hasEmittedStepStarted) { hasEmittedStepStarted = true stepId = generateId(this.name) - yield asChunk({ - type: 'STEP_STARTED', + yield { + type: EventType.STEP_STARTED, + stepName: stepId, stepId, model: model || options.model, - timestamp, + timestamp: Date.now(), stepType: 'thinking', - }) + } satisfies StreamChunk } accumulatedReasoning += reasoningDelta hasStreamedReasoningDeltas = true - yield asChunk({ - type: 'STEP_FINISHED', - stepId: stepId || generateId(this.name), + const fallbackStepId = stepId || generateId(this.name) + yield { + type: EventType.STEP_FINISHED, + stepName: fallbackStepId, + stepId: fallbackStepId, model: model || options.model, - timestamp, + timestamp: Date.now(), delta: reasoningDelta, content: accumulatedReasoning, - }) + } satisfies StreamChunk } } @@ -612,25 +614,28 @@ export class OpenAICompatibleResponsesTextAdapter< if (!hasEmittedStepStarted) { hasEmittedStepStarted = true stepId = generateId(this.name) - yield asChunk({ - type: 'STEP_STARTED', + yield { + type: EventType.STEP_STARTED, + stepName: stepId, stepId, model: model || options.model, - timestamp, + timestamp: Date.now(), stepType: 'thinking', - }) + } satisfies StreamChunk } accumulatedReasoning += summaryDelta hasStreamedReasoningDeltas = true - yield asChunk({ - type: 'STEP_FINISHED', - stepId: stepId || generateId(this.name), + const fallbackStepId = stepId || generateId(this.name) + yield { + type: EventType.STEP_FINISHED, + stepName: fallbackStepId, + stepId: fallbackStepId, model: model || options.model, - timestamp, + timestamp: Date.now(), delta: summaryDelta, content: accumulatedReasoning, - }) + } satisfies StreamChunk } } @@ -643,25 +648,26 @@ export class OpenAICompatibleResponsesTextAdapter< !hasEmittedTextMessageStart ) { hasEmittedTextMessageStart = true - yield asChunk({ - type: 'TEXT_MESSAGE_START', + yield { + type: EventType.TEXT_MESSAGE_START, messageId: aguiState.messageId, model: model || options.model, - timestamp, + timestamp: Date.now(), role: 'assistant', - }) + } satisfies StreamChunk } // Emit STEP_STARTED if this is reasoning content if (contentPart.type === 'reasoning_text' && !hasEmittedStepStarted) { hasEmittedStepStarted = true stepId = generateId(this.name) - yield asChunk({ - type: 'STEP_STARTED', + yield { + type: EventType.STEP_STARTED, + stepName: stepId, stepId, model: model || options.model, - timestamp, + timestamp: Date.now(), stepType: 'thinking', - }) + } satisfies StreamChunk } // Mark whichever stream we just emitted into so a subsequent // `content_part.done` doesn't duplicate the same text. 
Without @@ -726,15 +732,15 @@ export class OpenAICompatibleResponsesTextAdapter< started: false, }) } - yield asChunk({ - type: 'TOOL_CALL_START', + yield { + type: EventType.TOOL_CALL_START, toolCallId: item.id, toolCallName: item.name || '', toolName: item.name || '', model: model || options.model, - timestamp, + timestamp: Date.now(), index: chunk.output_index, - }) + } satisfies StreamChunk toolCallMetadata.get(item.id)!.started = true } } @@ -768,13 +774,13 @@ export class OpenAICompatibleResponsesTextAdapter< ) continue } - yield asChunk({ - type: 'TOOL_CALL_ARGS', + yield { + type: EventType.TOOL_CALL_ARGS, toolCallId: chunk.item_id, model: model || options.model, - timestamp, + timestamp: Date.now(), delta: chunk.delta, - }) + } satisfies StreamChunk } if (chunk.type === 'response.function_call_arguments.done') { @@ -825,26 +831,26 @@ export class OpenAICompatibleResponsesTextAdapter< } } - yield asChunk({ - type: 'TOOL_CALL_END', + yield { + type: EventType.TOOL_CALL_END, toolCallId: item_id, toolCallName: name, toolName: name, model: model || options.model, - timestamp, + timestamp: Date.now(), input: parsedInput, - }) + } satisfies StreamChunk } if (chunk.type === 'response.completed') { // Emit TEXT_MESSAGE_END if we had text content if (hasEmittedTextMessageStart) { - yield asChunk({ - type: 'TEXT_MESSAGE_END', + yield { + type: EventType.TEXT_MESSAGE_END, messageId: aguiState.messageId, model: model || options.model, - timestamp, - }) + timestamp: Date.now(), + } satisfies StreamChunk hasEmittedTextMessageStart = false } @@ -852,40 +858,54 @@ export class OpenAICompatibleResponsesTextAdapter< // Otherwise surface incomplete_details.reason when present so // callers can distinguish length-limit / content-filter cutoffs // from a clean stop, mirroring the chat-completions adapter. + // The Responses API's incomplete_details.reason ('max_output_tokens' + // | 'content_filter') maps to the AG-UI finishReason vocabulary: + // max_output_tokens → 'length', content_filter → 'content_filter'. const hasFunctionCalls = chunk.response.output.some( (item: unknown) => (item as { type: string }).type === 'function_call', ) - const finishReason: string = hasFunctionCalls + const incompleteReason = chunk.response.incomplete_details?.reason + const finishReason: + | 'tool_calls' + | 'length' + | 'content_filter' + | 'stop' = hasFunctionCalls ? 'tool_calls' - : (chunk.response.incomplete_details?.reason ?? 'stop') - - yield asChunk({ - type: 'RUN_FINISHED', + : incompleteReason === 'max_output_tokens' + ? 'length' + : incompleteReason === 'content_filter' + ? 'content_filter' + : 'stop' + + yield { + type: EventType.RUN_FINISHED, runId: aguiState.runId, + threadId: aguiState.threadId, model: model || options.model, - timestamp, + timestamp: Date.now(), usage: { promptTokens: chunk.response.usage?.input_tokens || 0, completionTokens: chunk.response.usage?.output_tokens || 0, totalTokens: chunk.response.usage?.total_tokens || 0, }, finishReason, - }) + } satisfies StreamChunk runFinishedEmitted = true } if (chunk.type === 'error') { - yield asChunk({ - type: 'RUN_ERROR', - runId: aguiState.runId, + yield { + type: EventType.RUN_ERROR, model: model || options.model, - timestamp, + timestamp: Date.now(), + message: chunk.message, + code: chunk.code ?? undefined, error: { message: chunk.message, code: chunk.code ?? undefined, }, - }) + } satisfies StreamChunk // RUN_ERROR is terminal — don't let the synthetic RUN_FINISHED // block fire after a top-level stream error event. 
runFinishedEmitted = true } } @@ -898,21 +918,22 @@ export class OpenAICompatibleResponsesTextAdapter< // see a terminal event for every started run. if (!runFinishedEmitted && aguiState.hasEmittedRunStarted) { if (hasEmittedTextMessageStart) { - yield asChunk({ - type: 'TEXT_MESSAGE_END', + yield { + type: EventType.TEXT_MESSAGE_END, messageId: aguiState.messageId, model: model || options.model, - timestamp, - }) + timestamp: Date.now(), + } satisfies StreamChunk } - yield asChunk({ - type: 'RUN_FINISHED', + yield { + type: EventType.RUN_FINISHED, runId: aguiState.runId, + threadId: aguiState.threadId, model: model || options.model, - timestamp, + timestamp: Date.now(), usage: undefined, finishReason: toolCallMetadata.size > 0 ? 'tool_calls' : 'stop', - }) + } satisfies StreamChunk } } catch (error: unknown) { // Narrow before logging: raw SDK errors can carry request metadata @@ -925,13 +946,14 @@ export class OpenAICompatibleResponsesTextAdapter< error: errorPayload, source: `${this.name}.processStreamChunks`, }) - yield asChunk({ - type: 'RUN_ERROR', - runId: aguiState.runId, + yield { + type: EventType.RUN_ERROR, model: options.model, - timestamp, + timestamp: Date.now(), + message: errorPayload.message, + code: errorPayload.code, error: errorPayload, - }) + } satisfies StreamChunk } } From 50214f7dcbf0fb9f41028bdc1fbae2ef51f03908 Mon Sep 17 00:00:00 2001 From: Alem Tuzlak Date: Tue, 12 May 2026 15:38:58 +0200 Subject: [PATCH 08/49] fix(ai-openrouter): preserve assistant/tool message content fidelity MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The chat adapter's convertMessage JSON-stringified Array<ContentPart> assistant content (so a multi-part assistant turn would round-trip as the literal JSON of the parts instead of joined text) and emitted `content: undefined` for tool-call-only assistants where the OpenAI Chat Completions contract documents `null`. Use the base's extractTextContent + emit `null` for the tool-call-only case so the override matches the chat-completions base. The Responses adapter's convertMessagesToInput tool branch had the same shape — JSON.stringify(message.content) fed the raw ContentPart shape into function_call_output.output for structured tool results. Use extractTextContent there too. Regression tests assert (a) array-shaped assistant content extracts to joined text rather than JSON, and (b) tool-call-only assistant content emits `null` rather than `undefined`. --- .../src/adapters/responses-text.ts | 7 +- .../ai-openrouter/src/adapters/text.ts | 15 ++-- .../tests/openrouter-adapter.test.ts | 85 +++++++++++++++++++ 3 files changed, 100 insertions(+), 7 deletions(-) diff --git a/packages/typescript/ai-openrouter/src/adapters/responses-text.ts b/packages/typescript/ai-openrouter/src/adapters/responses-text.ts index 3f23d8419..c877b1281 100644 --- a/packages/typescript/ai-openrouter/src/adapters/responses-text.ts +++ b/packages/typescript/ai-openrouter/src/adapters/responses-text.ts @@ -258,13 +258,18 @@ export class OpenRouterResponsesTextAdapter< for (const message of messages) { if (message.role === 'tool') { + // For structured (Array<ContentPart>) tool results, extract the text + // content rather than JSON-stringifying the parts — sending the raw + // ContentPart shape (e.g. `[{"type":"text","content":"…"}]`) into the + // `output` field would feed the literal JSON of the parts back to the + // model instead of the tool's textual result. 
result.push({ type: 'function_call_output', callId: message.toolCallId || '', output: typeof message.content === 'string' ? message.content - : JSON.stringify(message.content), + : this.extractTextContent(message.content), } as unknown as InputsItem) continue } diff --git a/packages/typescript/ai-openrouter/src/adapters/text.ts b/packages/typescript/ai-openrouter/src/adapters/text.ts index db7677cab..38e503b9b 100644 --- a/packages/typescript/ai-openrouter/src/adapters/text.ts +++ b/packages/typescript/ai-openrouter/src/adapters/text.ts @@ -196,14 +196,17 @@ export class OpenRouterTextAdapter< : JSON.stringify(tc.function.arguments), }, })) + // Per the OpenAI-compatible Chat Completions contract, an assistant + // message that only carries tool_calls should have `content: null` + // rather than `content: ''` or `content: undefined`. For multi-part + // assistant content (Array<ContentPart>) we extract the text rather + // than JSON-stringifying the parts, which would otherwise leak the + // literal part shape into the next-turn prompt. + const textContent = this.extractTextContent(message.content) + const hasToolCalls = !!toolCalls && toolCalls.length > 0 return { role: 'assistant', - content: - typeof message.content === 'string' - ? message.content - : message.content - ? JSON.stringify(message.content) - : undefined, + content: hasToolCalls && !textContent ? null : textContent, toolCalls, } satisfies ChatMessages } diff --git a/packages/typescript/ai-openrouter/tests/openrouter-adapter.test.ts b/packages/typescript/ai-openrouter/tests/openrouter-adapter.test.ts index d4dc528de..ac9d94bbd 100644 --- a/packages/typescript/ai-openrouter/tests/openrouter-adapter.test.ts +++ b/packages/typescript/ai-openrouter/tests/openrouter-adapter.test.ts @@ -1889,4 +1889,89 @@ describe('OpenRouter convertMessage fail-loud guards', () => { expect(typeof args).toBe('string') expect(JSON.parse(args)).toEqual({ location: 'Berlin' }) }) + + it('extracts text from array-shaped assistant content instead of JSON-stringifying parts', async () => { + setupMockSdkClient([ + { + id: 'x', + model: 'openai/gpt-4o-mini', + choices: [{ delta: { content: 'ok' }, finishReason: 'stop' }], + }, + ]) + const adapter = createAdapter() + + for await (const _ of adapter.chatStream({ + model: 'openai/gpt-4o-mini', + messages: [ + { role: 'user', content: 'first' }, + { + role: 'assistant', + // Multi-part assistant content from a prior turn. The base extracts + // joined text; the OpenRouter override must do the same instead of + // JSON-stringifying the parts into the next-turn prompt. + content: [ + { type: 'text', content: 'hello ' }, + { type: 'text', content: 'world' }, + ], + }, + { role: 'user', content: 'second' }, + ], + logger: testLogger, + })) { + // consume + } + + const [rawParams] = mockSend.mock.calls[0]! 
+ const assistantMsg = rawParams.chatRequest.messages.find( + (m: any) => m.role === 'assistant', + ) + expect(assistantMsg).toBeDefined() + expect(assistantMsg.content).toBe('hello world') + }) + + it('emits content: null (not undefined) for assistant messages with only tool calls', async () => { + setupMockSdkClient([ + { + id: 'x', + model: 'openai/gpt-4o-mini', + choices: [{ delta: { content: 'ok' }, finishReason: 'stop' }], + }, + ]) + const adapter = createAdapter() + + for await (const _ of adapter.chatStream({ + model: 'openai/gpt-4o-mini', + messages: [ + { role: 'user', content: 'hi' }, + { + role: 'assistant', + content: null, + toolCalls: [ + { + id: 'call_1', + type: 'function', + function: { + name: 'lookup_weather', + arguments: '{"location":"Berlin"}', + }, + }, + ], + }, + { role: 'tool', toolCallId: 'call_1', content: '{"temp":72}' }, + ], + logger: testLogger, + })) { + // consume + } + + const [rawParams] = mockSend.mock.calls[0]! + const assistantMsg = rawParams.chatRequest.messages.find( + (m: any) => m.role === 'assistant', + ) + expect(assistantMsg).toBeDefined() + // Strictly null — the OpenAI Chat Completions contract documents `null` + // for tool-call-only assistant messages, and the SDK's Zod schema may + // strip `undefined` entirely. + expect(assistantMsg.content).toBeNull() + }) }) From e8cce25f9b844c24aa960d78b0bb63113efe9a5a Mon Sep 17 00:00:00 2001 From: Alem Tuzlak Date: Tue, 12 May 2026 15:39:10 +0200 Subject: [PATCH 09/49] fix(ai-groq): correct ChatCompletionNamedToolChoice shape MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The interface declared a single capitalized `Function` key with no `type` discriminator. The OpenAI / Groq Chat Completions wire format for a named tool_choice is `{ type: 'function', function: { name } }`. Construct a literal against the old type and the SDK's Zod schema would either reject it or treat tool_choice as unset. No production code constructs this type literally yet — only the `ChatCompletionToolChoiceOption` union in the same file uses it — so fixing the shape now is a no-op at runtime but locks the type to the correct contract going forward. --- packages/typescript/ai-groq/src/message-types.ts | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/packages/typescript/ai-groq/src/message-types.ts b/packages/typescript/ai-groq/src/message-types.ts index dfe55126b..d769cf4f9 100644 --- a/packages/typescript/ai-groq/src/message-types.ts +++ b/packages/typescript/ai-groq/src/message-types.ts @@ -10,7 +10,9 @@ export type FunctionParameters = { [key: string]: unknown } export interface ChatCompletionNamedToolChoice { - Function: { + /** Always `function` for a named tool choice. */ + type: 'function' + function: { /** The name of the function to call. */ name: string } From ba9936e18eb7e6f3a47d3dbe249e7ef1dd31c29c Mon Sep 17 00:00:00 2001 From: Alem Tuzlak Date: Tue, 12 May 2026 15:39:14 +0200 Subject: [PATCH 10/49] test(ai-groq): reset pendingMockCreate between tests The module-level pendingMockCreate is only cleared inside applyPendingMock when a factory call consumes it. Tests in the first describe block instantiate the adapter without calling setupMockSdkClient first, so a leaked value from a prior test would inject a stale mock into a later adapter. Reset in beforeEach for deterministic ordering regardless of test-runner permutation. 
--- packages/typescript/ai-groq/tests/groq-adapter.test.ts | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/packages/typescript/ai-groq/tests/groq-adapter.test.ts b/packages/typescript/ai-groq/tests/groq-adapter.test.ts index def98d8da..8615f7cfb 100644 --- a/packages/typescript/ai-groq/tests/groq-adapter.test.ts +++ b/packages/typescript/ai-groq/tests/groq-adapter.test.ts @@ -91,6 +91,13 @@ const weatherTool: Tool = { } describe('Groq adapters', () => { + // Reset the module-level `pendingMockCreate` between tests so a previous + // test's setupMockSdkClient call can't leak into a later test that + // instantiates the adapter without setting up a mock. + beforeEach(() => { + pendingMockCreate = undefined + }) + afterEach(() => { vi.unstubAllEnvs() }) From 74cbd77a366def357a2f31f55a9734a8412049ed Mon Sep 17 00:00:00 2001 From: Alem Tuzlak Date: Tue, 12 May 2026 15:39:19 +0200 Subject: [PATCH 11/49] test(e2e): route OpenRouter summarize through createOpenRouterSummarize MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The feature-support matrix advertises summarize / summarize-stream for both `openrouter` and `openrouter-responses`, but the factories silently substituted `createOpenaiSummarize` against the OpenAI base URL — exercising the OpenAI adapter while reporting OpenRouter coverage. Wire `createOpenRouterSummarize` (a thin wrapper over the OpenRouter chat adapter, used for both rows since the summarize endpoint is chat-completions-only) against the LLMOCK base so the matrix's claim is actually verified. --- testing/e2e/src/routes/api.summarize.ts | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/testing/e2e/src/routes/api.summarize.ts b/testing/e2e/src/routes/api.summarize.ts index 2c34c99d9..131aedac6 100644 --- a/testing/e2e/src/routes/api.summarize.ts +++ b/testing/e2e/src/routes/api.summarize.ts @@ -5,6 +5,7 @@ import { createAnthropicSummarize } from '@tanstack/ai-anthropic' import { createGeminiSummarize } from '@tanstack/ai-gemini' import { createOllamaSummarize } from '@tanstack/ai-ollama' import { createGrokSummarize } from '@tanstack/ai-grok' +import { createOpenRouterSummarize } from '@tanstack/ai-openrouter' import type { Provider } from '@/lib/types' const LLMOCK_BASE = process.env.LLMOCK_URL || 'http://127.0.0.1:4010' @@ -26,10 +27,19 @@ function createSummarizeAdapter(provider: Provider) { ollama: () => createOllamaSummarize('mistral', LLMOCK_BASE), grok: () => createGrokSummarize('grok-3', DUMMY_KEY, { baseURL: LLMOCK_OPENAI }), + // Both OpenRouter provider rows use the OpenRouter summarize adapter: + // `OpenRouterSummarizeAdapter` wraps the OpenRouter chat-completions + // text adapter regardless of whether the caller selected the Chat + // Completions or Responses surface, so a single factory backs both + // matrix entries. 
openrouter: () => - createOpenaiSummarize('gpt-4o', DUMMY_KEY, { baseURL: LLMOCK_OPENAI }), + createOpenRouterSummarize('openai/gpt-4o', DUMMY_KEY, { + serverURL: LLMOCK_OPENAI, + }), 'openrouter-responses': () => - createOpenaiSummarize('gpt-4o', DUMMY_KEY, { baseURL: LLMOCK_OPENAI }), + createOpenRouterSummarize('openai/gpt-4o', DUMMY_KEY, { + serverURL: LLMOCK_OPENAI, + }), } return factories[provider]?.() } From 0ecfd3b807e5a4307e58e37ada1c940934ff2253 Mon Sep 17 00:00:00 2001 From: Alem Tuzlak Date: Tue, 12 May 2026 15:39:23 +0200 Subject: [PATCH 12/49] chore(ai-openrouter): declare zod as peer dependency MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Sibling adapters (`ai-openai`, `ai-groq`, `ai-grok`) all declare zod as a peerDependency so a consumer that passes a Zod tool schema gets a single zod instance shared with this adapter. Without the peerDep, strict installs (pnpm `strict-peer-dependencies`, yarn berry pnp) can end up with two zod copies — one transitive via `@openrouter/sdk` or `@tanstack/ai`, one direct — and `instanceof ZodType` checks then fail across the boundary. --- packages/typescript/ai-openrouter/package.json | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/packages/typescript/ai-openrouter/package.json b/packages/typescript/ai-openrouter/package.json index 6ead29809..82bcf976c 100644 --- a/packages/typescript/ai-openrouter/package.json +++ b/packages/typescript/ai-openrouter/package.json @@ -54,6 +54,7 @@ "zod": "^4.2.0" }, "peerDependencies": { - "@tanstack/ai": "workspace:^" + "@tanstack/ai": "workspace:^", + "zod": "^4.0.0" } } From 5eb7aa7ff7ee067d0b258c3a9364087b723ba84e Mon Sep 17 00:00:00 2001 From: Alem Tuzlak Date: Tue, 12 May 2026 15:50:25 +0200 Subject: [PATCH 13/49] fix(ai-groq): drop spurious timestamp field from processStreamChunks override The Groq subclass declared its aguiState parameter with an extra `timestamp: number` field that does not exist on the base class's aguiState type. TypeScript's bivariant method-parameter checks let the wider type pass typecheck, but at runtime the body never reads `timestamp` and the field is never populated by the base, so any caller (or future override) that relied on the declared shape would observe `undefined`. Realign the override's parameter type with the base. --- packages/typescript/ai-groq/src/adapters/text.ts | 1 - 1 file changed, 1 deletion(-) diff --git a/packages/typescript/ai-groq/src/adapters/text.ts b/packages/typescript/ai-groq/src/adapters/text.ts index 6f8cec127..3ed8b6546 100644 --- a/packages/typescript/ai-groq/src/adapters/text.ts +++ b/packages/typescript/ai-groq/src/adapters/text.ts @@ -75,7 +75,6 @@ export class GroqTextAdapter< runId: string threadId: string messageId: string - timestamp: number hasEmittedRunStarted: boolean }, ) { From a773bd5bc0f93bcae1adbd56962a7816e959885e Mon Sep 17 00:00:00 2001 From: Alem Tuzlak Date: Tue, 12 May 2026 15:50:29 +0200 Subject: [PATCH 14/49] fix(ai-openrouter): stringify error.code on response.failed events MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The chunk-level 'error' branch in adaptOpenRouterResponsesStreamEvents already stringifies provider codes so they survive toRunErrorPayload's string-only code filter, but the parallel response.failed / response.incomplete path went through toSnakeResponseResult which forwarded `r.error.code` raw. 
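
A reduced sketch of the filter in question (the real `toRunErrorPayload`
lives in the shared base; the shape below is an assumption for
illustration, not the actual implementation):

    // Only string codes survive; any other type is silently dropped.
    function toRunErrorPayload(error: { message: string; code?: unknown }) {
      return {
        message: error.message,
        ...(typeof error.code === 'string' ? { code: error.code } : {}),
      }
    }

    toRunErrorPayload({ message: 'auth failed', code: 401 })
    //=> { message: 'auth failed' }               (numeric code dropped)
    toRunErrorPayload({ message: 'auth failed', code: String(401) })
    //=> { message: 'auth failed', code: '401' }  (survives once stringified)
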
A provider that returned a numeric code (401/429/500/…) on a terminal failure event would lose it on the way through to RUN_ERROR. Mirror the chunk-level stringification inside toSnakeResponseResult and add a regression test for response.failed with a numeric error.code. --- .../src/adapters/responses-text.ts | 9 +- .../openrouter-responses-adapter.test.ts | 108 ++++++++++++++++++ 2 files changed, 116 insertions(+), 1 deletion(-) diff --git a/packages/typescript/ai-openrouter/src/adapters/responses-text.ts b/packages/typescript/ai-openrouter/src/adapters/responses-text.ts index c877b1281..28802f083 100644 --- a/packages/typescript/ai-openrouter/src/adapters/responses-text.ts +++ b/packages/typescript/ai-openrouter/src/adapters/responses-text.ts @@ -557,7 +557,14 @@ function toSnakeResponseResult(r: any): Record { ? r.output.map((it: any) => toSnakeOutputItem(it)) : r.output, ...(r.error && { - error: { message: r.error.message, code: r.error.code }, + // Stringify provider error codes so they survive `toRunErrorPayload`'s + // string-only `code` filter — mirrors the top-level `'error'` event + // branch in `adaptOpenRouterResponsesStreamEvents` and the chat- + // completions fix in commit 0171b18e. + error: { + message: r.error.message, + code: r.error.code != null ? String(r.error.code) : undefined, + }, }), } } diff --git a/packages/typescript/ai-openrouter/tests/openrouter-responses-adapter.test.ts b/packages/typescript/ai-openrouter/tests/openrouter-responses-adapter.test.ts index 33b4fedc0..8621e4483 100644 --- a/packages/typescript/ai-openrouter/tests/openrouter-responses-adapter.test.ts +++ b/packages/typescript/ai-openrouter/tests/openrouter-responses-adapter.test.ts @@ -404,6 +404,114 @@ describe('OpenRouter responses adapter — stream event bridge', () => { expect(err).toBeDefined() expect(err.error.code).toBe('429') }) + + it('stringifies non-string error.code on response.failed events', async () => { + setupMockSdkClient([ + { + type: 'response.created', + sequenceNumber: 0, + response: { model: 'm', output: [] }, + }, + { + type: 'response.failed', + sequenceNumber: 1, + response: { + model: 'm', + output: [], + error: { message: 'upstream auth failed', code: 401 }, + }, + }, + ]) + const adapter = createAdapter() + const chunks: Array = [] + for await (const c of adapter.chatStream({ + model: 'openai/gpt-4o-mini' as any, + messages: [{ role: 'user', content: 'hi' }], + logger: testLogger, + })) { + chunks.push(c) + } + const err = chunks.find((c) => c.type === 'RUN_ERROR') as any + expect(err).toBeDefined() + expect(err.message).toBe('upstream auth failed') + // Provider code must survive as a string so `toRunErrorPayload`'s + // string-only `code` filter doesn't drop it on the way through. + expect(err.code).toBe('401') + expect(err.error.code).toBe('401') + }) + + it('does not emit further lifecycle events after a top-level error event', async () => { + setupMockSdkClient([ + { + type: 'response.created', + sequenceNumber: 0, + response: { model: 'm', output: [] }, + }, + { + type: 'response.output_item.added', + sequenceNumber: 1, + outputIndex: 0, + item: { type: 'message', id: 'msg_1', role: 'assistant' }, + }, + { + type: 'response.output_text.delta', + sequenceNumber: 2, + itemId: 'msg_1', + outputIndex: 0, + contentIndex: 0, + delta: 'partial ', + }, + // Top-level error mid-stream — terminal. 
+ { + type: 'error', + sequenceNumber: 3, + message: 'rate limit', + code: 429, + param: null, + }, + // The adapter MUST NOT process anything after the error event; + // these chunks would otherwise yield TEXT_MESSAGE_CONTENT / END + // events past the terminal RUN_ERROR. + { + type: 'response.output_text.delta', + sequenceNumber: 4, + itemId: 'msg_1', + outputIndex: 0, + contentIndex: 0, + delta: 'after-error', + }, + { + type: 'response.output_text.done', + sequenceNumber: 5, + itemId: 'msg_1', + outputIndex: 0, + contentIndex: 0, + text: 'partial after-error', + }, + ]) + const adapter = createAdapter() + const chunks: Array = [] + for await (const c of adapter.chatStream({ + model: 'openai/gpt-4o-mini' as any, + messages: [{ role: 'user', content: 'hi' }], + logger: testLogger, + })) { + chunks.push(c) + } + + const errIndex = chunks.findIndex((c) => c.type === 'RUN_ERROR') + expect(errIndex).toBeGreaterThanOrEqual(0) + // No content/lifecycle events emitted after RUN_ERROR. + const post = chunks.slice(errIndex + 1) + expect(post).toEqual([]) + // The first delta's content reached the consumer; the second did not. + const allContent = chunks + .filter((c) => c.type === 'TEXT_MESSAGE_CONTENT') + .map((c: any) => c.delta) + .join('') + expect(allContent).toBe('partial ') + expect(allContent).not.toContain('after-error') + }) }) describe('OpenRouter responses adapter — SDK constructor wiring', () => { From 993df3e07a63f940467f1c8b22601e9968df5b79 Mon Sep 17 00:00:00 2001 From: Alem Tuzlak Date: Tue, 12 May 2026 15:50:33 +0200 Subject: [PATCH 15/49] fix(ai-openrouter): default image data URI mime type to octet-stream When a base64 image source has no mimeType the override produced a literal `data:undefined;base64,...` URI that the upstream rejects as invalid. The chat-completions base defaults to `application/octet-stream` for exactly this case; mirror the same defaulting in the OpenRouter convertContentPart override. Regression test asserts the data URI no longer contains the literal `undefined`. --- .../ai-openrouter/src/adapters/text.ts | 8 +++- .../tests/openrouter-adapter.test.ts | 46 +++++++++++++++++++ 2 files changed, 53 insertions(+), 1 deletion(-) diff --git a/packages/typescript/ai-openrouter/src/adapters/text.ts b/packages/typescript/ai-openrouter/src/adapters/text.ts index 38e503b9b..b658a2ff8 100644 --- a/packages/typescript/ai-openrouter/src/adapters/text.ts +++ b/packages/typescript/ai-openrouter/src/adapters/text.ts @@ -265,9 +265,15 @@ export class OpenRouterTextAdapter< case 'image': { const meta = part.metadata as OpenRouterImageMetadata | undefined const value = part.source.value + // Default to `application/octet-stream` when the source didn't + // provide a MIME type — interpolating `undefined` into the URI + // ("data:undefined;base64,...") produces an invalid data URI the + // API rejects. Mirrors the base's defaulting in + // `OpenAICompatibleChatCompletionsTextAdapter.convertContentPart`. + const imageMime = part.source.mimeType || 'application/octet-stream' const url = part.source.type === 'data' && !value.startsWith('data:') - ? `data:${part.source.mimeType};base64,${value}` + ? 
`data:${imageMime};base64,${value}` : value return { type: 'image_url', diff --git a/packages/typescript/ai-openrouter/tests/openrouter-adapter.test.ts b/packages/typescript/ai-openrouter/tests/openrouter-adapter.test.ts index ac9d94bbd..7bab688ff 100644 --- a/packages/typescript/ai-openrouter/tests/openrouter-adapter.test.ts +++ b/packages/typescript/ai-openrouter/tests/openrouter-adapter.test.ts @@ -381,6 +381,52 @@ describe('OpenRouter adapter option mapping', () => { }) }) + it('defaults base64 image data URIs to application/octet-stream when mimeType is missing', async () => { + setupMockSdkClient([ + { + id: 'x', + model: 'openai/gpt-4o-mini', + choices: [{ delta: { content: 'ok' }, finishReason: 'stop' }], + }, + ]) + const adapter = createAdapter() + for await (const _ of adapter.chatStream({ + model: 'openai/gpt-4o-mini', + messages: [ + { + role: 'user', + content: [ + { type: 'text', content: 'see image' }, + { + type: 'image', + // The TS type requires `mimeType` on data sources, but at + // runtime a JS caller (or a cast) can still elide it. Cast + // to bypass the type check so the adapter's defensive + // default — `application/octet-stream` — is exercised; the + // alternative is a literal `data:undefined;base64,...` URI + // that the upstream rejects. + source: { type: 'data', value: 'aGVsbG8=' } as any, + }, + ], + }, + ], + logger: testLogger, + })) { + // consume + } + + const [rawParams] = mockSend.mock.calls[0]! + const params = rawParams.chatRequest + const imagePart = params.messages[0].content.find( + (p: any) => p.type === 'image_url', + ) + expect(imagePart).toBeDefined() + expect(imagePart.imageUrl.url).toBe( + 'data:application/octet-stream;base64,aGVsbG8=', + ) + expect(imagePart.imageUrl.url).not.toContain('undefined') + }) + it('yields error chunk on SDK error', async () => { mockSend = vi.fn().mockRejectedValueOnce(new Error('Invalid API key')) From 6a9ce76a37fa9f53c4d75139c66777df13da95f1 Mon Sep 17 00:00:00 2001 From: Alem Tuzlak Date: Tue, 12 May 2026 15:50:38 +0200 Subject: [PATCH 16/49] fix(openai-base): stop processing chunks after top-level error event The Responses adapter's processStreamChunks marked `runFinishedEmitted` on a top-level chunk.type === 'error' to prevent the synthetic terminal block from firing, but it did not return from the for-await loop. Any subsequent chunks the upstream delivered after a terminal error event (a stray output_text.delta, an output_item.done, etc.) would continue to emit lifecycle events past RUN_ERROR, violating the 'RUN_ERROR is terminal' contract. Mirror the response.failed / response.incomplete branches above: return after yielding RUN_ERROR. Regression test covers the case where the upstream continues delivering chunks after a top-level error event and asserts no further chunks reach the consumer. --- .../typescript/openai-base/src/adapters/responses-text.ts | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/packages/typescript/openai-base/src/adapters/responses-text.ts b/packages/typescript/openai-base/src/adapters/responses-text.ts index 4f057b6cf..c5d8e5482 100644 --- a/packages/typescript/openai-base/src/adapters/responses-text.ts +++ b/packages/typescript/openai-base/src/adapters/responses-text.ts @@ -907,8 +907,13 @@ export class OpenAICompatibleResponsesTextAdapter< }, } satisfies StreamChunk // RUN_ERROR is terminal — don't let the synthetic RUN_FINISHED - // block fire after a top-level stream error event. 
+ // block fire after a top-level stream error event, and stop + // processing further chunks so no in-flight lifecycle events + // (TEXT_MESSAGE_CONTENT, TOOL_CALL_*) leak past the terminal + // error. Mirrors the `response.failed` / `response.incomplete` + // branches above which return after their RUN_ERROR emission. runFinishedEmitted = true + return } } From 06dd544ed63be169a7cfa0d515d63ef6429c0ba0 Mon Sep 17 00:00:00 2001 From: Alem Tuzlak Date: Tue, 12 May 2026 15:59:13 +0200 Subject: [PATCH 17/49] fix(openai-base, ai-openrouter): route Responses structuredOutput through transformStructuredOutput hook MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The Responses base hard-coded transformNullsToUndefined on parsed structured-output JSON, leaving no hook for subclasses to opt out. The changeset's promise of 'transformStructuredOutput for subclasses (like OpenRouter) that preserve nulls in structured output instead of converting them to undefined' was therefore only fulfilled on the chat-completions surface — the matching Responses adapter would silently strip nulls regardless of provider intent. Add the transformStructuredOutput protected hook on OpenAICompatibleResponsesTextAdapter mirroring the chat-completions base's design, and override it as a no-op on OpenRouterResponsesTextAdapter so OpenRouter callers see null sentinels round-trip identically across the two adapter surfaces. Regression test asserts a structuredOutput response containing `nickname: null` round-trips as null (not undefined) through the Responses adapter. --- .../src/adapters/responses-text.ts | 11 ++++ .../openrouter-responses-adapter.test.ts | 62 +++++++++++++++++++ .../src/adapters/responses-text.ts | 23 ++++++- 3 files changed, 93 insertions(+), 3 deletions(-) diff --git a/packages/typescript/ai-openrouter/src/adapters/responses-text.ts b/packages/typescript/ai-openrouter/src/adapters/responses-text.ts index 28802f083..f05a5f343 100644 --- a/packages/typescript/ai-openrouter/src/adapters/responses-text.ts +++ b/packages/typescript/ai-openrouter/src/adapters/responses-text.ts @@ -110,6 +110,17 @@ export class OpenRouterResponsesTextAdapter< this.orClient = new OpenRouter(config) } + /** + * Preserve nulls in structured-output results. OpenRouter routes through + * a wide variety of upstream providers; some of them return `null` as a + * distinct sentinel ("the field exists, the value is null") rather than + * collapsing it to absent. Stripping nulls here would erase that + * distinction. Mirrors the chat-completions adapter override. 
+ */ + protected override transformStructuredOutput(parsed: unknown): unknown { + return parsed + } + // ──────────────────────────────────────────────────────────────────────── // SDK call hooks — the params we get here were built by our overridden // mapOptionsToRequest / convertMessagesToInput / convertContentPartToInput diff --git a/packages/typescript/ai-openrouter/tests/openrouter-responses-adapter.test.ts b/packages/typescript/ai-openrouter/tests/openrouter-responses-adapter.test.ts index 8621e4483..0d5817605 100644 --- a/packages/typescript/ai-openrouter/tests/openrouter-responses-adapter.test.ts +++ b/packages/typescript/ai-openrouter/tests/openrouter-responses-adapter.test.ts @@ -514,6 +514,68 @@ describe('OpenRouter responses adapter — stream event bridge', () => { }) }) +describe('OpenRouter responses adapter — structured output', () => { + beforeEach(() => { + vi.clearAllMocks() + }) + + it('preserves null values in structured output (does not strip nulls)', async () => { + // Non-streaming Responses API result with a `null` field in the parsed + // JSON. The base default `transformStructuredOutput` would convert + // nulls to undefined; the OpenRouter override must keep them intact + // so consumers that discriminate "field present but null" from + // "field absent" see the null sentinel the upstream returned. + setupMockSdkClient([], { + id: 'resp_1', + model: 'openai/gpt-4o-mini', + output: [ + { + type: 'message', + id: 'msg_1', + role: 'assistant', + content: [ + { + type: 'output_text', + text: JSON.stringify({ + name: 'Alice', + age: 30, + nickname: null, + }), + }, + ], + }, + ], + usage: { inputTokens: 5, outputTokens: 2, totalTokens: 7 }, + }) + + const adapter = createAdapter() + const result = await adapter.structuredOutput({ + chatOptions: { + model: 'openai/gpt-4o-mini' as any, + messages: [{ role: 'user', content: 'profile?' }], + logger: testLogger, + }, + outputSchema: { + type: 'object', + properties: { + name: { type: 'string' }, + age: { type: 'number' }, + nickname: { type: ['string', 'null'] }, + }, + required: ['name', 'age', 'nickname'], + }, + }) + + expect(result.data).toEqual({ + name: 'Alice', + age: 30, + nickname: null, + }) + // Critical: nickname should be `null`, not `undefined`. + expect((result.data as any).nickname).toBeNull() + }) +}) + describe('OpenRouter responses adapter — SDK constructor wiring', () => { beforeEach(() => { vi.clearAllMocks() diff --git a/packages/typescript/openai-base/src/adapters/responses-text.ts b/packages/typescript/openai-base/src/adapters/responses-text.ts index c5d8e5482..93c48bcc6 100644 --- a/packages/typescript/openai-base/src/adapters/responses-text.ts +++ b/packages/typescript/openai-base/src/adapters/responses-text.ts @@ -228,9 +228,13 @@ export class OpenAICompatibleResponsesTextAdapter< ) } - // Transform null values to undefined to match original Zod schema expectations - // Provider returns null for optional fields we made nullable in the schema - const transformed = transformNullsToUndefined(parsed) + // Apply the provider-specific post-parse shaping (default: null → + // undefined to align with the original Zod schema's optional-field + // semantics; subclasses with different conventions can override + // `transformStructuredOutput`, mirroring the chat-completions base's + // hook so OpenRouter and other providers that preserve nulls in + // structured output can opt out without forking `structuredOutput`). 
+ const transformed = this.transformStructuredOutput(parsed) return { data: transformed, @@ -258,6 +262,19 @@ export class OpenAICompatibleResponsesTextAdapter< return makeStructuredOutputCompatible(schema, originalRequired) } + /** + * Final shaping pass applied to parsed structured-output JSON before it is + * returned to the caller. Default converts `null` values to `undefined` so + * the result aligns with the original Zod schema's optional-field + * semantics. Subclasses with different conventions (OpenRouter historically + * preserves nulls) can override — mirrors the chat-completions base's hook + * so a subclass that opts out of null-stripping doesn't have to fork the + * whole `structuredOutput` method. + */ + protected transformStructuredOutput(parsed: unknown): unknown { + return transformNullsToUndefined(parsed) + } + /** * Performs the non-streaming Responses API network call. The default uses * the OpenAI SDK (`client.responses.create`), which covers any provider From 39c927b3b99f535cb6d5fbc7a835fc5394663dc5 Mon Sep 17 00:00:00 2001 From: Alem Tuzlak Date: Tue, 12 May 2026 15:59:17 +0200 Subject: [PATCH 18/49] fix(ai-openrouter): extract text from array-shaped tool message content The chat-completions adapter's convertMessage tool branch still JSON-stringified Array tool message content, so a tool result delivered as structured parts (e.g. [{type:'text', content: '"temp":'}, {type:'text', content:'72'}]) reached the model as the literal JSON of the parts rather than the joined textual result. The parallel responses adapter override was fixed earlier; this mirrors the same fix on the chat-completions path so both surfaces handle structured tool content identically. Regression test feeds a structured tool result and asserts the wire payload's tool message content is the joined text without any '"type":"text"' leakage. --- .../ai-openrouter/src/adapters/text.ts | 7 ++- .../tests/openrouter-adapter.test.ts | 55 +++++++++++++++++++ 2 files changed, 61 insertions(+), 1 deletion(-) diff --git a/packages/typescript/ai-openrouter/src/adapters/text.ts b/packages/typescript/ai-openrouter/src/adapters/text.ts index b658a2ff8..0f8162248 100644 --- a/packages/typescript/ai-openrouter/src/adapters/text.ts +++ b/packages/typescript/ai-openrouter/src/adapters/text.ts @@ -170,12 +170,17 @@ export class OpenRouterTextAdapter< protected override convertMessage(message: ModelMessage): any { if (message.role === 'tool') { + // For structured (Array) tool results, extract the text + // content rather than JSON-stringifying the parts — sending the raw + // ContentPart shape (e.g. `[{"type":"text","content":"…"}]`) into the + // tool message's `content` field would feed the literal JSON of the + // parts back to the model instead of the tool's textual result. return { role: 'tool', content: typeof message.content === 'string' ? 
message.content - : JSON.stringify(message.content), + : this.extractTextContent(message.content), toolCallId: message.toolCallId || '', } satisfies ChatMessages } diff --git a/packages/typescript/ai-openrouter/tests/openrouter-adapter.test.ts b/packages/typescript/ai-openrouter/tests/openrouter-adapter.test.ts index 7bab688ff..f6e83df18 100644 --- a/packages/typescript/ai-openrouter/tests/openrouter-adapter.test.ts +++ b/packages/typescript/ai-openrouter/tests/openrouter-adapter.test.ts @@ -1975,6 +1975,61 @@ describe('OpenRouter convertMessage fail-loud guards', () => { expect(assistantMsg.content).toBe('hello world') }) + it('extracts text from array-shaped tool message content instead of JSON-stringifying parts', async () => { + setupMockSdkClient([ + { + id: 'x', + model: 'openai/gpt-4o-mini', + choices: [{ delta: { content: 'ok' }, finishReason: 'stop' }], + }, + ]) + const adapter = createAdapter() + + for await (const _ of adapter.chatStream({ + model: 'openai/gpt-4o-mini', + messages: [ + { role: 'user', content: 'hi' }, + { + role: 'assistant', + content: null, + toolCalls: [ + { + id: 'call_1', + type: 'function', + function: { + name: 'lookup_weather', + arguments: '{"location":"Berlin"}', + }, + }, + ], + }, + { + role: 'tool', + toolCallId: 'call_1', + // Structured tool result content. The adapter must extract the + // text rather than JSON-stringifying the parts; otherwise the + // model would see the literal `[{"type":"text","content":"..."}]` + // shape on its next turn instead of the actual tool output. + content: [ + { type: 'text', content: '{"temp":' }, + { type: 'text', content: '72}' }, + ], + }, + ], + logger: testLogger, + })) { + // consume + } + + const [rawParams] = mockSend.mock.calls[0]! + const toolMsg = rawParams.chatRequest.messages.find( + (m: any) => m.role === 'tool', + ) + expect(toolMsg).toBeDefined() + expect(toolMsg.content).toBe('{"temp":72}') + expect(toolMsg.content).not.toContain('"type":"text"') + }) + it('emits content: null (not undefined) for assistant messages with only tool calls', async () => { setupMockSdkClient([ { From 335adaf451fb49527e881a30b9f36479bf8e6ca8 Mon Sep 17 00:00:00 2001 From: Alem Tuzlak Date: Tue, 12 May 2026 16:09:09 +0200 Subject: [PATCH 19/49] chore(ai-groq): declare @tanstack/ai as workspace devDependency MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Every sibling adapter (ai-openai, ai-grok, ai-openrouter, ai-anthropic, ai-gemini, ai-fal, ai-ollama) explicitly lists `@tanstack/ai: workspace:*` under devDependencies in addition to declaring it as a peer. ai-groq omitted the devDep entry, so resolution worked only via pnpm's autoInstallPeers behaviour — toggling that off (strict installs, some yarn berry configs) would silently break ai-groq while every other adapter kept working. Add the dev dep for parity. 
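
With `auto-install-peers` off, the failure is a plain resolution error at
the first type import (a hypothetical minimal reproduction, not output
captured from this repo):

    // packages/typescript/ai-groq/src/adapters/text.ts
    import type { Modality, TextOptions } from '@tanstack/ai'
    // tsc: TS2307: Cannot find module '@tanstack/ai' or its corresponding
    // type declarations. pnpm never linked the peer into ai-groq's own
    // node_modules, so typecheck and test runs both fail.
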
--- packages/typescript/ai-groq/package.json | 1 + pnpm-lock.yaml | 225 ++++++++++++++++------- 2 files changed, 161 insertions(+), 65 deletions(-) diff --git a/packages/typescript/ai-groq/package.json b/packages/typescript/ai-groq/package.json index 9899b1d46..54af3dcf0 100644 --- a/packages/typescript/ai-groq/package.json +++ b/packages/typescript/ai-groq/package.json @@ -43,6 +43,7 @@ "adapter" ], "devDependencies": { + "@tanstack/ai": "workspace:*", "@vitest/coverage-v8": "4.0.14", "vite": "^7.2.7" }, diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 48c977659..07a916d8b 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -642,7 +642,7 @@ importers: version: 7.3.1(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) vitest: specifier: ^4.0.14 - version: 4.1.4(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(@vitest/coverage-v8@4.0.14)(happy-dom@20.0.11)(jsdom@27.3.0(postcss@8.5.9))(vite@7.3.1(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + version: 4.1.4(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jsdom@27.3.0(postcss@8.5.9))(vite@7.3.1(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) web-vitals: specifier: ^5.1.0 version: 5.1.0 @@ -944,7 +944,7 @@ importers: version: 1.1.0 '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.1.4) + version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) zod: specifier: ^4.2.0 version: 4.2.1 @@ -963,7 +963,7 @@ importers: version: link:../ai '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.1.4) + version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) zod: specifier: ^4.2.0 version: 4.2.1 @@ -982,7 +982,7 @@ importers: version: 1.1.0 '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.1.4) + version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) vite: specifier: ^7.2.7 version: 7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) @@ -1001,7 +1001,7 @@ importers: version: link:../ai '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.1.4) + version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) zod: specifier: ^4.2.0 version: 4.3.6 @@ -1029,7 +1029,7 @@ importers: version: link:../ai-openai '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.1.4) + version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) commander: specifier: ^13.1.0 version: 13.1.0 @@ -1106,7 +1106,7 @@ importers: devDependencies: '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.1.4) + version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) jsdom: 
specifier: ^27.2.0 version: 27.3.0(postcss@8.5.9) @@ -1143,7 +1143,7 @@ importers: version: link:../ai-client '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.1.4) + version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) packages/typescript/ai-event-client: dependencies: @@ -1156,7 +1156,7 @@ importers: devDependencies: '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.1.4) + version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) packages/typescript/ai-fal: dependencies: @@ -1172,7 +1172,7 @@ importers: version: link:../ai '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.1.4) + version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) vite: specifier: ^7.2.7 version: 7.3.1(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) @@ -1191,7 +1191,7 @@ importers: version: link:../ai '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.1.4) + version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) vite: specifier: ^7.2.7 version: 7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) @@ -1219,16 +1219,13 @@ importers: version: link:../ai-client '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.1.4) + version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) vite: specifier: ^7.2.7 version: 7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) packages/typescript/ai-groq: dependencies: - '@tanstack/ai': - specifier: workspace:^ - version: link:../ai '@tanstack/ai-utils': specifier: workspace:* version: link:../ai-utils @@ -1239,9 +1236,12 @@ importers: specifier: ^4.0.0 version: 4.3.6 devDependencies: + '@tanstack/ai': + specifier: workspace:* + version: link:../ai '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.1.4) + version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) vite: specifier: ^7.2.7 version: 7.3.1(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) @@ -1257,7 +1257,7 @@ importers: version: 4.20260317.1 '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.1.4) + version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) wrangler: specifier: ^4.88.0 version: 4.88.0(@cloudflare/workers-types@4.20260317.1) @@ -1273,7 +1273,7 @@ importers: devDependencies: '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.1.4) + version: 
4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) packages/typescript/ai-isolate-quickjs: dependencies: @@ -1286,7 +1286,7 @@ importers: devDependencies: '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.1.4) + version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) packages/typescript/ai-ollama: dependencies: @@ -1302,7 +1302,7 @@ importers: version: link:../ai '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.1.4) + version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) vite: specifier: ^7.2.7 version: 7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) @@ -1327,7 +1327,7 @@ importers: version: link:../ai-client '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.1.4) + version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) vite: specifier: ^7.2.7 version: 7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) @@ -1352,7 +1352,7 @@ importers: version: link:../ai '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.1.4) + version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) vite: specifier: ^7.2.7 version: 7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) @@ -1374,7 +1374,7 @@ importers: version: 3.2.4(preact@10.28.2) '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.1.4) + version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) jsdom: specifier: ^27.2.0 version: 27.3.0(postcss@8.5.9) @@ -1402,7 +1402,7 @@ importers: version: 19.2.7 '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.1.4) + version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) jsdom: specifier: ^27.2.0 version: 27.3.0(postcss@8.5.9) @@ -1442,7 +1442,7 @@ importers: version: 19.2.7 '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.1.4) + version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) react: specifier: ^19.2.3 version: 19.2.3 @@ -1513,7 +1513,7 @@ importers: version: link:../ai-solid '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.1.4) + version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) solid-js: specifier: ^1.9.10 version: 1.9.10 @@ -1541,7 +1541,7 @@ importers: version: 
24.10.3 '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.1.4) + version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) jsdom: specifier: ^27.2.0 version: 27.3.0(postcss@8.5.9) @@ -1565,7 +1565,7 @@ importers: version: 24.10.3 '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.1.4) + version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) vite: specifier: ^7.2.7 version: 7.3.1(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) @@ -1630,7 +1630,7 @@ importers: version: 6.0.3(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))(vue@3.5.25(typescript@5.9.3)) '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.1.4) + version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) vite: specifier: ^7.2.7 version: 7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) @@ -1655,7 +1655,7 @@ importers: version: link:../ai '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.1.4) + version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) vite: specifier: ^7.2.7 version: 7.3.1(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) @@ -1674,7 +1674,7 @@ importers: devDependencies: '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.1.4) + version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) vite: specifier: ^7.2.7 version: 7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) @@ -1693,7 +1693,7 @@ importers: version: 19.2.7 '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.1.4) + version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) react: specifier: ^19.2.3 version: 19.2.3 @@ -1712,7 +1712,7 @@ importers: devDependencies: '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.1.4) + version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) solid-js: specifier: ^1.9.10 version: 1.9.10 @@ -6757,12 +6757,26 @@ packages: '@vitest/browser': optional: true + '@vitest/expect@4.0.14': + resolution: {integrity: sha512-RHk63V3zvRiYOWAV0rGEBRO820ce17hz7cI2kDmEdfQsBjT2luEKB5tCOc91u1oSQoUOZkSv3ZyzkdkSLD7lKw==} + '@vitest/expect@4.0.15': resolution: {integrity: sha512-Gfyva9/GxPAWXIWjyGDli9O+waHDC0Q0jaLdFP1qPAUUfo1FEXPXUfUkp3eZA0sSq340vPycSyOlYUeM15Ft1w==} '@vitest/expect@4.1.4': resolution: {integrity: sha512-iPBpra+VDuXmBFI3FMKHSFXp3Gx5HfmSCE8X67Dn+bwephCnQCaB7qWK2ldHa+8ncN8hJU8VTMcxjPpyMkUjww==} + 
'@vitest/mocker@4.0.14': + resolution: {integrity: sha512-RzS5NujlCzeRPF1MK7MXLiEFpkIXeMdQ+rN3Kk3tDI9j0mtbr7Nmuq67tpkOJQpgyClbOltCXMjLZicJHsH5Cg==} + peerDependencies: + msw: ^2.4.9 + vite: ^6.0.0 || ^7.0.0-0 + peerDependenciesMeta: + msw: + optional: true + vite: + optional: true + '@vitest/mocker@4.0.15': resolution: {integrity: sha512-CZ28GLfOEIFkvCFngN8Sfx5h+Se0zN+h4B7yOsPVCcgtiO7t5jt9xQh2E1UkFep+eb9fjyMfuC5gBypwb07fvQ==} peerDependencies: @@ -6794,18 +6808,27 @@ packages: '@vitest/pretty-format@4.1.4': resolution: {integrity: sha512-ddmDHU0gjEUyEVLxtZa7xamrpIefdEETu3nZjWtHeZX4QxqJ7tRxSteHVXJOcr8jhiLoGAhkK4WJ3WqBpjx42A==} + '@vitest/runner@4.0.14': + resolution: {integrity: sha512-BsAIk3FAqxICqREbX8SetIteT8PiaUL/tgJjmhxJhCsigmzzH8xeadtp7LRnTpCVzvf0ib9BgAfKJHuhNllKLw==} + '@vitest/runner@4.0.15': resolution: {integrity: sha512-+A+yMY8dGixUhHmNdPUxOh0la6uVzun86vAbuMT3hIDxMrAOmn5ILBHm8ajrqHE0t8R9T1dGnde1A5DTnmi3qw==} '@vitest/runner@4.1.4': resolution: {integrity: sha512-xTp7VZ5aXP5ZJrn15UtJUWlx6qXLnGtF6jNxHepdPHpMfz/aVPx+htHtgcAL2mDXJgKhpoo2e9/hVJsIeFbytQ==} + '@vitest/snapshot@4.0.14': + resolution: {integrity: sha512-aQVBfT1PMzDSA16Y3Fp45a0q8nKexx6N5Amw3MX55BeTeZpoC08fGqEZqVmPcqN0ueZsuUQ9rriPMhZ3Mu19Ag==} + '@vitest/snapshot@4.0.15': resolution: {integrity: sha512-A7Ob8EdFZJIBjLjeO0DZF4lqR6U7Ydi5/5LIZ0xcI+23lYlsYJAfGn8PrIWTYdZQRNnSRlzhg0zyGu37mVdy5g==} '@vitest/snapshot@4.1.4': resolution: {integrity: sha512-MCjCFgaS8aZz+m5nTcEcgk/xhWv0rEH4Yl53PPlMXOZ1/Ka2VcZU6CJ+MgYCZbcJvzGhQRjVrGQNZqkGPttIKw==} + '@vitest/spy@4.0.14': + resolution: {integrity: sha512-JmAZT1UtZooO0tpY3GRyiC/8W7dCs05UOq9rfsUUgEZEdq+DuHLmWhPsrTt0TiW7WYeL/hXpaE07AZ2RCk44hg==} + '@vitest/spy@4.0.15': resolution: {integrity: sha512-+EIjOJmnY6mIfdXtE/bnozKEvTC4Uczg19yeZ2vtCz5Yyb0QQ31QWVQ8hswJ3Ysx/K2EqaNsVanjr//2+P3FHw==} @@ -11829,6 +11852,40 @@ packages: vite: optional: true + vitest@4.0.14: + resolution: {integrity: sha512-d9B2J9Cm9dN9+6nxMnnNJKJCtcyKfnHj15N6YNJfaFHRLua/d3sRKU9RuKmO9mB0XdFtUizlxfz/VPbd3OxGhw==} + engines: {node: ^20.0.0 || ^22.0.0 || >=24.0.0} + hasBin: true + peerDependencies: + '@edge-runtime/vm': '*' + '@opentelemetry/api': ^1.9.0 + '@types/node': ^20.0.0 || ^22.0.0 || >=24.0.0 + '@vitest/browser-playwright': 4.0.14 + '@vitest/browser-preview': 4.0.14 + '@vitest/browser-webdriverio': 4.0.14 + '@vitest/ui': 4.0.14 + happy-dom: '*' + jsdom: '*' + peerDependenciesMeta: + '@edge-runtime/vm': + optional: true + '@opentelemetry/api': + optional: true + '@types/node': + optional: true + '@vitest/browser-playwright': + optional: true + '@vitest/browser-preview': + optional: true + '@vitest/browser-webdriverio': + optional: true + '@vitest/ui': + optional: true + happy-dom: + optional: true + jsdom: + optional: true + vitest@4.0.15: resolution: {integrity: sha512-n1RxDp8UJm6N0IbJLQo+yzLZ2sQCDyl1o0LeugbPWf8+8Fttp29GghsQBjYJVmWq3gBFfe9Hs1spR44vovn2wA==} engines: {node: ^20.0.0 || ^22.0.0 || >=24.0.0} @@ -17292,7 +17349,7 @@ snapshots: vite: 7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) vue: 3.5.25(typescript@5.9.3) - '@vitest/coverage-v8@4.0.14(vitest@4.0.15(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))': + '@vitest/coverage-v8@4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))': dependencies: 
'@bcoe/v8-coverage': 1.0.2 '@vitest/utils': 4.0.14 @@ -17305,11 +17362,11 @@ snapshots: obug: 2.1.1 std-env: 3.10.0 tinyrainbow: 3.1.0 - vitest: 4.0.15(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) + vitest: 4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) transitivePeerDependencies: - supports-color - '@vitest/coverage-v8@4.0.14(vitest@4.1.4)': + '@vitest/coverage-v8@4.0.14(vitest@4.0.15(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))': dependencies: '@bcoe/v8-coverage': 1.0.2 '@vitest/utils': 4.0.14 @@ -17322,10 +17379,19 @@ snapshots: obug: 2.1.1 std-env: 3.10.0 tinyrainbow: 3.1.0 - vitest: 4.1.4(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(@vitest/coverage-v8@4.0.14)(happy-dom@20.0.11)(jsdom@27.3.0(postcss@8.5.9))(vite@7.3.1(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + vitest: 4.0.15(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) transitivePeerDependencies: - supports-color + '@vitest/expect@4.0.14': + dependencies: + '@standard-schema/spec': 1.1.0 + '@types/chai': 5.2.3 + '@vitest/spy': 4.0.14 + '@vitest/utils': 4.0.14 + chai: 6.2.2 + tinyrainbow: 3.1.0 + '@vitest/expect@4.0.15': dependencies: '@standard-schema/spec': 1.1.0 @@ -17344,6 +17410,14 @@ snapshots: chai: 6.2.2 tinyrainbow: 3.1.0 + '@vitest/mocker@4.0.14(vite@7.3.1(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))': + dependencies: + '@vitest/spy': 4.0.14 + estree-walker: 3.0.3 + magic-string: 0.30.21 + optionalDependencies: + vite: 7.3.1(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) + '@vitest/mocker@4.0.15(vite@7.3.1(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))': dependencies: '@vitest/spy': 4.0.15 @@ -17372,6 +17446,11 @@ snapshots: dependencies: tinyrainbow: 3.1.0 + '@vitest/runner@4.0.14': + dependencies: + '@vitest/utils': 4.0.14 + pathe: 2.0.3 + '@vitest/runner@4.0.15': dependencies: '@vitest/utils': 4.0.15 @@ -17382,6 +17461,12 @@ snapshots: '@vitest/utils': 4.1.4 pathe: 2.0.3 + '@vitest/snapshot@4.0.14': + dependencies: + '@vitest/pretty-format': 4.0.14 + magic-string: 0.30.21 + pathe: 2.0.3 + '@vitest/snapshot@4.0.15': dependencies: '@vitest/pretty-format': 4.0.15 @@ -17395,6 +17480,8 @@ snapshots: magic-string: 0.30.21 pathe: 2.0.3 + '@vitest/spy@4.0.14': {} + '@vitest/spy@4.0.15': {} '@vitest/spy@4.1.4': {} @@ -23494,26 +23581,26 @@ snapshots: optionalDependencies: vite: 7.3.1(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) - vitest@4.0.15(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2): + vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2): dependencies: - '@vitest/expect': 4.0.15 - '@vitest/mocker': 
4.0.15(vite@7.3.1(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) - '@vitest/pretty-format': 4.0.15 - '@vitest/runner': 4.0.15 - '@vitest/snapshot': 4.0.15 - '@vitest/spy': 4.0.15 - '@vitest/utils': 4.0.15 + '@vitest/expect': 4.0.14 + '@vitest/mocker': 4.0.14(vite@7.3.1(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + '@vitest/pretty-format': 4.0.14 + '@vitest/runner': 4.0.14 + '@vitest/snapshot': 4.0.14 + '@vitest/spy': 4.0.14 + '@vitest/utils': 4.0.14 es-module-lexer: 1.7.0 expect-type: 1.3.0 magic-string: 0.30.21 obug: 2.1.1 pathe: 2.0.3 - picomatch: 4.0.3 + picomatch: 4.0.4 std-env: 3.10.0 tinybench: 2.9.0 - tinyexec: 1.0.2 - tinyglobby: 0.2.15 - tinyrainbow: 3.0.3 + tinyexec: 0.3.2 + tinyglobby: 0.2.16 + tinyrainbow: 3.1.0 vite: 7.3.1(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) why-is-node-running: 2.3.0 optionalDependencies: @@ -23534,36 +23621,45 @@ snapshots: - tsx - yaml - vitest@4.1.4(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(@vitest/coverage-v8@4.0.14)(happy-dom@20.0.11)(jsdom@27.3.0(postcss@8.5.9))(vite@7.3.1(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)): + vitest@4.0.15(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2): dependencies: - '@vitest/expect': 4.1.4 - '@vitest/mocker': 4.1.4(vite@7.3.1(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) - '@vitest/pretty-format': 4.1.4 - '@vitest/runner': 4.1.4 - '@vitest/snapshot': 4.1.4 - '@vitest/spy': 4.1.4 - '@vitest/utils': 4.1.4 - es-module-lexer: 2.0.0 + '@vitest/expect': 4.0.15 + '@vitest/mocker': 4.0.15(vite@7.3.1(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + '@vitest/pretty-format': 4.0.15 + '@vitest/runner': 4.0.15 + '@vitest/snapshot': 4.0.15 + '@vitest/spy': 4.0.15 + '@vitest/utils': 4.0.15 + es-module-lexer: 1.7.0 expect-type: 1.3.0 magic-string: 0.30.21 obug: 2.1.1 pathe: 2.0.3 - picomatch: 4.0.4 - std-env: 4.0.0 + picomatch: 4.0.3 + std-env: 3.10.0 tinybench: 2.9.0 - tinyexec: 1.1.1 - tinyglobby: 0.2.16 - tinyrainbow: 3.1.0 + tinyexec: 1.0.2 + tinyglobby: 0.2.15 + tinyrainbow: 3.0.3 vite: 7.3.1(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) why-is-node-running: 2.3.0 optionalDependencies: '@opentelemetry/api': 1.9.1 '@types/node': 24.10.3 - '@vitest/coverage-v8': 4.0.14(vitest@4.1.4) happy-dom: 20.0.11 jsdom: 27.3.0(postcss@8.5.9) transitivePeerDependencies: + - jiti + - less + - lightningcss - msw + - sass + - sass-embedded + - stylus + - sugarss + - terser + - tsx + - yaml vitest@4.1.4(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jsdom@27.3.0(postcss@8.5.9))(vite@7.3.1(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)): dependencies: @@ -23594,7 +23690,6 @@ snapshots: jsdom: 27.3.0(postcss@8.5.9) transitivePeerDependencies: - msw - optional: true vscode-uri@3.1.0: {} From 7bb2b826a86047f96c7290580babc899adaea103 Mon Sep 17 00:00:00 2001 From: Alem Tuzlak Date: Tue, 12 May 2026 16:09:13 +0200 Subject: [PATCH 20/49] fix(ai-openrouter): route audio URLs to text fallback on chat-completions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The chat-completions OpenRouter 
adapter's convertContentPart for audio unconditionally emitted `{ type: 'input_audio', inputAudio: { data, format: 'mp3' } }` — but `data` is supposed to be base64. A URL-sourced audio part therefore shipped the literal URL string into the base64 slot, which the upstream rejects (or worse, treats as garbage audio bytes). The Responses adapter already handles this by routing URL audio through `input_file` (where the URL belongs); chat-completions has no `input_file` shape on this surface, so mirror the existing document fallback: emit a text reference noting the URL. Callers needing real audio URL support should use the Responses adapter. --- .../typescript/ai-openrouter/src/adapters/text.ts | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/packages/typescript/ai-openrouter/src/adapters/text.ts b/packages/typescript/ai-openrouter/src/adapters/text.ts index 0f8162248..e1b8ec76f 100644 --- a/packages/typescript/ai-openrouter/src/adapters/text.ts +++ b/packages/typescript/ai-openrouter/src/adapters/text.ts @@ -286,6 +286,17 @@ export class OpenRouterTextAdapter< } } case 'audio': + // OpenRouter's chat-completions `input_audio` shape carries + // `{ data, format }` where `data` is base64 — there's no URL + // variant on this wire. For URL-sourced audio, fall back to a + // text reference rather than feeding the literal URL into the + // base64 slot (which would either be rejected upstream or + // silently misinterpreted as garbage audio bytes). The + // Responses adapter does have an `input_file` URL variant and + // routes URLs there directly — see `responses-text.ts`. + if (part.source.type === 'url') { + return { type: 'text', text: `[Audio: ${part.source.value}]` } + } return { type: 'input_audio', inputAudio: { data: part.source.value, format: 'mp3' }, From 272fe5fe3b77057304573e9ebce002f5ea139f25 Mon Sep 17 00:00:00 2001 From: Alem Tuzlak Date: Tue, 12 May 2026 16:09:18 +0200 Subject: [PATCH 21/49] =?UTF-8?q?docs(ai-groq):=20correct=20message-types?= =?UTF-8?q?=20header=20=E2=80=94=20Groq=20SDK=20was=20dropped?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The header comment claimed these types "mirror the Groq SDK types", but the migration dropped the groq-sdk dependency entirely in favour of pointing the OpenAI SDK at Groq's /openai/v1 base URL. The file is now the source of truth for Groq-specific wire fields (compound tools, citation/service-tier provider options, …), not a mirror of an external SDK. Update the header to reflect the post-migration role. --- packages/typescript/ai-groq/src/message-types.ts | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/packages/typescript/ai-groq/src/message-types.ts b/packages/typescript/ai-groq/src/message-types.ts index d769cf4f9..ffa574d90 100644 --- a/packages/typescript/ai-groq/src/message-types.ts +++ b/packages/typescript/ai-groq/src/message-types.ts @@ -1,8 +1,12 @@ /** * Groq-specific message types for the Chat Completions API. * - * These type definitions mirror the Groq SDK types and are used internally - * by the adapter to avoid tight coupling to the SDK's exported types. + * Groq's wire format is OpenAI Chat Completions plus a few Groq-specific + * extensions (compound tools, citation/service-tier provider options, + * etc.). 
These type definitions describe that wire shape directly — the + * Groq SDK was dropped in favour of pointing the OpenAI SDK at Groq's + * `/openai/v1` base URL, so this file is the source of truth for + * Groq-only fields rather than a mirror of an external SDK's types. * * @see https://console.groq.com/docs/api-reference#chat */ From 2bc993cfb08ed6eeed6b33fb7d25b56288545843 Mon Sep 17 00:00:00 2001 From: Alem Tuzlak Date: Tue, 12 May 2026 16:15:31 +0200 Subject: [PATCH 22/49] fix(ai-openrouter): reject inline document data on chat-completions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The chat-completions convertContentPart 'document' branch unconditionally returned `{ type: 'text', text: `[Document: ${part.source.value}]` }`. For URL sources that's a reasonable degradation. For data sources, `part.source.value` is the raw base64 payload — a multi-megabyte document would be inlined into the prompt verbatim, blowing the context window and leaking the document content as plaintext bytes. Branch on `part.source.type`: URL sources keep the text-reference fallback, data sources throw with a clear error pointing the caller at the Responses adapter (which has proper `input_file` support for inline document data). Mirrors the audio URL/data branching added in the prior round. --- .../ai-openrouter/src/adapters/text.ts | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/packages/typescript/ai-openrouter/src/adapters/text.ts b/packages/typescript/ai-openrouter/src/adapters/text.ts index e1b8ec76f..39f408f48 100644 --- a/packages/typescript/ai-openrouter/src/adapters/text.ts +++ b/packages/typescript/ai-openrouter/src/adapters/text.ts @@ -304,8 +304,22 @@ export class OpenRouterTextAdapter< case 'video': return { type: 'video_url', videoUrl: { url: part.source.value } } case 'document': - // SDK doesn't have a document_url type — surface as text so the - // model at least sees the URL rather than dropping the part. + // The chat-completions SDK has no document_url type. For URL + // sources, surface a text reference so the model at least sees + // the link. For data sources, `part.source.value` is the raw + // base64 payload — inlining it into the prompt would blow the + // context window with megabytes of binary and leak the document + // content verbatim. Throw instead so the caller can either + // switch to the Responses adapter (which has proper input_file + // support for data documents) or strip the document before + // sending. + if (part.source.type === 'data') { + throw new Error( + `${this.name} chat-completions does not support inline (data) document content parts. ` + + `Use the Responses adapter (openRouterResponsesText) for document data, ` + + `or pass the document as a URL.`, + ) + } return { type: 'text', text: `[Document: ${part.source.value}]` } default: return null From 9d6a1e8bcecacb4709eaf4a6e7279f19143083a9 Mon Sep 17 00:00:00 2001 From: Tom Beckenham <34339192+tombeckenham@users.noreply.github.com> Date: Wed, 13 May 2026 10:33:48 +1000 Subject: [PATCH 23/49] =?UTF-8?q?refactor:=20rename=20@tanstack/openai-bas?= =?UTF-8?q?e=20=E2=86=92=20@tanstack/openai-compatible?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The "base" name implied this package tracked OpenAI's product roadmap. In reality it implements two OpenAI-shaped wire-format protocols (Chat Completions, Responses) that multiple providers ship — OpenRouter, Groq, Grok, vLLM, SGLang, Together. 
"OpenAI-compatible" is the industry term for this family (cf. Vercel's @ai-sdk/openai-compatible, LiteLLM, BentoML, Lightning AI). OpenRouter's beta Responses endpoint routes to Claude, Gemini, and other underlying models, confirming that /v1/responses (like /v1/chat/completions) is a multi-provider wire format rather than OpenAI-only — so the Responses adapter stays alongside Chat Completions in the renamed package. Pure rename, no behavior change. Class names (OpenAICompatibleChatCompletionsTextAdapter, OpenAICompatibleResponsesTextAdapter, …) and protected hook contracts are unchanged. Consumer packages (ai-openai, ai-openrouter, ai-groq, ai-grok) only update internal import paths; public API is unchanged. @tanstack/openai-base@0.2.x remains published on npm for any pinned lockfile references but will receive no further updates. A README in the renamed package documents the protocol-vs-product contract. Co-Authored-By: Claude Opus 4.7 (1M context) --- .../migrate-groq-openrouter-to-openai-base.md | 6 +- ...rename-openai-base-to-openai-compatible.md | 27 ++ packages/typescript/ai-grok/package.json | 2 +- .../typescript/ai-grok/src/adapters/image.ts | 2 +- .../ai-grok/src/adapters/summarize.ts | 4 +- .../typescript/ai-grok/src/adapters/text.ts | 4 +- .../typescript/ai-grok/src/tools/index.ts | 2 +- .../typescript/ai-grok/src/utils/client.ts | 2 +- .../ai-grok/src/utils/schema-converter.ts | 2 +- packages/typescript/ai-groq/package.json | 2 +- .../typescript/ai-groq/src/adapters/text.ts | 4 +- .../typescript/ai-groq/src/utils/client.ts | 2 +- .../ai-groq/src/utils/schema-converter.ts | 6 +- .../ai-groq/tests/groq-adapter.test.ts | 2 +- .../ai-groq/tests/schema-converter.test.ts | 4 +- packages/typescript/ai-openai/package.json | 2 +- .../ai-openai/src/adapters/image.ts | 2 +- .../ai-openai/src/adapters/summarize.ts | 2 +- .../typescript/ai-openai/src/adapters/text.ts | 4 +- .../ai-openai/src/adapters/transcription.ts | 2 +- .../typescript/ai-openai/src/adapters/tts.ts | 2 +- .../ai-openai/src/adapters/video.ts | 2 +- .../ai-openai/src/tools/apply-patch-tool.ts | 4 +- .../src/tools/code-interpreter-tool.ts | 6 +- .../ai-openai/src/tools/computer-use-tool.ts | 6 +- .../ai-openai/src/tools/custom-tool.ts | 2 +- .../ai-openai/src/tools/file-search-tool.ts | 6 +- .../ai-openai/src/tools/function-tool.ts | 2 +- .../src/tools/image-generation-tool.ts | 6 +- .../typescript/ai-openai/src/tools/index.ts | 2 +- .../ai-openai/src/tools/local-shell-tool.ts | 4 +- .../ai-openai/src/tools/mcp-tool.ts | 6 +- .../ai-openai/src/tools/shell-tool.ts | 4 +- .../ai-openai/src/tools/tool-choice.ts | 2 +- .../ai-openai/src/tools/tool-converter.ts | 2 +- .../src/tools/web-search-preview-tool.ts | 6 +- .../ai-openai/src/tools/web-search-tool.ts | 6 +- .../typescript/ai-openai/src/utils/client.ts | 2 +- .../ai-openai/src/utils/schema-converter.ts | 2 +- .../typescript/ai-openrouter/package.json | 2 +- .../src/adapters/responses-text.ts | 6 +- .../ai-openrouter/src/adapters/text.ts | 4 +- .../CHANGELOG.md | 5 +- .../typescript/openai-compatible/README.md | 70 ++++++ .../package.json | 9 +- .../src/adapters/chat-completions-text.ts | 0 .../chat-completions-tool-converter.ts | 0 .../src/adapters/image.ts | 0 .../src/adapters/responses-text.ts | 0 .../src/adapters/responses-tool-converter.ts | 0 .../src/adapters/summarize.ts | 0 .../src/adapters/transcription.ts | 0 .../src/adapters/tts.ts | 0 .../src/adapters/video.ts | 0 .../src/index.ts | 0 .../src/tools/apply-patch-tool.ts | 0 .../src/tools/code-interpreter-tool.ts 
| 0 .../src/tools/computer-use-tool.ts | 0 .../src/tools/custom-tool.ts | 0 .../src/tools/file-search-tool.ts | 0 .../src/tools/function-tool.ts | 0 .../src/tools/image-generation-tool.ts | 0 .../src/tools/index.ts | 0 .../src/tools/local-shell-tool.ts | 0 .../src/tools/mcp-tool.ts | 0 .../src/tools/shell-tool.ts | 0 .../src/tools/tool-choice.ts | 0 .../src/tools/tool-converter.ts | 0 .../src/tools/web-search-preview-tool.ts | 0 .../src/tools/web-search-tool.ts | 0 .../src/types/config.ts | 0 .../src/utils/client.ts | 0 .../src/utils/request-options.ts | 0 .../src/utils/schema-converter.ts | 0 .../tests/chat-completions-text.test.ts | 0 .../tests/mcp-tool.test.ts | 0 .../tests/media-adapters.test.ts | 0 .../tests/responses-text.test.ts | 0 .../tests/schema-converter.test.ts | 0 .../tsconfig.json | 0 .../vite.config.ts | 0 pnpm-lock.yaml | 237 ++++++------------ 82 files changed, 247 insertions(+), 241 deletions(-) create mode 100644 .changeset/rename-openai-base-to-openai-compatible.md rename packages/typescript/{openai-base => openai-compatible}/CHANGELOG.md (86%) create mode 100644 packages/typescript/openai-compatible/README.md rename packages/typescript/{openai-base => openai-compatible}/package.json (78%) rename packages/typescript/{openai-base => openai-compatible}/src/adapters/chat-completions-text.ts (100%) rename packages/typescript/{openai-base => openai-compatible}/src/adapters/chat-completions-tool-converter.ts (100%) rename packages/typescript/{openai-base => openai-compatible}/src/adapters/image.ts (100%) rename packages/typescript/{openai-base => openai-compatible}/src/adapters/responses-text.ts (100%) rename packages/typescript/{openai-base => openai-compatible}/src/adapters/responses-tool-converter.ts (100%) rename packages/typescript/{openai-base => openai-compatible}/src/adapters/summarize.ts (100%) rename packages/typescript/{openai-base => openai-compatible}/src/adapters/transcription.ts (100%) rename packages/typescript/{openai-base => openai-compatible}/src/adapters/tts.ts (100%) rename packages/typescript/{openai-base => openai-compatible}/src/adapters/video.ts (100%) rename packages/typescript/{openai-base => openai-compatible}/src/index.ts (100%) rename packages/typescript/{openai-base => openai-compatible}/src/tools/apply-patch-tool.ts (100%) rename packages/typescript/{openai-base => openai-compatible}/src/tools/code-interpreter-tool.ts (100%) rename packages/typescript/{openai-base => openai-compatible}/src/tools/computer-use-tool.ts (100%) rename packages/typescript/{openai-base => openai-compatible}/src/tools/custom-tool.ts (100%) rename packages/typescript/{openai-base => openai-compatible}/src/tools/file-search-tool.ts (100%) rename packages/typescript/{openai-base => openai-compatible}/src/tools/function-tool.ts (100%) rename packages/typescript/{openai-base => openai-compatible}/src/tools/image-generation-tool.ts (100%) rename packages/typescript/{openai-base => openai-compatible}/src/tools/index.ts (100%) rename packages/typescript/{openai-base => openai-compatible}/src/tools/local-shell-tool.ts (100%) rename packages/typescript/{openai-base => openai-compatible}/src/tools/mcp-tool.ts (100%) rename packages/typescript/{openai-base => openai-compatible}/src/tools/shell-tool.ts (100%) rename packages/typescript/{openai-base => openai-compatible}/src/tools/tool-choice.ts (100%) rename packages/typescript/{openai-base => openai-compatible}/src/tools/tool-converter.ts (100%) rename packages/typescript/{openai-base => 
openai-compatible}/src/tools/web-search-preview-tool.ts (100%) rename packages/typescript/{openai-base => openai-compatible}/src/tools/web-search-tool.ts (100%) rename packages/typescript/{openai-base => openai-compatible}/src/types/config.ts (100%) rename packages/typescript/{openai-base => openai-compatible}/src/utils/client.ts (100%) rename packages/typescript/{openai-base => openai-compatible}/src/utils/request-options.ts (100%) rename packages/typescript/{openai-base => openai-compatible}/src/utils/schema-converter.ts (100%) rename packages/typescript/{openai-base => openai-compatible}/tests/chat-completions-text.test.ts (100%) rename packages/typescript/{openai-base => openai-compatible}/tests/mcp-tool.test.ts (100%) rename packages/typescript/{openai-base => openai-compatible}/tests/media-adapters.test.ts (100%) rename packages/typescript/{openai-base => openai-compatible}/tests/responses-text.test.ts (100%) rename packages/typescript/{openai-base => openai-compatible}/tests/schema-converter.test.ts (100%) rename packages/typescript/{openai-base => openai-compatible}/tsconfig.json (100%) rename packages/typescript/{openai-base => openai-compatible}/vite.config.ts (100%) diff --git a/.changeset/migrate-groq-openrouter-to-openai-base.md b/.changeset/migrate-groq-openrouter-to-openai-base.md index dd48aba33..255ad5068 100644 --- a/.changeset/migrate-groq-openrouter-to-openai-base.md +++ b/.changeset/migrate-groq-openrouter-to-openai-base.md @@ -1,5 +1,5 @@ --- -'@tanstack/openai-base': minor +'@tanstack/openai-compatible': minor '@tanstack/ai-groq': patch '@tanstack/ai-openrouter': patch '@tanstack/ai': patch @@ -7,9 +7,9 @@ Migrate `ai-groq` and `ai-openrouter` onto `OpenAICompatibleChatCompletionsTextAdapter` so they share the stream accumulator, partial-JSON tool-call buffer, RUN_ERROR taxonomy, and lifecycle gates with `ai-openai` / `ai-grok`. Removes ~1k LOC of duplicated stream processing. -`@tanstack/openai-base` adds three protected hooks on `OpenAICompatibleChatCompletionsTextAdapter` so providers with non-OpenAI SDK shapes can reuse the base: `callChatCompletion` and `callChatCompletionStream` (SDK call sites for non-streaming and streaming Chat Completions), and `extractReasoning` (surface reasoning content from chunk shapes that carry it, e.g. OpenRouter's `delta.reasoningDetails`, into the base's REASONING\_\* + legacy STEP_STARTED/STEP_FINISHED lifecycle). Also adds `transformStructuredOutput` for subclasses (like OpenRouter) that preserve nulls in structured output instead of converting them to undefined. +`@tanstack/openai-compatible` adds three protected hooks on `OpenAICompatibleChatCompletionsTextAdapter` so providers with non-OpenAI SDK shapes can reuse the base: `callChatCompletion` and `callChatCompletionStream` (SDK call sites for non-streaming and streaming Chat Completions), and `extractReasoning` (surface reasoning content from chunk shapes that carry it, e.g. OpenRouter's `delta.reasoningDetails`, into the base's REASONING\_\* + legacy STEP_STARTED/STEP_FINISHED lifecycle). Also adds `transformStructuredOutput` for subclasses (like OpenRouter) that preserve nulls in structured output instead of converting them to undefined. 
-`@tanstack/openai-base` fixes two error-handling regressions in the shared base: `structuredOutput` now throws a distinct `"response contained no content"` error rather than letting empty content cascade into a misleading JSON-parse error, and the post-loop tool-args drain block now logs malformed JSON via `logger.errors` (matching the in-loop finish_reason path) so truncated streams emitting partial tool args are debuggable instead of silently invoking the tool with `{}`. +`@tanstack/openai-compatible` fixes two error-handling regressions in the shared base: `structuredOutput` now throws a distinct `"response contained no content"` error rather than letting empty content cascade into a misleading JSON-parse error, and the post-loop tool-args drain block now logs malformed JSON via `logger.errors` (matching the in-loop finish_reason path) so truncated streams emitting partial tool args are debuggable instead of silently invoking the tool with `{}`. `@tanstack/ai` normalizes abort-shaped errors (`AbortError`, `APIUserAbortError`, `RequestAbortedError`) to a stable `{ message: 'Request aborted', code: 'aborted' }` payload in `toRunErrorPayload`, so consumers can discriminate user-initiated cancellation from other failures without matching on provider-specific message strings. diff --git a/.changeset/rename-openai-base-to-openai-compatible.md b/.changeset/rename-openai-base-to-openai-compatible.md new file mode 100644 index 000000000..d149f5283 --- /dev/null +++ b/.changeset/rename-openai-base-to-openai-compatible.md @@ -0,0 +1,27 @@ +--- +'@tanstack/openai-compatible': minor +'@tanstack/ai-openai': patch +'@tanstack/ai-openrouter': patch +'@tanstack/ai-groq': patch +'@tanstack/ai-grok': patch +--- + +Rename `@tanstack/openai-base` → `@tanstack/openai-compatible`. + +The previous "base" name implied this package tracked OpenAI's product roadmap. In reality it implements two OpenAI-shaped *wire-format protocols* that multiple providers ship: + +- **Chat Completions** (`/v1/chat/completions`) — natively implemented by OpenAI, Groq, Grok, OpenRouter, vLLM, SGLang, Together, etc. +- **Responses** (`/v1/responses`) — OpenAI's reference implementation plus OpenRouter's beta routing implementation (which fans out to Anthropic, Google, and other underlying models). + +"OpenAI-compatible" is the actual industry term for this family of wire formats (cf. Vercel's `@ai-sdk/openai-compatible`, LiteLLM's "OpenAI-compatible endpoint", BentoML / Lightning AI docs). The renamed package makes the boundary explicit: it holds the protocol, while OpenAI-specific tools, models, and behaviors continue to live in `@tanstack/ai-openai`. + +No runtime behavior changes. Class names (`OpenAICompatibleChatCompletionsTextAdapter`, `OpenAICompatibleResponsesTextAdapter`, …) and protected hook contracts are unchanged. Consumer packages (`ai-openai`, `ai-openrouter`, `ai-groq`, `ai-grok`) only update their internal import paths — public API is unchanged. + +If you were importing from `@tanstack/openai-base` directly (uncommon — the package was not yet documented as a public extension point), update your imports: + +```diff +- import { OpenAICompatibleChatCompletionsTextAdapter } from '@tanstack/openai-base' ++ import { OpenAICompatibleChatCompletionsTextAdapter } from '@tanstack/openai-compatible' +``` + +`@tanstack/openai-base@0.2.x` remains published on npm for anyone with a pinned lockfile reference but will receive no further updates. 
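+
+If a transitive dependency still resolves `@tanstack/openai-base`, a
+package-manager override can alias the old name to the renamed package. A
+minimal sketch for pnpm (the `overrides` entry and the `^0.3.0` range are
+illustrative, not prescriptive; npm's `overrides` and yarn's `resolutions`
+have equivalents):
+
+```json
+{
+  "pnpm": {
+    "overrides": {
+      "@tanstack/openai-base": "npm:@tanstack/openai-compatible@^0.3.0"
+    }
+  }
+}
+```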
diff --git a/packages/typescript/ai-grok/package.json b/packages/typescript/ai-grok/package.json
index 063c64780..b76380441 100644
--- a/packages/typescript/ai-grok/package.json
+++ b/packages/typescript/ai-grok/package.json
@@ -45,7 +45,7 @@
   ],
   "dependencies": {
     "@tanstack/ai-utils": "workspace:*",
-    "@tanstack/openai-base": "workspace:*"
+    "@tanstack/openai-compatible": "workspace:*"
   },
   "devDependencies": {
     "@tanstack/ai": "workspace:*",
diff --git a/packages/typescript/ai-grok/src/adapters/image.ts b/packages/typescript/ai-grok/src/adapters/image.ts
index 53ef336b9..5427f3973 100644
--- a/packages/typescript/ai-grok/src/adapters/image.ts
+++ b/packages/typescript/ai-grok/src/adapters/image.ts
@@ -1,4 +1,4 @@
-import { OpenAICompatibleImageAdapter } from '@tanstack/openai-base'
+import { OpenAICompatibleImageAdapter } from '@tanstack/openai-compatible'
 import { getGrokApiKeyFromEnv, withGrokDefaults } from '../utils/client'
 import {
   validateImageSize,
diff --git a/packages/typescript/ai-grok/src/adapters/summarize.ts b/packages/typescript/ai-grok/src/adapters/summarize.ts
index f13984bac..43ed2c7d4 100644
--- a/packages/typescript/ai-grok/src/adapters/summarize.ts
+++ b/packages/typescript/ai-grok/src/adapters/summarize.ts
@@ -1,7 +1,7 @@
-import { OpenAICompatibleSummarizeAdapter } from '@tanstack/openai-base'
+import { OpenAICompatibleSummarizeAdapter } from '@tanstack/openai-compatible'
 import { getGrokApiKeyFromEnv } from '../utils'
 import { GrokTextAdapter } from './text'
-import type { ChatStreamCapable } from '@tanstack/openai-base'
+import type { ChatStreamCapable } from '@tanstack/openai-compatible'
 import type { GROK_CHAT_MODELS } from '../model-meta'
 import type { GrokClientConfig } from '../utils'
 
diff --git a/packages/typescript/ai-grok/src/adapters/text.ts b/packages/typescript/ai-grok/src/adapters/text.ts
index c0c22e3c4..e29c4a637 100644
--- a/packages/typescript/ai-grok/src/adapters/text.ts
+++ b/packages/typescript/ai-grok/src/adapters/text.ts
@@ -1,4 +1,4 @@
-import { OpenAICompatibleChatCompletionsTextAdapter } from '@tanstack/openai-base'
+import { OpenAICompatibleChatCompletionsTextAdapter } from '@tanstack/openai-compatible'
 import { getGrokApiKeyFromEnv, withGrokDefaults } from '../utils/client'
 import type {
   GROK_CHAT_MODELS,
@@ -35,7 +35,7 @@ export type { ExternalTextProviderOptions as GrokTextProviderOptions } from '../
  * Uses OpenAI-compatible Chat Completions API (not Responses API).
  *
  * Delegates implementation to {@link OpenAICompatibleChatCompletionsTextAdapter}
- * from `@tanstack/openai-base` and threads Grok-specific tool-capability typing
+ * from `@tanstack/openai-compatible` and threads Grok-specific tool-capability typing
  * through the 5th generic of the base class.
  */
 export class GrokTextAdapter<
diff --git a/packages/typescript/ai-grok/src/tools/index.ts b/packages/typescript/ai-grok/src/tools/index.ts
index 95a570117..2d134228d 100644
--- a/packages/typescript/ai-grok/src/tools/index.ts
+++ b/packages/typescript/ai-grok/src/tools/index.ts
@@ -2,4 +2,4 @@ export {
   type ChatCompletionFunctionTool as FunctionTool,
   convertFunctionToolToChatCompletionsFormat as convertFunctionToolToAdapterFormat,
   convertToolsToChatCompletionsFormat as convertToolsToProviderFormat,
-} from '@tanstack/openai-base'
+} from '@tanstack/openai-compatible'
diff --git a/packages/typescript/ai-grok/src/utils/client.ts b/packages/typescript/ai-grok/src/utils/client.ts
index 890224592..9fbe11e8f 100644
--- a/packages/typescript/ai-grok/src/utils/client.ts
+++ b/packages/typescript/ai-grok/src/utils/client.ts
@@ -1,5 +1,5 @@
 import { getApiKeyFromEnv } from '@tanstack/ai-utils'
-import type { OpenAICompatibleClientConfig } from '@tanstack/openai-base'
+import type { OpenAICompatibleClientConfig } from '@tanstack/openai-compatible'
 
 export interface GrokClientConfig extends OpenAICompatibleClientConfig {}
 
diff --git a/packages/typescript/ai-grok/src/utils/schema-converter.ts b/packages/typescript/ai-grok/src/utils/schema-converter.ts
index 20c2d36d3..b0b85bb1a 100644
--- a/packages/typescript/ai-grok/src/utils/schema-converter.ts
+++ b/packages/typescript/ai-grok/src/utils/schema-converter.ts
@@ -1,2 +1,2 @@
 export { transformNullsToUndefined } from '@tanstack/ai-utils'
-export { makeStructuredOutputCompatible as makeGrokStructuredOutputCompatible } from '@tanstack/openai-base'
+export { makeStructuredOutputCompatible as makeGrokStructuredOutputCompatible } from '@tanstack/openai-compatible'
diff --git a/packages/typescript/ai-groq/package.json b/packages/typescript/ai-groq/package.json
index 54af3dcf0..c623da3f9 100644
--- a/packages/typescript/ai-groq/package.json
+++ b/packages/typescript/ai-groq/package.json
@@ -53,6 +53,6 @@
   },
   "dependencies": {
     "@tanstack/ai-utils": "workspace:*",
-    "@tanstack/openai-base": "workspace:*"
+    "@tanstack/openai-compatible": "workspace:*"
   }
 }
diff --git a/packages/typescript/ai-groq/src/adapters/text.ts b/packages/typescript/ai-groq/src/adapters/text.ts
index 3ed8b6546..2ba00e399 100644
--- a/packages/typescript/ai-groq/src/adapters/text.ts
+++ b/packages/typescript/ai-groq/src/adapters/text.ts
@@ -1,8 +1,8 @@
-import { OpenAICompatibleChatCompletionsTextAdapter } from '@tanstack/openai-base'
+import { OpenAICompatibleChatCompletionsTextAdapter } from '@tanstack/openai-compatible'
 import { getGroqApiKeyFromEnv, withGroqDefaults } from '../utils/client'
 import { makeGroqStructuredOutputCompatible } from '../utils/schema-converter'
 import type { Modality, TextOptions } from '@tanstack/ai'
-import type { ChatCompletionChunk } from '@tanstack/openai-base'
+import type { ChatCompletionChunk } from '@tanstack/openai-compatible'
 import type {
   GROQ_CHAT_MODELS,
   GroqChatModelToolCapabilitiesByName,
diff --git a/packages/typescript/ai-groq/src/utils/client.ts b/packages/typescript/ai-groq/src/utils/client.ts
index 082e347e0..5318fb04d 100644
--- a/packages/typescript/ai-groq/src/utils/client.ts
+++ b/packages/typescript/ai-groq/src/utils/client.ts
@@ -1,5 +1,5 @@
 import { getApiKeyFromEnv } from '@tanstack/ai-utils'
-import type { OpenAICompatibleClientConfig } from '@tanstack/openai-base'
+import type { OpenAICompatibleClientConfig } from '@tanstack/openai-compatible'
 
 export interface GroqClientConfig extends OpenAICompatibleClientConfig {}
 
diff --git a/packages/typescript/ai-groq/src/utils/schema-converter.ts b/packages/typescript/ai-groq/src/utils/schema-converter.ts
index 6db95a620..02e53221a 100644
--- a/packages/typescript/ai-groq/src/utils/schema-converter.ts
+++ b/packages/typescript/ai-groq/src/utils/schema-converter.ts
@@ -1,4 +1,4 @@
-import { makeStructuredOutputCompatible } from '@tanstack/openai-base'
+import { makeStructuredOutputCompatible } from '@tanstack/openai-compatible'
 import { transformNullsToUndefined } from '@tanstack/ai-utils'
 
 export { transformNullsToUndefined }
@@ -62,7 +62,7 @@ function removeEmptyRequired(schema: Record<string, unknown>): Record<string, unknown> {
 /**
  * Recursively normalise object schemas so any `{ type: 'object' }` node
  * without `properties` gets an empty `properties: {}` object. The
- * openai-base transformer only descends into objects that already have
+ * openai-compatible transformer only descends into objects that already have
  * `properties` set, so a Zod `z.object({})` nested inside `properties`,
  * `items`, `additionalProperties`, or a combinator branch would otherwise
  * skip the strict-mode rewrite and fail Groq validation.
@@ -140,7 +140,7 @@ export function makeGroqStructuredOutputCompatible(
   schema: Record<string, unknown>,
   originalRequired: Array<string> = [],
 ): Record<string, unknown> {
-  // Recursively patch every `{ type: 'object' }` node so the openai-base
+  // Recursively patch every `{ type: 'object' }` node so the openai-compatible
   // transformer descends into nested empty objects too.
   const normalised = normalizeObjectSchemas(schema)
 
diff --git a/packages/typescript/ai-groq/tests/groq-adapter.test.ts b/packages/typescript/ai-groq/tests/groq-adapter.test.ts
index 8615f7cfb..99f38b769 100644
--- a/packages/typescript/ai-groq/tests/groq-adapter.test.ts
+++ b/packages/typescript/ai-groq/tests/groq-adapter.test.ts
@@ -20,7 +20,7 @@ const testLogger = resolveDebugOption(false)
 // Stub the OpenAI SDK so adapter construction doesn't open a real network
 // handle. The per-test mock client is injected post-construction via
 // `setupMockSdkClient` (mirrors the ai-grok pattern). We avoid relying on
-// vi.mock to intercept transitive openai imports — the built openai-base
+// vi.mock to intercept transitive openai imports — the built openai-compatible
 // dist resolves `openai` independently and is unaffected by vi.mock here.
 vi.mock('openai', () => {
   return {
diff --git a/packages/typescript/ai-groq/tests/schema-converter.test.ts b/packages/typescript/ai-groq/tests/schema-converter.test.ts
index b6daa00e0..40ce8f004 100644
--- a/packages/typescript/ai-groq/tests/schema-converter.test.ts
+++ b/packages/typescript/ai-groq/tests/schema-converter.test.ts
@@ -65,7 +65,7 @@ describe('makeGroqStructuredOutputCompatible', () => {
   it('should normalise nested empty-object schemas in properties', () => {
     // Reproduces the bug where a nested `{ type: 'object' }` without
-    // `properties` slipped past the openai-base transformer because the
+    // `properties` slipped past the openai-compatible transformer because the
     // ai-groq layer only normalised the top-level node.
     const schema = {
       type: 'object',
@@ -79,7 +79,7 @@
     expect(result.properties.child.type).toBe('object')
     expect(result.properties.child.properties).toEqual({})
 
-    // openai-base sets additionalProperties: false on every rewritten object
+    // openai-compatible sets additionalProperties: false on every rewritten object
     expect(result.properties.child.additionalProperties).toBe(false)
   })
 
diff --git a/packages/typescript/ai-openai/package.json b/packages/typescript/ai-openai/package.json
index 9a3a24895..02ad11c55 100644
--- a/packages/typescript/ai-openai/package.json
+++ b/packages/typescript/ai-openai/package.json
@@ -45,7 +45,7 @@
   ],
   "dependencies": {
     "@tanstack/ai-utils": "workspace:*",
-    "@tanstack/openai-base": "workspace:*",
+    "@tanstack/openai-compatible": "workspace:*",
     "openai": "^6.9.1"
   },
   "peerDependencies": {
diff --git a/packages/typescript/ai-openai/src/adapters/image.ts b/packages/typescript/ai-openai/src/adapters/image.ts
index e1220738f..fc8e96b63 100644
--- a/packages/typescript/ai-openai/src/adapters/image.ts
+++ b/packages/typescript/ai-openai/src/adapters/image.ts
@@ -1,4 +1,4 @@
-import { OpenAICompatibleImageAdapter } from '@tanstack/openai-base'
+import { OpenAICompatibleImageAdapter } from '@tanstack/openai-compatible'
 import { getOpenAIApiKeyFromEnv } from '../utils/client'
 import {
   validateImageSize,
diff --git a/packages/typescript/ai-openai/src/adapters/summarize.ts b/packages/typescript/ai-openai/src/adapters/summarize.ts
index 6e143bfab..f37da1c97 100644
--- a/packages/typescript/ai-openai/src/adapters/summarize.ts
+++ b/packages/typescript/ai-openai/src/adapters/summarize.ts
@@ -1,4 +1,4 @@
-import { OpenAICompatibleSummarizeAdapter } from '@tanstack/openai-base'
+import { OpenAICompatibleSummarizeAdapter } from '@tanstack/openai-compatible'
 import { getOpenAIApiKeyFromEnv } from '../utils/client'
 import { OpenAITextAdapter } from './text'
 import type { OpenAIChatModel } from '../model-meta'
diff --git a/packages/typescript/ai-openai/src/adapters/text.ts b/packages/typescript/ai-openai/src/adapters/text.ts
index 2e9f91e9c..d0b65a9ab 100644
--- a/packages/typescript/ai-openai/src/adapters/text.ts
+++ b/packages/typescript/ai-openai/src/adapters/text.ts
@@ -1,4 +1,4 @@
-import { OpenAICompatibleResponsesTextAdapter } from '@tanstack/openai-base'
+import { OpenAICompatibleResponsesTextAdapter } from '@tanstack/openai-compatible'
 import { validateTextProviderOptions } from '../text/text-provider-options'
 import { convertToolsToProviderFormat } from '../tools'
 import { getOpenAIApiKeyFromEnv } from '../utils/client'
@@ -68,7 +68,7 @@ type ResolveToolCapabilities =
  *
  * Tree-shakeable adapter for OpenAI chat/text completion functionality.
  * Delegates implementation to {@link OpenAICompatibleResponsesTextAdapter} from
- * `@tanstack/openai-base` and threads OpenAI-specific tool-capability typing
+ * `@tanstack/openai-compatible` and threads OpenAI-specific tool-capability typing
  * through the 5th generic of the base class.
  */
 export class OpenAITextAdapter<
diff --git a/packages/typescript/ai-openai/src/adapters/transcription.ts b/packages/typescript/ai-openai/src/adapters/transcription.ts
index 5a7742298..0c198bbc4 100644
--- a/packages/typescript/ai-openai/src/adapters/transcription.ts
+++ b/packages/typescript/ai-openai/src/adapters/transcription.ts
@@ -1,4 +1,4 @@
-import { OpenAICompatibleTranscriptionAdapter } from '@tanstack/openai-base'
+import { OpenAICompatibleTranscriptionAdapter } from '@tanstack/openai-compatible'
 import { getOpenAIApiKeyFromEnv } from '../utils/client'
 import type { OpenAITranscriptionModel } from '../model-meta'
 import type { OpenAITranscriptionProviderOptions } from '../audio/transcription-provider-options'
diff --git a/packages/typescript/ai-openai/src/adapters/tts.ts b/packages/typescript/ai-openai/src/adapters/tts.ts
index 59d302970..da93b109e 100644
--- a/packages/typescript/ai-openai/src/adapters/tts.ts
+++ b/packages/typescript/ai-openai/src/adapters/tts.ts
@@ -1,4 +1,4 @@
-import { OpenAICompatibleTTSAdapter } from '@tanstack/openai-base'
+import { OpenAICompatibleTTSAdapter } from '@tanstack/openai-compatible'
 import { getOpenAIApiKeyFromEnv } from '../utils/client'
 import {
   validateAudioInput,
diff --git a/packages/typescript/ai-openai/src/adapters/video.ts b/packages/typescript/ai-openai/src/adapters/video.ts
index e5bc9aee1..b22d47b75 100644
--- a/packages/typescript/ai-openai/src/adapters/video.ts
+++ b/packages/typescript/ai-openai/src/adapters/video.ts
@@ -1,4 +1,4 @@
-import { OpenAICompatibleVideoAdapter } from '@tanstack/openai-base'
+import { OpenAICompatibleVideoAdapter } from '@tanstack/openai-compatible'
 import { getOpenAIApiKeyFromEnv } from '../utils/client'
 import {
   toApiSeconds,
diff --git a/packages/typescript/ai-openai/src/tools/apply-patch-tool.ts b/packages/typescript/ai-openai/src/tools/apply-patch-tool.ts
index ab4ed63df..0e0967dce 100644
--- a/packages/typescript/ai-openai/src/tools/apply-patch-tool.ts
+++ b/packages/typescript/ai-openai/src/tools/apply-patch-tool.ts
@@ -1,11 +1,11 @@
-import { applyPatchTool as baseApplyPatchTool } from '@tanstack/openai-base'
+import { applyPatchTool as baseApplyPatchTool } from '@tanstack/openai-compatible'
 import type { ProviderTool } from '@tanstack/ai'
 
 export {
   type ApplyPatchToolConfig,
   type ApplyPatchTool,
   convertApplyPatchToolToAdapterFormat,
-} from '@tanstack/openai-base'
+} from '@tanstack/openai-compatible'
 
 export type OpenAIApplyPatchTool = ProviderTool<'openai', 'apply_patch'>
 
diff --git a/packages/typescript/ai-openai/src/tools/code-interpreter-tool.ts b/packages/typescript/ai-openai/src/tools/code-interpreter-tool.ts
index 52c43d89f..910a49289 100644
--- a/packages/typescript/ai-openai/src/tools/code-interpreter-tool.ts
+++ b/packages/typescript/ai-openai/src/tools/code-interpreter-tool.ts
@@ -1,12 +1,12 @@
-import { codeInterpreterTool as baseCodeInterpreterTool } from '@tanstack/openai-base'
+import { codeInterpreterTool as baseCodeInterpreterTool } from '@tanstack/openai-compatible'
 import type { ProviderTool } from '@tanstack/ai'
-import type { CodeInterpreterToolConfig } from '@tanstack/openai-base'
+import type { CodeInterpreterToolConfig } from '@tanstack/openai-compatible'
 
 export {
   type CodeInterpreterToolConfig,
   type CodeInterpreterTool,
   convertCodeInterpreterToolToAdapterFormat,
-} from '@tanstack/openai-base'
+} from '@tanstack/openai-compatible'
 
 export type OpenAICodeInterpreterTool = ProviderTool<
   'openai',
diff --git a/packages/typescript/ai-openai/src/tools/computer-use-tool.ts b/packages/typescript/ai-openai/src/tools/computer-use-tool.ts
index 8226c7acd..36b7d405b 100644
--- a/packages/typescript/ai-openai/src/tools/computer-use-tool.ts
+++ b/packages/typescript/ai-openai/src/tools/computer-use-tool.ts
@@ -1,12 +1,12 @@
-import { computerUseTool as baseComputerUseTool } from '@tanstack/openai-base'
+import { computerUseTool as baseComputerUseTool } from '@tanstack/openai-compatible'
 import type { ProviderTool } from '@tanstack/ai'
-import type { ComputerUseToolConfig } from '@tanstack/openai-base'
+import type { ComputerUseToolConfig } from '@tanstack/openai-compatible'
 
 export {
   type ComputerUseToolConfig,
   type ComputerUseTool,
   convertComputerUseToolToAdapterFormat,
-} from '@tanstack/openai-base'
+} from '@tanstack/openai-compatible'
 
 // The brand discriminator (`computer_use`) intentionally differs from the
 // runtime tool name (`computer_use_preview`). The brand matches the model-meta
diff --git a/packages/typescript/ai-openai/src/tools/custom-tool.ts b/packages/typescript/ai-openai/src/tools/custom-tool.ts
index 9d898a897..4865b0fd1 100644
--- a/packages/typescript/ai-openai/src/tools/custom-tool.ts
+++ b/packages/typescript/ai-openai/src/tools/custom-tool.ts
@@ -3,4 +3,4 @@ export {
   type CustomTool,
   convertCustomToolToAdapterFormat,
   customTool,
-} from '@tanstack/openai-base'
+} from '@tanstack/openai-compatible'
diff --git a/packages/typescript/ai-openai/src/tools/file-search-tool.ts b/packages/typescript/ai-openai/src/tools/file-search-tool.ts
index c90af1011..c0dccb83a 100644
--- a/packages/typescript/ai-openai/src/tools/file-search-tool.ts
+++ b/packages/typescript/ai-openai/src/tools/file-search-tool.ts
@@ -1,12 +1,12 @@
-import { fileSearchTool as baseFileSearchTool } from '@tanstack/openai-base'
+import { fileSearchTool as baseFileSearchTool } from '@tanstack/openai-compatible'
 import type { ProviderTool } from '@tanstack/ai'
-import type { FileSearchToolConfig } from '@tanstack/openai-base'
+import type { FileSearchToolConfig } from '@tanstack/openai-compatible'
 
 export {
   type FileSearchToolConfig,
   type FileSearchTool,
   convertFileSearchToolToAdapterFormat,
-} from '@tanstack/openai-base'
+} from '@tanstack/openai-compatible'
 
 export type OpenAIFileSearchTool = ProviderTool<'openai', 'file_search'>
 
diff --git a/packages/typescript/ai-openai/src/tools/function-tool.ts b/packages/typescript/ai-openai/src/tools/function-tool.ts
index fefd46433..092468bee 100644
--- a/packages/typescript/ai-openai/src/tools/function-tool.ts
+++ b/packages/typescript/ai-openai/src/tools/function-tool.ts
@@ -2,4 +2,4 @@ export {
   type FunctionToolConfig,
   type FunctionTool,
   convertFunctionToolToAdapterFormat,
-} from '@tanstack/openai-base'
+} from '@tanstack/openai-compatible'
diff --git a/packages/typescript/ai-openai/src/tools/image-generation-tool.ts b/packages/typescript/ai-openai/src/tools/image-generation-tool.ts
index d621889f9..8bdabea04 100644
--- a/packages/typescript/ai-openai/src/tools/image-generation-tool.ts
+++ b/packages/typescript/ai-openai/src/tools/image-generation-tool.ts
@@ -1,12 +1,12 @@
-import { imageGenerationTool as baseImageGenerationTool } from '@tanstack/openai-base'
+import { imageGenerationTool as baseImageGenerationTool } from '@tanstack/openai-compatible'
 import type { ProviderTool } from '@tanstack/ai'
-import type { ImageGenerationToolConfig } from '@tanstack/openai-base'
+import type { ImageGenerationToolConfig } from '@tanstack/openai-compatible'
 
 export {
   type ImageGenerationToolConfig,
   type ImageGenerationTool,
   convertImageGenerationToolToAdapterFormat,
-} from '@tanstack/openai-base'
+} from '@tanstack/openai-compatible'
 
 export type OpenAIImageGenerationTool = ProviderTool<
   'openai',
diff --git a/packages/typescript/ai-openai/src/tools/index.ts b/packages/typescript/ai-openai/src/tools/index.ts
index 7eff9fc69..1a359bdfe 100644
--- a/packages/typescript/ai-openai/src/tools/index.ts
+++ b/packages/typescript/ai-openai/src/tools/index.ts
@@ -1,4 +1,4 @@
-export { type OpenAITool } from '@tanstack/openai-base'
+export { type OpenAITool } from '@tanstack/openai-compatible'
 
 export {
   applyPatchTool,
diff --git a/packages/typescript/ai-openai/src/tools/local-shell-tool.ts b/packages/typescript/ai-openai/src/tools/local-shell-tool.ts
index f49850b84..296bd4e77 100644
--- a/packages/typescript/ai-openai/src/tools/local-shell-tool.ts
+++ b/packages/typescript/ai-openai/src/tools/local-shell-tool.ts
@@ -1,11 +1,11 @@
-import { localShellTool as baseLocalShellTool } from '@tanstack/openai-base'
+import { localShellTool as baseLocalShellTool } from '@tanstack/openai-compatible'
 import type { ProviderTool } from '@tanstack/ai'
 
 export {
   type LocalShellToolConfig,
   type LocalShellTool,
   convertLocalShellToolToAdapterFormat,
-} from '@tanstack/openai-base'
+} from '@tanstack/openai-compatible'
 
 export type OpenAILocalShellTool = ProviderTool<'openai', 'local_shell'>
 
diff --git a/packages/typescript/ai-openai/src/tools/mcp-tool.ts b/packages/typescript/ai-openai/src/tools/mcp-tool.ts
index 73c6b95b7..f3a97504f 100644
--- a/packages/typescript/ai-openai/src/tools/mcp-tool.ts
+++ b/packages/typescript/ai-openai/src/tools/mcp-tool.ts
@@ -1,13 +1,13 @@
-import { mcpTool as baseMcpTool } from '@tanstack/openai-base'
+import { mcpTool as baseMcpTool } from '@tanstack/openai-compatible'
 import type { ProviderTool } from '@tanstack/ai'
-import type { MCPToolConfig } from '@tanstack/openai-base'
+import type { MCPToolConfig } from '@tanstack/openai-compatible'
 
 export {
   type MCPToolConfig,
   type MCPTool,
   validateMCPtool,
   convertMCPToolToAdapterFormat,
-} from '@tanstack/openai-base'
+} from '@tanstack/openai-compatible'
 
 export type OpenAIMCPTool = ProviderTool<'openai', 'mcp'>
 
diff --git a/packages/typescript/ai-openai/src/tools/shell-tool.ts b/packages/typescript/ai-openai/src/tools/shell-tool.ts
index 9f48503a4..05a254dcb 100644
--- a/packages/typescript/ai-openai/src/tools/shell-tool.ts
+++ b/packages/typescript/ai-openai/src/tools/shell-tool.ts
@@ -1,11 +1,11 @@
-import { shellTool as baseShellTool } from '@tanstack/openai-base'
+import { shellTool as baseShellTool } from '@tanstack/openai-compatible'
 import type { ProviderTool } from '@tanstack/ai'
 
 export {
   type ShellToolConfig,
   type ShellTool,
   convertShellToolToAdapterFormat,
-} from '@tanstack/openai-base'
+} from '@tanstack/openai-compatible'
 
 export type OpenAIShellTool = ProviderTool<'openai', 'shell'>
 
diff --git a/packages/typescript/ai-openai/src/tools/tool-choice.ts b/packages/typescript/ai-openai/src/tools/tool-choice.ts
index 99df1824f..a682f8898 100644
--- a/packages/typescript/ai-openai/src/tools/tool-choice.ts
+++ b/packages/typescript/ai-openai/src/tools/tool-choice.ts
@@ -1 +1 @@
-export { type ToolChoice } from '@tanstack/openai-base'
+export { type ToolChoice } from '@tanstack/openai-compatible'
diff --git a/packages/typescript/ai-openai/src/tools/tool-converter.ts b/packages/typescript/ai-openai/src/tools/tool-converter.ts
index 3d78a1b18..17b938053 100644
--- a/packages/typescript/ai-openai/src/tools/tool-converter.ts
+++ b/packages/typescript/ai-openai/src/tools/tool-converter.ts
@@ -1 +1 @@
-export { convertToolsToProviderFormat } from '@tanstack/openai-base'
+export { convertToolsToProviderFormat } from '@tanstack/openai-compatible'
diff --git a/packages/typescript/ai-openai/src/tools/web-search-preview-tool.ts b/packages/typescript/ai-openai/src/tools/web-search-preview-tool.ts
index b822bafbf..40b884a5d 100644
--- a/packages/typescript/ai-openai/src/tools/web-search-preview-tool.ts
+++ b/packages/typescript/ai-openai/src/tools/web-search-preview-tool.ts
@@ -1,12 +1,12 @@
-import { webSearchPreviewTool as baseWebSearchPreviewTool } from '@tanstack/openai-base'
+import { webSearchPreviewTool as baseWebSearchPreviewTool } from '@tanstack/openai-compatible'
 import type { ProviderTool } from '@tanstack/ai'
-import type { WebSearchPreviewToolConfig } from '@tanstack/openai-base'
+import type { WebSearchPreviewToolConfig } from '@tanstack/openai-compatible'
 
 export {
   type WebSearchPreviewToolConfig,
   type WebSearchPreviewTool,
   convertWebSearchPreviewToolToAdapterFormat,
-} from '@tanstack/openai-base'
+} from '@tanstack/openai-compatible'
 
 export type OpenAIWebSearchPreviewTool = ProviderTool<
   'openai',
diff --git a/packages/typescript/ai-openai/src/tools/web-search-tool.ts b/packages/typescript/ai-openai/src/tools/web-search-tool.ts
index bdb39c944..8436c437c 100644
--- a/packages/typescript/ai-openai/src/tools/web-search-tool.ts
+++ b/packages/typescript/ai-openai/src/tools/web-search-tool.ts
@@ -1,12 +1,12 @@
-import { webSearchTool as baseWebSearchTool } from '@tanstack/openai-base'
+import { webSearchTool as baseWebSearchTool } from '@tanstack/openai-compatible'
 import type { ProviderTool } from '@tanstack/ai'
-import type { WebSearchToolConfig } from '@tanstack/openai-base'
+import type { WebSearchToolConfig } from '@tanstack/openai-compatible'
 
 export {
   type WebSearchToolConfig,
   type WebSearchTool,
   convertWebSearchToolToAdapterFormat,
-} from '@tanstack/openai-base'
+} from '@tanstack/openai-compatible'
 
 export type OpenAIWebSearchTool = ProviderTool<'openai', 'web_search'>
 
diff --git a/packages/typescript/ai-openai/src/utils/client.ts b/packages/typescript/ai-openai/src/utils/client.ts
index 97f1efe50..b4b46795f 100644
--- a/packages/typescript/ai-openai/src/utils/client.ts
+++ b/packages/typescript/ai-openai/src/utils/client.ts
@@ -1,5 +1,5 @@
 import { getApiKeyFromEnv } from '@tanstack/ai-utils'
-import type { OpenAICompatibleClientConfig } from '@tanstack/openai-base'
+import type { OpenAICompatibleClientConfig } from '@tanstack/openai-compatible'
 
 export interface OpenAIClientConfig extends OpenAICompatibleClientConfig {}
 
diff --git a/packages/typescript/ai-openai/src/utils/schema-converter.ts b/packages/typescript/ai-openai/src/utils/schema-converter.ts
index fb9ee165e..518198ffb 100644
--- a/packages/typescript/ai-openai/src/utils/schema-converter.ts
+++ b/packages/typescript/ai-openai/src/utils/schema-converter.ts
@@ -1,5 +1,5 @@
 import { transformNullsToUndefined } from '@tanstack/ai-utils'
-import { makeStructuredOutputCompatible } from '@tanstack/openai-base'
+import { makeStructuredOutputCompatible } from '@tanstack/openai-compatible'
 
 export { transformNullsToUndefined }
 
diff --git a/packages/typescript/ai-openrouter/package.json b/packages/typescript/ai-openrouter/package.json
index 82bcf976c..633bf4430 100644
--- a/packages/typescript/ai-openrouter/package.json
+++ b/packages/typescript/ai-openrouter/package.json
@@ -45,7 +45,7 @@
   "dependencies": {
     "@openrouter/sdk": "0.12.14",
     "@tanstack/ai-utils": "workspace:*",
-    "@tanstack/openai-base": "workspace:*"
+    "@tanstack/openai-compatible": "workspace:*"
   },
   "devDependencies": {
     "@tanstack/ai": "workspace:*",
diff --git a/packages/typescript/ai-openrouter/src/adapters/responses-text.ts b/packages/typescript/ai-openrouter/src/adapters/responses-text.ts
index f05a5f343..d937010cc 100644
--- a/packages/typescript/ai-openrouter/src/adapters/responses-text.ts
+++ b/packages/typescript/ai-openrouter/src/adapters/responses-text.ts
@@ -2,7 +2,7 @@ import { OpenRouter } from '@openrouter/sdk'
 import {
   OpenAICompatibleResponsesTextAdapter,
   convertFunctionToolToResponsesFormat,
-} from '@tanstack/openai-base'
+} from '@tanstack/openai-compatible'
 import { isWebSearchTool } from '../tools/web-search-tool'
 import { getOpenRouterApiKeyFromEnv } from '../utils'
 import type { SDKOptions } from '@openrouter/sdk'
@@ -19,7 +19,7 @@ import type {
   ResponseStreamEvent,
   ResponsesFunctionTool,
   ResponsesResponse,
-} from '@tanstack/openai-base'
+} from '@tanstack/openai-compatible'
 import type { ContentPart, ModelMessage, TextOptions, Tool } from '@tanstack/ai'
 import type { ExternalResponsesProviderOptions } from '../text/responses-provider-options'
 import type {
@@ -201,7 +201,7 @@ export class OpenRouterResponsesTextAdapter<
       | ResponsesRequest['input']
       | undefined
 
-    // Reuse the openai-base function-tool converter. ResponsesFunctionTool
+    // Reuse the openai-compatible function-tool converter. ResponsesFunctionTool
     // already matches OpenRouter's ResponsesRequestToolFunction shape:
     // `{ type:'function', name, parameters, description, strict }`.
     const tools: Array<ResponsesFunctionTool> | undefined = options.tools
diff --git a/packages/typescript/ai-openrouter/src/adapters/text.ts b/packages/typescript/ai-openrouter/src/adapters/text.ts
index 39f408f48..1f374e5de 100644
--- a/packages/typescript/ai-openrouter/src/adapters/text.ts
+++ b/packages/typescript/ai-openrouter/src/adapters/text.ts
@@ -1,5 +1,5 @@
 import { OpenRouter } from '@openrouter/sdk'
-import { OpenAICompatibleChatCompletionsTextAdapter } from '@tanstack/openai-base'
+import { OpenAICompatibleChatCompletionsTextAdapter } from '@tanstack/openai-compatible'
 import { convertToolsToProviderFormat } from '../tools'
 import { getOpenRouterApiKeyFromEnv } from '../utils'
 import type { SDKOptions } from '@openrouter/sdk'
@@ -8,7 +8,7 @@ import type {
   ChatCompletionChunk,
   ChatCompletionCreateParamsNonStreaming,
   ChatCompletionCreateParamsStreaming,
-} from '@tanstack/openai-base'
+} from '@tanstack/openai-compatible'
 import type {
   ChatContentItems,
   ChatMessages,
diff --git a/packages/typescript/openai-base/CHANGELOG.md b/packages/typescript/openai-compatible/CHANGELOG.md
similarity index 86%
rename from packages/typescript/openai-base/CHANGELOG.md
rename to packages/typescript/openai-compatible/CHANGELOG.md
index dc37dc0f0..7d83e1661 100644
--- a/packages/typescript/openai-base/CHANGELOG.md
+++ b/packages/typescript/openai-compatible/CHANGELOG.md
@@ -1,4 +1,7 @@
-# @tanstack/openai-base
+# @tanstack/openai-compatible
+
+> Renamed from `@tanstack/openai-base` in 0.3.0. See the [README](./README.md) for context.
+
 
 ## 0.2.1
 
diff --git a/packages/typescript/openai-compatible/README.md b/packages/typescript/openai-compatible/README.md
new file mode 100644
index 000000000..3419be682
--- /dev/null
+++ b/packages/typescript/openai-compatible/README.md
@@ -0,0 +1,70 @@
+# @tanstack/openai-compatible
+
+Shared protocol adapters for OpenAI-compatible providers in TanStack AI.
+
+> Renamed from `@tanstack/openai-base` in 0.3.0. The "base" name implied this package
+> tracked OpenAI's product roadmap; in fact it implements two OpenAI-shaped *wire
+> formats* that multiple providers ship — see below.
+
+## What this package is
+
+This package holds the shared implementation of the two OpenAI-compatible
+wire-format protocols:
+
+- **Chat Completions** (`/v1/chat/completions`) — mature, natively implemented by
+  OpenAI, Groq, Grok, OpenRouter, vLLM, SGLang, Together, Ollama (compat layer),
+  and many others.
+- **Responses** (`/v1/responses`) — newer; OpenAI's reference implementation plus
+  OpenRouter's beta routing implementation (which fans out to Anthropic, Google,
+  etc. under the hood). Younger protocol, fewer native implementers today.
+
+Both are exposed as abstract classes that providers subclass:
+
+- `OpenAICompatibleChatCompletionsTextAdapter`
+- `OpenAICompatibleResponsesTextAdapter`
+
+Subclasses absorb SDK-shape variance via a small set of protected hook
+methods: `callChatCompletion`, `callChatCompletionStream`, `extractReasoning`,
+`convertMessage`, `mapOptionsToRequest`, `transformStructuredOutput`,
+`makeStructuredOutputCompatible`, `processStreamChunks` (and the equivalent set
+on the Responses adapter).
+
+## What this package is not
+
+It is **not** the base for OpenAI's evolving product surface. OpenAI-specific
+tools (e.g. `web_search_preview`, `code_interpreter`, `local_shell`),
+OpenAI-only models, and OpenAI's product behaviors live in
+[`@tanstack/ai-openai`](../ai-openai), not here.
+
+The distinction matters because it tells contributors where to add things:
+
+- Adding a field to a class in this package is a claim that the field is
+  supported by **multiple** OpenAI-compatible providers (not just OpenAI).
+  Otherwise it belongs as an override or extension in the provider's own
+  package.
+- If OpenAI ships a new field that no other provider supports yet, it goes in
+  `@tanstack/ai-openai` and is plumbed into this base only once a second
+  provider has adopted it.
+
+## Architecture context
+
+Every text adapter in TanStack AI — regardless of provider — emits
+[AG-UI](https://github.com/CopilotKit/ag-ui) events (`RUN_STARTED`,
+`TEXT_MESSAGE_*`, `TOOL_CALL_*`, `RUN_FINISHED`, …) as its output stream. That
+is the *universal* unification.
+
+Input protocols are different. The OpenAI-compatible family (covered by this
+package) has many implementers and warrants a shared base. Anthropic, Google
+Gemini, and Ollama have single-provider input protocols and their adapters
+extend `BaseTextAdapter` from `@tanstack/ai` directly — no compatible base
+exists because no compatible family exists.
+
+## Direct use
+
+Most users don't import from this package directly; they install a provider
+package (`@tanstack/ai-openai`, `@tanstack/ai-openrouter`,
+`@tanstack/ai-groq`, `@tanstack/ai-grok`) which extends the bases here.
+
+If you're building a custom OpenAI-compatible provider adapter (e.g. for vLLM,
+Together, Fireworks), you can extend the bases from this package directly. See
+the existing providers as worked examples.
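+
+A minimal sketch of the client-config half of such a provider, mirroring
+`ai-grok`'s `src/utils/client.ts`. The `VllmClientConfig` name, the
+`VLLM_API_KEY` env var, and the `getApiKeyFromEnv` call shape are illustrative
+assumptions, not a prescribed API; copy the real pattern from an existing
+provider:
+
+```ts
+import { getApiKeyFromEnv } from '@tanstack/ai-utils'
+import type { OpenAICompatibleClientConfig } from '@tanstack/openai-compatible'
+
+// Hypothetical vLLM provider config: extend the shared client config rather
+// than redefining connection fields, as ai-grok and ai-groq do.
+export interface VllmClientConfig extends OpenAICompatibleClientConfig {}
+
+// Resolve the API key from the environment. The exact getApiKeyFromEnv
+// signature is assumed from its usage in ai-grok/ai-groq; check
+// @tanstack/ai-utils before copying.
+export function getVllmApiKeyFromEnv() {
+  return getApiKeyFromEnv(['VLLM_API_KEY'])
+}
+```
+
+The text adapter itself then extends
+`OpenAICompatibleChatCompletionsTextAdapter` and overrides only the protected
+hooks its SDK shape requires (`callChatCompletion`, `callChatCompletionStream`,
+`extractReasoning`, ...), exactly as `GrokTextAdapter` does.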
diff --git a/packages/typescript/openai-base/package.json b/packages/typescript/openai-compatible/package.json
similarity index 78%
rename from packages/typescript/openai-base/package.json
rename to packages/typescript/openai-compatible/package.json
index 95c91b039..6ac972103 100644
--- a/packages/typescript/openai-base/package.json
+++ b/packages/typescript/openai-compatible/package.json
@@ -1,13 +1,13 @@
 {
-  "name": "@tanstack/openai-base",
+  "name": "@tanstack/openai-compatible",
   "version": "0.2.1",
-  "description": "Shared base adapters and utilities for OpenAI-compatible providers in TanStack AI",
+  "description": "Shared protocol adapters and utilities for OpenAI-compatible providers in TanStack AI (Chat Completions and Responses wire formats)",
   "author": "",
   "license": "MIT",
   "repository": {
     "type": "git",
     "url": "git+https://github.com/TanStack/ai.git",
-    "directory": "packages/typescript/openai-base"
+    "directory": "packages/typescript/openai-compatible"
   },
   "type": "module",
   "module": "./dist/esm/index.js",
@@ -35,9 +35,10 @@
   "keywords": [
     "ai",
     "openai",
+    "openai-compatible",
     "tanstack",
     "adapter",
-    "base"
+    "protocol"
   ],
   "dependencies": {
     "@tanstack/ai-utils": "workspace:*",
diff --git a/packages/typescript/openai-base/src/adapters/chat-completions-text.ts b/packages/typescript/openai-compatible/src/adapters/chat-completions-text.ts
similarity index 100%
rename from packages/typescript/openai-base/src/adapters/chat-completions-text.ts
rename to packages/typescript/openai-compatible/src/adapters/chat-completions-text.ts
diff --git a/packages/typescript/openai-base/src/adapters/chat-completions-tool-converter.ts b/packages/typescript/openai-compatible/src/adapters/chat-completions-tool-converter.ts
similarity index 100%
rename from packages/typescript/openai-base/src/adapters/chat-completions-tool-converter.ts
rename to packages/typescript/openai-compatible/src/adapters/chat-completions-tool-converter.ts
diff --git a/packages/typescript/openai-base/src/adapters/image.ts b/packages/typescript/openai-compatible/src/adapters/image.ts
similarity index 100%
rename from packages/typescript/openai-base/src/adapters/image.ts
rename to packages/typescript/openai-compatible/src/adapters/image.ts
diff --git a/packages/typescript/openai-base/src/adapters/responses-text.ts b/packages/typescript/openai-compatible/src/adapters/responses-text.ts
similarity index 100%
rename from packages/typescript/openai-base/src/adapters/responses-text.ts
rename to packages/typescript/openai-compatible/src/adapters/responses-text.ts
diff --git a/packages/typescript/openai-base/src/adapters/responses-tool-converter.ts b/packages/typescript/openai-compatible/src/adapters/responses-tool-converter.ts
similarity index 100%
rename from packages/typescript/openai-base/src/adapters/responses-tool-converter.ts
rename to packages/typescript/openai-compatible/src/adapters/responses-tool-converter.ts
diff --git a/packages/typescript/openai-base/src/adapters/summarize.ts b/packages/typescript/openai-compatible/src/adapters/summarize.ts
similarity index 100%
rename from packages/typescript/openai-base/src/adapters/summarize.ts
rename to packages/typescript/openai-compatible/src/adapters/summarize.ts
diff --git a/packages/typescript/openai-base/src/adapters/transcription.ts b/packages/typescript/openai-compatible/src/adapters/transcription.ts
similarity index 100%
rename from packages/typescript/openai-base/src/adapters/transcription.ts
rename to packages/typescript/openai-compatible/src/adapters/transcription.ts
diff --git a/packages/typescript/openai-base/src/adapters/tts.ts b/packages/typescript/openai-compatible/src/adapters/tts.ts
similarity index 100%
rename from packages/typescript/openai-base/src/adapters/tts.ts
rename to packages/typescript/openai-compatible/src/adapters/tts.ts
diff --git a/packages/typescript/openai-base/src/adapters/video.ts b/packages/typescript/openai-compatible/src/adapters/video.ts
similarity index 100%
rename from packages/typescript/openai-base/src/adapters/video.ts
rename to packages/typescript/openai-compatible/src/adapters/video.ts
diff --git a/packages/typescript/openai-base/src/index.ts b/packages/typescript/openai-compatible/src/index.ts
similarity index 100%
rename from packages/typescript/openai-base/src/index.ts
rename to packages/typescript/openai-compatible/src/index.ts
diff --git a/packages/typescript/openai-base/src/tools/apply-patch-tool.ts b/packages/typescript/openai-compatible/src/tools/apply-patch-tool.ts
similarity index 100%
rename from packages/typescript/openai-base/src/tools/apply-patch-tool.ts
rename to packages/typescript/openai-compatible/src/tools/apply-patch-tool.ts
diff --git a/packages/typescript/openai-base/src/tools/code-interpreter-tool.ts b/packages/typescript/openai-compatible/src/tools/code-interpreter-tool.ts
similarity index 100%
rename from packages/typescript/openai-base/src/tools/code-interpreter-tool.ts
rename to packages/typescript/openai-compatible/src/tools/code-interpreter-tool.ts
diff --git a/packages/typescript/openai-base/src/tools/computer-use-tool.ts b/packages/typescript/openai-compatible/src/tools/computer-use-tool.ts
similarity index 100%
rename from packages/typescript/openai-base/src/tools/computer-use-tool.ts
rename to packages/typescript/openai-compatible/src/tools/computer-use-tool.ts
diff --git a/packages/typescript/openai-base/src/tools/custom-tool.ts b/packages/typescript/openai-compatible/src/tools/custom-tool.ts
similarity index 100%
rename from packages/typescript/openai-base/src/tools/custom-tool.ts
rename to packages/typescript/openai-compatible/src/tools/custom-tool.ts
diff --git a/packages/typescript/openai-base/src/tools/file-search-tool.ts b/packages/typescript/openai-compatible/src/tools/file-search-tool.ts
similarity index 100%
rename from packages/typescript/openai-base/src/tools/file-search-tool.ts
rename to packages/typescript/openai-compatible/src/tools/file-search-tool.ts
diff --git a/packages/typescript/openai-base/src/tools/function-tool.ts b/packages/typescript/openai-compatible/src/tools/function-tool.ts
similarity index 100%
rename from packages/typescript/openai-base/src/tools/function-tool.ts
rename to packages/typescript/openai-compatible/src/tools/function-tool.ts
diff --git a/packages/typescript/openai-base/src/tools/image-generation-tool.ts b/packages/typescript/openai-compatible/src/tools/image-generation-tool.ts
similarity index 100%
rename from packages/typescript/openai-base/src/tools/image-generation-tool.ts
rename to packages/typescript/openai-compatible/src/tools/image-generation-tool.ts
diff --git a/packages/typescript/openai-base/src/tools/index.ts b/packages/typescript/openai-compatible/src/tools/index.ts
similarity index 100%
rename from packages/typescript/openai-base/src/tools/index.ts
rename to packages/typescript/openai-compatible/src/tools/index.ts
diff --git a/packages/typescript/openai-base/src/tools/local-shell-tool.ts b/packages/typescript/openai-compatible/src/tools/local-shell-tool.ts
similarity index 100%
rename from packages/typescript/openai-base/src/tools/local-shell-tool.ts
rename to packages/typescript/openai-compatible/src/tools/local-shell-tool.ts
diff --git a/packages/typescript/openai-base/src/tools/mcp-tool.ts b/packages/typescript/openai-compatible/src/tools/mcp-tool.ts
similarity index 100%
rename from packages/typescript/openai-base/src/tools/mcp-tool.ts
rename to packages/typescript/openai-compatible/src/tools/mcp-tool.ts
diff --git a/packages/typescript/openai-base/src/tools/shell-tool.ts b/packages/typescript/openai-compatible/src/tools/shell-tool.ts
similarity index 100%
rename from packages/typescript/openai-base/src/tools/shell-tool.ts
rename to packages/typescript/openai-compatible/src/tools/shell-tool.ts
diff --git a/packages/typescript/openai-base/src/tools/tool-choice.ts b/packages/typescript/openai-compatible/src/tools/tool-choice.ts
similarity index 100%
rename from packages/typescript/openai-base/src/tools/tool-choice.ts
rename to packages/typescript/openai-compatible/src/tools/tool-choice.ts
diff --git a/packages/typescript/openai-base/src/tools/tool-converter.ts b/packages/typescript/openai-compatible/src/tools/tool-converter.ts
similarity index 100%
rename from packages/typescript/openai-base/src/tools/tool-converter.ts
rename to packages/typescript/openai-compatible/src/tools/tool-converter.ts
diff --git a/packages/typescript/openai-base/src/tools/web-search-preview-tool.ts b/packages/typescript/openai-compatible/src/tools/web-search-preview-tool.ts
similarity index 100%
rename from packages/typescript/openai-base/src/tools/web-search-preview-tool.ts
rename to packages/typescript/openai-compatible/src/tools/web-search-preview-tool.ts
diff --git a/packages/typescript/openai-base/src/tools/web-search-tool.ts b/packages/typescript/openai-compatible/src/tools/web-search-tool.ts
similarity index 100%
rename from packages/typescript/openai-base/src/tools/web-search-tool.ts
rename to packages/typescript/openai-compatible/src/tools/web-search-tool.ts
diff --git a/packages/typescript/openai-base/src/types/config.ts b/packages/typescript/openai-compatible/src/types/config.ts
similarity index 100%
rename from packages/typescript/openai-base/src/types/config.ts
rename to packages/typescript/openai-compatible/src/types/config.ts
diff --git a/packages/typescript/openai-base/src/utils/client.ts b/packages/typescript/openai-compatible/src/utils/client.ts
similarity index 100%
rename from packages/typescript/openai-base/src/utils/client.ts
rename to packages/typescript/openai-compatible/src/utils/client.ts
diff --git a/packages/typescript/openai-base/src/utils/request-options.ts b/packages/typescript/openai-compatible/src/utils/request-options.ts
similarity index 100%
rename from packages/typescript/openai-base/src/utils/request-options.ts
rename to packages/typescript/openai-compatible/src/utils/request-options.ts
diff --git a/packages/typescript/openai-base/src/utils/schema-converter.ts b/packages/typescript/openai-compatible/src/utils/schema-converter.ts
similarity index 100%
rename from packages/typescript/openai-base/src/utils/schema-converter.ts
rename to packages/typescript/openai-compatible/src/utils/schema-converter.ts
diff --git a/packages/typescript/openai-base/tests/chat-completions-text.test.ts b/packages/typescript/openai-compatible/tests/chat-completions-text.test.ts
similarity index 100%
rename from packages/typescript/openai-base/tests/chat-completions-text.test.ts
rename to packages/typescript/openai-compatible/tests/chat-completions-text.test.ts
diff --git a/packages/typescript/openai-base/tests/mcp-tool.test.ts b/packages/typescript/openai-compatible/tests/mcp-tool.test.ts
similarity index 100%
rename from packages/typescript/openai-base/tests/mcp-tool.test.ts
rename to packages/typescript/openai-compatible/tests/mcp-tool.test.ts
diff --git a/packages/typescript/openai-base/tests/media-adapters.test.ts b/packages/typescript/openai-compatible/tests/media-adapters.test.ts
similarity index 100%
rename from packages/typescript/openai-base/tests/media-adapters.test.ts
rename to packages/typescript/openai-compatible/tests/media-adapters.test.ts
diff --git a/packages/typescript/openai-base/tests/responses-text.test.ts b/packages/typescript/openai-compatible/tests/responses-text.test.ts
similarity index 100%
rename from packages/typescript/openai-base/tests/responses-text.test.ts
rename to packages/typescript/openai-compatible/tests/responses-text.test.ts
diff --git a/packages/typescript/openai-base/tests/schema-converter.test.ts b/packages/typescript/openai-compatible/tests/schema-converter.test.ts
similarity index 100%
rename from packages/typescript/openai-base/tests/schema-converter.test.ts
rename to packages/typescript/openai-compatible/tests/schema-converter.test.ts
diff --git a/packages/typescript/openai-base/tsconfig.json b/packages/typescript/openai-compatible/tsconfig.json
similarity index 100%
rename from packages/typescript/openai-base/tsconfig.json
rename to packages/typescript/openai-compatible/tsconfig.json
diff --git a/packages/typescript/openai-base/vite.config.ts b/packages/typescript/openai-compatible/vite.config.ts
similarity index 100%
rename from packages/typescript/openai-base/vite.config.ts
rename to packages/typescript/openai-compatible/vite.config.ts
diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml
index 07a916d8b..4dad875dd 100644
--- a/pnpm-lock.yaml
+++ b/pnpm-lock.yaml
@@ -642,7 +642,7 @@ importers:
         version: 7.3.1(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)
       vitest:
         specifier: ^4.0.14
-        version: 4.1.4(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jsdom@27.3.0(postcss@8.5.9))(vite@7.3.1(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))
+        version: 4.1.4(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(@vitest/coverage-v8@4.0.14)(happy-dom@20.0.11)(jsdom@27.3.0(postcss@8.5.9))(vite@7.3.1(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))
       web-vitals:
         specifier: ^5.1.0
         version: 5.1.0
@@ -944,7 +944,7 @@ importers:
         version: 1.1.0
       '@vitest/coverage-v8':
         specifier: 4.0.14
-        version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))
+        version: 4.0.14(vitest@4.1.4)
       zod:
         specifier: ^4.2.0
         version: 4.2.1
@@ -963,7 +963,7 @@ importers:
         version: link:../ai
       '@vitest/coverage-v8':
        specifier: 4.0.14
-        version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))
+        version: 4.0.14(vitest@4.1.4)
       zod:
         specifier: ^4.2.0
         version: 4.2.1
@@ -982,7 +982,7 @@ importers:
         version: 1.1.0
       '@vitest/coverage-v8':
         specifier: 4.0.14
-        version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))
+        version: 4.0.14(vitest@4.1.4)
       vite:
         specifier: ^7.2.7
         version: 7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)
@@ -1001,7 +1001,7 @@ importers:
         version: link:../ai
       '@vitest/coverage-v8':
         specifier: 4.0.14
-        version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))
+        version: 4.0.14(vitest@4.1.4)
       zod:
         specifier: ^4.2.0
         version: 4.3.6
@@ -1029,7 +1029,7 @@ importers:
         version: link:../ai-openai
       '@vitest/coverage-v8':
         specifier: 4.0.14
-        version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))
+        version: 4.0.14(vitest@4.1.4)
       commander:
         specifier: ^13.1.0
         version: 13.1.0
@@ -1106,7 +1106,7 @@ importers:
     devDependencies:
       '@vitest/coverage-v8':
        specifier: 4.0.14
-        version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))
+        version: 4.0.14(vitest@4.1.4)
      jsdom:
         specifier: ^27.2.0
         version: 27.3.0(postcss@8.5.9)
@@ -1143,7 +1143,7 @@ importers:
         version: link:../ai-client
       '@vitest/coverage-v8':
         specifier: 4.0.14
-        version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))
+        version: 4.0.14(vitest@4.1.4)
 
   packages/typescript/ai-event-client:
     dependencies:
@@ -1156,7 +1156,7 @@ importers:
     devDependencies:
       '@vitest/coverage-v8':
         specifier: 4.0.14
-        version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))
+        version: 4.0.14(vitest@4.1.4)
 
   packages/typescript/ai-fal:
     dependencies:
@@ -1172,7 +1172,7 @@ importers:
         version: link:../ai
       '@vitest/coverage-v8':
         specifier: 4.0.14
-        version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))
+        version: 4.0.14(vitest@4.1.4)
       vite:
         specifier: ^7.2.7
         version: 7.3.1(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)
@@ -1191,7 +1191,7 @@ importers:
         version: link:../ai
       '@vitest/coverage-v8':
         specifier: 4.0.14
-        version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))
+        version: 4.0.14(vitest@4.1.4)
       vite:
         specifier: ^7.2.7
         version: 7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)
@@ -1204,9 +1204,9 @@ importers:
       '@tanstack/ai-utils':
         specifier: workspace:*
         version: link:../ai-utils
-      '@tanstack/openai-base':
+      '@tanstack/openai-compatible':
         specifier: workspace:*
-        version: link:../openai-base
+        version: link:../openai-compatible
       zod:
         specifier: ^4.0.0
         version: 4.3.6
@@ -1219,7 +1219,7 @@ importers:
         version: link:../ai-client
       '@vitest/coverage-v8':
         specifier: 4.0.14
-        version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))
+        version:
4.0.14(vitest@4.1.4) vite: specifier: ^7.2.7 version: 7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) @@ -1229,9 +1229,9 @@ importers: '@tanstack/ai-utils': specifier: workspace:* version: link:../ai-utils - '@tanstack/openai-base': + '@tanstack/openai-compatible': specifier: workspace:* - version: link:../openai-base + version: link:../openai-compatible zod: specifier: ^4.0.0 version: 4.3.6 @@ -1241,7 +1241,7 @@ importers: version: link:../ai '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + version: 4.0.14(vitest@4.1.4) vite: specifier: ^7.2.7 version: 7.3.1(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) @@ -1257,7 +1257,7 @@ importers: version: 4.20260317.1 '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + version: 4.0.14(vitest@4.1.4) wrangler: specifier: ^4.88.0 version: 4.88.0(@cloudflare/workers-types@4.20260317.1) @@ -1273,7 +1273,7 @@ importers: devDependencies: '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + version: 4.0.14(vitest@4.1.4) packages/typescript/ai-isolate-quickjs: dependencies: @@ -1286,7 +1286,7 @@ importers: devDependencies: '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + version: 4.0.14(vitest@4.1.4) packages/typescript/ai-ollama: dependencies: @@ -1302,7 +1302,7 @@ importers: version: link:../ai '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + version: 4.0.14(vitest@4.1.4) vite: specifier: ^7.2.7 version: 7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) @@ -1312,9 +1312,9 @@ importers: '@tanstack/ai-utils': specifier: workspace:* version: link:../ai-utils - '@tanstack/openai-base': + '@tanstack/openai-compatible': specifier: workspace:* - version: link:../openai-base + version: link:../openai-compatible openai: specifier: ^6.9.1 version: 6.10.0(ws@8.19.0)(zod@4.2.1) @@ -1327,7 +1327,7 @@ importers: version: link:../ai-client '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + version: 4.0.14(vitest@4.1.4) vite: specifier: ^7.2.7 version: 7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) @@ -1343,16 +1343,16 @@ importers: '@tanstack/ai-utils': specifier: workspace:* version: link:../ai-utils - '@tanstack/openai-base': + '@tanstack/openai-compatible': specifier: workspace:* - version: link:../openai-base + version: 
link:../openai-compatible devDependencies: '@tanstack/ai': specifier: workspace:* version: link:../ai '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + version: 4.0.14(vitest@4.1.4) vite: specifier: ^7.2.7 version: 7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) @@ -1374,7 +1374,7 @@ importers: version: 3.2.4(preact@10.28.2) '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + version: 4.0.14(vitest@4.1.4) jsdom: specifier: ^27.2.0 version: 27.3.0(postcss@8.5.9) @@ -1402,7 +1402,7 @@ importers: version: 19.2.7 '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + version: 4.0.14(vitest@4.1.4) jsdom: specifier: ^27.2.0 version: 27.3.0(postcss@8.5.9) @@ -1442,7 +1442,7 @@ importers: version: 19.2.7 '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + version: 4.0.14(vitest@4.1.4) react: specifier: ^19.2.3 version: 19.2.3 @@ -1513,7 +1513,7 @@ importers: version: link:../ai-solid '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + version: 4.0.14(vitest@4.1.4) solid-js: specifier: ^1.9.10 version: 1.9.10 @@ -1541,7 +1541,7 @@ importers: version: 24.10.3 '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + version: 4.0.14(vitest@4.1.4) jsdom: specifier: ^27.2.0 version: 27.3.0(postcss@8.5.9) @@ -1565,7 +1565,7 @@ importers: version: 24.10.3 '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + version: 4.0.14(vitest@4.1.4) vite: specifier: ^7.2.7 version: 7.3.1(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) @@ -1630,7 +1630,7 @@ importers: version: 6.0.3(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))(vue@3.5.25(typescript@5.9.3)) '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + version: 4.0.14(vitest@4.1.4) vite: specifier: ^7.2.7 version: 7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) @@ -1641,7 +1641,7 @@ importers: specifier: ^2.2.10 version: 2.2.12(typescript@5.9.3) - 
packages/typescript/openai-base: + packages/typescript/openai-compatible: dependencies: '@tanstack/ai-utils': specifier: workspace:* @@ -1655,7 +1655,7 @@ importers: version: link:../ai '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + version: 4.0.14(vitest@4.1.4) vite: specifier: ^7.2.7 version: 7.3.1(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) @@ -1674,7 +1674,7 @@ importers: devDependencies: '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + version: 4.0.14(vitest@4.1.4) vite: specifier: ^7.2.7 version: 7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) @@ -1693,7 +1693,7 @@ importers: version: 19.2.7 '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + version: 4.0.14(vitest@4.1.4) react: specifier: ^19.2.3 version: 19.2.3 @@ -1712,7 +1712,7 @@ importers: devDependencies: '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + version: 4.0.14(vitest@4.1.4) solid-js: specifier: ^1.9.10 version: 1.9.10 @@ -6757,26 +6757,12 @@ packages: '@vitest/browser': optional: true - '@vitest/expect@4.0.14': - resolution: {integrity: sha512-RHk63V3zvRiYOWAV0rGEBRO820ce17hz7cI2kDmEdfQsBjT2luEKB5tCOc91u1oSQoUOZkSv3ZyzkdkSLD7lKw==} - '@vitest/expect@4.0.15': resolution: {integrity: sha512-Gfyva9/GxPAWXIWjyGDli9O+waHDC0Q0jaLdFP1qPAUUfo1FEXPXUfUkp3eZA0sSq340vPycSyOlYUeM15Ft1w==} '@vitest/expect@4.1.4': resolution: {integrity: sha512-iPBpra+VDuXmBFI3FMKHSFXp3Gx5HfmSCE8X67Dn+bwephCnQCaB7qWK2ldHa+8ncN8hJU8VTMcxjPpyMkUjww==} - '@vitest/mocker@4.0.14': - resolution: {integrity: sha512-RzS5NujlCzeRPF1MK7MXLiEFpkIXeMdQ+rN3Kk3tDI9j0mtbr7Nmuq67tpkOJQpgyClbOltCXMjLZicJHsH5Cg==} - peerDependencies: - msw: ^2.4.9 - vite: ^6.0.0 || ^7.0.0-0 - peerDependenciesMeta: - msw: - optional: true - vite: - optional: true - '@vitest/mocker@4.0.15': resolution: {integrity: sha512-CZ28GLfOEIFkvCFngN8Sfx5h+Se0zN+h4B7yOsPVCcgtiO7t5jt9xQh2E1UkFep+eb9fjyMfuC5gBypwb07fvQ==} peerDependencies: @@ -6808,27 +6794,18 @@ packages: '@vitest/pretty-format@4.1.4': resolution: {integrity: sha512-ddmDHU0gjEUyEVLxtZa7xamrpIefdEETu3nZjWtHeZX4QxqJ7tRxSteHVXJOcr8jhiLoGAhkK4WJ3WqBpjx42A==} - '@vitest/runner@4.0.14': - resolution: {integrity: sha512-BsAIk3FAqxICqREbX8SetIteT8PiaUL/tgJjmhxJhCsigmzzH8xeadtp7LRnTpCVzvf0ib9BgAfKJHuhNllKLw==} - '@vitest/runner@4.0.15': resolution: {integrity: sha512-+A+yMY8dGixUhHmNdPUxOh0la6uVzun86vAbuMT3hIDxMrAOmn5ILBHm8ajrqHE0t8R9T1dGnde1A5DTnmi3qw==} '@vitest/runner@4.1.4': resolution: {integrity: sha512-xTp7VZ5aXP5ZJrn15UtJUWlx6qXLnGtF6jNxHepdPHpMfz/aVPx+htHtgcAL2mDXJgKhpoo2e9/hVJsIeFbytQ==} - '@vitest/snapshot@4.0.14': - resolution: {integrity: sha512-aQVBfT1PMzDSA16Y3Fp45a0q8nKexx6N5Amw3MX55BeTeZpoC08fGqEZqVmPcqN0ueZsuUQ9rriPMhZ3Mu19Ag==} - '@vitest/snapshot@4.0.15': 
resolution: {integrity: sha512-A7Ob8EdFZJIBjLjeO0DZF4lqR6U7Ydi5/5LIZ0xcI+23lYlsYJAfGn8PrIWTYdZQRNnSRlzhg0zyGu37mVdy5g==} '@vitest/snapshot@4.1.4': resolution: {integrity: sha512-MCjCFgaS8aZz+m5nTcEcgk/xhWv0rEH4Yl53PPlMXOZ1/Ka2VcZU6CJ+MgYCZbcJvzGhQRjVrGQNZqkGPttIKw==} - '@vitest/spy@4.0.14': - resolution: {integrity: sha512-JmAZT1UtZooO0tpY3GRyiC/8W7dCs05UOq9rfsUUgEZEdq+DuHLmWhPsrTt0TiW7WYeL/hXpaE07AZ2RCk44hg==} - '@vitest/spy@4.0.15': resolution: {integrity: sha512-+EIjOJmnY6mIfdXtE/bnozKEvTC4Uczg19yeZ2vtCz5Yyb0QQ31QWVQ8hswJ3Ysx/K2EqaNsVanjr//2+P3FHw==} @@ -11852,40 +11829,6 @@ packages: vite: optional: true - vitest@4.0.14: - resolution: {integrity: sha512-d9B2J9Cm9dN9+6nxMnnNJKJCtcyKfnHj15N6YNJfaFHRLua/d3sRKU9RuKmO9mB0XdFtUizlxfz/VPbd3OxGhw==} - engines: {node: ^20.0.0 || ^22.0.0 || >=24.0.0} - hasBin: true - peerDependencies: - '@edge-runtime/vm': '*' - '@opentelemetry/api': ^1.9.0 - '@types/node': ^20.0.0 || ^22.0.0 || >=24.0.0 - '@vitest/browser-playwright': 4.0.14 - '@vitest/browser-preview': 4.0.14 - '@vitest/browser-webdriverio': 4.0.14 - '@vitest/ui': 4.0.14 - happy-dom: '*' - jsdom: '*' - peerDependenciesMeta: - '@edge-runtime/vm': - optional: true - '@opentelemetry/api': - optional: true - '@types/node': - optional: true - '@vitest/browser-playwright': - optional: true - '@vitest/browser-preview': - optional: true - '@vitest/browser-webdriverio': - optional: true - '@vitest/ui': - optional: true - happy-dom: - optional: true - jsdom: - optional: true - vitest@4.0.15: resolution: {integrity: sha512-n1RxDp8UJm6N0IbJLQo+yzLZ2sQCDyl1o0LeugbPWf8+8Fttp29GghsQBjYJVmWq3gBFfe9Hs1spR44vovn2wA==} engines: {node: ^20.0.0 || ^22.0.0 || >=24.0.0} @@ -17349,7 +17292,7 @@ snapshots: vite: 7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) vue: 3.5.25(typescript@5.9.3) - '@vitest/coverage-v8@4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))': + '@vitest/coverage-v8@4.0.14(vitest@4.0.15(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))': dependencies: '@bcoe/v8-coverage': 1.0.2 '@vitest/utils': 4.0.14 @@ -17362,11 +17305,11 @@ snapshots: obug: 2.1.1 std-env: 3.10.0 tinyrainbow: 3.1.0 - vitest: 4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) + vitest: 4.0.15(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) transitivePeerDependencies: - supports-color - '@vitest/coverage-v8@4.0.14(vitest@4.0.15(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))': + '@vitest/coverage-v8@4.0.14(vitest@4.1.4)': dependencies: '@bcoe/v8-coverage': 1.0.2 '@vitest/utils': 4.0.14 @@ -17379,19 +17322,10 @@ snapshots: obug: 2.1.1 std-env: 3.10.0 tinyrainbow: 3.1.0 - vitest: 4.0.15(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) + vitest: 
4.1.4(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(@vitest/coverage-v8@4.0.14)(happy-dom@20.0.11)(jsdom@27.3.0(postcss@8.5.9))(vite@7.3.1(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) transitivePeerDependencies: - supports-color - '@vitest/expect@4.0.14': - dependencies: - '@standard-schema/spec': 1.1.0 - '@types/chai': 5.2.3 - '@vitest/spy': 4.0.14 - '@vitest/utils': 4.0.14 - chai: 6.2.2 - tinyrainbow: 3.1.0 - '@vitest/expect@4.0.15': dependencies: '@standard-schema/spec': 1.1.0 @@ -17410,14 +17344,6 @@ snapshots: chai: 6.2.2 tinyrainbow: 3.1.0 - '@vitest/mocker@4.0.14(vite@7.3.1(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))': - dependencies: - '@vitest/spy': 4.0.14 - estree-walker: 3.0.3 - magic-string: 0.30.21 - optionalDependencies: - vite: 7.3.1(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) - '@vitest/mocker@4.0.15(vite@7.3.1(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))': dependencies: '@vitest/spy': 4.0.15 @@ -17446,11 +17372,6 @@ snapshots: dependencies: tinyrainbow: 3.1.0 - '@vitest/runner@4.0.14': - dependencies: - '@vitest/utils': 4.0.14 - pathe: 2.0.3 - '@vitest/runner@4.0.15': dependencies: '@vitest/utils': 4.0.15 @@ -17461,12 +17382,6 @@ snapshots: '@vitest/utils': 4.1.4 pathe: 2.0.3 - '@vitest/snapshot@4.0.14': - dependencies: - '@vitest/pretty-format': 4.0.14 - magic-string: 0.30.21 - pathe: 2.0.3 - '@vitest/snapshot@4.0.15': dependencies: '@vitest/pretty-format': 4.0.15 @@ -17480,8 +17395,6 @@ snapshots: magic-string: 0.30.21 pathe: 2.0.3 - '@vitest/spy@4.0.14': {} - '@vitest/spy@4.0.15': {} '@vitest/spy@4.1.4': {} @@ -23581,26 +23494,26 @@ snapshots: optionalDependencies: vite: 7.3.1(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) - vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2): + vitest@4.0.15(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2): dependencies: - '@vitest/expect': 4.0.14 - '@vitest/mocker': 4.0.14(vite@7.3.1(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) - '@vitest/pretty-format': 4.0.14 - '@vitest/runner': 4.0.14 - '@vitest/snapshot': 4.0.14 - '@vitest/spy': 4.0.14 - '@vitest/utils': 4.0.14 + '@vitest/expect': 4.0.15 + '@vitest/mocker': 4.0.15(vite@7.3.1(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + '@vitest/pretty-format': 4.0.15 + '@vitest/runner': 4.0.15 + '@vitest/snapshot': 4.0.15 + '@vitest/spy': 4.0.15 + '@vitest/utils': 4.0.15 es-module-lexer: 1.7.0 expect-type: 1.3.0 magic-string: 0.30.21 obug: 2.1.1 pathe: 2.0.3 - picomatch: 4.0.4 + picomatch: 4.0.3 std-env: 3.10.0 tinybench: 2.9.0 - tinyexec: 0.3.2 - tinyglobby: 0.2.16 - tinyrainbow: 3.1.0 + tinyexec: 1.0.2 + tinyglobby: 0.2.15 + tinyrainbow: 3.0.3 vite: 7.3.1(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) why-is-node-running: 2.3.0 optionalDependencies: @@ -23621,45 +23534,36 @@ snapshots: - tsx - yaml - 
vitest@4.0.15(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2): + vitest@4.1.4(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(@vitest/coverage-v8@4.0.14)(happy-dom@20.0.11)(jsdom@27.3.0(postcss@8.5.9))(vite@7.3.1(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)): dependencies: - '@vitest/expect': 4.0.15 - '@vitest/mocker': 4.0.15(vite@7.3.1(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) - '@vitest/pretty-format': 4.0.15 - '@vitest/runner': 4.0.15 - '@vitest/snapshot': 4.0.15 - '@vitest/spy': 4.0.15 - '@vitest/utils': 4.0.15 - es-module-lexer: 1.7.0 + '@vitest/expect': 4.1.4 + '@vitest/mocker': 4.1.4(vite@7.3.1(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + '@vitest/pretty-format': 4.1.4 + '@vitest/runner': 4.1.4 + '@vitest/snapshot': 4.1.4 + '@vitest/spy': 4.1.4 + '@vitest/utils': 4.1.4 + es-module-lexer: 2.0.0 expect-type: 1.3.0 magic-string: 0.30.21 obug: 2.1.1 pathe: 2.0.3 - picomatch: 4.0.3 - std-env: 3.10.0 + picomatch: 4.0.4 + std-env: 4.0.0 tinybench: 2.9.0 - tinyexec: 1.0.2 - tinyglobby: 0.2.15 - tinyrainbow: 3.0.3 + tinyexec: 1.1.1 + tinyglobby: 0.2.16 + tinyrainbow: 3.1.0 vite: 7.3.1(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) why-is-node-running: 2.3.0 optionalDependencies: '@opentelemetry/api': 1.9.1 '@types/node': 24.10.3 + '@vitest/coverage-v8': 4.0.14(vitest@4.1.4) happy-dom: 20.0.11 jsdom: 27.3.0(postcss@8.5.9) transitivePeerDependencies: - - jiti - - less - - lightningcss - msw - - sass - - sass-embedded - - stylus - - sugarss - - terser - - tsx - - yaml vitest@4.1.4(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jsdom@27.3.0(postcss@8.5.9))(vite@7.3.1(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)): dependencies: @@ -23690,6 +23594,7 @@ snapshots: jsdom: 27.3.0(postcss@8.5.9) transitivePeerDependencies: - msw + optional: true vscode-uri@3.1.0: {} From 091daa6fb63b06cef66b36560590d1f6af445eae Mon Sep 17 00:00:00 2001 From: "autofix-ci[bot]" <114827586+autofix-ci[bot]@users.noreply.github.com> Date: Wed, 13 May 2026 00:34:56 +0000 Subject: [PATCH 24/49] ci: apply automated fixes --- .changeset/rename-openai-base-to-openai-compatible.md | 2 +- packages/typescript/openai-compatible/CHANGELOG.md | 1 - packages/typescript/openai-compatible/README.md | 6 +++--- 3 files changed, 4 insertions(+), 5 deletions(-) diff --git a/.changeset/rename-openai-base-to-openai-compatible.md b/.changeset/rename-openai-base-to-openai-compatible.md index d149f5283..9d4e82b18 100644 --- a/.changeset/rename-openai-base-to-openai-compatible.md +++ b/.changeset/rename-openai-base-to-openai-compatible.md @@ -8,7 +8,7 @@ Rename `@tanstack/openai-base` → `@tanstack/openai-compatible`. -The previous "base" name implied this package tracked OpenAI's product roadmap. In reality it implements two OpenAI-shaped *wire-format protocols* that multiple providers ship: +The previous "base" name implied this package tracked OpenAI's product roadmap. In reality it implements two OpenAI-shaped _wire-format protocols_ that multiple providers ship: - **Chat Completions** (`/v1/chat/completions`) — natively implemented by OpenAI, Groq, Grok, OpenRouter, vLLM, SGLang, Together, etc. 
 - **Responses** (`/v1/responses`) — OpenAI's reference implementation plus OpenRouter's beta routing implementation (which fans out to Anthropic, Google, and other underlying models).
diff --git a/packages/typescript/openai-compatible/CHANGELOG.md b/packages/typescript/openai-compatible/CHANGELOG.md
index 7d83e1661..74402c006 100644
--- a/packages/typescript/openai-compatible/CHANGELOG.md
+++ b/packages/typescript/openai-compatible/CHANGELOG.md
@@ -2,7 +2,6 @@
 
 > Renamed from `@tanstack/openai-base` in 0.3.0. See the [README](./README.md) for context.
 
-
 ## 0.2.1
 
 ### Patch Changes
diff --git a/packages/typescript/openai-compatible/README.md b/packages/typescript/openai-compatible/README.md
index 3419be682..96d0f8e85 100644
--- a/packages/typescript/openai-compatible/README.md
+++ b/packages/typescript/openai-compatible/README.md
@@ -3,8 +3,8 @@
 Shared protocol adapters for OpenAI-compatible providers in TanStack AI.
 
 > Renamed from `@tanstack/openai-base` in 0.3.0. The "base" name implied this package
-> tracked OpenAI's product roadmap; in fact it implements two OpenAI-shaped *wire
-> formats* that multiple providers ship — see below.
+> tracked OpenAI's product roadmap; in fact it implements two OpenAI-shaped _wire
+> formats_ that multiple providers ship — see below.
 
 ## What this package is
@@ -51,7 +51,7 @@
 Every text adapter in TanStack AI — regardless of provider — emits
 [AG-UI](https://github.com/CopilotKit/ag-ui) events (`RUN_STARTED`,
 `TEXT_MESSAGE_*`, `TOOL_CALL_*`, `RUN_FINISHED`, …) as its output stream. That
-is the *universal* unification.
+is the _universal_ unification.
 
 Input protocols are different. The OpenAI-compatible family (covered by this
 package) has many implementers and warrants a shared base. Anthropic, Google

From e90c8d9399d05ebc08bf9264fc135c3a61bdbb75 Mon Sep 17 00:00:00 2001
From: Tom Beckenham <34339192+tombeckenham@users.noreply.github.com>
Date: Wed, 13 May 2026 10:45:11 +1000
Subject: [PATCH 25/49] =?UTF-8?q?refactor:=20rename=20@tanstack/openai-com?=
 =?UTF-8?q?patible=20=E2=86=92=20@tanstack/ai-openai-compatible?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Match the `ai-*` prefix convention used by every other package in the AI
subnamespace (ai-utils, ai-openai, ai-anthropic, ai-client, ai-react, …).
`@tanstack/` is a flat namespace shared across all TanStack products (Query,
Router, Table, Form, AI, …), so `@tanstack/openai-compatible` alone gives no
signal about which TanStack product it belongs to.

A pure follow-up rename to the rename in 06d3d8c9; no behavior change. The
directory `packages/typescript/openai-compatible` →
`packages/typescript/ai-openai-compatible`, the package.json `name` field,
consumer dependency declarations, TypeScript imports, README, CHANGELOG
header, and both changesets are all updated.
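The README hunk above describes the AG-UI output unification only in prose, so a minimal consumer sketch may help make it concrete. The event names come from the README excerpt; the event and stream types below are simplified stand-ins, not the real `@tanstack/ai` definitions.

```ts
// Simplified stand-in for the AG-UI event vocabulary the README names.
// Real adapters emit richer payloads; these shapes are assumptions.
type AgUiEvent =
  | { type: 'RUN_STARTED' }
  | { type: 'TEXT_MESSAGE_CONTENT'; delta: string }
  | { type: 'TOOL_CALL_START'; toolCallName: string }
  | { type: 'RUN_FINISHED' }
  | { type: 'RUN_ERROR'; message: string; code?: string }

// Because every provider's text adapter emits the same event vocabulary,
// a consumer like this is provider-agnostic: the input wire format
// (Chat Completions, Responses, or anything else) never leaks through.
async function collectAssistantText(
  stream: AsyncIterable<AgUiEvent>,
): Promise<string> {
  let text = ''
  for await (const event of stream) {
    if (event.type === 'TEXT_MESSAGE_CONTENT') text += event.delta
    if (event.type === 'RUN_ERROR') throw new Error(event.message)
  }
  return text
}
```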
Co-Authored-By: Claude Opus 4.7 (1M context) --- .../migrate-groq-openrouter-to-openai-base.md | 6 +- ...me-openai-base-to-ai-openai-compatible.md} | 6 +- packages/typescript/ai-grok/package.json | 4 +- .../typescript/ai-grok/src/adapters/image.ts | 2 +- .../ai-grok/src/adapters/summarize.ts | 4 +- .../typescript/ai-grok/src/adapters/text.ts | 4 +- .../typescript/ai-grok/src/tools/index.ts | 2 +- .../typescript/ai-grok/src/utils/client.ts | 2 +- .../ai-grok/src/utils/schema-converter.ts | 2 +- packages/typescript/ai-groq/package.json | 4 +- .../typescript/ai-groq/src/adapters/text.ts | 4 +- .../typescript/ai-groq/src/utils/client.ts | 2 +- .../ai-groq/src/utils/schema-converter.ts | 6 +- .../ai-groq/tests/groq-adapter.test.ts | 2 +- .../ai-groq/tests/schema-converter.test.ts | 4 +- .../CHANGELOG.md | 2 +- .../README.md | 2 +- .../package.json | 4 +- .../src/adapters/chat-completions-text.ts | 0 .../chat-completions-tool-converter.ts | 0 .../src/adapters/image.ts | 0 .../src/adapters/responses-text.ts | 0 .../src/adapters/responses-tool-converter.ts | 0 .../src/adapters/summarize.ts | 0 .../src/adapters/transcription.ts | 0 .../src/adapters/tts.ts | 0 .../src/adapters/video.ts | 0 .../src/index.ts | 0 .../src/tools/apply-patch-tool.ts | 0 .../src/tools/code-interpreter-tool.ts | 0 .../src/tools/computer-use-tool.ts | 0 .../src/tools/custom-tool.ts | 0 .../src/tools/file-search-tool.ts | 0 .../src/tools/function-tool.ts | 0 .../src/tools/image-generation-tool.ts | 0 .../src/tools/index.ts | 0 .../src/tools/local-shell-tool.ts | 0 .../src/tools/mcp-tool.ts | 0 .../src/tools/shell-tool.ts | 0 .../src/tools/tool-choice.ts | 0 .../src/tools/tool-converter.ts | 0 .../src/tools/web-search-preview-tool.ts | 0 .../src/tools/web-search-tool.ts | 0 .../src/types/config.ts | 0 .../src/utils/client.ts | 0 .../src/utils/request-options.ts | 0 .../src/utils/schema-converter.ts | 0 .../tests/chat-completions-text.test.ts | 0 .../tests/mcp-tool.test.ts | 0 .../tests/media-adapters.test.ts | 0 .../tests/responses-text.test.ts | 0 .../tests/schema-converter.test.ts | 0 .../tsconfig.json | 0 .../vite.config.ts | 0 packages/typescript/ai-openai/package.json | 2 +- .../ai-openai/src/adapters/image.ts | 2 +- .../ai-openai/src/adapters/summarize.ts | 2 +- .../typescript/ai-openai/src/adapters/text.ts | 4 +- .../ai-openai/src/adapters/transcription.ts | 2 +- .../typescript/ai-openai/src/adapters/tts.ts | 2 +- .../ai-openai/src/adapters/video.ts | 2 +- .../ai-openai/src/tools/apply-patch-tool.ts | 4 +- .../src/tools/code-interpreter-tool.ts | 6 +- .../ai-openai/src/tools/computer-use-tool.ts | 6 +- .../ai-openai/src/tools/custom-tool.ts | 2 +- .../ai-openai/src/tools/file-search-tool.ts | 6 +- .../ai-openai/src/tools/function-tool.ts | 2 +- .../src/tools/image-generation-tool.ts | 6 +- .../typescript/ai-openai/src/tools/index.ts | 2 +- .../ai-openai/src/tools/local-shell-tool.ts | 4 +- .../ai-openai/src/tools/mcp-tool.ts | 6 +- .../ai-openai/src/tools/shell-tool.ts | 4 +- .../ai-openai/src/tools/tool-choice.ts | 2 +- .../ai-openai/src/tools/tool-converter.ts | 2 +- .../src/tools/web-search-preview-tool.ts | 6 +- .../ai-openai/src/tools/web-search-tool.ts | 6 +- .../typescript/ai-openai/src/utils/client.ts | 2 +- .../ai-openai/src/utils/schema-converter.ts | 2 +- .../typescript/ai-openrouter/package.json | 4 +- .../src/adapters/responses-text.ts | 6 +- .../ai-openrouter/src/adapters/text.ts | 4 +- pnpm-lock.yaml | 279 ++++++++++++------ 82 files changed, 267 insertions(+), 172 deletions(-) rename 
.changeset/{rename-openai-base-to-openai-compatible.md => rename-openai-base-to-ai-openai-compatible.md} (92%) rename packages/typescript/{openai-compatible => ai-openai-compatible}/CHANGELOG.md (96%) rename packages/typescript/{openai-compatible => ai-openai-compatible}/README.md (98%) rename packages/typescript/{openai-compatible => ai-openai-compatible}/package.json (92%) rename packages/typescript/{openai-compatible => ai-openai-compatible}/src/adapters/chat-completions-text.ts (100%) rename packages/typescript/{openai-compatible => ai-openai-compatible}/src/adapters/chat-completions-tool-converter.ts (100%) rename packages/typescript/{openai-compatible => ai-openai-compatible}/src/adapters/image.ts (100%) rename packages/typescript/{openai-compatible => ai-openai-compatible}/src/adapters/responses-text.ts (100%) rename packages/typescript/{openai-compatible => ai-openai-compatible}/src/adapters/responses-tool-converter.ts (100%) rename packages/typescript/{openai-compatible => ai-openai-compatible}/src/adapters/summarize.ts (100%) rename packages/typescript/{openai-compatible => ai-openai-compatible}/src/adapters/transcription.ts (100%) rename packages/typescript/{openai-compatible => ai-openai-compatible}/src/adapters/tts.ts (100%) rename packages/typescript/{openai-compatible => ai-openai-compatible}/src/adapters/video.ts (100%) rename packages/typescript/{openai-compatible => ai-openai-compatible}/src/index.ts (100%) rename packages/typescript/{openai-compatible => ai-openai-compatible}/src/tools/apply-patch-tool.ts (100%) rename packages/typescript/{openai-compatible => ai-openai-compatible}/src/tools/code-interpreter-tool.ts (100%) rename packages/typescript/{openai-compatible => ai-openai-compatible}/src/tools/computer-use-tool.ts (100%) rename packages/typescript/{openai-compatible => ai-openai-compatible}/src/tools/custom-tool.ts (100%) rename packages/typescript/{openai-compatible => ai-openai-compatible}/src/tools/file-search-tool.ts (100%) rename packages/typescript/{openai-compatible => ai-openai-compatible}/src/tools/function-tool.ts (100%) rename packages/typescript/{openai-compatible => ai-openai-compatible}/src/tools/image-generation-tool.ts (100%) rename packages/typescript/{openai-compatible => ai-openai-compatible}/src/tools/index.ts (100%) rename packages/typescript/{openai-compatible => ai-openai-compatible}/src/tools/local-shell-tool.ts (100%) rename packages/typescript/{openai-compatible => ai-openai-compatible}/src/tools/mcp-tool.ts (100%) rename packages/typescript/{openai-compatible => ai-openai-compatible}/src/tools/shell-tool.ts (100%) rename packages/typescript/{openai-compatible => ai-openai-compatible}/src/tools/tool-choice.ts (100%) rename packages/typescript/{openai-compatible => ai-openai-compatible}/src/tools/tool-converter.ts (100%) rename packages/typescript/{openai-compatible => ai-openai-compatible}/src/tools/web-search-preview-tool.ts (100%) rename packages/typescript/{openai-compatible => ai-openai-compatible}/src/tools/web-search-tool.ts (100%) rename packages/typescript/{openai-compatible => ai-openai-compatible}/src/types/config.ts (100%) rename packages/typescript/{openai-compatible => ai-openai-compatible}/src/utils/client.ts (100%) rename packages/typescript/{openai-compatible => ai-openai-compatible}/src/utils/request-options.ts (100%) rename packages/typescript/{openai-compatible => ai-openai-compatible}/src/utils/schema-converter.ts (100%) rename packages/typescript/{openai-compatible => 
ai-openai-compatible}/tests/chat-completions-text.test.ts (100%)
 rename packages/typescript/{openai-compatible => ai-openai-compatible}/tests/mcp-tool.test.ts (100%)
 rename packages/typescript/{openai-compatible => ai-openai-compatible}/tests/media-adapters.test.ts (100%)
 rename packages/typescript/{openai-compatible => ai-openai-compatible}/tests/responses-text.test.ts (100%)
 rename packages/typescript/{openai-compatible => ai-openai-compatible}/tests/schema-converter.test.ts (100%)
 rename packages/typescript/{openai-compatible => ai-openai-compatible}/tsconfig.json (100%)
 rename packages/typescript/{openai-compatible => ai-openai-compatible}/vite.config.ts (100%)

diff --git a/.changeset/migrate-groq-openrouter-to-openai-base.md b/.changeset/migrate-groq-openrouter-to-openai-base.md
index 255ad5068..9b6fad972 100644
--- a/.changeset/migrate-groq-openrouter-to-openai-base.md
+++ b/.changeset/migrate-groq-openrouter-to-openai-base.md
@@ -1,5 +1,5 @@
 ---
-'@tanstack/openai-compatible': minor
+'@tanstack/ai-openai-compatible': minor
 '@tanstack/ai-groq': patch
 '@tanstack/ai-openrouter': patch
 '@tanstack/ai': patch
@@ -7,9 +7,9 @@
 
 Migrate `ai-groq` and `ai-openrouter` onto `OpenAICompatibleChatCompletionsTextAdapter` so they share the stream accumulator, partial-JSON tool-call buffer, RUN_ERROR taxonomy, and lifecycle gates with `ai-openai` / `ai-grok`. Removes ~1k LOC of duplicated stream processing.
 
-`@tanstack/openai-compatible` adds three protected hooks on `OpenAICompatibleChatCompletionsTextAdapter` so providers with non-OpenAI SDK shapes can reuse the base: `callChatCompletion` and `callChatCompletionStream` (SDK call sites for non-streaming and streaming Chat Completions), and `extractReasoning` (surface reasoning content from chunk shapes that carry it, e.g. OpenRouter's `delta.reasoningDetails`, into the base's REASONING\_\* + legacy STEP_STARTED/STEP_FINISHED lifecycle). Also adds `transformStructuredOutput` for subclasses (like OpenRouter) that preserve nulls in structured output instead of converting them to undefined.
+`@tanstack/ai-openai-compatible` adds three protected hooks on `OpenAICompatibleChatCompletionsTextAdapter` so providers with non-OpenAI SDK shapes can reuse the base: `callChatCompletion` and `callChatCompletionStream` (SDK call sites for non-streaming and streaming Chat Completions), and `extractReasoning` (surface reasoning content from chunk shapes that carry it, e.g. OpenRouter's `delta.reasoningDetails`, into the base's REASONING\_\* + legacy STEP_STARTED/STEP_FINISHED lifecycle). Also adds `transformStructuredOutput` for subclasses (like OpenRouter) that preserve nulls in structured output instead of converting them to undefined.
 
-`@tanstack/openai-compatible` fixes two error-handling regressions in the shared base: `structuredOutput` now throws a distinct `"response contained no content"` error rather than letting empty content cascade into a misleading JSON-parse error, and the post-loop tool-args drain block now logs malformed JSON via `logger.errors` (matching the in-loop finish_reason path) so truncated streams emitting partial tool args are debuggable instead of silently invoking the tool with `{}`.
+`@tanstack/ai-openai-compatible` fixes two error-handling regressions in the shared base: `structuredOutput` now throws a distinct `"response contained no content"` error rather than letting empty content cascade into a misleading JSON-parse error, and the post-loop tool-args drain block now logs malformed JSON via `logger.errors` (matching the in-loop finish_reason path) so truncated streams emitting partial tool args are debuggable instead of silently invoking the tool with `{}`.
 
 `@tanstack/ai` normalizes abort-shaped errors (`AbortError`, `APIUserAbortError`, `RequestAbortedError`) to a stable `{ message: 'Request aborted', code: 'aborted' }` payload in `toRunErrorPayload`, so consumers can discriminate user-initiated cancellation from other failures without matching on provider-specific message strings.
 
diff --git a/.changeset/rename-openai-base-to-openai-compatible.md b/.changeset/rename-openai-base-to-ai-openai-compatible.md
similarity index 92%
rename from .changeset/rename-openai-base-to-openai-compatible.md
rename to .changeset/rename-openai-base-to-ai-openai-compatible.md
index 9d4e82b18..851100706 100644
--- a/.changeset/rename-openai-base-to-openai-compatible.md
+++ b/.changeset/rename-openai-base-to-ai-openai-compatible.md
@@ -1,12 +1,12 @@
 ---
-'@tanstack/openai-compatible': minor
+'@tanstack/ai-openai-compatible': minor
 '@tanstack/ai-openai': patch
 '@tanstack/ai-openrouter': patch
 '@tanstack/ai-groq': patch
 '@tanstack/ai-grok': patch
 ---
 
-Rename `@tanstack/openai-base` → `@tanstack/openai-compatible`.
+Rename `@tanstack/openai-base` → `@tanstack/ai-openai-compatible`.
 
 The previous "base" name implied this package tracked OpenAI's product roadmap. In reality it implements two OpenAI-shaped _wire-format protocols_ that multiple providers ship:
 
@@ -21,7 +21,7 @@ If you were importing from `@tanstack/openai-base` directly (uncommon — the pa
 
 ```diff
 - import { OpenAICompatibleChatCompletionsTextAdapter } from '@tanstack/openai-base'
-+ import { OpenAICompatibleChatCompletionsTextAdapter } from '@tanstack/openai-compatible'
++ import { OpenAICompatibleChatCompletionsTextAdapter } from '@tanstack/ai-openai-compatible'
 ```
 
 `@tanstack/openai-base@0.2.x` remains published on npm for anyone with a pinned lockfile reference but will receive no further updates.
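To make the hook surface described in the migrate changeset concrete, here is a rough sketch of the override pattern it enables. Only the class name, the hook names, and the `ChatCompletionChunk` type come from the patch; the signatures are simplified guesses, the base-class generics are elided, and `providerSdk` / `toBaseChunkShape` are hypothetical stand-ins for a provider SDK and its chunk-shape bridge.

```ts
import { OpenAICompatibleChatCompletionsTextAdapter } from '@tanstack/ai-openai-compatible'
import type { ChatCompletionChunk } from '@tanstack/ai-openai-compatible'

// Hypothetical provider SDK and shape bridge, declared only for this sketch.
declare const providerSdk: {
  chatStream: (request: Record<string, unknown>) => AsyncIterable<unknown>
}
declare function toBaseChunkShape(sdkChunk: unknown): ChatCompletionChunk

// Generics on the base class are omitted here; the real adapters thread
// model, provider-options, and tool-capability types through them.
class ExampleProviderTextAdapter extends OpenAICompatibleChatCompletionsTextAdapter {
  // Streaming SDK call site: route the request through the provider SDK and
  // normalise each chunk into the shape the base's stream accumulator reads.
  protected async *callChatCompletionStream(
    request: Record<string, unknown>,
  ): AsyncIterable<ChatCompletionChunk> {
    for await (const sdkChunk of providerSdk.chatStream(request)) {
      yield toBaseChunkShape(sdkChunk)
    }
  }

  // Surface provider-specific reasoning (e.g. OpenRouter's
  // delta.reasoningDetails) so the base can emit its REASONING_* lifecycle.
  protected extractReasoning(chunk: ChatCompletionChunk): string | undefined {
    const delta = (
      chunk as { choices?: Array<{ delta?: { reasoning?: string } }> }
    ).choices?.[0]?.delta
    return delta?.reasoning
  }
}
```

A non-streaming `callChatCompletion` override and a `transformStructuredOutput` override would follow the same shape.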
diff --git a/packages/typescript/ai-grok/package.json b/packages/typescript/ai-grok/package.json
index b76380441..c6713b434 100644
--- a/packages/typescript/ai-grok/package.json
+++ b/packages/typescript/ai-grok/package.json
@@ -44,8 +44,8 @@
     "adapter"
   ],
   "dependencies": {
-    "@tanstack/ai-utils": "workspace:*",
-    "@tanstack/openai-compatible": "workspace:*"
+    "@tanstack/ai-openai-compatible": "workspace:*",
+    "@tanstack/ai-utils": "workspace:*"
   },
   "devDependencies": {
     "@tanstack/ai": "workspace:*",
diff --git a/packages/typescript/ai-grok/src/adapters/image.ts b/packages/typescript/ai-grok/src/adapters/image.ts
index 5427f3973..2e5ad1c3a 100644
--- a/packages/typescript/ai-grok/src/adapters/image.ts
+++ b/packages/typescript/ai-grok/src/adapters/image.ts
@@ -1,4 +1,4 @@
-import { OpenAICompatibleImageAdapter } from '@tanstack/openai-compatible'
+import { OpenAICompatibleImageAdapter } from '@tanstack/ai-openai-compatible'
 import { getGrokApiKeyFromEnv, withGrokDefaults } from '../utils/client'
 import {
   validateImageSize,
diff --git a/packages/typescript/ai-grok/src/adapters/summarize.ts b/packages/typescript/ai-grok/src/adapters/summarize.ts
index 43ed2c7d4..8e72fc44d 100644
--- a/packages/typescript/ai-grok/src/adapters/summarize.ts
+++ b/packages/typescript/ai-grok/src/adapters/summarize.ts
@@ -1,7 +1,7 @@
-import { OpenAICompatibleSummarizeAdapter } from '@tanstack/openai-compatible'
+import { OpenAICompatibleSummarizeAdapter } from '@tanstack/ai-openai-compatible'
 import { getGrokApiKeyFromEnv } from '../utils'
 import { GrokTextAdapter } from './text'
-import type { ChatStreamCapable } from '@tanstack/openai-compatible'
+import type { ChatStreamCapable } from '@tanstack/ai-openai-compatible'
 import type { GROK_CHAT_MODELS } from '../model-meta'
 import type { GrokClientConfig } from '../utils'
 
diff --git a/packages/typescript/ai-grok/src/adapters/text.ts b/packages/typescript/ai-grok/src/adapters/text.ts
index e29c4a637..767c2c18c 100644
--- a/packages/typescript/ai-grok/src/adapters/text.ts
+++ b/packages/typescript/ai-grok/src/adapters/text.ts
@@ -1,4 +1,4 @@
-import { OpenAICompatibleChatCompletionsTextAdapter } from '@tanstack/openai-compatible'
+import { OpenAICompatibleChatCompletionsTextAdapter } from '@tanstack/ai-openai-compatible'
 import { getGrokApiKeyFromEnv, withGrokDefaults } from '../utils/client'
 import type {
   GROK_CHAT_MODELS,
@@ -35,7 +35,7 @@ export type { ExternalTextProviderOptions as GrokTextProviderOptions } from '../
  * Uses OpenAI-compatible Chat Completions API (not Responses API).
  *
  * Delegates implementation to {@link OpenAICompatibleChatCompletionsTextAdapter}
- * from `@tanstack/openai-compatible` and threads Grok-specific tool-capability typing
+ * from `@tanstack/ai-openai-compatible` and threads Grok-specific tool-capability typing
  * through the 5th generic of the base class.
  */
 export class GrokTextAdapter<
diff --git a/packages/typescript/ai-grok/src/tools/index.ts b/packages/typescript/ai-grok/src/tools/index.ts
index 2d134228d..6f39deef9 100644
--- a/packages/typescript/ai-grok/src/tools/index.ts
+++ b/packages/typescript/ai-grok/src/tools/index.ts
@@ -2,4 +2,4 @@ export {
   type ChatCompletionFunctionTool as FunctionTool,
   convertFunctionToolToChatCompletionsFormat as convertFunctionToolToAdapterFormat,
   convertToolsToChatCompletionsFormat as convertToolsToProviderFormat,
-} from '@tanstack/openai-compatible'
+} from '@tanstack/ai-openai-compatible'
diff --git a/packages/typescript/ai-grok/src/utils/client.ts b/packages/typescript/ai-grok/src/utils/client.ts
index 9fbe11e8f..650505b85 100644
--- a/packages/typescript/ai-grok/src/utils/client.ts
+++ b/packages/typescript/ai-grok/src/utils/client.ts
@@ -1,5 +1,5 @@
 import { getApiKeyFromEnv } from '@tanstack/ai-utils'
-import type { OpenAICompatibleClientConfig } from '@tanstack/openai-compatible'
+import type { OpenAICompatibleClientConfig } from '@tanstack/ai-openai-compatible'
 
 export interface GrokClientConfig extends OpenAICompatibleClientConfig {}
 
diff --git a/packages/typescript/ai-grok/src/utils/schema-converter.ts b/packages/typescript/ai-grok/src/utils/schema-converter.ts
index b0b85bb1a..0d1c7aa5b 100644
--- a/packages/typescript/ai-grok/src/utils/schema-converter.ts
+++ b/packages/typescript/ai-grok/src/utils/schema-converter.ts
@@ -1,2 +1,2 @@
 export { transformNullsToUndefined } from '@tanstack/ai-utils'
-export { makeStructuredOutputCompatible as makeGrokStructuredOutputCompatible } from '@tanstack/openai-compatible'
+export { makeStructuredOutputCompatible as makeGrokStructuredOutputCompatible } from '@tanstack/ai-openai-compatible'
diff --git a/packages/typescript/ai-groq/package.json b/packages/typescript/ai-groq/package.json
index c623da3f9..5be447803 100644
--- a/packages/typescript/ai-groq/package.json
+++ b/packages/typescript/ai-groq/package.json
@@ -52,7 +52,7 @@
     "zod": "^4.0.0"
   },
   "dependencies": {
-    "@tanstack/ai-utils": "workspace:*",
-    "@tanstack/openai-compatible": "workspace:*"
+    "@tanstack/ai-openai-compatible": "workspace:*",
+    "@tanstack/ai-utils": "workspace:*"
   }
 }
diff --git a/packages/typescript/ai-groq/src/adapters/text.ts b/packages/typescript/ai-groq/src/adapters/text.ts
index 2ba00e399..a0ff1adc6 100644
--- a/packages/typescript/ai-groq/src/adapters/text.ts
+++ b/packages/typescript/ai-groq/src/adapters/text.ts
@@ -1,8 +1,8 @@
-import { OpenAICompatibleChatCompletionsTextAdapter } from '@tanstack/openai-compatible'
+import { OpenAICompatibleChatCompletionsTextAdapter } from '@tanstack/ai-openai-compatible'
 import { getGroqApiKeyFromEnv, withGroqDefaults } from '../utils/client'
 import { makeGroqStructuredOutputCompatible } from '../utils/schema-converter'
 import type { Modality, TextOptions } from '@tanstack/ai'
-import type { ChatCompletionChunk } from '@tanstack/openai-compatible'
+import type { ChatCompletionChunk } from '@tanstack/ai-openai-compatible'
 import type {
   GROQ_CHAT_MODELS,
   GroqChatModelToolCapabilitiesByName,
diff --git a/packages/typescript/ai-groq/src/utils/client.ts b/packages/typescript/ai-groq/src/utils/client.ts
index 5318fb04d..eaaadc64f 100644
--- a/packages/typescript/ai-groq/src/utils/client.ts
+++ b/packages/typescript/ai-groq/src/utils/client.ts
@@ -1,5 +1,5 @@
 import { getApiKeyFromEnv } from '@tanstack/ai-utils'
-import type { OpenAICompatibleClientConfig } from '@tanstack/openai-compatible'
+import type { OpenAICompatibleClientConfig } from '@tanstack/ai-openai-compatible'
 
 export interface GroqClientConfig extends OpenAICompatibleClientConfig {}
 
diff --git a/packages/typescript/ai-groq/src/utils/schema-converter.ts b/packages/typescript/ai-groq/src/utils/schema-converter.ts
index 02e53221a..814e05303 100644
--- a/packages/typescript/ai-groq/src/utils/schema-converter.ts
+++ b/packages/typescript/ai-groq/src/utils/schema-converter.ts
@@ -1,4 +1,4 @@
-import { makeStructuredOutputCompatible } from '@tanstack/openai-compatible'
+import { makeStructuredOutputCompatible } from '@tanstack/ai-openai-compatible'
 import { transformNullsToUndefined } from '@tanstack/ai-utils'
 
 export { transformNullsToUndefined }
@@ -62,7 +62,7 @@ function removeEmptyRequired(schema: Record<string, unknown>): Record<string, un
 /**
  * Recursively normalise object schemas so any `{ type: 'object' }` node
  * without `properties` gets an empty `properties: {}` object. The
- * openai-compatible transformer only descends into objects that already have
+ * ai-openai-compatible transformer only descends into objects that already have
  * `properties` set, so a Zod `z.object({})` nested inside `properties`,
  * `items`, `additionalProperties`, or a combinator branch would otherwise
  * skip the strict-mode rewrite and fail Groq validation.
@@ -140,7 +140,7 @@ export function makeGroqStructuredOutputCompatible(
   schema: Record<string, unknown>,
   originalRequired: Array<string> = [],
 ): Record<string, unknown> {
-  // Recursively patch every `{ type: 'object' }` node so the openai-compatible
+  // Recursively patch every `{ type: 'object' }` node so the ai-openai-compatible
   // transformer descends into nested empty objects too.
   const normalised = normalizeObjectSchemas(schema)
 
diff --git a/packages/typescript/ai-groq/tests/groq-adapter.test.ts b/packages/typescript/ai-groq/tests/groq-adapter.test.ts
index 99f38b769..c058d621f 100644
--- a/packages/typescript/ai-groq/tests/groq-adapter.test.ts
+++ b/packages/typescript/ai-groq/tests/groq-adapter.test.ts
@@ -20,7 +20,7 @@ const testLogger = resolveDebugOption(false)
 // Stub the OpenAI SDK so adapter construction doesn't open a real network
 // handle. The per-test mock client is injected post-construction via
 // `setupMockSdkClient` (mirrors the ai-grok pattern). We avoid relying on
-// vi.mock to intercept transitive openai imports — the built openai-compatible
+// vi.mock to intercept transitive openai imports — the built ai-openai-compatible
 // dist resolves `openai` independently and is unaffected by vi.mock here.
 vi.mock('openai', () => {
   return {
diff --git a/packages/typescript/ai-groq/tests/schema-converter.test.ts b/packages/typescript/ai-groq/tests/schema-converter.test.ts
index 40ce8f004..38a74021a 100644
--- a/packages/typescript/ai-groq/tests/schema-converter.test.ts
+++ b/packages/typescript/ai-groq/tests/schema-converter.test.ts
@@ -65,7 +65,7 @@ describe('makeGroqStructuredOutputCompatible', () => {
   it('should normalise nested empty-object schemas in properties', () => {
     // Reproduces the bug where a nested `{ type: 'object' }` without
-    // `properties` slipped past the openai-compatible transformer because the
+    // `properties` slipped past the ai-openai-compatible transformer because the
     // ai-groq layer only normalised the top-level node.
const schema = { type: 'object', @@ -79,7 +79,7 @@ describe('makeGroqStructuredOutputCompatible', () => { expect(result.properties.child.type).toBe('object') expect(result.properties.child.properties).toEqual({}) - // openai-compatible sets additionalProperties: false on every rewritten object + // ai-openai-compatible sets additionalProperties: false on every rewritten object expect(result.properties.child.additionalProperties).toBe(false) }) diff --git a/packages/typescript/openai-compatible/CHANGELOG.md b/packages/typescript/ai-openai-compatible/CHANGELOG.md similarity index 96% rename from packages/typescript/openai-compatible/CHANGELOG.md rename to packages/typescript/ai-openai-compatible/CHANGELOG.md index 74402c006..2350bbf06 100644 --- a/packages/typescript/openai-compatible/CHANGELOG.md +++ b/packages/typescript/ai-openai-compatible/CHANGELOG.md @@ -1,4 +1,4 @@ -# @tanstack/openai-compatible +# @tanstack/ai-openai-compatible > Renamed from `@tanstack/openai-base` in 0.3.0. See the [README](./README.md) for context. diff --git a/packages/typescript/openai-compatible/README.md b/packages/typescript/ai-openai-compatible/README.md similarity index 98% rename from packages/typescript/openai-compatible/README.md rename to packages/typescript/ai-openai-compatible/README.md index 96d0f8e85..13aac0a99 100644 --- a/packages/typescript/openai-compatible/README.md +++ b/packages/typescript/ai-openai-compatible/README.md @@ -1,4 +1,4 @@ -# @tanstack/openai-compatible +# @tanstack/ai-openai-compatible Shared protocol adapters for OpenAI-compatible providers in TanStack AI. diff --git a/packages/typescript/openai-compatible/package.json b/packages/typescript/ai-openai-compatible/package.json similarity index 92% rename from packages/typescript/openai-compatible/package.json rename to packages/typescript/ai-openai-compatible/package.json index 6ac972103..f0e3051cf 100644 --- a/packages/typescript/openai-compatible/package.json +++ b/packages/typescript/ai-openai-compatible/package.json @@ -1,5 +1,5 @@ { - "name": "@tanstack/openai-compatible", + "name": "@tanstack/ai-openai-compatible", "version": "0.2.1", "description": "Shared protocol adapters and utilities for OpenAI-compatible providers in TanStack AI (Chat Completions and Responses wire formats)", "author": "", @@ -7,7 +7,7 @@ "repository": { "type": "git", "url": "git+https://github.com/TanStack/ai.git", - "directory": "packages/typescript/openai-compatible" + "directory": "packages/typescript/ai-openai-compatible" }, "type": "module", "module": "./dist/esm/index.js", diff --git a/packages/typescript/openai-compatible/src/adapters/chat-completions-text.ts b/packages/typescript/ai-openai-compatible/src/adapters/chat-completions-text.ts similarity index 100% rename from packages/typescript/openai-compatible/src/adapters/chat-completions-text.ts rename to packages/typescript/ai-openai-compatible/src/adapters/chat-completions-text.ts diff --git a/packages/typescript/openai-compatible/src/adapters/chat-completions-tool-converter.ts b/packages/typescript/ai-openai-compatible/src/adapters/chat-completions-tool-converter.ts similarity index 100% rename from packages/typescript/openai-compatible/src/adapters/chat-completions-tool-converter.ts rename to packages/typescript/ai-openai-compatible/src/adapters/chat-completions-tool-converter.ts diff --git a/packages/typescript/openai-compatible/src/adapters/image.ts b/packages/typescript/ai-openai-compatible/src/adapters/image.ts similarity index 100% rename from 
packages/typescript/openai-compatible/src/adapters/image.ts rename to packages/typescript/ai-openai-compatible/src/adapters/image.ts diff --git a/packages/typescript/openai-compatible/src/adapters/responses-text.ts b/packages/typescript/ai-openai-compatible/src/adapters/responses-text.ts similarity index 100% rename from packages/typescript/openai-compatible/src/adapters/responses-text.ts rename to packages/typescript/ai-openai-compatible/src/adapters/responses-text.ts diff --git a/packages/typescript/openai-compatible/src/adapters/responses-tool-converter.ts b/packages/typescript/ai-openai-compatible/src/adapters/responses-tool-converter.ts similarity index 100% rename from packages/typescript/openai-compatible/src/adapters/responses-tool-converter.ts rename to packages/typescript/ai-openai-compatible/src/adapters/responses-tool-converter.ts diff --git a/packages/typescript/openai-compatible/src/adapters/summarize.ts b/packages/typescript/ai-openai-compatible/src/adapters/summarize.ts similarity index 100% rename from packages/typescript/openai-compatible/src/adapters/summarize.ts rename to packages/typescript/ai-openai-compatible/src/adapters/summarize.ts diff --git a/packages/typescript/openai-compatible/src/adapters/transcription.ts b/packages/typescript/ai-openai-compatible/src/adapters/transcription.ts similarity index 100% rename from packages/typescript/openai-compatible/src/adapters/transcription.ts rename to packages/typescript/ai-openai-compatible/src/adapters/transcription.ts diff --git a/packages/typescript/openai-compatible/src/adapters/tts.ts b/packages/typescript/ai-openai-compatible/src/adapters/tts.ts similarity index 100% rename from packages/typescript/openai-compatible/src/adapters/tts.ts rename to packages/typescript/ai-openai-compatible/src/adapters/tts.ts diff --git a/packages/typescript/openai-compatible/src/adapters/video.ts b/packages/typescript/ai-openai-compatible/src/adapters/video.ts similarity index 100% rename from packages/typescript/openai-compatible/src/adapters/video.ts rename to packages/typescript/ai-openai-compatible/src/adapters/video.ts diff --git a/packages/typescript/openai-compatible/src/index.ts b/packages/typescript/ai-openai-compatible/src/index.ts similarity index 100% rename from packages/typescript/openai-compatible/src/index.ts rename to packages/typescript/ai-openai-compatible/src/index.ts diff --git a/packages/typescript/openai-compatible/src/tools/apply-patch-tool.ts b/packages/typescript/ai-openai-compatible/src/tools/apply-patch-tool.ts similarity index 100% rename from packages/typescript/openai-compatible/src/tools/apply-patch-tool.ts rename to packages/typescript/ai-openai-compatible/src/tools/apply-patch-tool.ts diff --git a/packages/typescript/openai-compatible/src/tools/code-interpreter-tool.ts b/packages/typescript/ai-openai-compatible/src/tools/code-interpreter-tool.ts similarity index 100% rename from packages/typescript/openai-compatible/src/tools/code-interpreter-tool.ts rename to packages/typescript/ai-openai-compatible/src/tools/code-interpreter-tool.ts diff --git a/packages/typescript/openai-compatible/src/tools/computer-use-tool.ts b/packages/typescript/ai-openai-compatible/src/tools/computer-use-tool.ts similarity index 100% rename from packages/typescript/openai-compatible/src/tools/computer-use-tool.ts rename to packages/typescript/ai-openai-compatible/src/tools/computer-use-tool.ts diff --git a/packages/typescript/openai-compatible/src/tools/custom-tool.ts 
b/packages/typescript/ai-openai-compatible/src/tools/custom-tool.ts similarity index 100% rename from packages/typescript/openai-compatible/src/tools/custom-tool.ts rename to packages/typescript/ai-openai-compatible/src/tools/custom-tool.ts diff --git a/packages/typescript/openai-compatible/src/tools/file-search-tool.ts b/packages/typescript/ai-openai-compatible/src/tools/file-search-tool.ts similarity index 100% rename from packages/typescript/openai-compatible/src/tools/file-search-tool.ts rename to packages/typescript/ai-openai-compatible/src/tools/file-search-tool.ts diff --git a/packages/typescript/openai-compatible/src/tools/function-tool.ts b/packages/typescript/ai-openai-compatible/src/tools/function-tool.ts similarity index 100% rename from packages/typescript/openai-compatible/src/tools/function-tool.ts rename to packages/typescript/ai-openai-compatible/src/tools/function-tool.ts diff --git a/packages/typescript/openai-compatible/src/tools/image-generation-tool.ts b/packages/typescript/ai-openai-compatible/src/tools/image-generation-tool.ts similarity index 100% rename from packages/typescript/openai-compatible/src/tools/image-generation-tool.ts rename to packages/typescript/ai-openai-compatible/src/tools/image-generation-tool.ts diff --git a/packages/typescript/openai-compatible/src/tools/index.ts b/packages/typescript/ai-openai-compatible/src/tools/index.ts similarity index 100% rename from packages/typescript/openai-compatible/src/tools/index.ts rename to packages/typescript/ai-openai-compatible/src/tools/index.ts diff --git a/packages/typescript/openai-compatible/src/tools/local-shell-tool.ts b/packages/typescript/ai-openai-compatible/src/tools/local-shell-tool.ts similarity index 100% rename from packages/typescript/openai-compatible/src/tools/local-shell-tool.ts rename to packages/typescript/ai-openai-compatible/src/tools/local-shell-tool.ts diff --git a/packages/typescript/openai-compatible/src/tools/mcp-tool.ts b/packages/typescript/ai-openai-compatible/src/tools/mcp-tool.ts similarity index 100% rename from packages/typescript/openai-compatible/src/tools/mcp-tool.ts rename to packages/typescript/ai-openai-compatible/src/tools/mcp-tool.ts diff --git a/packages/typescript/openai-compatible/src/tools/shell-tool.ts b/packages/typescript/ai-openai-compatible/src/tools/shell-tool.ts similarity index 100% rename from packages/typescript/openai-compatible/src/tools/shell-tool.ts rename to packages/typescript/ai-openai-compatible/src/tools/shell-tool.ts diff --git a/packages/typescript/openai-compatible/src/tools/tool-choice.ts b/packages/typescript/ai-openai-compatible/src/tools/tool-choice.ts similarity index 100% rename from packages/typescript/openai-compatible/src/tools/tool-choice.ts rename to packages/typescript/ai-openai-compatible/src/tools/tool-choice.ts diff --git a/packages/typescript/openai-compatible/src/tools/tool-converter.ts b/packages/typescript/ai-openai-compatible/src/tools/tool-converter.ts similarity index 100% rename from packages/typescript/openai-compatible/src/tools/tool-converter.ts rename to packages/typescript/ai-openai-compatible/src/tools/tool-converter.ts diff --git a/packages/typescript/openai-compatible/src/tools/web-search-preview-tool.ts b/packages/typescript/ai-openai-compatible/src/tools/web-search-preview-tool.ts similarity index 100% rename from packages/typescript/openai-compatible/src/tools/web-search-preview-tool.ts rename to packages/typescript/ai-openai-compatible/src/tools/web-search-preview-tool.ts diff --git 
a/packages/typescript/openai-compatible/src/tools/web-search-tool.ts b/packages/typescript/ai-openai-compatible/src/tools/web-search-tool.ts similarity index 100% rename from packages/typescript/openai-compatible/src/tools/web-search-tool.ts rename to packages/typescript/ai-openai-compatible/src/tools/web-search-tool.ts diff --git a/packages/typescript/openai-compatible/src/types/config.ts b/packages/typescript/ai-openai-compatible/src/types/config.ts similarity index 100% rename from packages/typescript/openai-compatible/src/types/config.ts rename to packages/typescript/ai-openai-compatible/src/types/config.ts diff --git a/packages/typescript/openai-compatible/src/utils/client.ts b/packages/typescript/ai-openai-compatible/src/utils/client.ts similarity index 100% rename from packages/typescript/openai-compatible/src/utils/client.ts rename to packages/typescript/ai-openai-compatible/src/utils/client.ts diff --git a/packages/typescript/openai-compatible/src/utils/request-options.ts b/packages/typescript/ai-openai-compatible/src/utils/request-options.ts similarity index 100% rename from packages/typescript/openai-compatible/src/utils/request-options.ts rename to packages/typescript/ai-openai-compatible/src/utils/request-options.ts diff --git a/packages/typescript/openai-compatible/src/utils/schema-converter.ts b/packages/typescript/ai-openai-compatible/src/utils/schema-converter.ts similarity index 100% rename from packages/typescript/openai-compatible/src/utils/schema-converter.ts rename to packages/typescript/ai-openai-compatible/src/utils/schema-converter.ts diff --git a/packages/typescript/openai-compatible/tests/chat-completions-text.test.ts b/packages/typescript/ai-openai-compatible/tests/chat-completions-text.test.ts similarity index 100% rename from packages/typescript/openai-compatible/tests/chat-completions-text.test.ts rename to packages/typescript/ai-openai-compatible/tests/chat-completions-text.test.ts diff --git a/packages/typescript/openai-compatible/tests/mcp-tool.test.ts b/packages/typescript/ai-openai-compatible/tests/mcp-tool.test.ts similarity index 100% rename from packages/typescript/openai-compatible/tests/mcp-tool.test.ts rename to packages/typescript/ai-openai-compatible/tests/mcp-tool.test.ts diff --git a/packages/typescript/openai-compatible/tests/media-adapters.test.ts b/packages/typescript/ai-openai-compatible/tests/media-adapters.test.ts similarity index 100% rename from packages/typescript/openai-compatible/tests/media-adapters.test.ts rename to packages/typescript/ai-openai-compatible/tests/media-adapters.test.ts diff --git a/packages/typescript/openai-compatible/tests/responses-text.test.ts b/packages/typescript/ai-openai-compatible/tests/responses-text.test.ts similarity index 100% rename from packages/typescript/openai-compatible/tests/responses-text.test.ts rename to packages/typescript/ai-openai-compatible/tests/responses-text.test.ts diff --git a/packages/typescript/openai-compatible/tests/schema-converter.test.ts b/packages/typescript/ai-openai-compatible/tests/schema-converter.test.ts similarity index 100% rename from packages/typescript/openai-compatible/tests/schema-converter.test.ts rename to packages/typescript/ai-openai-compatible/tests/schema-converter.test.ts diff --git a/packages/typescript/openai-compatible/tsconfig.json b/packages/typescript/ai-openai-compatible/tsconfig.json similarity index 100% rename from packages/typescript/openai-compatible/tsconfig.json rename to packages/typescript/ai-openai-compatible/tsconfig.json diff --git 
a/packages/typescript/openai-compatible/vite.config.ts b/packages/typescript/ai-openai-compatible/vite.config.ts similarity index 100% rename from packages/typescript/openai-compatible/vite.config.ts rename to packages/typescript/ai-openai-compatible/vite.config.ts diff --git a/packages/typescript/ai-openai/package.json b/packages/typescript/ai-openai/package.json index 02ad11c55..d0ccb241b 100644 --- a/packages/typescript/ai-openai/package.json +++ b/packages/typescript/ai-openai/package.json @@ -44,8 +44,8 @@ "adapter" ], "dependencies": { + "@tanstack/ai-openai-compatible": "workspace:*", "@tanstack/ai-utils": "workspace:*", - "@tanstack/openai-compatible": "workspace:*", "openai": "^6.9.1" }, "peerDependencies": { diff --git a/packages/typescript/ai-openai/src/adapters/image.ts b/packages/typescript/ai-openai/src/adapters/image.ts index fc8e96b63..293637b91 100644 --- a/packages/typescript/ai-openai/src/adapters/image.ts +++ b/packages/typescript/ai-openai/src/adapters/image.ts @@ -1,4 +1,4 @@ -import { OpenAICompatibleImageAdapter } from '@tanstack/openai-compatible' +import { OpenAICompatibleImageAdapter } from '@tanstack/ai-openai-compatible' import { getOpenAIApiKeyFromEnv } from '../utils/client' import { validateImageSize, diff --git a/packages/typescript/ai-openai/src/adapters/summarize.ts b/packages/typescript/ai-openai/src/adapters/summarize.ts index f37da1c97..ad1f6c7a3 100644 --- a/packages/typescript/ai-openai/src/adapters/summarize.ts +++ b/packages/typescript/ai-openai/src/adapters/summarize.ts @@ -1,4 +1,4 @@ -import { OpenAICompatibleSummarizeAdapter } from '@tanstack/openai-compatible' +import { OpenAICompatibleSummarizeAdapter } from '@tanstack/ai-openai-compatible' import { getOpenAIApiKeyFromEnv } from '../utils/client' import { OpenAITextAdapter } from './text' import type { OpenAIChatModel } from '../model-meta' diff --git a/packages/typescript/ai-openai/src/adapters/text.ts b/packages/typescript/ai-openai/src/adapters/text.ts index d0b65a9ab..9646803d2 100644 --- a/packages/typescript/ai-openai/src/adapters/text.ts +++ b/packages/typescript/ai-openai/src/adapters/text.ts @@ -1,4 +1,4 @@ -import { OpenAICompatibleResponsesTextAdapter } from '@tanstack/openai-compatible' +import { OpenAICompatibleResponsesTextAdapter } from '@tanstack/ai-openai-compatible' import { validateTextProviderOptions } from '../text/text-provider-options' import { convertToolsToProviderFormat } from '../tools' import { getOpenAIApiKeyFromEnv } from '../utils/client' @@ -68,7 +68,7 @@ type ResolveToolCapabilities = * * Tree-shakeable adapter for OpenAI chat/text completion functionality. * Delegates implementation to {@link OpenAICompatibleResponsesTextAdapter} from - * `@tanstack/openai-compatible` and threads OpenAI-specific tool-capability typing + * `@tanstack/ai-openai-compatible` and threads OpenAI-specific tool-capability typing * through the 5th generic of the base class. 
*/ export class OpenAITextAdapter< diff --git a/packages/typescript/ai-openai/src/adapters/transcription.ts b/packages/typescript/ai-openai/src/adapters/transcription.ts index 0c198bbc4..ba54c0e08 100644 --- a/packages/typescript/ai-openai/src/adapters/transcription.ts +++ b/packages/typescript/ai-openai/src/adapters/transcription.ts @@ -1,4 +1,4 @@ -import { OpenAICompatibleTranscriptionAdapter } from '@tanstack/openai-compatible' +import { OpenAICompatibleTranscriptionAdapter } from '@tanstack/ai-openai-compatible' import { getOpenAIApiKeyFromEnv } from '../utils/client' import type { OpenAITranscriptionModel } from '../model-meta' import type { OpenAITranscriptionProviderOptions } from '../audio/transcription-provider-options' diff --git a/packages/typescript/ai-openai/src/adapters/tts.ts b/packages/typescript/ai-openai/src/adapters/tts.ts index da93b109e..dfb77bddc 100644 --- a/packages/typescript/ai-openai/src/adapters/tts.ts +++ b/packages/typescript/ai-openai/src/adapters/tts.ts @@ -1,4 +1,4 @@ -import { OpenAICompatibleTTSAdapter } from '@tanstack/openai-compatible' +import { OpenAICompatibleTTSAdapter } from '@tanstack/ai-openai-compatible' import { getOpenAIApiKeyFromEnv } from '../utils/client' import { validateAudioInput, diff --git a/packages/typescript/ai-openai/src/adapters/video.ts b/packages/typescript/ai-openai/src/adapters/video.ts index b22d47b75..3bc193e41 100644 --- a/packages/typescript/ai-openai/src/adapters/video.ts +++ b/packages/typescript/ai-openai/src/adapters/video.ts @@ -1,4 +1,4 @@ -import { OpenAICompatibleVideoAdapter } from '@tanstack/openai-compatible' +import { OpenAICompatibleVideoAdapter } from '@tanstack/ai-openai-compatible' import { getOpenAIApiKeyFromEnv } from '../utils/client' import { toApiSeconds, diff --git a/packages/typescript/ai-openai/src/tools/apply-patch-tool.ts b/packages/typescript/ai-openai/src/tools/apply-patch-tool.ts index 0e0967dce..0cf7ee8c8 100644 --- a/packages/typescript/ai-openai/src/tools/apply-patch-tool.ts +++ b/packages/typescript/ai-openai/src/tools/apply-patch-tool.ts @@ -1,11 +1,11 @@ -import { applyPatchTool as baseApplyPatchTool } from '@tanstack/openai-compatible' +import { applyPatchTool as baseApplyPatchTool } from '@tanstack/ai-openai-compatible' import type { ProviderTool } from '@tanstack/ai' export { type ApplyPatchToolConfig, type ApplyPatchTool, convertApplyPatchToolToAdapterFormat, -} from '@tanstack/openai-compatible' +} from '@tanstack/ai-openai-compatible' export type OpenAIApplyPatchTool = ProviderTool<'openai', 'apply_patch'> diff --git a/packages/typescript/ai-openai/src/tools/code-interpreter-tool.ts b/packages/typescript/ai-openai/src/tools/code-interpreter-tool.ts index 910a49289..f4d0cd5f0 100644 --- a/packages/typescript/ai-openai/src/tools/code-interpreter-tool.ts +++ b/packages/typescript/ai-openai/src/tools/code-interpreter-tool.ts @@ -1,12 +1,12 @@ -import { codeInterpreterTool as baseCodeInterpreterTool } from '@tanstack/openai-compatible' +import { codeInterpreterTool as baseCodeInterpreterTool } from '@tanstack/ai-openai-compatible' import type { ProviderTool } from '@tanstack/ai' -import type { CodeInterpreterToolConfig } from '@tanstack/openai-compatible' +import type { CodeInterpreterToolConfig } from '@tanstack/ai-openai-compatible' export { type CodeInterpreterToolConfig, type CodeInterpreterTool, convertCodeInterpreterToolToAdapterFormat, -} from '@tanstack/openai-compatible' +} from '@tanstack/ai-openai-compatible' export type OpenAICodeInterpreterTool = ProviderTool< 'openai', 
diff --git a/packages/typescript/ai-openai/src/tools/computer-use-tool.ts b/packages/typescript/ai-openai/src/tools/computer-use-tool.ts index 36b7d405b..9035d698c 100644 --- a/packages/typescript/ai-openai/src/tools/computer-use-tool.ts +++ b/packages/typescript/ai-openai/src/tools/computer-use-tool.ts @@ -1,12 +1,12 @@ -import { computerUseTool as baseComputerUseTool } from '@tanstack/openai-compatible' +import { computerUseTool as baseComputerUseTool } from '@tanstack/ai-openai-compatible' import type { ProviderTool } from '@tanstack/ai' -import type { ComputerUseToolConfig } from '@tanstack/openai-compatible' +import type { ComputerUseToolConfig } from '@tanstack/ai-openai-compatible' export { type ComputerUseToolConfig, type ComputerUseTool, convertComputerUseToolToAdapterFormat, -} from '@tanstack/openai-compatible' +} from '@tanstack/ai-openai-compatible' // The brand discriminator (`computer_use`) intentionally differs from the // runtime tool name (`computer_use_preview`). The brand matches the model-meta diff --git a/packages/typescript/ai-openai/src/tools/custom-tool.ts b/packages/typescript/ai-openai/src/tools/custom-tool.ts index 4865b0fd1..7067ec552 100644 --- a/packages/typescript/ai-openai/src/tools/custom-tool.ts +++ b/packages/typescript/ai-openai/src/tools/custom-tool.ts @@ -3,4 +3,4 @@ export { type CustomTool, convertCustomToolToAdapterFormat, customTool, -} from '@tanstack/openai-compatible' +} from '@tanstack/ai-openai-compatible' diff --git a/packages/typescript/ai-openai/src/tools/file-search-tool.ts b/packages/typescript/ai-openai/src/tools/file-search-tool.ts index c0dccb83a..84cedb669 100644 --- a/packages/typescript/ai-openai/src/tools/file-search-tool.ts +++ b/packages/typescript/ai-openai/src/tools/file-search-tool.ts @@ -1,12 +1,12 @@ -import { fileSearchTool as baseFileSearchTool } from '@tanstack/openai-compatible' +import { fileSearchTool as baseFileSearchTool } from '@tanstack/ai-openai-compatible' import type { ProviderTool } from '@tanstack/ai' -import type { FileSearchToolConfig } from '@tanstack/openai-compatible' +import type { FileSearchToolConfig } from '@tanstack/ai-openai-compatible' export { type FileSearchToolConfig, type FileSearchTool, convertFileSearchToolToAdapterFormat, -} from '@tanstack/openai-compatible' +} from '@tanstack/ai-openai-compatible' export type OpenAIFileSearchTool = ProviderTool<'openai', 'file_search'> diff --git a/packages/typescript/ai-openai/src/tools/function-tool.ts b/packages/typescript/ai-openai/src/tools/function-tool.ts index 092468bee..ae94e03d3 100644 --- a/packages/typescript/ai-openai/src/tools/function-tool.ts +++ b/packages/typescript/ai-openai/src/tools/function-tool.ts @@ -2,4 +2,4 @@ export { type FunctionToolConfig, type FunctionTool, convertFunctionToolToAdapterFormat, -} from '@tanstack/openai-compatible' +} from '@tanstack/ai-openai-compatible' diff --git a/packages/typescript/ai-openai/src/tools/image-generation-tool.ts b/packages/typescript/ai-openai/src/tools/image-generation-tool.ts index 8bdabea04..47e92d5a9 100644 --- a/packages/typescript/ai-openai/src/tools/image-generation-tool.ts +++ b/packages/typescript/ai-openai/src/tools/image-generation-tool.ts @@ -1,12 +1,12 @@ -import { imageGenerationTool as baseImageGenerationTool } from '@tanstack/openai-compatible' +import { imageGenerationTool as baseImageGenerationTool } from '@tanstack/ai-openai-compatible' import type { ProviderTool } from '@tanstack/ai' -import type { ImageGenerationToolConfig } from '@tanstack/openai-compatible' +import 
type { ImageGenerationToolConfig } from '@tanstack/ai-openai-compatible' export { type ImageGenerationToolConfig, type ImageGenerationTool, convertImageGenerationToolToAdapterFormat, -} from '@tanstack/openai-compatible' +} from '@tanstack/ai-openai-compatible' export type OpenAIImageGenerationTool = ProviderTool< 'openai', diff --git a/packages/typescript/ai-openai/src/tools/index.ts b/packages/typescript/ai-openai/src/tools/index.ts index 1a359bdfe..e9ffb399f 100644 --- a/packages/typescript/ai-openai/src/tools/index.ts +++ b/packages/typescript/ai-openai/src/tools/index.ts @@ -1,4 +1,4 @@ -export { type OpenAITool } from '@tanstack/openai-compatible' +export { type OpenAITool } from '@tanstack/ai-openai-compatible' export { applyPatchTool, diff --git a/packages/typescript/ai-openai/src/tools/local-shell-tool.ts b/packages/typescript/ai-openai/src/tools/local-shell-tool.ts index 296bd4e77..140f7ae1b 100644 --- a/packages/typescript/ai-openai/src/tools/local-shell-tool.ts +++ b/packages/typescript/ai-openai/src/tools/local-shell-tool.ts @@ -1,11 +1,11 @@ -import { localShellTool as baseLocalShellTool } from '@tanstack/openai-compatible' +import { localShellTool as baseLocalShellTool } from '@tanstack/ai-openai-compatible' import type { ProviderTool } from '@tanstack/ai' export { type LocalShellToolConfig, type LocalShellTool, convertLocalShellToolToAdapterFormat, -} from '@tanstack/openai-compatible' +} from '@tanstack/ai-openai-compatible' export type OpenAILocalShellTool = ProviderTool<'openai', 'local_shell'> diff --git a/packages/typescript/ai-openai/src/tools/mcp-tool.ts b/packages/typescript/ai-openai/src/tools/mcp-tool.ts index f3a97504f..aebe919fb 100644 --- a/packages/typescript/ai-openai/src/tools/mcp-tool.ts +++ b/packages/typescript/ai-openai/src/tools/mcp-tool.ts @@ -1,13 +1,13 @@ -import { mcpTool as baseMcpTool } from '@tanstack/openai-compatible' +import { mcpTool as baseMcpTool } from '@tanstack/ai-openai-compatible' import type { ProviderTool } from '@tanstack/ai' -import type { MCPToolConfig } from '@tanstack/openai-compatible' +import type { MCPToolConfig } from '@tanstack/ai-openai-compatible' export { type MCPToolConfig, type MCPTool, validateMCPtool, convertMCPToolToAdapterFormat, -} from '@tanstack/openai-compatible' +} from '@tanstack/ai-openai-compatible' export type OpenAIMCPTool = ProviderTool<'openai', 'mcp'> diff --git a/packages/typescript/ai-openai/src/tools/shell-tool.ts b/packages/typescript/ai-openai/src/tools/shell-tool.ts index 05a254dcb..1ca64208c 100644 --- a/packages/typescript/ai-openai/src/tools/shell-tool.ts +++ b/packages/typescript/ai-openai/src/tools/shell-tool.ts @@ -1,11 +1,11 @@ -import { shellTool as baseShellTool } from '@tanstack/openai-compatible' +import { shellTool as baseShellTool } from '@tanstack/ai-openai-compatible' import type { ProviderTool } from '@tanstack/ai' export { type ShellToolConfig, type ShellTool, convertShellToolToAdapterFormat, -} from '@tanstack/openai-compatible' +} from '@tanstack/ai-openai-compatible' export type OpenAIShellTool = ProviderTool<'openai', 'shell'> diff --git a/packages/typescript/ai-openai/src/tools/tool-choice.ts b/packages/typescript/ai-openai/src/tools/tool-choice.ts index a682f8898..04e2e7bb8 100644 --- a/packages/typescript/ai-openai/src/tools/tool-choice.ts +++ b/packages/typescript/ai-openai/src/tools/tool-choice.ts @@ -1 +1 @@ -export { type ToolChoice } from '@tanstack/openai-compatible' +export { type ToolChoice } from '@tanstack/ai-openai-compatible' diff --git 
a/packages/typescript/ai-openai/src/tools/tool-converter.ts b/packages/typescript/ai-openai/src/tools/tool-converter.ts index 17b938053..acc35f22f 100644 --- a/packages/typescript/ai-openai/src/tools/tool-converter.ts +++ b/packages/typescript/ai-openai/src/tools/tool-converter.ts @@ -1 +1 @@ -export { convertToolsToProviderFormat } from '@tanstack/openai-compatible' +export { convertToolsToProviderFormat } from '@tanstack/ai-openai-compatible' diff --git a/packages/typescript/ai-openai/src/tools/web-search-preview-tool.ts b/packages/typescript/ai-openai/src/tools/web-search-preview-tool.ts index 40b884a5d..0fb9e4ff3 100644 --- a/packages/typescript/ai-openai/src/tools/web-search-preview-tool.ts +++ b/packages/typescript/ai-openai/src/tools/web-search-preview-tool.ts @@ -1,12 +1,12 @@ -import { webSearchPreviewTool as baseWebSearchPreviewTool } from '@tanstack/openai-compatible' +import { webSearchPreviewTool as baseWebSearchPreviewTool } from '@tanstack/ai-openai-compatible' import type { ProviderTool } from '@tanstack/ai' -import type { WebSearchPreviewToolConfig } from '@tanstack/openai-compatible' +import type { WebSearchPreviewToolConfig } from '@tanstack/ai-openai-compatible' export { type WebSearchPreviewToolConfig, type WebSearchPreviewTool, convertWebSearchPreviewToolToAdapterFormat, -} from '@tanstack/openai-compatible' +} from '@tanstack/ai-openai-compatible' export type OpenAIWebSearchPreviewTool = ProviderTool< 'openai', diff --git a/packages/typescript/ai-openai/src/tools/web-search-tool.ts b/packages/typescript/ai-openai/src/tools/web-search-tool.ts index 8436c437c..a0e42fb66 100644 --- a/packages/typescript/ai-openai/src/tools/web-search-tool.ts +++ b/packages/typescript/ai-openai/src/tools/web-search-tool.ts @@ -1,12 +1,12 @@ -import { webSearchTool as baseWebSearchTool } from '@tanstack/openai-compatible' +import { webSearchTool as baseWebSearchTool } from '@tanstack/ai-openai-compatible' import type { ProviderTool } from '@tanstack/ai' -import type { WebSearchToolConfig } from '@tanstack/openai-compatible' +import type { WebSearchToolConfig } from '@tanstack/ai-openai-compatible' export { type WebSearchToolConfig, type WebSearchTool, convertWebSearchToolToAdapterFormat, -} from '@tanstack/openai-compatible' +} from '@tanstack/ai-openai-compatible' export type OpenAIWebSearchTool = ProviderTool<'openai', 'web_search'> diff --git a/packages/typescript/ai-openai/src/utils/client.ts b/packages/typescript/ai-openai/src/utils/client.ts index b4b46795f..c1f5a1c6b 100644 --- a/packages/typescript/ai-openai/src/utils/client.ts +++ b/packages/typescript/ai-openai/src/utils/client.ts @@ -1,5 +1,5 @@ import { getApiKeyFromEnv } from '@tanstack/ai-utils' -import type { OpenAICompatibleClientConfig } from '@tanstack/openai-compatible' +import type { OpenAICompatibleClientConfig } from '@tanstack/ai-openai-compatible' export interface OpenAIClientConfig extends OpenAICompatibleClientConfig {} diff --git a/packages/typescript/ai-openai/src/utils/schema-converter.ts b/packages/typescript/ai-openai/src/utils/schema-converter.ts index 518198ffb..d85cc81d0 100644 --- a/packages/typescript/ai-openai/src/utils/schema-converter.ts +++ b/packages/typescript/ai-openai/src/utils/schema-converter.ts @@ -1,5 +1,5 @@ import { transformNullsToUndefined } from '@tanstack/ai-utils' -import { makeStructuredOutputCompatible } from '@tanstack/openai-compatible' +import { makeStructuredOutputCompatible } from '@tanstack/ai-openai-compatible' export { transformNullsToUndefined } diff --git 
a/packages/typescript/ai-openrouter/package.json b/packages/typescript/ai-openrouter/package.json index 633bf4430..87faf4e9a 100644 --- a/packages/typescript/ai-openrouter/package.json +++ b/packages/typescript/ai-openrouter/package.json @@ -44,8 +44,8 @@ ], "dependencies": { "@openrouter/sdk": "0.12.14", - "@tanstack/ai-utils": "workspace:*", - "@tanstack/openai-compatible": "workspace:*" + "@tanstack/ai-openai-compatible": "workspace:*", + "@tanstack/ai-utils": "workspace:*" }, "devDependencies": { "@tanstack/ai": "workspace:*", diff --git a/packages/typescript/ai-openrouter/src/adapters/responses-text.ts b/packages/typescript/ai-openrouter/src/adapters/responses-text.ts index d937010cc..e454e5955 100644 --- a/packages/typescript/ai-openrouter/src/adapters/responses-text.ts +++ b/packages/typescript/ai-openrouter/src/adapters/responses-text.ts @@ -2,7 +2,7 @@ import { OpenRouter } from '@openrouter/sdk' import { OpenAICompatibleResponsesTextAdapter, convertFunctionToolToResponsesFormat, -} from '@tanstack/openai-compatible' +} from '@tanstack/ai-openai-compatible' import { isWebSearchTool } from '../tools/web-search-tool' import { getOpenRouterApiKeyFromEnv } from '../utils' import type { SDKOptions } from '@openrouter/sdk' @@ -19,7 +19,7 @@ import type { ResponseStreamEvent, ResponsesFunctionTool, ResponsesResponse, -} from '@tanstack/openai-compatible' +} from '@tanstack/ai-openai-compatible' import type { ContentPart, ModelMessage, TextOptions, Tool } from '@tanstack/ai' import type { ExternalResponsesProviderOptions } from '../text/responses-provider-options' import type { @@ -201,7 +201,7 @@ export class OpenRouterResponsesTextAdapter< | ResponsesRequest['input'] | undefined - // Reuse the openai-compatible function-tool converter. ResponsesFunctionTool + // Reuse the ai-openai-compatible function-tool converter. ResponsesFunctionTool // already matches OpenRouter's ResponsesRequestToolFunction shape: // `{ type:'function', name, parameters, description, strict }`. 
const tools: Array | undefined = options.tools diff --git a/packages/typescript/ai-openrouter/src/adapters/text.ts b/packages/typescript/ai-openrouter/src/adapters/text.ts index 1f374e5de..4bc902ae9 100644 --- a/packages/typescript/ai-openrouter/src/adapters/text.ts +++ b/packages/typescript/ai-openrouter/src/adapters/text.ts @@ -1,5 +1,5 @@ import { OpenRouter } from '@openrouter/sdk' -import { OpenAICompatibleChatCompletionsTextAdapter } from '@tanstack/openai-compatible' +import { OpenAICompatibleChatCompletionsTextAdapter } from '@tanstack/ai-openai-compatible' import { convertToolsToProviderFormat } from '../tools' import { getOpenRouterApiKeyFromEnv } from '../utils' import type { SDKOptions } from '@openrouter/sdk' @@ -8,7 +8,7 @@ import type { ChatCompletionChunk, ChatCompletionCreateParamsNonStreaming, ChatCompletionCreateParamsStreaming, -} from '@tanstack/openai-compatible' +} from '@tanstack/ai-openai-compatible' import type { ChatContentItems, ChatMessages, diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 4dad875dd..9162bff0e 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -642,7 +642,7 @@ importers: version: 7.3.1(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) vitest: specifier: ^4.0.14 - version: 4.1.4(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(@vitest/coverage-v8@4.0.14)(happy-dom@20.0.11)(jsdom@27.3.0(postcss@8.5.9))(vite@7.3.1(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + version: 4.1.4(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jsdom@27.3.0(postcss@8.5.9))(vite@7.3.1(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) web-vitals: specifier: ^5.1.0 version: 5.1.0 @@ -944,7 +944,7 @@ importers: version: 1.1.0 '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.1.4) + version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) zod: specifier: ^4.2.0 version: 4.2.1 @@ -963,7 +963,7 @@ importers: version: link:../ai '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.1.4) + version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) zod: specifier: ^4.2.0 version: 4.2.1 @@ -982,7 +982,7 @@ importers: version: 1.1.0 '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.1.4) + version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) vite: specifier: ^7.2.7 version: 7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) @@ -1001,7 +1001,7 @@ importers: version: link:../ai '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.1.4) + version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) zod: specifier: ^4.2.0 version: 4.3.6 @@ -1029,7 +1029,7 @@ importers: version: link:../ai-openai '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.1.4) + version: 
4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) commander: specifier: ^13.1.0 version: 13.1.0 @@ -1106,7 +1106,7 @@ importers: devDependencies: '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.1.4) + version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) jsdom: specifier: ^27.2.0 version: 27.3.0(postcss@8.5.9) @@ -1143,7 +1143,7 @@ importers: version: link:../ai-client '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.1.4) + version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) packages/typescript/ai-event-client: dependencies: @@ -1156,7 +1156,7 @@ importers: devDependencies: '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.1.4) + version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) packages/typescript/ai-fal: dependencies: @@ -1172,7 +1172,7 @@ importers: version: link:../ai '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.1.4) + version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) vite: specifier: ^7.2.7 version: 7.3.1(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) @@ -1191,7 +1191,7 @@ importers: version: link:../ai '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.1.4) + version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) vite: specifier: ^7.2.7 version: 7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) @@ -1201,12 +1201,12 @@ importers: packages/typescript/ai-grok: dependencies: + '@tanstack/ai-openai-compatible': + specifier: workspace:* + version: link:../ai-openai-compatible '@tanstack/ai-utils': specifier: workspace:* version: link:../ai-utils - '@tanstack/openai-compatible': - specifier: workspace:* - version: link:../openai-compatible zod: specifier: ^4.0.0 version: 4.3.6 @@ -1219,19 +1219,19 @@ importers: version: link:../ai-client '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.1.4) + version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) vite: specifier: ^7.2.7 version: 7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) packages/typescript/ai-groq: dependencies: + '@tanstack/ai-openai-compatible': + specifier: workspace:* + version: link:../ai-openai-compatible '@tanstack/ai-utils': specifier: workspace:* version: link:../ai-utils - '@tanstack/openai-compatible': - specifier: workspace:* - version: link:../openai-compatible zod: specifier: ^4.0.0 version: 4.3.6 @@ -1241,7 +1241,7 @@ importers: version: link:../ai '@vitest/coverage-v8': 
specifier: 4.0.14 - version: 4.0.14(vitest@4.1.4) + version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) vite: specifier: ^7.2.7 version: 7.3.1(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) @@ -1257,7 +1257,7 @@ importers: version: 4.20260317.1 '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.1.4) + version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) wrangler: specifier: ^4.88.0 version: 4.88.0(@cloudflare/workers-types@4.20260317.1) @@ -1273,7 +1273,7 @@ importers: devDependencies: '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.1.4) + version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) packages/typescript/ai-isolate-quickjs: dependencies: @@ -1286,7 +1286,7 @@ importers: devDependencies: '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.1.4) + version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) packages/typescript/ai-ollama: dependencies: @@ -1302,19 +1302,19 @@ importers: version: link:../ai '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.1.4) + version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) vite: specifier: ^7.2.7 version: 7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) packages/typescript/ai-openai: dependencies: + '@tanstack/ai-openai-compatible': + specifier: workspace:* + version: link:../ai-openai-compatible '@tanstack/ai-utils': specifier: workspace:* version: link:../ai-utils - '@tanstack/openai-compatible': - specifier: workspace:* - version: link:../openai-compatible openai: specifier: ^6.9.1 version: 6.10.0(ws@8.19.0)(zod@4.2.1) @@ -1327,7 +1327,7 @@ importers: version: link:../ai-client '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.1.4) + version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) vite: specifier: ^7.2.7 version: 7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) @@ -1335,24 +1335,43 @@ importers: specifier: ^4.2.0 version: 4.2.1 + packages/typescript/ai-openai-compatible: + dependencies: + '@tanstack/ai-utils': + specifier: workspace:* + version: link:../ai-utils + openai: + specifier: ^6.9.1 + version: 6.10.0(ws@8.19.0)(zod@4.3.6) + devDependencies: + '@tanstack/ai': + specifier: workspace:* + version: link:../ai + '@vitest/coverage-v8': + specifier: 4.0.14 + version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + vite: + specifier: ^7.2.7 + version: 
7.3.1(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) + packages/typescript/ai-openrouter: dependencies: '@openrouter/sdk': specifier: 0.12.14 version: 0.12.14 + '@tanstack/ai-openai-compatible': + specifier: workspace:* + version: link:../ai-openai-compatible '@tanstack/ai-utils': specifier: workspace:* version: link:../ai-utils - '@tanstack/openai-compatible': - specifier: workspace:* - version: link:../openai-compatible devDependencies: '@tanstack/ai': specifier: workspace:* version: link:../ai '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.1.4) + version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) vite: specifier: ^7.2.7 version: 7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) @@ -1374,7 +1393,7 @@ importers: version: 3.2.4(preact@10.28.2) '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.1.4) + version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) jsdom: specifier: ^27.2.0 version: 27.3.0(postcss@8.5.9) @@ -1402,7 +1421,7 @@ importers: version: 19.2.7 '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.1.4) + version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) jsdom: specifier: ^27.2.0 version: 27.3.0(postcss@8.5.9) @@ -1442,7 +1461,7 @@ importers: version: 19.2.7 '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.1.4) + version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) react: specifier: ^19.2.3 version: 19.2.3 @@ -1513,7 +1532,7 @@ importers: version: link:../ai-solid '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.1.4) + version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) solid-js: specifier: ^1.9.10 version: 1.9.10 @@ -1541,7 +1560,7 @@ importers: version: 24.10.3 '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.1.4) + version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) jsdom: specifier: ^27.2.0 version: 27.3.0(postcss@8.5.9) @@ -1565,7 +1584,7 @@ importers: version: 24.10.3 '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.1.4) + version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) vite: specifier: ^7.2.7 version: 7.3.1(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) @@ -1630,7 +1649,7 @@ importers: version: 6.0.3(vite@7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))(vue@3.5.25(typescript@5.9.3)) '@vitest/coverage-v8': specifier: 4.0.14 - version: 
4.0.14(vitest@4.1.4) + version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) vite: specifier: ^7.2.7 version: 7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) @@ -1641,25 +1660,6 @@ importers: specifier: ^2.2.10 version: 2.2.12(typescript@5.9.3) - packages/typescript/openai-compatible: - dependencies: - '@tanstack/ai-utils': - specifier: workspace:* - version: link:../ai-utils - openai: - specifier: ^6.9.1 - version: 6.10.0(ws@8.19.0)(zod@4.3.6) - devDependencies: - '@tanstack/ai': - specifier: workspace:* - version: link:../ai - '@vitest/coverage-v8': - specifier: 4.0.14 - version: 4.0.14(vitest@4.1.4) - vite: - specifier: ^7.2.7 - version: 7.3.1(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) - packages/typescript/preact-ai-devtools: dependencies: '@tanstack/ai-devtools-core': @@ -1674,7 +1674,7 @@ importers: devDependencies: '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.1.4) + version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) vite: specifier: ^7.2.7 version: 7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) @@ -1693,7 +1693,7 @@ importers: version: 19.2.7 '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.1.4) + version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) react: specifier: ^19.2.3 version: 19.2.3 @@ -1712,7 +1712,7 @@ importers: devDependencies: '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.1.4) + version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) solid-js: specifier: ^1.9.10 version: 1.9.10 @@ -6757,12 +6757,26 @@ packages: '@vitest/browser': optional: true + '@vitest/expect@4.0.14': + resolution: {integrity: sha512-RHk63V3zvRiYOWAV0rGEBRO820ce17hz7cI2kDmEdfQsBjT2luEKB5tCOc91u1oSQoUOZkSv3ZyzkdkSLD7lKw==} + '@vitest/expect@4.0.15': resolution: {integrity: sha512-Gfyva9/GxPAWXIWjyGDli9O+waHDC0Q0jaLdFP1qPAUUfo1FEXPXUfUkp3eZA0sSq340vPycSyOlYUeM15Ft1w==} '@vitest/expect@4.1.4': resolution: {integrity: sha512-iPBpra+VDuXmBFI3FMKHSFXp3Gx5HfmSCE8X67Dn+bwephCnQCaB7qWK2ldHa+8ncN8hJU8VTMcxjPpyMkUjww==} + '@vitest/mocker@4.0.14': + resolution: {integrity: sha512-RzS5NujlCzeRPF1MK7MXLiEFpkIXeMdQ+rN3Kk3tDI9j0mtbr7Nmuq67tpkOJQpgyClbOltCXMjLZicJHsH5Cg==} + peerDependencies: + msw: ^2.4.9 + vite: ^6.0.0 || ^7.0.0-0 + peerDependenciesMeta: + msw: + optional: true + vite: + optional: true + '@vitest/mocker@4.0.15': resolution: {integrity: sha512-CZ28GLfOEIFkvCFngN8Sfx5h+Se0zN+h4B7yOsPVCcgtiO7t5jt9xQh2E1UkFep+eb9fjyMfuC5gBypwb07fvQ==} peerDependencies: @@ -6794,18 +6808,27 @@ packages: '@vitest/pretty-format@4.1.4': resolution: {integrity: sha512-ddmDHU0gjEUyEVLxtZa7xamrpIefdEETu3nZjWtHeZX4QxqJ7tRxSteHVXJOcr8jhiLoGAhkK4WJ3WqBpjx42A==} + '@vitest/runner@4.0.14': + resolution: {integrity: sha512-BsAIk3FAqxICqREbX8SetIteT8PiaUL/tgJjmhxJhCsigmzzH8xeadtp7LRnTpCVzvf0ib9BgAfKJHuhNllKLw==} + '@vitest/runner@4.0.15': resolution: 
{integrity: sha512-+A+yMY8dGixUhHmNdPUxOh0la6uVzun86vAbuMT3hIDxMrAOmn5ILBHm8ajrqHE0t8R9T1dGnde1A5DTnmi3qw==} '@vitest/runner@4.1.4': resolution: {integrity: sha512-xTp7VZ5aXP5ZJrn15UtJUWlx6qXLnGtF6jNxHepdPHpMfz/aVPx+htHtgcAL2mDXJgKhpoo2e9/hVJsIeFbytQ==} + '@vitest/snapshot@4.0.14': + resolution: {integrity: sha512-aQVBfT1PMzDSA16Y3Fp45a0q8nKexx6N5Amw3MX55BeTeZpoC08fGqEZqVmPcqN0ueZsuUQ9rriPMhZ3Mu19Ag==} + '@vitest/snapshot@4.0.15': resolution: {integrity: sha512-A7Ob8EdFZJIBjLjeO0DZF4lqR6U7Ydi5/5LIZ0xcI+23lYlsYJAfGn8PrIWTYdZQRNnSRlzhg0zyGu37mVdy5g==} '@vitest/snapshot@4.1.4': resolution: {integrity: sha512-MCjCFgaS8aZz+m5nTcEcgk/xhWv0rEH4Yl53PPlMXOZ1/Ka2VcZU6CJ+MgYCZbcJvzGhQRjVrGQNZqkGPttIKw==} + '@vitest/spy@4.0.14': + resolution: {integrity: sha512-JmAZT1UtZooO0tpY3GRyiC/8W7dCs05UOq9rfsUUgEZEdq+DuHLmWhPsrTt0TiW7WYeL/hXpaE07AZ2RCk44hg==} + '@vitest/spy@4.0.15': resolution: {integrity: sha512-+EIjOJmnY6mIfdXtE/bnozKEvTC4Uczg19yeZ2vtCz5Yyb0QQ31QWVQ8hswJ3Ysx/K2EqaNsVanjr//2+P3FHw==} @@ -11829,6 +11852,40 @@ packages: vite: optional: true + vitest@4.0.14: + resolution: {integrity: sha512-d9B2J9Cm9dN9+6nxMnnNJKJCtcyKfnHj15N6YNJfaFHRLua/d3sRKU9RuKmO9mB0XdFtUizlxfz/VPbd3OxGhw==} + engines: {node: ^20.0.0 || ^22.0.0 || >=24.0.0} + hasBin: true + peerDependencies: + '@edge-runtime/vm': '*' + '@opentelemetry/api': ^1.9.0 + '@types/node': ^20.0.0 || ^22.0.0 || >=24.0.0 + '@vitest/browser-playwright': 4.0.14 + '@vitest/browser-preview': 4.0.14 + '@vitest/browser-webdriverio': 4.0.14 + '@vitest/ui': 4.0.14 + happy-dom: '*' + jsdom: '*' + peerDependenciesMeta: + '@edge-runtime/vm': + optional: true + '@opentelemetry/api': + optional: true + '@types/node': + optional: true + '@vitest/browser-playwright': + optional: true + '@vitest/browser-preview': + optional: true + '@vitest/browser-webdriverio': + optional: true + '@vitest/ui': + optional: true + happy-dom: + optional: true + jsdom: + optional: true + vitest@4.0.15: resolution: {integrity: sha512-n1RxDp8UJm6N0IbJLQo+yzLZ2sQCDyl1o0LeugbPWf8+8Fttp29GghsQBjYJVmWq3gBFfe9Hs1spR44vovn2wA==} engines: {node: ^20.0.0 || ^22.0.0 || >=24.0.0} @@ -17292,7 +17349,7 @@ snapshots: vite: 7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) vue: 3.5.25(typescript@5.9.3) - '@vitest/coverage-v8@4.0.14(vitest@4.0.15(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))': + '@vitest/coverage-v8@4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))': dependencies: '@bcoe/v8-coverage': 1.0.2 '@vitest/utils': 4.0.14 @@ -17305,11 +17362,11 @@ snapshots: obug: 2.1.1 std-env: 3.10.0 tinyrainbow: 3.1.0 - vitest: 4.0.15(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) + vitest: 4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) transitivePeerDependencies: - supports-color - '@vitest/coverage-v8@4.0.14(vitest@4.1.4)': + '@vitest/coverage-v8@4.0.14(vitest@4.0.15(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))': dependencies: 
'@bcoe/v8-coverage': 1.0.2 '@vitest/utils': 4.0.14 @@ -17322,10 +17379,19 @@ snapshots: obug: 2.1.1 std-env: 3.10.0 tinyrainbow: 3.1.0 - vitest: 4.1.4(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(@vitest/coverage-v8@4.0.14)(happy-dom@20.0.11)(jsdom@27.3.0(postcss@8.5.9))(vite@7.3.1(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + vitest: 4.0.15(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) transitivePeerDependencies: - supports-color + '@vitest/expect@4.0.14': + dependencies: + '@standard-schema/spec': 1.1.0 + '@types/chai': 5.2.3 + '@vitest/spy': 4.0.14 + '@vitest/utils': 4.0.14 + chai: 6.2.2 + tinyrainbow: 3.1.0 + '@vitest/expect@4.0.15': dependencies: '@standard-schema/spec': 1.1.0 @@ -17344,6 +17410,14 @@ snapshots: chai: 6.2.2 tinyrainbow: 3.1.0 + '@vitest/mocker@4.0.14(vite@7.3.1(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))': + dependencies: + '@vitest/spy': 4.0.14 + estree-walker: 3.0.3 + magic-string: 0.30.21 + optionalDependencies: + vite: 7.3.1(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) + '@vitest/mocker@4.0.15(vite@7.3.1(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))': dependencies: '@vitest/spy': 4.0.15 @@ -17372,6 +17446,11 @@ snapshots: dependencies: tinyrainbow: 3.1.0 + '@vitest/runner@4.0.14': + dependencies: + '@vitest/utils': 4.0.14 + pathe: 2.0.3 + '@vitest/runner@4.0.15': dependencies: '@vitest/utils': 4.0.15 @@ -17382,6 +17461,12 @@ snapshots: '@vitest/utils': 4.1.4 pathe: 2.0.3 + '@vitest/snapshot@4.0.14': + dependencies: + '@vitest/pretty-format': 4.0.14 + magic-string: 0.30.21 + pathe: 2.0.3 + '@vitest/snapshot@4.0.15': dependencies: '@vitest/pretty-format': 4.0.15 @@ -17395,6 +17480,8 @@ snapshots: magic-string: 0.30.21 pathe: 2.0.3 + '@vitest/spy@4.0.14': {} + '@vitest/spy@4.0.15': {} '@vitest/spy@4.1.4': {} @@ -23494,26 +23581,26 @@ snapshots: optionalDependencies: vite: 7.3.1(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) - vitest@4.0.15(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2): + vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2): dependencies: - '@vitest/expect': 4.0.15 - '@vitest/mocker': 4.0.15(vite@7.3.1(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) - '@vitest/pretty-format': 4.0.15 - '@vitest/runner': 4.0.15 - '@vitest/snapshot': 4.0.15 - '@vitest/spy': 4.0.15 - '@vitest/utils': 4.0.15 + '@vitest/expect': 4.0.14 + '@vitest/mocker': 4.0.14(vite@7.3.1(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + '@vitest/pretty-format': 4.0.14 + '@vitest/runner': 4.0.14 + '@vitest/snapshot': 4.0.14 + '@vitest/spy': 4.0.14 + '@vitest/utils': 4.0.14 es-module-lexer: 1.7.0 expect-type: 1.3.0 magic-string: 0.30.21 obug: 2.1.1 pathe: 2.0.3 - picomatch: 4.0.3 + picomatch: 4.0.4 std-env: 3.10.0 tinybench: 2.9.0 - tinyexec: 1.0.2 - tinyglobby: 0.2.15 - tinyrainbow: 3.0.3 + tinyexec: 0.3.2 + tinyglobby: 0.2.16 + tinyrainbow: 3.1.0 vite: 
7.3.1(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) why-is-node-running: 2.3.0 optionalDependencies: @@ -23534,36 +23621,45 @@ snapshots: - tsx - yaml - vitest@4.1.4(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(@vitest/coverage-v8@4.0.14)(happy-dom@20.0.11)(jsdom@27.3.0(postcss@8.5.9))(vite@7.3.1(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)): + vitest@4.0.15(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2): dependencies: - '@vitest/expect': 4.1.4 - '@vitest/mocker': 4.1.4(vite@7.3.1(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) - '@vitest/pretty-format': 4.1.4 - '@vitest/runner': 4.1.4 - '@vitest/snapshot': 4.1.4 - '@vitest/spy': 4.1.4 - '@vitest/utils': 4.1.4 - es-module-lexer: 2.0.0 + '@vitest/expect': 4.0.15 + '@vitest/mocker': 4.0.15(vite@7.3.1(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + '@vitest/pretty-format': 4.0.15 + '@vitest/runner': 4.0.15 + '@vitest/snapshot': 4.0.15 + '@vitest/spy': 4.0.15 + '@vitest/utils': 4.0.15 + es-module-lexer: 1.7.0 expect-type: 1.3.0 magic-string: 0.30.21 obug: 2.1.1 pathe: 2.0.3 - picomatch: 4.0.4 - std-env: 4.0.0 + picomatch: 4.0.3 + std-env: 3.10.0 tinybench: 2.9.0 - tinyexec: 1.1.1 - tinyglobby: 0.2.16 - tinyrainbow: 3.1.0 + tinyexec: 1.0.2 + tinyglobby: 0.2.15 + tinyrainbow: 3.0.3 vite: 7.3.1(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) why-is-node-running: 2.3.0 optionalDependencies: '@opentelemetry/api': 1.9.1 '@types/node': 24.10.3 - '@vitest/coverage-v8': 4.0.14(vitest@4.1.4) happy-dom: 20.0.11 jsdom: 27.3.0(postcss@8.5.9) transitivePeerDependencies: + - jiti + - less + - lightningcss - msw + - sass + - sass-embedded + - stylus + - sugarss + - terser + - tsx + - yaml vitest@4.1.4(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jsdom@27.3.0(postcss@8.5.9))(vite@7.3.1(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)): dependencies: @@ -23594,7 +23690,6 @@ snapshots: jsdom: 27.3.0(postcss@8.5.9) transitivePeerDependencies: - msw - optional: true vscode-uri@3.1.0: {} From 813e29663e1a90f711d2d60be9f9532c5cebdf0e Mon Sep 17 00:00:00 2001 From: Tom Beckenham <34339192+tombeckenham@users.noreply.github.com> Date: Wed, 13 May 2026 10:52:08 +1000 Subject: [PATCH 26/49] docs(ai-openai-compatible, ai-openrouter): explain the protocol-vs-product framing MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Rewrite the `@tanstack/ai-openai-compatible` README to lead with the thinking — OpenAI authored two wire formats (Chat Completions and Responses) that many vendors implement, so the package contains the shared logic for talking to any server that speaks one of those wire formats, not "the base for OpenAI." Add a side-by-side table for "what goes here vs in @tanstack/ai-openai" and a contributor rule of thumb ("a field belongs here only if at least two compatible providers support it"). Expand the leading docstrings on both OpenRouter text adapters to explicitly answer "why does this extend from @tanstack/ai-openai-compatible?" 
— OpenRouter implements OpenAI's wire formats verbatim (Chat Completions natively, Responses as a beta routing layer that fans out to Claude/Gemini/etc.), so the shared base lets us inherit ~1k LOC of stream accumulation, partial-JSON buffering, AG-UI lifecycle, and structured-output coercion rather than duplicating it. No code change. Co-Authored-By: Claude Opus 4.7 (1M context) --- .../typescript/ai-openai-compatible/README.md | 145 ++++++++++++------ .../src/adapters/responses-text.ts | 18 ++- .../ai-openrouter/src/adapters/text.ts | 20 ++- 3 files changed, 128 insertions(+), 55 deletions(-) diff --git a/packages/typescript/ai-openai-compatible/README.md b/packages/typescript/ai-openai-compatible/README.md index 13aac0a99..5fff259fd 100644 --- a/packages/typescript/ai-openai-compatible/README.md +++ b/packages/typescript/ai-openai-compatible/README.md @@ -1,70 +1,121 @@ # @tanstack/ai-openai-compatible -Shared protocol adapters for OpenAI-compatible providers in TanStack AI. +Shared adapters for providers that implement OpenAI's wire-format protocols. -> Renamed from `@tanstack/openai-base` in 0.3.0. The "base" name implied this package -> tracked OpenAI's product roadmap; in fact it implements two OpenAI-shaped _wire -> formats_ that multiple providers ship — see below. +> Renamed from `@tanstack/openai-base` in 0.3.0. The "base" name was misleading. +> See [Why this package exists](#why-this-package-exists). -## What this package is +## TL;DR -This package holds the shared implementation of the two OpenAI-compatible -wire-format protocols: +OpenAI authored two wire formats — `/v1/chat/completions` and `/v1/responses` — +that other vendors have implemented to varying degrees. This package contains +the shared logic for talking to **any** server that speaks one of those wire +formats. OpenAI is one such server. OpenRouter, Groq, Grok, vLLM, SGLang, +Together, Ollama's compat layer, Fireworks, and others are too. -- **Chat Completions** (`/v1/chat/completions`) — mature, natively implemented by - OpenAI, Groq, Grok, OpenRouter, vLLM, SGLang, Together, Ollama (compat layer), - and many others. -- **Responses** (`/v1/responses`) — newer; OpenAI's reference implementation plus - OpenRouter's beta routing implementation (which fans out to Anthropic, Google, - etc. under the hood). Younger protocol, fewer native implementers today. - -Both are exposed as abstract classes that providers subclass: +The package holds two abstract classes: - `OpenAICompatibleChatCompletionsTextAdapter` - `OpenAICompatibleResponsesTextAdapter` -Subclasses customize SDK-shape variance via a small set of protected hook -methods: `callChatCompletion`, `callChatCompletionStream`, `extractReasoning`, -`convertMessage`, `mapOptionsToRequest`, `transformStructuredOutput`, -`makeStructuredOutputCompatible`, `processStreamChunks` (and the equivalent set -on the Responses adapter). +Provider packages (`@tanstack/ai-openai`, `@tanstack/ai-openrouter`, +`@tanstack/ai-groq`, `@tanstack/ai-grok`) subclass these and override a small +set of protected hooks for SDK-shape variance. + +## Why this package exists + +The old name, `@tanstack/openai-base`, implied that OpenAI's evolving API +*was* the contract — that everyone else inherits from OpenAI. That framing +broke down in two ways: + +1. **OpenAI doesn't define the protocol; the ecosystem does.** Many providers + ship `/v1/chat/completions` as their native API (Groq, Together, vLLM, + SGLang, Fireworks, Ollama's compat layer). 
When OpenAI ships a new field + that no other provider supports, that field belongs to *OpenAI's product*, + not to the protocol. +2. **The Responses API has the same shape.** OpenRouter's beta Responses + endpoint routes requests with OpenAI's Responses wire format to Claude, + Gemini, and other underlying models. So Responses is also a multi-vendor + protocol, not an OpenAI-only product surface. -## What this package is not +Calling the protocol "OpenAI-compatible" matches the actual industry term — +Vercel publishes `@ai-sdk/openai-compatible`, BentoML and Lightning AI docs +use the same phrase, LiteLLM calls them "OpenAI-compatible endpoints." There +is no neutral standard name; the protocol is named after the vendor who +originally shipped it. -It is **not** the base for OpenAI's evolving product surface. OpenAI-specific -tools (e.g. `web_search_preview`, `code_interpreter`, `local_shell`), -OpenAI-only models, and OpenAI's product behaviors live in -[`@tanstack/ai-openai`](../ai-openai), not here. +## What goes here vs. in `@tanstack/ai-openai` -The distinction matters because it tells contributors where to add things: +| Belongs in `@tanstack/ai-openai-compatible` | Belongs in `@tanstack/ai-openai` | +| ------------------------------------------- | -------------------------------- | +| Logic for the Chat Completions wire format | OpenAI-specific tool types (`web_search_preview`, `code_interpreter`, `local_shell`, `apply_patch`, `computer_use`, `mcp`, …) | +| Logic for the Responses wire format | OpenAI model metadata, model lists, capability matrices | +| Streaming chunk assembly, AG-UI lifecycle, partial-JSON tool-arg buffering, tool-call deduplication | OpenAI-only request/response fields that no other vendor supports | +| Schema converters and structured-output coercion that all OpenAI-compatible servers accept | OpenAI's media adapters (image/TTS/video/transcription) that other providers don't implement | -- Adding a field to a class in this package is a claim that the field is - supported by **multiple** OpenAI-compatible providers (not just OpenAI). - Otherwise it belongs as an override or extension in the provider's own - package. -- If OpenAI ships a new field that no other provider supports yet, it goes in - `@tanstack/ai-openai` and is plumbed into this base only once a second - provider has adopted it. +**Rule of thumb**: if you'd add a field here, it should be supported by at +least two OpenAI-compatible providers. Otherwise it belongs in the +provider's own package, plumbed in via a subclass override or a hook. + +## How providers extend the bases + +Subclasses customize SDK-shape variance via a small set of protected hook +methods: + +- `callChatCompletion`, `callChatCompletionStream` — substitute a different + SDK or HTTP client (OpenRouter uses `@openrouter/sdk` here; OpenAI and + Groq use the OpenAI SDK with a `baseURL` override). +- `convertMessage`, `mapOptionsToRequest` — bridge request-shape differences + (camelCase vs snake_case, additional provider fields). +- `extractReasoning` — surface a provider's reasoning channel into the + shared `REASONING_*` lifecycle. +- `transformStructuredOutput`, `makeStructuredOutputCompatible` — + adjust structured-output handling for provider quirks. +- `processStreamChunks` — wrap the shared chunk processor for last-mile + fixups (e.g. Groq's `x_groq.usage` → `chunk.usage`). + +Each provider typically overrides 2–6 hooks and inherits everything else. 
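To make the hook surface concrete, here is a minimal sketch of a hypothetical subclass for a self-hosted gateway. Only the hook name (`processStreamChunks`), the base class, the `ChatCompletionChunk` export, and the "promote a vendor usage slot to `chunk.usage`" pattern come from the docs above; the signatures, generics, and the `x_mygateway` field are simplified assumptions, not the package's real types.

```ts
// A minimal sketch, assuming simplified hook signatures. The hook name and
// the standard `chunk.usage` slot are from the docs above; everything else
// (the `x_mygateway` field, the exact types) is hypothetical.
import { OpenAICompatibleChatCompletionsTextAdapter } from '@tanstack/ai-openai-compatible'
import type { ChatCompletionChunk } from '@tanstack/ai-openai-compatible'

class MyGatewayTextAdapter extends OpenAICompatibleChatCompletionsTextAdapter {
  // Last-mile chunk fixup, in the spirit of ai-groq promoting
  // `chunk.x_groq.usage` into the standard `chunk.usage` slot.
  protected async *processStreamChunks(
    chunks: AsyncIterable<ChatCompletionChunk>,
  ): AsyncIterable<ChatCompletionChunk> {
    for await (const chunk of chunks) {
      const vendorUsage = (chunk as any).x_mygateway?.usage
      yield vendorUsage && !chunk.usage ? { ...chunk, usage: vendorUsage } : chunk
    }
  }
}
```

Everything else (the stream accumulator, partial-JSON tool-arg buffering, AG-UI lifecycle, RUN_ERROR taxonomy) is inherited unchanged.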
## Architecture context Every text adapter in TanStack AI — regardless of provider — emits [AG-UI](https://github.com/CopilotKit/ag-ui) events (`RUN_STARTED`, -`TEXT_MESSAGE_*`, `TOOL_CALL_*`, `RUN_FINISHED`, …) as its output stream. That -is the _universal_ unification. - -Input protocols are different. The OpenAI-compatible family (covered by this -package) has many implementers and warrants a shared base. Anthropic, Google -Gemini, and Ollama have single-provider input protocols and their adapters -extend `BaseTextAdapter` from `@tanstack/ai` directly — no compatible base -exists because no compatible family exists. +`TEXT_MESSAGE_*`, `TOOL_CALL_*`, `RUN_FINISHED`, …) as its output stream. +That is the _universal_ unification. + +Input protocols are different. The OpenAI-compatible family (this package) +has many implementers and warrants shared classes. Anthropic, Google Gemini, +and Ollama have single-provider input protocols, so their adapters extend +`BaseTextAdapter` from `@tanstack/ai` directly — no compatible base exists +because no compatible family exists. + +``` +@tanstack/ai +└── BaseTextAdapter (abstract — emits AG-UI events) + │ + ├── @tanstack/ai-openai-compatible::OpenAICompatibleChatCompletionsTextAdapter + │ ├── ai-openai (chat-completions side) + │ ├── ai-openrouter + │ ├── ai-groq + │ └── ai-grok + │ + ├── @tanstack/ai-openai-compatible::OpenAICompatibleResponsesTextAdapter + │ ├── ai-openai (primary text adapter) + │ └── ai-openrouter (beta — routes to any underlying model) + │ + ├── ai-anthropic::AnthropicTextAdapter extends BaseTextAdapter directly + ├── ai-gemini::GeminiTextAdapter extends BaseTextAdapter directly + └── ai-ollama::OllamaTextAdapter extends BaseTextAdapter directly +``` ## Direct use Most users don't import from this package directly; they install a provider -package (`@tanstack/ai-openai`, `@tanstack/ai-openrouter`, -`@tanstack/ai-groq`, `@tanstack/ai-grok`) which extends the bases here. - -If you're building a custom OpenAI-compatible provider adapter (e.g. for vLLM, -Together, Fireworks), you can extend the bases from this package directly. See -the existing providers as worked examples. +package and the adapter from there does the work. + +If you're building an adapter for a new OpenAI-compatible provider (vLLM, +Together, Fireworks, a self-hosted gateway, …), import the abstract +adapters from this package and subclass them. The existing providers are +worked examples — `@tanstack/ai-grok` is the simplest (xAI's API is a +direct OpenAI clone), `@tanstack/ai-openrouter` is the most heavily +overridden (different SDK, camelCase fields, multi-provider routing). diff --git a/packages/typescript/ai-openrouter/src/adapters/responses-text.ts b/packages/typescript/ai-openrouter/src/adapters/responses-text.ts index e454e5955..f0e9fdfb0 100644 --- a/packages/typescript/ai-openrouter/src/adapters/responses-text.ts +++ b/packages/typescript/ai-openrouter/src/adapters/responses-text.ts @@ -54,10 +54,20 @@ type ResolveToolCapabilities = /** * OpenRouter Responses (beta) Adapter. * - * Extends the OpenAI Responses base so the streaming event lifecycle, - * structured-output flow, tool-call accumulator, and RUN_ERROR taxonomy are - * shared with the rest of the OpenAI-Responses-compatible providers (OpenAI, - * Azure, …).
+ * Why this extends `OpenAICompatibleResponsesTextAdapter` from + * `@tanstack/ai-openai-compatible`: + * + * OpenRouter's `/v1/responses` (beta) endpoint accepts OpenAI's Responses + * wire format and fans out to any underlying model — including Anthropic + * Claude and Google Gemini, neither of which has a native Responses + * endpoint. That makes Responses a multi-vendor protocol from OpenRouter's + * perspective, not an OpenAI-only product, and the shared compatible base + * is the right place for the streaming event lifecycle, structured-output + * flow, tool-call accumulator, and RUN_ERROR taxonomy that any Responses + * implementer needs. If we duplicated that here we'd ship the same ~1.2k + * LOC in OpenRouter and OpenAI separately and have to keep them in sync. + * + * What's different about OpenRouter (and why we still need overrides): * * The wire format is OpenAI-Responses-compatible, but the `@openrouter/sdk` * SDK exposes a different call shape — `client.beta.responses.send diff --git a/packages/typescript/ai-openrouter/src/adapters/text.ts b/packages/typescript/ai-openrouter/src/adapters/text.ts index 4bc902ae9..bf6089479 100644 --- a/packages/typescript/ai-openrouter/src/adapters/text.ts +++ b/packages/typescript/ai-openrouter/src/adapters/text.ts @@ -52,16 +52,28 @@ type ResolveToolCapabilities = /** * OpenRouter Text (Chat) Adapter. * - * Extends the OpenAI Chat Completions base so it shares the stream - * accumulator, partial-JSON tool-call buffer, RUN_ERROR taxonomy, and - * lifecycle gates with the rest of the OpenAI-compatible providers. + * Why this extends `OpenAICompatibleChatCompletionsTextAdapter` from + * `@tanstack/ai-openai-compatible`: + * + * OpenRouter's `/v1/chat/completions` endpoint implements OpenAI's Chat + * Completions wire format verbatim (it's how OpenRouter routes a single + * client request to GPT, Claude, Gemini, Llama, etc.). Extending the shared + * compatible base means we inherit ~1k LOC of stream accumulation, + * partial-JSON tool-call buffering, AG-UI lifecycle emission, RUN_ERROR + * taxonomy, and structured-output coercion that every OpenAI-compatible + * provider needs — without copy-pasting it. The compatible package is + * deliberately not "the OpenAI adapter"; it is the shared implementation of + * the wire-format protocol that OpenAI, OpenRouter, Groq, Grok, vLLM, + * SGLang, and others all speak. + * + * What's different about OpenRouter (and why we still need overrides): * * The wire format is identical to OpenAI's Chat Completions, but the * `@openrouter/sdk` SDK exposes a different call shape — `client.chat.send * ({ chatRequest })` with camelCase fields. We override the two SDK-call * hooks (`callChatCompletion` / `callChatCompletionStream`) to bridge that, * plus a small chunk-shape adapter on the way back, and `extractReasoning` - * to surface OpenRouter's reasoning deltas through the base's REASONING_* + * to surface OpenRouter's reasoning deltas through the shared REASONING_* * lifecycle. 
* * Behaviour preserved from the pre-migration implementation: From c4070ae8cbb65ca53e0802d431fb538dc0f67f75 Mon Sep 17 00:00:00 2001 From: "autofix-ci[bot]" <114827586+autofix-ci[bot]@users.noreply.github.com> Date: Wed, 13 May 2026 00:53:12 +0000 Subject: [PATCH 27/49] ci: apply automated fixes --- .../typescript/ai-openai-compatible/README.md | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/packages/typescript/ai-openai-compatible/README.md b/packages/typescript/ai-openai-compatible/README.md index 5fff259fd..d9345634f 100644 --- a/packages/typescript/ai-openai-compatible/README.md +++ b/packages/typescript/ai-openai-compatible/README.md @@ -25,13 +25,13 @@ set of protected hooks for SDK-shape variance. ## Why this package exists The old name, `@tanstack/openai-base`, implied that OpenAI's evolving API -*was* the contract — that everyone else inherits from OpenAI. That framing +_was_ the contract — that everyone else inherits from OpenAI. That framing broke down in two ways: 1. **OpenAI doesn't define the protocol; the ecosystem does.** Many providers ship `/v1/chat/completions` as their native API (Groq, Together, vLLM, SGLang, Fireworks, Ollama's compat layer). When OpenAI ships a new field - that no other provider supports, that field belongs to *OpenAI's product*, + that no other provider supports, that field belongs to _OpenAI's product_, not to the protocol. 2. **The Responses API has the same shape.** OpenRouter's beta Responses endpoint routes requests with OpenAI's Responses wire format to Claude, @@ -46,12 +46,12 @@ originally shipped it. ## What goes here vs. in `@tanstack/ai-openai` -| Belongs in `@tanstack/ai-openai-compatible` | Belongs in `@tanstack/ai-openai` | -| ------------------------------------------- | -------------------------------- | -| Logic for the Chat Completions wire format | OpenAI-specific tool types (`web_search_preview`, `code_interpreter`, `local_shell`, `apply_patch`, `computer_use`, `mcp`, …) | -| Logic for the Responses wire format | OpenAI model metadata, model lists, capability matrices | -| Streaming chunk assembly, AG-UI lifecycle, partial-JSON tool-arg buffering, tool-call deduplication | OpenAI-only request/response fields that no other vendor supports | -| Schema converters and structured-output coercion that all OpenAI-compatible servers accept | OpenAI's media adapters (image/TTS/video/transcription) that other providers don't implement | +| Belongs in `@tanstack/ai-openai-compatible` | Belongs in `@tanstack/ai-openai` | +| --------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------- | +| Logic for the Chat Completions wire format | OpenAI-specific tool types (`web_search_preview`, `code_interpreter`, `local_shell`, `apply_patch`, `computer_use`, `mcp`, …) | +| Logic for the Responses wire format | OpenAI model metadata, model lists, capability matrices | +| Streaming chunk assembly, AG-UI lifecycle, partial-JSON tool-arg buffering, tool-call deduplication | OpenAI-only request/response fields that no other vendor supports | +| Schema converters and structured-output coercion that all OpenAI-compatible servers accept | OpenAI's media adapters (image/TTS/video/transcription) that other providers don't implement | **Rule of thumb**: if you'd add a field here, it should be supported by at least two OpenAI-compatible providers. 
Otherwise it belongs in the From 72c8aeadffaaf250ff56e45203fff10891102e6e Mon Sep 17 00:00:00 2001 From: Tom Beckenham <34339192+tombeckenham@users.noreply.github.com> Date: Wed, 13 May 2026 10:54:48 +1000 Subject: [PATCH 28/49] docs(adapters/openrouter): add Chat Completions vs Responses (beta) section MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The OpenRouter adapter package now ships two adapters (`openRouterText` for /v1/chat/completions and `openRouterResponsesText` for /v1/responses beta). Document the difference for consumers: both route to any underlying model in the catalogue (Anthropic, Google, Meta, …); the wire format describes the client → OpenRouter call, not which provider answers. Add a side-by-side table, a basic example for the Responses adapter, and beta caveats (no branded server-tools yet; prefer the chat-completions adapter if in doubt). No mention of the internal shared-base package — that's an implementation detail consumers don't need to track. Co-Authored-By: Claude Opus 4.7 (1M context) --- docs/adapters/openrouter.md | 37 ++++++++++++++++++++++++++++++++++++- 1 file changed, 36 insertions(+), 1 deletion(-) diff --git a/docs/adapters/openrouter.md b/docs/adapters/openrouter.md index c61fcff96..2bcb792bd 100644 --- a/docs/adapters/openrouter.md +++ b/docs/adapters/openrouter.md @@ -133,7 +133,42 @@ const stream = chat({ }, }); ``` - + +## Chat Completions vs Responses (beta) + +OpenRouter exposes two OpenAI-compatible wire formats, and the adapter +package ships one of each: + +| Adapter | Endpoint | Status | When to use | +| -------------------------- | ------------------------- | -------- | ---------------------------------------------------------------------------- | +| `openRouterText` | `/v1/chat/completions` | Stable | Default for almost everything. Broadest model + tool support. | +| `openRouterResponsesText` | `/v1/responses` | Beta | OpenAI Responses-shaped request/response; richer multi-turn state on OpenAI-style models. | + +Both adapters route to any underlying model OpenRouter supports +(`anthropic/...`, `google/...`, `meta-llama/...`, etc.) — the wire format +describes how your client talks to OpenRouter, not which provider answers. +`/v1/responses` is OpenAI's newer API surface; OpenRouter implements it so +clients that prefer that wire format can use it across the same 300+ +model catalogue. + +```typescript +import { chat } from "@tanstack/ai"; +import { openRouterResponsesText } from "@tanstack/ai-openrouter"; + +const stream = chat({ + adapter: openRouterResponsesText("anthropic/claude-sonnet-4.5"), + messages: [{ role: "user", content: "Hello!" }], +}); +``` + +Caveats while the Responses adapter is in beta: + +- Function tools are supported; OpenRouter's branded server-tools (web + search, file search, …) are not yet wired through this path — use + `openRouterText` if you need those. +- If in doubt, prefer `openRouterText`. The Chat Completions endpoint has + broader provider coverage and feature parity today. 
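Switching between the two is a one-line adapter swap. A sketch reusing the model id from the example above (illustrative only; both imports are shown in this document's docs diff):

```typescript
import { chat } from "@tanstack/ai";
import {
  openRouterText,
  openRouterResponsesText,
} from "@tanstack/ai-openrouter";

// Same model id and messages; only the client → OpenRouter wire format differs.
const messages = [{ role: "user" as const, content: "Hello!" }];

const viaChatCompletions = chat({
  adapter: openRouterText("anthropic/claude-sonnet-4.5"),
  messages,
});

const viaResponses = chat({
  adapter: openRouterResponsesText("anthropic/claude-sonnet-4.5"),
  messages,
});
```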
+ ## Next Steps - [Getting Started](../getting-started/quick-start) - Learn the basics From dad9e553ea33000c49d6649823a3d6e0464d7313 Mon Sep 17 00:00:00 2001 From: Tom Beckenham <34339192+tombeckenham@users.noreply.github.com> Date: Wed, 13 May 2026 13:09:55 +1000 Subject: [PATCH 29/49] refactor(ai-openai-compatible): narrow to chat/responses; decouple from openai SDK MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Make `OpenAICompatibleChatCompletionsTextAdapter` and `OpenAICompatibleResponsesTextAdapter` abstract. Subclasses now own SDK client construction and implement the `callChatCompletion*` / `callResponse*` hooks. The base never imports `openai` at runtime — only as types — so `dist/esm/*.js` is openai-free and the package's `openai` dep moves to optional `peerDependencies` + `devDependencies`. Delete image/tts/transcription/video bases (single-user; only ai-openai extended them, so they're now standalone classes there). Move summarize to `@tanstack/ai` core as `ChatStreamSummarizeAdapter` — it's protocol-agnostic, wraps any `ChatStreamCapable`. Provider-specific `*SummarizeAdapter` classes deleted, replaced by thin factory functions returning `ChatStreamSummarizeAdapter` directly. ai-grok duplicates its image adapter standalone (~150 LOC; shared base wasn't worth the indirection for thin SDK wrappers). Also fix the round-3 double `TOOL_CALL_END` regression in the Responses adapter: `function_call_arguments.done` now gates on `!metadata.ended` so the output_item.done backfill path can't emit a duplicate close. Regression test added. Co-Authored-By: Claude Opus 4.7 (1M context) --- .../migrate-groq-openrouter-to-openai-base.md | 2 +- docs/adapters/openrouter.md | 26 +- packages/typescript/ai-grok/package.json | 3 +- .../typescript/ai-grok/src/adapters/image.ts | 143 ++--- .../ai-grok/src/adapters/summarize.ts | 77 +-- .../typescript/ai-grok/src/adapters/text.ts | 21 +- packages/typescript/ai-grok/src/index.ts | 3 +- .../typescript/ai-grok/src/utils/client.ts | 10 +- packages/typescript/ai-groq/package.json | 3 +- .../typescript/ai-groq/src/adapters/text.ts | 21 +- .../typescript/ai-groq/src/utils/client.ts | 12 +- .../typescript/ai-openai-compatible/README.md | 10 +- .../ai-openai-compatible/package.json | 12 +- .../src/adapters/chat-completions-text.ts | 88 +-- .../src/adapters/image.ts | 158 ----- .../src/adapters/responses-text.ts | 387 ++++++++++--- .../src/adapters/transcription.ts | 194 ------- .../ai-openai-compatible/src/adapters/tts.ts | 124 ---- .../src/adapters/video.ts | 385 ------------- .../ai-openai-compatible/src/index.ts | 22 +- .../ai-openai-compatible/src/types/config.ts | 5 - .../ai-openai-compatible/src/utils/client.ts | 8 - .../tests/chat-completions-text.test.ts | 90 +-- .../tests/media-adapters.test.ts | 367 ------------ .../tests/responses-text.test.ts | 540 ++++++++++++++++-- .../ai-openai/src/adapters/image.ts | 140 ++--- .../ai-openai/src/adapters/summarize.ts | 66 +-- .../typescript/ai-openai/src/adapters/text.ts | 20 +- .../ai-openai/src/adapters/transcription.ts | 213 ++++--- .../typescript/ai-openai/src/adapters/tts.ts | 138 ++--- .../ai-openai/src/adapters/video.ts | 337 ++++++++--- packages/typescript/ai-openai/src/index.ts | 3 +- .../typescript/ai-openai/src/utils/client.ts | 11 +- .../typescript/ai-openrouter/package.json | 1 + .../src/adapters/responses-text.ts | 54 +- .../ai-openrouter/src/adapters/text.ts | 38 +- .../src/text/responses-provider-options.ts | 5 +- .../tests/openrouter-adapter.test.ts | 
226 ++++++-- .../openrouter-responses-adapter.test.ts | 383 ++++++++++++- .../ai/src/activities/error-payload.ts | 15 +- .../typescript/ai/src/activities/index.ts | 4 + .../summarize/chat-stream-wrapper.ts} | 29 +- .../ai/src/activities/summarize/index.ts | 4 + .../typescript/ai/tests/error-payload.test.ts | 20 +- pnpm-lock.yaml | 15 +- 45 files changed, 2306 insertions(+), 2127 deletions(-) delete mode 100644 packages/typescript/ai-openai-compatible/src/adapters/image.ts delete mode 100644 packages/typescript/ai-openai-compatible/src/adapters/transcription.ts delete mode 100644 packages/typescript/ai-openai-compatible/src/adapters/tts.ts delete mode 100644 packages/typescript/ai-openai-compatible/src/adapters/video.ts delete mode 100644 packages/typescript/ai-openai-compatible/src/types/config.ts delete mode 100644 packages/typescript/ai-openai-compatible/src/utils/client.ts delete mode 100644 packages/typescript/ai-openai-compatible/tests/media-adapters.test.ts rename packages/typescript/{ai-openai-compatible/src/adapters/summarize.ts => ai/src/activities/summarize/chat-stream-wrapper.ts} (84%) diff --git a/.changeset/migrate-groq-openrouter-to-openai-base.md b/.changeset/migrate-groq-openrouter-to-openai-base.md index 9b6fad972..3ec7467bd 100644 --- a/.changeset/migrate-groq-openrouter-to-openai-base.md +++ b/.changeset/migrate-groq-openrouter-to-openai-base.md @@ -7,7 +7,7 @@ Migrate `ai-groq` and `ai-openrouter` onto `OpenAICompatibleChatCompletionsTextAdapter` so they share the stream accumulator, partial-JSON tool-call buffer, RUN_ERROR taxonomy, and lifecycle gates with `ai-openai` / `ai-grok`. Removes ~1k LOC of duplicated stream processing. -`@tanstack/ai-openai-compatible` adds three protected hooks on `OpenAICompatibleChatCompletionsTextAdapter` so providers with non-OpenAI SDK shapes can reuse the base: `callChatCompletion` and `callChatCompletionStream` (SDK call sites for non-streaming and streaming Chat Completions), and `extractReasoning` (surface reasoning content from chunk shapes that carry it, e.g. OpenRouter's `delta.reasoningDetails`, into the base's REASONING\_\* + legacy STEP_STARTED/STEP_FINISHED lifecycle). Also adds `transformStructuredOutput` for subclasses (like OpenRouter) that preserve nulls in structured output instead of converting them to undefined. +`@tanstack/ai-openai-compatible` adds four protected hooks on `OpenAICompatibleChatCompletionsTextAdapter` so providers with non-OpenAI SDK shapes can reuse the base: `callChatCompletion` and `callChatCompletionStream` (SDK call sites for non-streaming and streaming Chat Completions), `extractReasoning` (surface reasoning content from chunk shapes that carry it, e.g. OpenRouter's `delta.reasoningDetails`, into the base's REASONING\_\* + legacy STEP_STARTED/STEP_FINISHED lifecycle), and `transformStructuredOutput` (subclasses like OpenRouter can preserve nulls in structured output instead of converting them to undefined). `@tanstack/ai-openai-compatible` fixes two error-handling regressions in the shared base: `structuredOutput` now throws a distinct `"response contained no content"` error rather than letting empty content cascade into a misleading JSON-parse error, and the post-loop tool-args drain block now logs malformed JSON via `logger.errors` (matching the in-loop finish_reason path) so truncated streams emitting partial tool args are debuggable instead of silently invoking the tool with `{}`. 
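The double-`TOOL_CALL_END` fix described in this patch's message reduces to an idempotence gate on the per-tool-call metadata. A minimal sketch with simplified stand-in types (not the real `@tanstack/ai` event shapes); the `ended` flag is the field the diff below adds:

```typescript
interface ToolCallMeta {
  index: number
  name: string
  started: boolean
  ended?: boolean // set once TOOL_CALL_END has been emitted
}

// Both the `function_call_arguments.done` path and the `output_item.done`
// backfill funnel through this gate, so a retried or replayed stream can
// never close the same tool call twice.
function* endToolCallOnce(
  metadata: Map<string, ToolCallMeta>,
  toolCallId: string,
): Generator<{ type: 'TOOL_CALL_END'; toolCallId: string }> {
  const meta = metadata.get(toolCallId)
  if (meta && meta.started && !meta.ended) {
    meta.ended = true
    yield { type: 'TOOL_CALL_END', toolCallId }
  }
}
```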
diff --git a/docs/adapters/openrouter.md b/docs/adapters/openrouter.md index 2bcb792bd..b54fa8dbd 100644 --- a/docs/adapters/openrouter.md +++ b/docs/adapters/openrouter.md @@ -35,16 +35,17 @@ const stream = chat({ ## Configuration ```typescript -import { createOpenRouter, type OpenRouterConfig } from "@tanstack/ai-openrouter"; - -const config: OpenRouterConfig = { - apiKey: process.env.OPENROUTER_API_KEY!, - baseURL: "https://openrouter.ai/api/v1", // Optional - httpReferer: "https://your-app.com", // Optional, for rankings - xTitle: "Your App Name", // Optional, for rankings -}; - -const adapter = createOpenRouter(config.apiKey, config); +import { createOpenRouterText } from "@tanstack/ai-openrouter"; + +const adapter = createOpenRouterText( + "openai/gpt-5", + process.env.OPENROUTER_API_KEY!, + { + serverURL: "https://openrouter.ai/api/v1", // Optional + httpReferer: "https://your-app.com", // Optional, for rankings + appTitle: "Your App Name", // Optional, for rankings + }, +); ``` ## Available Models @@ -122,14 +123,13 @@ OpenRouter can automatically route requests to the best available provider: ```typescript const stream = chat({ adapter: openRouterText("openrouter/auto"), - messages, - providerOptions: { + messages, + modelOptions: { models: [ "openai/gpt-4o", "anthropic/claude-3.5-sonnet", "google/gemini-pro", ], - route: "fallback", // Use fallback if primary fails }, }); ``` diff --git a/packages/typescript/ai-grok/package.json b/packages/typescript/ai-grok/package.json index c6713b434..254d7c8e7 100644 --- a/packages/typescript/ai-grok/package.json +++ b/packages/typescript/ai-grok/package.json @@ -45,7 +45,8 @@ ], "dependencies": { "@tanstack/ai-openai-compatible": "workspace:*", - "@tanstack/ai-utils": "workspace:*" + "@tanstack/ai-utils": "workspace:*", + "openai": "^6.9.1" }, "devDependencies": { "@tanstack/ai": "workspace:*", diff --git a/packages/typescript/ai-grok/src/adapters/image.ts b/packages/typescript/ai-grok/src/adapters/image.ts index 2e5ad1c3a..13e17e8ca 100644 --- a/packages/typescript/ai-grok/src/adapters/image.ts +++ b/packages/typescript/ai-grok/src/adapters/image.ts @@ -1,10 +1,19 @@ -import { OpenAICompatibleImageAdapter } from '@tanstack/ai-openai-compatible' +import OpenAI from 'openai' +import { BaseImageAdapter } from '@tanstack/ai/adapters' +import { toRunErrorPayload } from '@tanstack/ai/adapter-internals' +import { generateId } from '@tanstack/ai-utils' import { getGrokApiKeyFromEnv, withGrokDefaults } from '../utils/client' import { validateImageSize, validateNumberOfImages, validatePrompt, } from '../image/image-provider-options' +import type { + GeneratedImage, + ImageGenerationOptions, + ImageGenerationResult, +} from '@tanstack/ai' +import type OpenAI_SDK from 'openai' import type { GrokImageModel } from '../model-meta' import type { GrokImageModelProviderOptionsByName, @@ -19,19 +28,11 @@ import type { GrokClientConfig } from '../utils' export interface GrokImageConfig extends GrokClientConfig {} /** - * Grok Image Generation Adapter - * - * Tree-shakeable adapter for Grok image generation functionality. - * Supports grok-2-image-1212 model. - * - * Features: - * - Model-specific type-safe provider options - * - Size validation per model - * - Number of images validation + * Grok Image Generation Adapter. Supports grok-2-image-1212. 
*/ export class GrokImageAdapter< TModel extends GrokImageModel, -> extends OpenAICompatibleImageAdapter< +> extends BaseImageAdapter< TModel, GrokImageProviderOptions, GrokImageModelProviderOptionsByName, @@ -40,51 +41,75 @@ readonly kind = 'image' as const readonly name = 'grok' as const + protected client: OpenAI + constructor(config: GrokImageConfig, model: TModel) { - super(withGrokDefaults(config), model, 'grok') + super(model, {}) + this.client = new OpenAI(withGrokDefaults(config)) } - protected override validatePrompt(options: { - prompt: string - model: string - }): void { - validatePrompt(options) - } + async generateImages( + options: ImageGenerationOptions, + ): Promise<ImageGenerationResult> { + const { model, prompt, numberOfImages, size, modelOptions } = options - protected override validateImageSize( - model: string, - size: string | undefined, - ): void { + validatePrompt({ prompt, model }) validateImageSize(model, size) - } - - protected override validateNumberOfImages( - model: string, - numberOfImages: number | undefined, - ): void { validateNumberOfImages(model, numberOfImages) + + const request: OpenAI_SDK.Images.ImageGenerateParams = { + model, + prompt, + n: numberOfImages ?? 1, + size: size as OpenAI_SDK.Images.ImageGenerateParams['size'], + ...modelOptions, + } + + try { + options.logger.request( + `activity=image provider=${this.name} model=${model} n=${request.n ?? 1} size=${request.size ?? 'default'}`, + { provider: this.name, model }, + ) + const response = await this.client.images.generate({ + ...request, + stream: false, + }) + + const images: Array<GeneratedImage> = (response.data ?? []).flatMap( + (item): Array<GeneratedImage> => { + const revisedPrompt = item.revised_prompt + if (item.b64_json) { + return [{ b64Json: item.b64_json, revisedPrompt }] + } + if (item.url) { + return [{ url: item.url, revisedPrompt }] + } + return [] + }, + ) + + return { + id: generateId(this.name), + model, + images, + usage: response.usage + ? { + inputTokens: response.usage.input_tokens, + outputTokens: response.usage.output_tokens, + totalTokens: response.usage.total_tokens, + } + : undefined, + } + } catch (error: unknown) { + options.logger.errors(`${this.name}.generateImages fatal`, { + error: toRunErrorPayload(error, `${this.name}.generateImages failed`), + source: `${this.name}.generateImages`, + }) + throw error + } } } -/** - * Creates a Grok image adapter with explicit API key. - * Type resolution happens here at the call site. - * - * @param model - The model name (e.g., 'grok-2-image-1212') - * @param apiKey - Your xAI API key - * @param config - Optional additional configuration - * @returns Configured Grok image adapter instance with resolved types - * - * @example - * ```typescript - * const adapter = createGrokImage('grok-2-image-1212', "xai-..."); - * - * const result = await generateImage({ - * adapter, - * prompt: 'A cute baby sea otter' - * }); - * ``` - */ export function createGrokImage<TModel extends GrokImageModel>( model: TModel, apiKey: string, @@ -93,30 +118,6 @@ return new GrokImageAdapter({ apiKey, ...config }, model) } -/** - * Creates a Grok image adapter with automatic API key detection from environment variables. - * Type resolution happens here at the call site.
- * - * Looks for `XAI_API_KEY` in: - * - `process.env` (Node.js) - * - `window.env` (Browser with injected env) - * - * @param model - The model name (e.g., 'grok-2-image-1212') - * @param config - Optional configuration (excluding apiKey which is auto-detected) - * @returns Configured Grok image adapter instance with resolved types - * @throws Error if XAI_API_KEY is not found in environment - * - * @example - * ```typescript - * // Automatically uses XAI_API_KEY from environment - * const adapter = grokImage('grok-2-image-1212'); - * - * const result = await generateImage({ - * adapter, - * prompt: 'A beautiful sunset over mountains' - * }); - * ``` - */ export function grokImage<TModel extends GrokImageModel>( model: TModel, config?: Omit<GrokImageConfig, 'apiKey'>, diff --git a/packages/typescript/ai-grok/src/adapters/summarize.ts b/packages/typescript/ai-grok/src/adapters/summarize.ts index 8e72fc44d..301d5e5e7 100644 --- a/packages/typescript/ai-grok/src/adapters/summarize.ts +++ b/packages/typescript/ai-grok/src/adapters/summarize.ts @@ -1,18 +1,11 @@ -import { OpenAICompatibleSummarizeAdapter } from '@tanstack/ai-openai-compatible' +import { ChatStreamSummarizeAdapter } from '@tanstack/ai/adapters' import { getGrokApiKeyFromEnv } from '../utils' import { GrokTextAdapter } from './text' -import type { ChatStreamCapable } from '@tanstack/ai-openai-compatible' import type { GROK_CHAT_MODELS } from '../model-meta' import type { GrokClientConfig } from '../utils' -/** - * Configuration for Grok summarize adapter - */ export interface GrokSummarizeConfig extends GrokClientConfig {} -/** - * Grok-specific provider options for summarization - */ export interface GrokSummarizeProviderOptions { /** Temperature for response generation (0-2) */ temperature?: number @@ -20,46 +13,10 @@ maxTokens?: number } -/** Model type for Grok summarization */ export type GrokSummarizeModel = (typeof GROK_CHAT_MODELS)[number] -/** - * Grok Summarize Adapter - * - * A thin wrapper around the text adapter that adds summarization-specific prompting. - * Delegates all API calls to the GrokTextAdapter. - */ -export class GrokSummarizeAdapter< - TModel extends GrokSummarizeModel, -> extends OpenAICompatibleSummarizeAdapter< - TModel, - GrokSummarizeProviderOptions -> { - readonly kind = 'summarize' as const - readonly name = 'grok' as const - - constructor(config: GrokSummarizeConfig, model: TModel) { - // The text adapter accepts richer provider options than the summarize adapter needs, - // but we only pass basic options (model, messages, systemPrompts, etc.) at call time. - super( - new GrokTextAdapter( - config, - model, - ) as unknown as ChatStreamCapable, - model, - 'grok', - ) - } -} - /** * Creates a Grok summarize adapter with explicit API key. - * Type resolution happens here at the call site. - * - * @param model - The model name (e.g., 'grok-3', 'grok-4') - * @param apiKey - Your xAI API key - * @param config - Optional additional configuration - * @returns Configured Grok summarize adapter instance with resolved types * * @example * ```typescript * const adapter = createGrokSummarize('grok-3', "xai-..."); * ``` */ export function createGrokSummarize<TModel extends GrokSummarizeModel>( model: TModel, apiKey: string, config?: Omit<GrokSummarizeConfig, 'apiKey'>, -): GrokSummarizeAdapter<TModel> { - return new GrokSummarizeAdapter({ apiKey, ...config }, model) +): ChatStreamSummarizeAdapter<TModel> { + return new ChatStreamSummarizeAdapter( + new GrokTextAdapter({ apiKey, ...config }, model), + model, + 'grok', + ) } /** - * Creates a Grok summarize adapter with automatic API key detection from environment variables.
- * Type resolution happens here at the call site. - * - * Looks for `XAI_API_KEY` in: - * - `process.env` (Node.js) - * - `window.env` (Browser with injected env) - * - * @param model - The model name (e.g., 'grok-3', 'grok-4') - * @param config - Optional configuration (excluding apiKey which is auto-detected) - * @returns Configured Grok summarize adapter instance with resolved types - * @throws Error if XAI_API_KEY is not found in environment + * Creates a Grok summarize adapter with API key from `XAI_API_KEY`. * * @example * ```typescript - * // Automatically uses XAI_API_KEY from environment * const adapter = grokSummarize('grok-3'); - * - * await summarize({ - * adapter, - * text: "Long article text..." - * }); + * await summarize({ adapter, text: "Long article text..." }); * ``` */ export function grokSummarize<TModel extends GrokSummarizeModel>( model: TModel, config?: Omit<GrokSummarizeConfig, 'apiKey'>, -): GrokSummarizeAdapter<TModel> { - const apiKey = getGrokApiKeyFromEnv() - return createGrokSummarize(model, apiKey, config) +): ChatStreamSummarizeAdapter<TModel> { + return createGrokSummarize(model, getGrokApiKeyFromEnv(), config) } diff --git a/packages/typescript/ai-grok/src/adapters/text.ts b/packages/typescript/ai-grok/src/adapters/text.ts index 767c2c18c..c1cbb3521 100644 --- a/packages/typescript/ai-grok/src/adapters/text.ts +++ b/packages/typescript/ai-grok/src/adapters/text.ts @@ -1,3 +1,4 @@ +import OpenAI from 'openai' import { OpenAICompatibleChatCompletionsTextAdapter } from '@tanstack/ai-openai-compatible' import { getGrokApiKeyFromEnv, withGrokDefaults } from '../utils/client' import type { @@ -6,6 +7,7 @@ import type { ResolveInputModalities, ResolveProviderOptions, } from '../model-meta' +import type OpenAI_SDK from 'openai' import type { Modality } from '@tanstack/ai' import type { GrokMessageMetadataByModality } from '../message-types' import type { GrokClientConfig } from '../utils' @@ -55,8 +57,25 @@ export class GrokTextAdapter< readonly kind = 'text' as const readonly name = 'grok' as const + protected client: OpenAI + constructor(config: GrokTextConfig, model: TModel) { - super(withGrokDefaults(config), model, 'grok') + super(model, 'grok') + this.client = new OpenAI(withGrokDefaults(config)) + } + + protected async callChatCompletion( + params: OpenAI_SDK.Chat.Completions.ChatCompletionCreateParamsNonStreaming, + requestOptions: { signal?: AbortSignal | null; headers?: HeadersInit }, + ): Promise<OpenAI_SDK.Chat.Completions.ChatCompletion> { + return this.client.chat.completions.create(params, requestOptions) + } + + protected async callChatCompletionStream( + params: OpenAI_SDK.Chat.Completions.ChatCompletionCreateParamsStreaming, + requestOptions: { signal?: AbortSignal | null; headers?: HeadersInit }, + ): Promise<AsyncIterable<OpenAI_SDK.Chat.Completions.ChatCompletionChunk>> { + return this.client.chat.completions.create(params, requestOptions) } } diff --git a/packages/typescript/ai-grok/src/index.ts b/packages/typescript/ai-grok/src/index.ts index 85b8999f8..a313cf06f 100644 --- a/packages/typescript/ai-grok/src/index.ts +++ b/packages/typescript/ai-grok/src/index.ts @@ -11,9 +11,8 @@ export { type GrokTextProviderOptions, } from './adapters/text' -// Summarize adapter - for text summarization +// Summarize - thin factory functions over @tanstack/ai's ChatStreamSummarizeAdapter export { - GrokSummarizeAdapter, createGrokSummarize, grokSummarize, type GrokSummarizeConfig, diff --git a/packages/typescript/ai-grok/src/utils/client.ts b/packages/typescript/ai-grok/src/utils/client.ts index 650505b85..8ca0c047e 100644 --- a/packages/typescript/ai-grok/src/utils/client.ts +++ b/packages/typescript/ai-grok/src/utils/client.ts @@ -1,
+1,9 @@ import { getApiKeyFromEnv } from '@tanstack/ai-utils' -import type { OpenAICompatibleClientConfig } from '@tanstack/ai-openai-compatible' +import type { ClientOptions } from 'openai' -export interface GrokClientConfig extends OpenAICompatibleClientConfig {} +export interface GrokClientConfig extends Omit<ClientOptions, 'apiKey'> { + apiKey: string +} /** * Gets Grok API key from environment variables @@ -21,9 +23,7 @@ export function getGrokApiKeyFromEnv(): string { * Returns a Grok client config with the default xAI base URL applied * when not already set. */ -export function withGrokDefaults( - config: GrokClientConfig, -): OpenAICompatibleClientConfig { +export function withGrokDefaults(config: GrokClientConfig): GrokClientConfig { return { ...config, baseURL: config.baseURL || 'https://api.x.ai/v1', diff --git a/packages/typescript/ai-groq/package.json b/packages/typescript/ai-groq/package.json index 5be447803..a16486515 100644 --- a/packages/typescript/ai-groq/package.json +++ b/packages/typescript/ai-groq/package.json @@ -53,6 +53,7 @@ }, "dependencies": { "@tanstack/ai-openai-compatible": "workspace:*", - "@tanstack/ai-utils": "workspace:*" + "@tanstack/ai-utils": "workspace:*", + "openai": "^6.9.1" } } diff --git a/packages/typescript/ai-groq/src/adapters/text.ts b/packages/typescript/ai-groq/src/adapters/text.ts index a0ff1adc6..c92e52e93 100644 --- a/packages/typescript/ai-groq/src/adapters/text.ts +++ b/packages/typescript/ai-groq/src/adapters/text.ts @@ -1,6 +1,8 @@ +import OpenAI from 'openai' import { OpenAICompatibleChatCompletionsTextAdapter } from '@tanstack/ai-openai-compatible' import { getGroqApiKeyFromEnv, withGroqDefaults } from '../utils/client' import { makeGroqStructuredOutputCompatible } from '../utils/schema-converter' +import type OpenAI_SDK from 'openai' import type { Modality, TextOptions } from '@tanstack/ai' import type { ChatCompletionChunk } from '@tanstack/ai-openai-compatible' import type { @@ -57,8 +59,25 @@ export class GroqTextAdapter< readonly kind = 'text' as const readonly name = 'groq' as const + protected client: OpenAI + constructor(config: GroqTextConfig, model: TModel) { - super(withGroqDefaults(config), model, 'groq') + super(model, 'groq') + this.client = new OpenAI(withGroqDefaults(config)) + } + + protected async callChatCompletion( + params: OpenAI_SDK.Chat.Completions.ChatCompletionCreateParamsNonStreaming, + requestOptions: { signal?: AbortSignal | null; headers?: HeadersInit }, + ): Promise<OpenAI_SDK.Chat.Completions.ChatCompletion> { + return this.client.chat.completions.create(params, requestOptions) + } + + protected async callChatCompletionStream( + params: OpenAI_SDK.Chat.Completions.ChatCompletionCreateParamsStreaming, + requestOptions: { signal?: AbortSignal | null; headers?: HeadersInit }, + ): Promise<AsyncIterable<OpenAI_SDK.Chat.Completions.ChatCompletionChunk>> { + return this.client.chat.completions.create(params, requestOptions) } protected override makeStructuredOutputCompatible( diff --git a/packages/typescript/ai-groq/src/utils/client.ts b/packages/typescript/ai-groq/src/utils/client.ts index eaaadc64f..f121cb384 100644 --- a/packages/typescript/ai-groq/src/utils/client.ts +++ b/packages/typescript/ai-groq/src/utils/client.ts @@ -1,7 +1,9 @@ import { getApiKeyFromEnv } from '@tanstack/ai-utils' -import type { OpenAICompatibleClientConfig } from '@tanstack/ai-openai-compatible' +import type { ClientOptions } from 'openai' -export interface GroqClientConfig extends OpenAICompatibleClientConfig {} +export interface GroqClientConfig extends Omit<ClientOptions, 'apiKey'> { + apiKey: string +} /** * Gets Groq API key from environment variables @@ -20,11 +22,9 @@ export
function getGroqApiKeyFromEnv(): string { /** * Returns a Groq client config with Groq's OpenAI-compatible base URL * applied when not already set. The Groq endpoint accepts the OpenAI SDK - * verbatim, so the base adapter can drive it without a separate SDK. + * verbatim, so the adapter drives it via the OpenAI SDK with this baseURL. */ -export function withGroqDefaults( - config: GroqClientConfig, -): OpenAICompatibleClientConfig { +export function withGroqDefaults(config: GroqClientConfig): GroqClientConfig { return { ...config, baseURL: config.baseURL || 'https://api.groq.com/openai/v1', diff --git a/packages/typescript/ai-openai-compatible/README.md b/packages/typescript/ai-openai-compatible/README.md index d9345634f..65d2f8db7 100644 --- a/packages/typescript/ai-openai-compatible/README.md +++ b/packages/typescript/ai-openai-compatible/README.md @@ -13,7 +13,7 @@ the shared logic for talking to **any** server that speaks one of those wire formats. OpenAI is one such server. OpenRouter, Groq, Grok, vLLM, SGLang, Together, Ollama's compat layer, Fireworks, and others are too. -The package holds two abstract classes: +The package holds two shared base classes: - `OpenAICompatibleChatCompletionsTextAdapter` - `OpenAICompatibleResponsesTextAdapter` @@ -94,13 +94,12 @@ because no compatible family exists. └── BaseTextAdapter (abstract — emits AG-UI events) │ ├── @tanstack/ai-openai-compatible::OpenAICompatibleChatCompletionsTextAdapter - │ ├── ai-openai (chat-completions side) │ ├── ai-openrouter │ ├── ai-groq │ └── ai-grok │ ├── @tanstack/ai-openai-compatible::OpenAICompatibleResponsesTextAdapter - │ ├── ai-openai (primary text adapter) + │ ├── ai-openai (primary text adapter — Responses is OpenAI's preferred API) │ └── ai-openrouter (beta — routes to any underlying model) │ ├── ai-anthropic::AnthropicTextAdapter extends BaseTextAdapter directly @@ -108,6 +107,11 @@ because no compatible family exists. └── ai-ollama::OllamaTextAdapter extends BaseTextAdapter directly ``` +Note: `ai-openai` ships only the Responses-based adapter. For pure Chat +Completions use cases without OpenAI-specific behaviour, use `ai-grok` +(xAI's API is a direct OpenAI Chat Completions clone) or build a new +provider package extending `OpenAICompatibleChatCompletionsTextAdapter`. 
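A minimal sketch of such a provider package, mirroring the `ai-grok` / `ai-groq` pattern from this patch series; the class name, provider name, and `baseURL` here are invented for illustration, and the base's remaining generics are assumed to have usable defaults:

```typescript
import OpenAI from 'openai'
import { OpenAICompatibleChatCompletionsTextAdapter } from '@tanstack/ai-openai-compatible'
import type OpenAI_SDK from 'openai'

// Hypothetical provider: any self-hosted Chat Completions endpoint,
// e.g. a local vLLM server. Only the two abstract SDK-call hooks are
// implemented; everything else is inherited from the shared base.
export class MyProviderTextAdapter<
  TModel extends string,
> extends OpenAICompatibleChatCompletionsTextAdapter<TModel> {
  readonly kind = 'text' as const
  readonly name = 'my-provider' as const

  protected client: OpenAI

  constructor(apiKey: string, model: TModel) {
    super(model, 'my-provider')
    this.client = new OpenAI({ apiKey, baseURL: 'http://localhost:8000/v1' })
  }

  protected async callChatCompletion(
    params: OpenAI_SDK.Chat.Completions.ChatCompletionCreateParamsNonStreaming,
    requestOptions: { signal?: AbortSignal | null; headers?: HeadersInit },
  ): Promise<OpenAI_SDK.Chat.Completions.ChatCompletion> {
    return this.client.chat.completions.create(params, requestOptions)
  }

  protected async callChatCompletionStream(
    params: OpenAI_SDK.Chat.Completions.ChatCompletionCreateParamsStreaming,
    requestOptions: { signal?: AbortSignal | null; headers?: HeadersInit },
  ): Promise<AsyncIterable<OpenAI_SDK.Chat.Completions.ChatCompletionChunk>> {
    return this.client.chat.completions.create(params, requestOptions)
  }
}
```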
+ ## Direct use Most users don't import from this package directly; they install a provider diff --git a/packages/typescript/ai-openai-compatible/package.json b/packages/typescript/ai-openai-compatible/package.json index f0e3051cf..e66ee09eb 100644 --- a/packages/typescript/ai-openai-compatible/package.json +++ b/packages/typescript/ai-openai-compatible/package.json @@ -41,15 +41,21 @@ "protocol" ], "dependencies": { - "@tanstack/ai-utils": "workspace:*", - "openai": "^6.9.1" + "@tanstack/ai-utils": "workspace:*" }, "peerDependencies": { - "@tanstack/ai": "workspace:^" + "@tanstack/ai": "workspace:^", + "openai": "^6.9.1" + }, + "peerDependenciesMeta": { + "openai": { + "optional": true + } }, "devDependencies": { "@tanstack/ai": "workspace:*", "@vitest/coverage-v8": "4.0.14", + "openai": "^6.9.1", "vite": "^7.2.7" } } diff --git a/packages/typescript/ai-openai-compatible/src/adapters/chat-completions-text.ts b/packages/typescript/ai-openai-compatible/src/adapters/chat-completions-text.ts index 7ea5fffa8..ee3a39c69 100644 --- a/packages/typescript/ai-openai-compatible/src/adapters/chat-completions-text.ts +++ b/packages/typescript/ai-openai-compatible/src/adapters/chat-completions-text.ts @@ -2,7 +2,6 @@ import { EventType } from '@tanstack/ai' import { BaseTextAdapter } from '@tanstack/ai/adapters' import { toRunErrorPayload } from '@tanstack/ai/adapter-internals' import { generateId, transformNullsToUndefined } from '@tanstack/ai-utils' -import { createOpenAICompatibleClient } from '../utils/client' import { extractRequestOptions } from '../utils/request-options' import { makeStructuredOutputCompatible } from '../utils/schema-converter' import { convertToolsToChatCompletionsFormat } from './chat-completions-tool-converter' @@ -19,24 +18,17 @@ import type { StreamChunk, TextOptions, } from '@tanstack/ai' -import type { OpenAICompatibleClientConfig } from '../types/config' /** - * OpenAI-compatible Chat Completions Text Adapter - * - * A generalized base class for providers that use the OpenAI Chat Completions API - * (`/v1/chat/completions`). Providers like Grok, Groq, OpenRouter, and others can - * extend this class and only need to: - * - Set `baseURL` in the config - * - Lock the generic type parameters to provider-specific types - * - Override specific methods for quirks - * - * All methods that build requests or process responses are `protected` so subclasses - * can override them. + * Shared implementation of the OpenAI Chat Completions wire format. Holds the + * stream-accumulator + AG-UI lifecycle logic; subclasses provide the actual + * SDK calls via the abstract `callChatCompletion*` hooks. The base never + * imports the OpenAI SDK at runtime — it only borrows the SDK's TypeScript + * shapes as the canonical reference for the protocol. 
*/ -export class OpenAICompatibleChatCompletionsTextAdapter< +export abstract class OpenAICompatibleChatCompletionsTextAdapter< TModel extends string, - TProviderOptions extends Record = Record, + TProviderOptions extends Record = Record, TInputModalities extends ReadonlyArray<Modality> = ReadonlyArray<Modality>, TMessageMetadata extends DefaultMessageMetadataByModality = DefaultMessageMetadataByModality, @@ -51,23 +43,14 @@ readonly kind = 'text' as const readonly name: string - protected client: OpenAI_SDK - - constructor( - config: OpenAICompatibleClientConfig, - model: TModel, - name: string = 'openai-compatible', - ) { + constructor(model: TModel, name: string = 'openai-compatible') { super({}, model) this.name = name - this.client = createOpenAICompatibleClient(config) } async *chatStream( options: TextOptions, ): AsyncIterable<StreamChunk> { - const requestParams = this.mapOptionsToRequest(options) - // AG-UI lifecycle tracking (mutable state object for ESLint compatibility) const aguiState = { runId: generateId(this.name), @@ -77,6 +60,13 @@ } try { + // mapOptionsToRequest can throw (e.g. fail-loud guards in convertMessage + // for empty content or unsupported parts). Keep it inside the try so + // those failures surface as a single RUN_ERROR event, matching every + // other failure mode here — callers iterating chatStream then only need + // one error-handling path instead of both a try/catch around iteration + // and a RUN_ERROR handler. + const requestParams = this.mapOptionsToRequest(options) options.logger.request( `activity=chat provider=${this.name} model=${this.model} messages=${options.messages.length} tools=${options.tools?.length ?? 0} stream=true`, { provider: this.name, model: this.model }, ) @@ -232,50 +222,34 @@ } /** - * Performs the non-streaming Chat Completions network call. The default - * uses the OpenAI SDK (`client.chat.completions.create`), which covers any - * provider whose endpoint accepts the OpenAI SDK verbatim (e.g. xAI/Grok, - * Groq with a `baseURL` override, DeepSeek, Together, Fireworks). - * - * Override in subclasses whose SDK has a different call shape — for - * example `@openrouter/sdk` exposes `client.chat.send({ chatRequest })` - * with camelCase fields. The override is responsible for converting the - * params shape on the way in and returning an object structurally - * compatible with `ChatCompletion` (the base only reads documented fields - * like `response.choices[0].message.content`). + * Performs the non-streaming Chat Completions network call. Subclasses + * implement against whatever SDK or HTTP client they bridge to. Must + * return a value structurally compatible with `ChatCompletion` — the base + * reads documented fields like `response.choices[0].message.content`. */ - protected async callChatCompletion( + protected abstract callChatCompletion( params: OpenAI_SDK.Chat.Completions.ChatCompletionCreateParamsNonStreaming, requestOptions: ReturnType<typeof extractRequestOptions>, - ): Promise<OpenAI_SDK.Chat.Completions.ChatCompletion> { - return this.client.chat.completions.create(params, requestOptions) - } + ): Promise<OpenAI_SDK.Chat.Completions.ChatCompletion> /** - * Performs the streaming Chat Completions network call. Same pattern as - * {@link callChatCompletion} — default uses the OpenAI SDK; override for - * providers whose SDK exposes a different streaming entry point. Returns - * an `AsyncIterable` because the base's - * {@link processStreamChunks} only needs structural iteration over chunks.
+ * Performs the streaming Chat Completions network call. Returns an + * `AsyncIterable`; the base's `processStreamChunks` + * only needs structural iteration over chunks. */ - protected async callChatCompletionStream( + protected abstract callChatCompletionStream( params: OpenAI_SDK.Chat.Completions.ChatCompletionCreateParamsStreaming, requestOptions: ReturnType<typeof extractRequestOptions>, - ): Promise<AsyncIterable<OpenAI_SDK.Chat.Completions.ChatCompletionChunk>> { - return this.client.chat.completions.create(params, requestOptions) - } + ): Promise<AsyncIterable<OpenAI_SDK.Chat.Completions.ChatCompletionChunk>> /** * Extract reasoning content from a stream chunk. Default returns - * `undefined` because OpenAI Chat Completions doesn't carry reasoning in - * the chunk format. Providers that DO carry reasoning on this wire (e.g. - * OpenRouter's `delta.reasoningDetails`) override this to yield reasoning - * text — the base then folds it into a single REASONING_* lifecycle - * without each subclass duplicating `processStreamChunks`. + * `undefined` because the OpenAI Chat Completions chunk shape doesn't + * carry reasoning. The chunk param is typed `unknown` so an override can + * narrow to its own SDK chunk type without an `as` dance — the base only + * passes through `processStreamChunks`'s structurally-iterated chunk. */ - protected extractReasoning( - _chunk: OpenAI_SDK.Chat.Completions.ChatCompletionChunk, - ): { text: string } | undefined { + protected extractReasoning(_chunk: unknown): { text: string } | undefined { return undefined } diff --git a/packages/typescript/ai-openai-compatible/src/adapters/image.ts b/packages/typescript/ai-openai-compatible/src/adapters/image.ts deleted file mode 100644 index 89b8f283f..000000000 --- a/packages/typescript/ai-openai-compatible/src/adapters/image.ts +++ /dev/null @@ -1,158 +0,0 @@ -import { BaseImageAdapter } from '@tanstack/ai/adapters' -import { toRunErrorPayload } from '@tanstack/ai/adapter-internals' -import { generateId } from '@tanstack/ai-utils' -import { createOpenAICompatibleClient } from '../utils/client' -import type { - GeneratedImage, - ImageGenerationOptions, - ImageGenerationResult, -} from '@tanstack/ai' -import type OpenAI_SDK from 'openai' -import type { OpenAICompatibleClientConfig } from '../types/config' - -/** - * OpenAI-Compatible Image Generation Adapter - * - * A generalized base class for providers that implement OpenAI-compatible image - * generation APIs. Providers like OpenAI, Grok, and others can extend this class - * and only need to: - * - Set `baseURL` in the config - * - Lock the generic type parameters to provider-specific types - * - Override validation or request building methods for provider-specific constraints - * - * All methods that validate inputs, build requests, or transform responses are - * `protected` so subclasses can override them.
- */ -export class OpenAICompatibleImageAdapter< - TModel extends string, - TProviderOptions extends object = Record, - TModelProviderOptionsByName extends Record = Record, - TModelSizeByName extends Record = Record, -> extends BaseImageAdapter< - TModel, - TProviderOptions, - TModelProviderOptionsByName, - TModelSizeByName -> { - readonly kind = 'image' as const - readonly name: string - - protected client: OpenAI_SDK - - constructor( - config: OpenAICompatibleClientConfig, - model: TModel, - name: string = 'openai-compatible', - ) { - super(model, {}) - this.name = name - this.client = createOpenAICompatibleClient(config) - } - - async generateImages( - options: ImageGenerationOptions, - ): Promise<ImageGenerationResult> { - const { model, prompt, numberOfImages, size } = options - - // Validate inputs - this.validatePrompt({ prompt, model }) - this.validateImageSize(model, size) - this.validateNumberOfImages(model, numberOfImages) - - // Build request based on model type - const request = this.buildRequest(options) - - try { - options.logger.request( - `activity=image provider=${this.name} model=${model} n=${request.n ?? 1} size=${request.size ?? 'default'}`, - { provider: this.name, model }, - ) - const response = await this.client.images.generate({ - ...request, - stream: false, - }) - - return this.transformResponse(model, response) - } catch (error: unknown) { - // Narrow before logging: raw SDK errors can carry request metadata - // (including auth headers) which we must never surface to user loggers. - options.logger.errors(`${this.name}.generateImages fatal`, { - error: toRunErrorPayload(error, `${this.name}.generateImages failed`), - source: `${this.name}.generateImages`, - }) - throw error - } - } - - protected buildRequest( - options: ImageGenerationOptions, - ): OpenAI_SDK.Images.ImageGenerateParams { - const { model, prompt, numberOfImages, size, modelOptions } = options - - return { - model, - prompt, - n: numberOfImages ?? 1, - size: size as OpenAI_SDK.Images.ImageGenerateParams['size'], - ...modelOptions, - } - } - - protected transformResponse( - model: string, - response: OpenAI_SDK.Images.ImagesResponse, - ): ImageGenerationResult { - const images: Array<GeneratedImage> = (response.data ?? []).flatMap( - (item): Array<GeneratedImage> => { - const revisedPrompt = item.revised_prompt - if (item.b64_json) { - return [{ b64Json: item.b64_json, revisedPrompt }] - } - if (item.url) { - return [{ url: item.url, revisedPrompt }] - } - return [] - }, - ) - - return { - id: generateId(this.name), - model, - images, - usage: response.usage - ? { - inputTokens: response.usage.input_tokens, - outputTokens: response.usage.output_tokens, - totalTokens: response.usage.total_tokens, - } - : undefined, - } - } - - protected validatePrompt(options: { prompt: string; model: string }): void { - if (options.prompt.length === 0) { - throw new Error('Prompt cannot be empty.') - } - } - - protected validateImageSize(_model: string, _size: string | undefined): void { - // Default: no size validation — subclasses can override - } - - protected validateNumberOfImages( - _model: string, - numberOfImages: number | undefined, - ): void { - if (numberOfImages === undefined) return - - // The base adapter only enforces "must be at least 1". Per-provider / - // per-model upper bounds vary widely (some support 4, some 10, some - // unlimited), so concrete adapter subclasses are expected to override - // this method with a model-specific cap. - if (numberOfImages < 1) { - throw new Error( - `Number of images must be at least 1.
Requested: ${numberOfImages}`, - ) - } - } -} diff --git a/packages/typescript/ai-openai-compatible/src/adapters/responses-text.ts b/packages/typescript/ai-openai-compatible/src/adapters/responses-text.ts index 93c48bcc6..61a48bf2b 100644 --- a/packages/typescript/ai-openai-compatible/src/adapters/responses-text.ts +++ b/packages/typescript/ai-openai-compatible/src/adapters/responses-text.ts @@ -2,7 +2,6 @@ import { EventType } from '@tanstack/ai' import { BaseTextAdapter } from '@tanstack/ai/adapters' import { toRunErrorPayload } from '@tanstack/ai/adapter-internals' import { generateId, transformNullsToUndefined } from '@tanstack/ai-utils' -import { createOpenAICompatibleClient } from '../utils/client' import { extractRequestOptions } from '../utils/request-options' import { makeStructuredOutputCompatible } from '../utils/schema-converter' import { convertToolsToResponsesFormat } from './responses-tool-converter' @@ -20,34 +19,16 @@ import type { StreamChunk, TextOptions, } from '@tanstack/ai' -import type { OpenAICompatibleClientConfig } from '../types/config' /** - * OpenAI-compatible Responses API Text Adapter - * - * A generalized base class for providers that use the OpenAI Responses API - * (`/v1/responses`). Providers like OpenAI (native), Azure OpenAI, and others - * that implement the Responses API can extend this class and only need to: - * - Set `baseURL` in the config - * - Lock the generic type parameters to provider-specific types - * - Override specific methods for quirks - * - * Key differences from the Chat Completions adapter: - * - Uses `client.responses.create()` instead of `client.chat.completions.create()` - * - Messages use `ResponseInput` format - * - System prompts go in `instructions` field, not as array messages - * - Streaming events are completely different (9+ event types vs simple delta chunks) - * - Supports reasoning/thinking tokens via `response.reasoning_text.delta` - * - Structured output uses `text.format` in the request (not `response_format`) - * - Tool calls use `response.function_call_arguments.delta` - * - Content parts are `input_text`, `input_image`, `input_file` - * - * All methods that build requests or process responses are `protected` so subclasses - * can override them. + * Shared implementation of the OpenAI Responses wire format. Holds the + * stream-event accumulator + AG-UI lifecycle; subclasses provide the actual + * SDK calls via the abstract `callResponse*` hooks. The base never imports + * the OpenAI SDK at runtime — it only borrows the SDK's TypeScript shapes. */ -export class OpenAICompatibleResponsesTextAdapter< +export abstract class OpenAICompatibleResponsesTextAdapter< TModel extends string, - TProviderOptions extends Record = Record, + TProviderOptions extends Record = Record, TInputModalities extends ReadonlyArray<Modality> = ReadonlyArray<Modality>, TMessageMetadata extends DefaultMessageMetadataByModality = DefaultMessageMetadataByModality, @@ -62,16 +43,9 @@ readonly kind = 'text' as const readonly name: string - protected client: OpenAI_SDK - - constructor( - config: OpenAICompatibleClientConfig, - model: TModel, - name: string = 'openai-compatible-responses', - ) { + constructor(model: TModel, name: string = 'openai-compatible-responses') { super({}, model) this.name = name - this.client = createOpenAICompatibleClient(config) } async *chatStream( @@ -83,9 +57,19 @@ // We assign our own indices as we encounter unique tool call IDs.
const toolCallMetadata = new Map< string, - { index: number; name: string; started: boolean } + { + index: number + name: string + started: boolean + // Set once TOOL_CALL_END has been emitted (via args.done or the + // output_item.done backfill) so the two paths don't double-emit. + ended?: boolean + // Set when args.done arrives before TOOL_CALL_START could fire + // (output_item.added lacked a name). output_item.done picks these + // up to emit the missing END. + pendingArguments?: string + } >() - const requestParams = this.mapOptionsToRequest(options) // AG-UI lifecycle tracking const aguiState = { @@ -96,6 +80,11 @@ } try { + // mapOptionsToRequest can throw on caller-side validation failures + // (empty user content, unsupported parts, webSearchTool() rejection in + // the OpenRouter override). Keep it inside the try so those failures + // surface as RUN_ERROR events instead of iterator throws. + const requestParams = this.mapOptionsToRequest(options) options.logger.request( `activity=chat provider=${this.name} model=${this.model} messages=${options.messages.length} tools=${options.tools?.length ?? 0} stream=true`, { provider: this.name, model: this.model }, ) @@ -218,6 +207,16 @@ response satisfies OpenAI_SDK.Responses.Response, ) + // Fail loud on empty content rather than letting it cascade into a + // confusing "Failed to parse JSON. Content: " error — the root cause + // (the model returned no text content for the structured request) is + // then visible in logs. Mirrors the chat-completions sibling. + if (rawText.length === 0) { + throw new Error( + `${this.name}.structuredOutput: response contained no content`, + ) + } + // Parse the JSON response let parsed: unknown try { @@ -276,37 +275,25 @@ } /** - * Performs the non-streaming Responses API network call. The default uses - * the OpenAI SDK (`client.responses.create`), which covers any provider - * whose endpoint accepts the OpenAI SDK verbatim. - * - * Override in subclasses whose SDK has a different call shape — for - * example `@openrouter/sdk` exposes `client.beta.responses.send - * ({ responsesRequest })` with camelCase fields. The override is - * responsible for converting the params shape on the way in and returning - * an object structurally compatible with `OpenAI_SDK.Responses.Response` - * (the base only reads documented fields like `response.output[…]`). + * Performs the non-streaming Responses API network call. Subclasses + * implement against whatever SDK or HTTP client they bridge to. Must + * return a value structurally compatible with `Response` — the base reads + * documented fields like `response.output[...]`. */ - protected async callResponse( + protected abstract callResponse( params: OpenAI_SDK.Responses.ResponseCreateParamsNonStreaming, requestOptions: ReturnType<typeof extractRequestOptions>, - ): Promise<OpenAI_SDK.Responses.Response> { - return this.client.responses.create(params, requestOptions) - } + ): Promise<OpenAI_SDK.Responses.Response> /** - * Performs the streaming Responses API network call. Same pattern as - * {@link callResponse} — default uses the OpenAI SDK; override for - * providers whose SDK exposes a different streaming entry point. Returns - * an `AsyncIterable` because the base's - * {@link processStreamChunks} only needs structural iteration over events. +
+   * `AsyncIterable<OpenAI_SDK.Responses.ResponseStreamEvent>`; the base's
+   * `processStreamChunks` only needs structural iteration over events.
    */
-  protected async callResponseStream(
+  protected abstract callResponseStream(
     params: OpenAI_SDK.Responses.ResponseCreateParamsStreaming,
     requestOptions: ReturnType<typeof extractRequestOptions>,
-  ): Promise<AsyncIterable<OpenAI_SDK.Responses.ResponseStreamEvent>> {
-    return this.client.responses.create(params, requestOptions)
-  }
+  ): Promise<AsyncIterable<OpenAI_SDK.Responses.ResponseStreamEvent>>
 
   /**
    * Extract text content from a non-streaming Responses API response.
@@ -317,17 +304,29 @@
   ): string {
     let textContent = ''
     let refusal: string | undefined
+    let sawMessageItem = false
+    const observedItemTypes = new Set<string>()
 
     for (const item of response.output) {
+      observedItemTypes.add(item.type)
       if (item.type === 'message') {
+        sawMessageItem = true
        for (const part of item.content) {
-          if (part.type === 'output_text') {
-            textContent += part.text
+          // Widen the part out of the discriminated union before switching
+          // on `type`, so future SDK variants (e.g. `output_audio`,
+          // `output_image`) hit the explicit error path instead of being
+          // misreported as refusals when they are added to the union.
+          // Mirrors the streaming side's handleContentPart.
+          const partType = (part as { type: string }).type
+          if (partType === 'output_text') {
+            textContent += (part as { text?: string }).text ?? ''
+          } else if (partType === 'refusal') {
+            const refusalText = (part as { refusal?: string }).refusal
+            refusal = refusalText || refusal || 'Refused without explanation'
           } else {
-            // The Responses SDK currently models message content as
-            // `output_text | refusal`, so the only non-text branch is a
-            // refusal. Capture it so we can surface a distinct error below.
-            refusal = part.refusal || refusal || 'Refused without explanation'
+            throw new Error(
+              `${this.name}.extractTextFromResponse: unsupported message content part type "${partType}"`,
+            )
           }
         }
       }
     }
@@ -342,6 +341,16 @@
       throw err
     }
 
+    // Response had items but none carried message text (e.g. only
+    // function_call or reasoning items). Surface that explicitly so a
+    // downstream structured-output caller doesn't see a misleading
+    // "Failed to parse JSON. Content: " from an empty string.
+    if (!textContent && response.output.length > 0 && !sawMessageItem) {
+      throw new Error(
+        `${this.name}.extractTextFromResponse: response.output contained items of type(s) [${[...observedItemTypes].sort().join(', ')}] but no message text — the model returned a non-text response`,
+      )
+    }
+
     return textContent
   }
 
@@ -364,7 +373,13 @@
     stream: AsyncIterable<OpenAI_SDK.Responses.ResponseStreamEvent>,
     toolCallMetadata: Map<
       string,
-      { index: number; name: string; started: boolean }
+      {
+        index: number
+        name: string
+        started: boolean
+        ended?: boolean
+        pendingArguments?: string
+      }
     >,
     options: TextOptions,
     aguiState: {
@@ -724,6 +739,40 @@
           continue
         }
 
+        // Upstreams that emit `content_part.done` without any preceding
+        // deltas (or `content_part.added`) still need a START event before
+        // CONTENT — otherwise consumers tracking start/end pairs see content
+        // without a start and never see an end. Emit the lifecycle opener
+        // for whichever stream this content_part belongs to before yielding
+        // the CONTENT chunk; the post-loop block emits the matching END.
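+        // Illustrative sequence for a delta-less upstream (single text part,
+        // hypothetical payload): content_part.done({ type: 'output_text',
+        // text: 'Hi' }) yields TEXT_MESSAGE_START here, the CONTENT chunk
+        // below, and the TEXT_MESSAGE_END backstop after the loop.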
+ if ( + contentPart.type === 'output_text' && + !hasEmittedTextMessageStart + ) { + hasEmittedTextMessageStart = true + yield { + type: EventType.TEXT_MESSAGE_START, + messageId: aguiState.messageId, + model: model || options.model, + timestamp: Date.now(), + role: 'assistant', + } satisfies StreamChunk + } else if ( + contentPart.type === 'reasoning_text' && + !hasEmittedStepStarted + ) { + hasEmittedStepStarted = true + stepId = generateId(this.name) + yield { + type: EventType.STEP_STARTED, + stepName: stepId, + stepId, + model: model || options.model, + timestamp: Date.now(), + stepType: 'thinking', + } satisfies StreamChunk + } + // Only emit if we haven't been streaming deltas (e.g., for non-streaming responses) const doneChunk = handleContentPart(contentPart) yield doneChunk @@ -738,27 +787,35 @@ export class OpenAICompatibleResponsesTextAdapter< const item = chunk.item if (item.type === 'function_call' && item.id) { const existing = toolCallMetadata.get(item.id) - // Only emit TOOL_CALL_START on the FIRST output_item.added for - // an item id. A duplicate emission (which can happen on retried - // streams or replay) would violate AG-UI's start-once contract. - if (!existing?.started) { - if (!existing) { - toolCallMetadata.set(item.id, { - index: chunk.output_index, - name: item.name || '', - started: false, - }) - } + // Track the item as soon as we see it so subsequent arg deltas + // aren't logged as orphans, but only emit TOOL_CALL_START when + // both id AND name are populated. Emitting START with an empty + // name would propagate into TOOL_CALL_END (which reads the same + // metadata) and route the tool call to whatever name happens to + // match `''` downstream — a silent misroute. + if (!existing) { + toolCallMetadata.set(item.id, { + index: chunk.output_index, + name: item.name || '', + started: false, + }) + } else if (!existing.name && item.name) { + // A later output_item.added for the same id finally carries + // the name. Update so the gated emission below can fire. + existing.name = item.name + } + const metadata = toolCallMetadata.get(item.id)! + if (!metadata.started && metadata.name) { yield { type: EventType.TOOL_CALL_START, toolCallId: item.id, - toolCallName: item.name || '', - toolName: item.name || '', + toolCallName: metadata.name, + toolName: metadata.name, model: model || options.model, timestamp: Date.now(), index: chunk.output_index, } satisfies StreamChunk - toolCallMetadata.get(item.id)!.started = true + metadata.started = true } } } @@ -805,13 +862,19 @@ export class OpenAICompatibleResponsesTextAdapter< // Get the function name from metadata (captured in output_item.added) const metadata = toolCallMetadata.get(item_id) - // Skip TOOL_CALL_END for items whose start was never emitted (no - // matching `output_item.added`). Emitting END without START would - // produce an unbalanced AG-UI lifecycle event downstream consumers - // can't pair. + // If the matching START was never emitted (the upstream sent an + // `output_item.added` without a name and no later event has filled + // it in yet), defer END until `output_item.done` or + // `response.completed` can backfill the name. We stash the raw + // arguments so the late emission has them. Emitting END without + // START would produce an unbalanced AG-UI lifecycle event + // downstream consumers can't pair. 
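+          // Recovery timeline for a late-named call, for illustration:
+          //   output_item.added { id }            -> tracked, START withheld
+          //   function_call_arguments.done { id } -> deferred here via pendingArguments
+          //   output_item.done { id, name, args } -> backfills START, then END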
if (!metadata?.started) { + if (metadata) { + metadata.pendingArguments = chunk.arguments + } options.logger.errors( - `${this.name}.processStreamChunks orphan function_call_arguments.done`, + `${this.name}.processStreamChunks deferring function_call_arguments.done — TOOL_CALL_START not yet emitted (waiting for name)`, { source: `${this.name}.processStreamChunks`, toolCallId: item_id, @@ -820,7 +883,12 @@ export class OpenAICompatibleResponsesTextAdapter< ) continue } + // The output_item.done backstop may have already emitted END (when + // it arrived before args.done with a populated item.arguments). + // Skip so we never produce a duplicate close for the same id. + if (metadata.ended) continue const name = metadata.name || '' + metadata.ended = true // Parse arguments. Surface parse failures via the logger so a // model emitting malformed JSON is debuggable instead of silently @@ -859,7 +927,158 @@ export class OpenAICompatibleResponsesTextAdapter< } satisfies StreamChunk } + // `output_item.done` is the last point at which a function_call's + // name is guaranteed to be on the wire — it carries the fully-formed + // ResponseFunctionToolCall. Use it as a backstop to recover any + // tool call whose name was missing from `output_item.added` (and + // whose START + END therefore never fired). + if (chunk.type === 'response.output_item.done') { + const item = chunk.item + if (item.type === 'function_call' && item.id) { + const metadata = toolCallMetadata.get(item.id) ?? { + index: chunk.output_index, + name: item.name || '', + started: false, + } + if (!toolCallMetadata.has(item.id)) { + toolCallMetadata.set(item.id, metadata) + } else if (!metadata.name && item.name) { + metadata.name = item.name + } + // Emit gated START if we now have a name and never started. + if (!metadata.started && metadata.name) { + yield { + type: EventType.TOOL_CALL_START, + toolCallId: item.id, + toolCallName: metadata.name, + toolName: metadata.name, + model: model || options.model, + timestamp: Date.now(), + index: metadata.index, + } satisfies StreamChunk + metadata.started = true + } + // Emit END if we have args (either from a previously-deferred + // args.done OR from item.arguments) and haven't already ended. + const rawArgs = + typeof item.arguments === 'string' && item.arguments.length > 0 + ? item.arguments + : metadata.pendingArguments + if (metadata.started && !metadata.ended && rawArgs !== undefined) { + const name = metadata.name || '' + let parsedInput: unknown = {} + if (rawArgs) { + try { + const parsed = JSON.parse(rawArgs) + parsedInput = + parsed && typeof parsed === 'object' ? 
parsed : {} + } catch (parseError) { + options.logger.errors( + `${this.name}.processStreamChunks tool-args JSON parse failed (output_item.done backfill)`, + { + error: toRunErrorPayload( + parseError, + `tool ${name} (${item.id}) returned malformed JSON arguments`, + ), + source: `${this.name}.processStreamChunks`, + toolCallId: item.id, + toolName: name, + rawArguments: rawArgs, + }, + ) + parsedInput = {} + } + } + yield { + type: EventType.TOOL_CALL_END, + toolCallId: item.id, + toolCallName: name, + toolName: name, + model: model || options.model, + timestamp: Date.now(), + input: parsedInput, + } satisfies StreamChunk + metadata.ended = true + metadata.pendingArguments = undefined + } + } + } + if (chunk.type === 'response.completed') { + // Final backstop for function_call lifecycle: if a function_call + // appears in `response.output[]` but was never matched by an + // output_item.added/done with a name, recover the missing START + // (and END if args were pending). Without this, a tool call could + // be silently dropped from the AG-UI stream while `hasFunctionCalls` + // below still routes the run's finishReason to 'tool_calls' — + // leaving consumers waiting for tool results they never saw start. + for (const item of chunk.response.output) { + if (item.type !== 'function_call' || !item.id) continue + const metadata = toolCallMetadata.get(item.id) ?? { + index: 0, + name: item.name || '', + started: false, + } + if (!toolCallMetadata.has(item.id)) { + toolCallMetadata.set(item.id, metadata) + } else if (!metadata.name && item.name) { + metadata.name = item.name + } + if (!metadata.started && metadata.name) { + yield { + type: EventType.TOOL_CALL_START, + toolCallId: item.id, + toolCallName: metadata.name, + toolName: metadata.name, + model: model || options.model, + timestamp: Date.now(), + index: metadata.index, + } satisfies StreamChunk + metadata.started = true + } + const rawArgs = + typeof item.arguments === 'string' && item.arguments.length > 0 + ? item.arguments + : metadata.pendingArguments + if (metadata.started && !metadata.ended) { + const name = metadata.name || '' + let parsedInput: unknown = {} + if (rawArgs) { + try { + const parsed = JSON.parse(rawArgs) + parsedInput = + parsed && typeof parsed === 'object' ? 
parsed : {} + } catch (parseError) { + options.logger.errors( + `${this.name}.processStreamChunks tool-args JSON parse failed (response.completed backfill)`, + { + error: toRunErrorPayload( + parseError, + `tool ${name} (${item.id}) returned malformed JSON arguments`, + ), + source: `${this.name}.processStreamChunks`, + toolCallId: item.id, + toolName: name, + rawArguments: rawArgs, + }, + ) + parsedInput = {} + } + } + yield { + type: EventType.TOOL_CALL_END, + toolCallId: item.id, + toolCallName: name, + toolName: name, + model: model || options.model, + timestamp: Date.now(), + input: parsedInput, + } satisfies StreamChunk + metadata.ended = true + metadata.pendingArguments = undefined + } + } + // Emit TEXT_MESSAGE_END if we had text content if (hasEmittedTextMessageStart) { yield { diff --git a/packages/typescript/ai-openai-compatible/src/adapters/transcription.ts b/packages/typescript/ai-openai-compatible/src/adapters/transcription.ts deleted file mode 100644 index 702dc6479..000000000 --- a/packages/typescript/ai-openai-compatible/src/adapters/transcription.ts +++ /dev/null @@ -1,194 +0,0 @@ -import { BaseTranscriptionAdapter } from '@tanstack/ai/adapters' -import { toRunErrorPayload } from '@tanstack/ai/adapter-internals' -import { base64ToArrayBuffer, generateId } from '@tanstack/ai-utils' -import { createOpenAICompatibleClient } from '../utils/client' -import type { - TranscriptionOptions, - TranscriptionResult, - TranscriptionSegment, -} from '@tanstack/ai' -import type OpenAI_SDK from 'openai' -import type { OpenAICompatibleClientConfig } from '../types/config' - -/** - * OpenAI-Compatible Transcription (Speech-to-Text) Adapter - * - * A generalized base class for providers that implement OpenAI-compatible audio - * transcription APIs. Providers can extend this class and only need to: - * - Set `baseURL` in the config - * - Lock the generic type parameters to provider-specific types - * - Override audio handling or response mapping methods as needed - * - * All methods that handle audio input or map response formats are `protected` - * so subclasses can override them. 
- */ -export class OpenAICompatibleTranscriptionAdapter< - TModel extends string, - TProviderOptions extends object = Record, -> extends BaseTranscriptionAdapter { - readonly name: string - - protected client: OpenAI_SDK - - constructor( - config: OpenAICompatibleClientConfig, - model: TModel, - name: string = 'openai-compatible', - ) { - super(model, {}) - this.name = name - this.client = createOpenAICompatibleClient(config) - } - - async transcribe( - options: TranscriptionOptions, - ): Promise { - const { model, audio, language, prompt, responseFormat, modelOptions } = - options - - // Convert audio input to File object - const file = this.prepareAudioFile(audio) - - // Build request - const request: OpenAI_SDK.Audio.TranscriptionCreateParams = { - model, - file, - language, - prompt, - response_format: this.mapResponseFormat(responseFormat), - ...modelOptions, - } - - // Call API - use verbose_json to get timestamps when available - const useVerbose = - responseFormat === 'verbose_json' || - (!responseFormat && this.shouldDefaultToVerbose(model)) - - try { - options.logger.request( - `activity=transcription provider=${this.name} model=${model} verbose=${useVerbose}`, - { provider: this.name, model }, - ) - if (useVerbose) { - const response = await this.client.audio.transcriptions.create({ - ...request, - response_format: 'verbose_json', - }) - - return { - id: generateId(this.name), - model, - text: response.text, - language: response.language, - duration: response.duration, - segments: response.segments?.map( - (seg): TranscriptionSegment => ({ - id: seg.id, - start: seg.start, - end: seg.end, - text: seg.text, - // The OpenAI SDK types `avg_logprob` as `number`, so call Math.exp - // directly. Previously this was guarded with `seg.avg_logprob ?` - // which treated `0` (perfect-confidence) as missing. - confidence: Math.exp(seg.avg_logprob), - }), - ), - words: response.words?.map((w) => ({ - word: w.word, - start: w.start, - end: w.end, - })), - } - } else { - const response = await this.client.audio.transcriptions.create(request) - - return { - id: generateId(this.name), - model, - text: typeof response === 'string' ? response : response.text, - language, - } - } - } catch (error: unknown) { - // Narrow before logging: raw SDK errors can carry request metadata - // (including auth headers) which we must never surface to user loggers. 
- options.logger.errors(`${this.name}.transcribe fatal`, { - error: toRunErrorPayload(error, `${this.name}.transcribe failed`), - source: `${this.name}.transcribe`, - }) - throw error - } - } - - protected prepareAudioFile(audio: string | File | Blob | ArrayBuffer): File { - // If already a File, return it - if (typeof File !== 'undefined' && audio instanceof File) { - return audio - } - - // If Blob, convert to File - if (typeof Blob !== 'undefined' && audio instanceof Blob) { - this.ensureFileSupport() - return new File([audio], 'audio.mp3', { - type: audio.type || 'audio/mpeg', - }) - } - - // If ArrayBuffer, convert to File - if (typeof ArrayBuffer !== 'undefined' && audio instanceof ArrayBuffer) { - this.ensureFileSupport() - return new File([audio], 'audio.mp3', { type: 'audio/mpeg' }) - } - - // If base64 string, decode and convert to File - if (typeof audio === 'string') { - this.ensureFileSupport() - - // Check if it's a data URL - if (audio.startsWith('data:')) { - const parts = audio.split(',') - const header = parts[0] - const base64Data = parts[1] || '' - const mimeMatch = header?.match(/data:([^;]+)/) - const mimeType = mimeMatch?.[1] || 'audio/mpeg' - const bytes = base64ToArrayBuffer(base64Data) - const extension = mimeType.split('/')[1] || 'mp3' - return new File([bytes], `audio.${extension}`, { type: mimeType }) - } - - // Assume raw base64 - const bytes = base64ToArrayBuffer(audio) - return new File([bytes], 'audio.mp3', { type: 'audio/mpeg' }) - } - - throw new Error('Invalid audio input type') - } - - /** - * Checks that the global `File` constructor is available. - * Throws a descriptive error in environments that lack it (e.g. Node < 20). - */ - private ensureFileSupport(): void { - if (typeof File === 'undefined') { - throw new Error( - '`File` is not available in this environment. ' + - 'Use Node.js 20 or newer, or pass a File object directly.', - ) - } - } - - /** - * Whether the adapter should default to verbose_json when no response format is specified. - * Override in provider-specific subclasses for model-specific behavior. - */ - protected shouldDefaultToVerbose(_model: string): boolean { - return false - } - - protected mapResponseFormat( - format?: 'json' | 'text' | 'srt' | 'verbose_json' | 'vtt', - ): OpenAI_SDK.Audio.TranscriptionCreateParams['response_format'] { - if (!format) return 'json' - return format as OpenAI_SDK.Audio.TranscriptionCreateParams['response_format'] - } -} diff --git a/packages/typescript/ai-openai-compatible/src/adapters/tts.ts b/packages/typescript/ai-openai-compatible/src/adapters/tts.ts deleted file mode 100644 index b61c9c095..000000000 --- a/packages/typescript/ai-openai-compatible/src/adapters/tts.ts +++ /dev/null @@ -1,124 +0,0 @@ -import { BaseTTSAdapter } from '@tanstack/ai/adapters' -import { toRunErrorPayload } from '@tanstack/ai/adapter-internals' -import { arrayBufferToBase64, generateId } from '@tanstack/ai-utils' -import { createOpenAICompatibleClient } from '../utils/client' -import type { TTSOptions, TTSResult } from '@tanstack/ai' -import type OpenAI_SDK from 'openai' -import type { OpenAICompatibleClientConfig } from '../types/config' - -/** - * OpenAI-Compatible Text-to-Speech Adapter - * - * A generalized base class for providers that implement OpenAI-compatible TTS APIs. 
- * Providers can extend this class and only need to: - * - Set `baseURL` in the config - * - Lock the generic type parameters to provider-specific types - * - Override validation methods or request building for provider-specific constraints - * - * All methods that validate inputs or build requests are `protected` so subclasses - * can override them. - */ -export class OpenAICompatibleTTSAdapter< - TModel extends string, - TProviderOptions extends object = Record, -> extends BaseTTSAdapter { - readonly name: string - - protected client: OpenAI_SDK - - constructor( - config: OpenAICompatibleClientConfig, - model: TModel, - name: string = 'openai-compatible', - ) { - super(model, {}) - this.name = name - this.client = createOpenAICompatibleClient(config) - } - - async generateSpeech( - options: TTSOptions, - ): Promise { - const { model, text, voice, format, speed, modelOptions } = options - - // Validate inputs - this.validateAudioInput(text) - this.validateSpeed(speed) - this.validateInstructions(model, modelOptions) - - // Build request - const request: OpenAI_SDK.Audio.SpeechCreateParams = { - model, - input: text, - voice: (voice || 'alloy') as OpenAI_SDK.Audio.SpeechCreateParams['voice'], - response_format: format, - speed, - ...modelOptions, - } - - try { - options.logger.request( - `activity=tts provider=${this.name} model=${model} format=${request.response_format ?? 'default'} voice=${request.voice}`, - { provider: this.name, model }, - ) - const response = await this.client.audio.speech.create(request) - - // Convert response to base64. Buffer is Node-only; use atob fallback in - // browser/edge runtimes where the SDK can run. - const arrayBuffer = await response.arrayBuffer() - const base64 = arrayBufferToBase64(arrayBuffer) - - const outputFormat = (request.response_format as string) || 'mp3' - const contentType = this.getContentType(outputFormat) - - return { - id: generateId(this.name), - model, - audio: base64, - format: outputFormat, - contentType, - } - } catch (error: unknown) { - // Narrow before logging: raw SDK errors can carry request metadata - // (including auth headers) which we must never surface to user loggers. 
- options.logger.errors(`${this.name}.generateSpeech fatal`, { - error: toRunErrorPayload(error, `${this.name}.generateSpeech failed`), - source: `${this.name}.generateSpeech`, - }) - throw error - } - } - - protected validateAudioInput(text: string): void { - if (text.length > 4096) { - throw new Error('Input text exceeds maximum length of 4096 characters.') - } - } - - protected validateSpeed(speed?: number): void { - if (speed !== undefined) { - if (speed < 0.25 || speed > 4.0) { - throw new Error('Speed must be between 0.25 and 4.0.') - } - } - } - - protected validateInstructions( - _model: string, - _modelOptions?: TProviderOptions, - ): void { - // Default: no instructions validation — subclasses can override - } - - protected getContentType(format: string): string { - const contentTypes: Record = { - mp3: 'audio/mpeg', - opus: 'audio/opus', - aac: 'audio/aac', - flac: 'audio/flac', - wav: 'audio/wav', - pcm: 'audio/pcm', - } - return contentTypes[format] || 'audio/mpeg' - } -} diff --git a/packages/typescript/ai-openai-compatible/src/adapters/video.ts b/packages/typescript/ai-openai-compatible/src/adapters/video.ts deleted file mode 100644 index 8aaf1ad77..000000000 --- a/packages/typescript/ai-openai-compatible/src/adapters/video.ts +++ /dev/null @@ -1,385 +0,0 @@ -import { BaseVideoAdapter } from '@tanstack/ai/adapters' -import { toRunErrorPayload } from '@tanstack/ai/adapter-internals' -import { arrayBufferToBase64 } from '@tanstack/ai-utils' -import { createOpenAICompatibleClient } from '../utils/client' -import type { - VideoGenerationOptions, - VideoJobResult, - VideoStatusResult, - VideoUrlResult, -} from '@tanstack/ai' -import type OpenAI_SDK from 'openai' -import type { OpenAICompatibleClientConfig } from '../types/config' - -/** - * Threshold for emitting a "this download will probably OOM serverless - * runtimes" warning. Anything larger than this (in bytes) gets surfaced via - * console.warn — workers and small isolates routinely run out of memory once - * a downloaded video is base64-encoded (the encoded form is ~33% larger and - * resides in V8 heap rather than streaming through the runtime's network - * layer). - */ -const LARGE_MEDIA_BUFFER_BYTES = 10 * 1024 * 1024 - -function warnIfLargeMediaBuffer( - byteLength: number, - source: string, - providerName: string, -): void { - if (byteLength <= LARGE_MEDIA_BUFFER_BYTES) return - // No InternalLogger plumbed through to these download paths yet; surface - // via console.warn so Workers / Lambda dashboards still capture it. - console.warn( - `[${providerName}.${source}] downloaded ${(byteLength / 1024 / 1024).toFixed(1)} MiB into memory before base64 encoding. ` + - `Workers/serverless runtimes commonly run out of memory above ~10 MiB. ` + - `Consider streaming the video through a CDN or your own storage layer instead.`, - ) -} - -/** - * OpenAI-Compatible Video Generation Adapter - * - * A generalized base class for providers that implement OpenAI-compatible video - * generation APIs. Uses a job/polling architecture for async video generation. - * - * Providers can extend this class and only need to: - * - Set `baseURL` in the config - * - Lock the generic type parameters to provider-specific types - * - Override validation or request building methods as needed - * - * All methods that validate inputs, build requests, or map responses are `protected` - * so subclasses can override them. - * - * @experimental Video generation is an experimental feature and may change. 
- */ -export class OpenAICompatibleVideoAdapter< - TModel extends string, - TProviderOptions extends object = Record, - TModelProviderOptionsByName extends Record = Record, - TModelSizeByName extends Record = Record, -> extends BaseVideoAdapter< - TModel, - TProviderOptions, - TModelProviderOptionsByName, - TModelSizeByName -> { - readonly name: string - - protected client: OpenAI_SDK - protected clientConfig: OpenAICompatibleClientConfig - - constructor( - config: OpenAICompatibleClientConfig, - model: TModel, - name: string = 'openai-compatible', - ) { - super(config, model) - this.name = name - this.clientConfig = config - this.client = createOpenAICompatibleClient(config) - } - - /** - * Create a new video generation job. - * - * @experimental Video generation is an experimental feature and may change. - */ - async createVideoJob( - options: VideoGenerationOptions, - ): Promise { - const { model, size, duration, modelOptions } = options - - // Validate inputs - this.validateVideoSize(model, size) - const seconds = duration ?? (modelOptions as any)?.seconds - this.validateVideoSeconds(model, seconds) - - // Build request - const request = this.buildRequest(options) - - try { - options.logger.request( - `activity=video.create provider=${this.name} model=${model} size=${request.size ?? 'default'} seconds=${request.seconds ?? 'default'}`, - { provider: this.name, model }, - ) - // The video API on the OpenAI SDK is still experimental and shipped on - // some SDK versions but not others; access through `videosClient` lets - // subclasses override the entry point or supply a polyfill without - // forcing every call site through `as any`. - const videosClient = this.getVideosClient() - const response = await videosClient.create(request) - - return { - jobId: response.id, - model, - } - } catch (error: any) { - options.logger.errors(`${this.name}.createVideoJob fatal`, { - error: toRunErrorPayload(error, `${this.name}.createVideoJob failed`), - source: `${this.name}.createVideoJob`, - }) - if (error?.message?.includes('videos') || error?.code === 'invalid_api') { - throw new Error( - `Video generation API is not available. The API may require special access. ` + - `Original error: ${error.message}`, - ) - } - throw error - } - } - - /** - * Returns the underlying OpenAI Videos resource. Pulled out as a protected - * accessor so subclasses targeting forks of the SDK can swap the access - * path without forcing each call site to cast through `any`. - */ - protected getVideosClient(): { - create: (req: Record) => Promise<{ id: string }> - retrieve: (id: string) => Promise<{ - id: string - status: string - progress?: number - url?: string - expires_at?: number - error?: { message?: string } - }> - downloadContent?: (id: string) => Promise - content?: (id: string) => Promise - getContent?: (id: string) => Promise - download?: (id: string) => Promise - } { - return (this.client as unknown as { videos: any }).videos - } - - /** - * Get the current status of a video generation job. - * - * @experimental Video generation is an experimental feature and may change. 
- */ - async getVideoStatus(jobId: string): Promise { - try { - const videosClient = this.getVideosClient() - const response = await videosClient.retrieve(jobId) - - return { - jobId, - status: this.mapStatus(response.status), - progress: response.progress, - error: response.error?.message, - } - } catch (error: any) { - if (error.status === 404) { - return { - jobId, - status: 'failed', - error: 'Job not found', - } - } - throw error - } - } - - /** - * Get the URL to download/view the generated video. - * - * @experimental Video generation is an experimental feature and may change. - */ - async getVideoUrl(jobId: string): Promise { - try { - const videosClient = this.getVideosClient() - - // Prefer retrieve() because many openai-compatible backends (and the - // aimock test harness) return the URL directly on the video resource - // and do not implement a separate /content endpoint. Subclasses can - // override this method if they need to download raw bytes via - // downloadContent()/content(). - const videoInfo = await videosClient.retrieve(jobId) - if (videoInfo.url) { - return { - jobId, - url: videoInfo.url, - expiresAt: videoInfo.expires_at - ? new Date(videoInfo.expires_at) - : undefined, - } - } - - // SDK download fall-through: try the various possible method names in - // decreasing order of modernity. - if (typeof videosClient.downloadContent === 'function') { - const contentResponse = await videosClient.downloadContent(jobId) - const videoBlob = await contentResponse.blob() - const buffer = await videoBlob.arrayBuffer() - warnIfLargeMediaBuffer( - buffer.byteLength, - 'video.downloadContent', - this.name, - ) - const base64 = arrayBufferToBase64(buffer) - const mimeType = - contentResponse.headers.get('content-type') || 'video/mp4' - return { - jobId, - url: `data:${mimeType};base64,${base64}`, - expiresAt: undefined, - } - } - - // The remaining SDK fall-throughs all return a binary payload - // (Blob/Response/ArrayBuffer-shaped), NOT an `{ url, expires_at }` - // object the way the bottom return assumed. Convert to a data URL - // here so the caller actually receives a usable URL. - let response: any - if (typeof videosClient.content === 'function') { - response = await videosClient.content(jobId) - } else if (typeof videosClient.getContent === 'function') { - response = await videosClient.getContent(jobId) - } else if (typeof videosClient.download === 'function') { - response = await videosClient.download(jobId) - } else { - // Last resort: raw fetch with auth header. 
- const baseUrl = this.clientConfig.baseURL || 'https://api.openai.com/v1' - const apiKey = this.clientConfig.apiKey - - const contentResponse = await fetch( - `${baseUrl}/videos/${jobId}/content`, - { - method: 'GET', - headers: { - Authorization: `Bearer ${apiKey}`, - }, - }, - ) - - if (!contentResponse.ok) { - const contentType = contentResponse.headers.get('content-type') - if (contentType?.includes('application/json')) { - const errorData = await contentResponse.json().catch(() => ({})) - throw new Error( - errorData.error?.message || - `Failed to get video content: ${contentResponse.status}`, - ) - } - throw new Error( - `Failed to get video content: ${contentResponse.status}`, - ) - } - - const videoBlob = await contentResponse.blob() - const buffer = await videoBlob.arrayBuffer() - warnIfLargeMediaBuffer(buffer.byteLength, 'video.fetch', this.name) - const base64 = arrayBufferToBase64(buffer) - const mimeType = - contentResponse.headers.get('content-type') || 'video/mp4' - - return { - jobId, - url: `data:${mimeType};base64,${base64}`, - expiresAt: undefined, - } - } - - // The fall-through SDK methods produce a Blob-ish or fetch-`Response`-ish - // object. Read it as bytes and wrap in a data URL so callers see an - // actual playable URL instead of the API endpoint URL (which is what - // `response.url` would be on a fetch Response). - const fallthroughBlob = - typeof response?.blob === 'function' - ? await response.blob() - : response instanceof Blob - ? response - : null - if (!fallthroughBlob) { - throw new Error( - `Video content download via SDK fall-through returned an unexpected shape (no blob()). ` + - `Override getVideoUrl() in your subclass to handle this provider.`, - ) - } - const fallthroughBuffer = await fallthroughBlob.arrayBuffer() - warnIfLargeMediaBuffer( - fallthroughBuffer.byteLength, - 'video.sdkFallthrough', - this.name, - ) - const fallthroughBase64 = arrayBufferToBase64(fallthroughBuffer) - const fallthroughMime = - (typeof response?.headers?.get === 'function' - ? response.headers.get('content-type') - : undefined) || - fallthroughBlob.type || - 'video/mp4' - return { - jobId, - url: `data:${fallthroughMime};base64,${fallthroughBase64}`, - expiresAt: undefined, - } - } catch (error: any) { - if (error.status === 404) { - throw new Error(`Video job not found: ${jobId}`) - } - if (error.status === 400) { - throw new Error( - `Video is not ready for download. Check status first. Job ID: ${jobId}`, - ) - } - throw error - } - } - - protected buildRequest( - options: VideoGenerationOptions, - ): Record { - const { model, prompt, size, duration, modelOptions } = options - - const request: Record = { - model, - prompt, - } - - if (size) { - request['size'] = size - } else if ((modelOptions as any)?.size) { - request['size'] = (modelOptions as any).size - } - - const seconds = duration ?? 
(modelOptions as any)?.seconds - if (seconds !== undefined) { - request['seconds'] = String(seconds) - } - - return request - } - - protected validateVideoSize(_model: string, _size?: string): void { - // Default: no size validation — subclasses can override - } - - protected validateVideoSeconds( - _model: string, - _seconds?: number | string, - ): void { - // Default: no duration validation — subclasses can override - } - - protected mapStatus( - apiStatus: string, - ): 'pending' | 'processing' | 'completed' | 'failed' { - switch (apiStatus) { - case 'queued': - case 'pending': - return 'pending' - case 'processing': - case 'in_progress': - return 'processing' - case 'completed': - case 'succeeded': - return 'completed' - case 'failed': - case 'error': - case 'cancelled': - return 'failed' - default: - return 'processing' - } - } -} diff --git a/packages/typescript/ai-openai-compatible/src/index.ts b/packages/typescript/ai-openai-compatible/src/index.ts index 22b3f429a..371bbc0b7 100644 --- a/packages/typescript/ai-openai-compatible/src/index.ts +++ b/packages/typescript/ai-openai-compatible/src/index.ts @@ -1,11 +1,10 @@ export { makeStructuredOutputCompatible } from './utils/schema-converter' -export { createOpenAICompatibleClient } from './utils/client' -export type { OpenAICompatibleClientConfig } from './types/config' export * from './tools/index' export { OpenAICompatibleChatCompletionsTextAdapter } from './adapters/chat-completions-text' -// Re-export the OpenAI SDK types subclasses need when overriding the -// `callChatCompletion*` / `processStreamChunks` hooks, so they don't need -// to declare `openai` as a direct dependency. +// Re-export the OpenAI SDK types subclasses need when implementing the +// `callChatCompletion*` hooks. Type-only — `openai` is an optional peer in +// this package, so consumers that use these types must declare `openai` +// in their own deps (or devDeps if they only need types). export type { ChatCompletion, ChatCompletionChunk, @@ -18,10 +17,7 @@ export { type ChatCompletionFunctionTool, } from './adapters/chat-completions-tool-converter' export { OpenAICompatibleResponsesTextAdapter } from './adapters/responses-text' -// Re-export the OpenAI Responses SDK types subclasses need when overriding -// the `callResponse*` / `processStreamChunks` / `extractTextFromResponse` -// hooks, so subclass packages don't need to declare `openai` as a direct -// dependency. +// Type-only re-exports for subclasses implementing the `callResponse*` hooks. 
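+//
+// A subclass that still rides the OpenAI SDK might wire the hooks up roughly
+// like this (illustrative sketch only; `myKey` / `myBaseUrl` are placeholders,
+// and the call shape mirrors the removed default implementation):
+//
+//   class MyResponsesAdapter extends OpenAICompatibleResponsesTextAdapter<string> {
+//     private client = new OpenAI({ apiKey: myKey, baseURL: myBaseUrl })
+//     protected callResponse(params, opts) {
+//       return this.client.responses.create(params, opts)
+//     }
+//     protected callResponseStream(params, opts) {
+//       return this.client.responses.create(params, opts)
+//     }
+//   }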
 export type {
   Response as ResponsesResponse,
   ResponseCreateParams,
@@ -35,11 +31,3 @@
   convertToolsToResponsesFormat,
   type ResponsesFunctionTool,
 } from './adapters/responses-tool-converter'
-export { OpenAICompatibleImageAdapter } from './adapters/image'
-export {
-  OpenAICompatibleSummarizeAdapter,
-  type ChatStreamCapable,
-} from './adapters/summarize'
-export { OpenAICompatibleTranscriptionAdapter } from './adapters/transcription'
-export { OpenAICompatibleTTSAdapter } from './adapters/tts'
-export { OpenAICompatibleVideoAdapter } from './adapters/video'
diff --git a/packages/typescript/ai-openai-compatible/src/types/config.ts b/packages/typescript/ai-openai-compatible/src/types/config.ts
deleted file mode 100644
index 976336b42..000000000
--- a/packages/typescript/ai-openai-compatible/src/types/config.ts
+++ /dev/null
@@ -1,5 +0,0 @@
-import type { ClientOptions } from 'openai'
-
-export interface OpenAICompatibleClientConfig extends ClientOptions {
-  apiKey: string
-}
diff --git a/packages/typescript/ai-openai-compatible/src/utils/client.ts b/packages/typescript/ai-openai-compatible/src/utils/client.ts
deleted file mode 100644
index 8dd54b2fc..000000000
--- a/packages/typescript/ai-openai-compatible/src/utils/client.ts
+++ /dev/null
@@ -1,8 +0,0 @@
-import OpenAI from 'openai'
-import type { OpenAICompatibleClientConfig } from '../types/config'
-
-export function createOpenAICompatibleClient(
-  config: OpenAICompatibleClientConfig,
-): OpenAI {
-  return new OpenAI(config)
-}
diff --git a/packages/typescript/ai-openai-compatible/tests/chat-completions-text.test.ts b/packages/typescript/ai-openai-compatible/tests/chat-completions-text.test.ts
index 7eac9c634..4346e4770 100644
--- a/packages/typescript/ai-openai-compatible/tests/chat-completions-text.test.ts
+++ b/packages/typescript/ai-openai-compatible/tests/chat-completions-text.test.ts
@@ -8,18 +8,30 @@
 const testLogger = resolveDebugOption(false)
 
 // Declare mockCreate at module level
 let mockCreate: ReturnType<typeof vi.fn>
 
-// Mock the OpenAI SDK
-vi.mock('openai', () => {
-  return {
-    default: class {
-      chat = {
-        completions: {
-          create: (...args: Array<any>) => mockCreate(...args),
-        },
-      }
-    },
+/**
+ * Concrete test subclass — the base is abstract, so tests need a class that
+ * implements `callChatCompletion*`. The hooks delegate to `mockCreate` so
+ * each test can configure the SDK response without spinning up a real client.
+ * The constructor accepts (config, model, name) so test call sites read
+ * naturally; config is ignored since the base no longer constructs a client.
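+ * Both hooks funnel into the same `mockCreate`; a test resolves it to an
+ * async iterable for streaming paths or a plain completion object otherwise.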
+ */
+class TestChatCompletionsAdapter extends OpenAICompatibleChatCompletionsTextAdapter<string> {
+  constructor(_config: unknown, model: string, name?: string) {
+    super(model, name)
   }
-})
+  protected async callChatCompletion(
+    params: any,
+    requestOptions: any,
+  ): Promise<any> {
+    return mockCreate(params, requestOptions)
+  }
+  protected async callChatCompletionStream(
+    params: any,
+    requestOptions: any,
+  ): Promise<any> {
+    return mockCreate(params, requestOptions)
+  }
+}
 
 // Helper to create async iterable from chunks
 function createAsyncIterable<T>(chunks: Array<T>): AsyncIterable<T> {
@@ -72,7 +84,7 @@
 
   describe('instantiation', () => {
     it('creates an adapter with default name', () => {
-      const adapter = new OpenAICompatibleChatCompletionsTextAdapter(
+      const adapter = new TestChatCompletionsAdapter(
        testConfig,
        'test-model',
       )
@@ -84,7 +96,7 @@
     })
 
     it('creates an adapter with custom name', () => {
-      const adapter = new OpenAICompatibleChatCompletionsTextAdapter(
+      const adapter = new TestChatCompletionsAdapter(
        testConfig,
        'test-model',
        'my-provider',
@@ -95,7 +107,7 @@
     })
 
     it('creates an adapter with custom baseURL', () => {
-      const adapter = new OpenAICompatibleChatCompletionsTextAdapter(
+      const adapter = new TestChatCompletionsAdapter(
         {
           apiKey: 'test-key',
           baseURL: 'https://custom.api.example.com/v1',
@@ -139,7 +151,7 @@
       ]
       setupMockSdkClient(streamChunks)
 
-      const adapter = new OpenAICompatibleChatCompletionsTextAdapter(
+      const adapter = new TestChatCompletionsAdapter(
        testConfig,
        'test-model',
       )
@@ -190,7 +202,7 @@
       ]
       setupMockSdkClient(streamChunks)
 
-      const adapter = new OpenAICompatibleChatCompletionsTextAdapter(
+      const adapter = new TestChatCompletionsAdapter(
        testConfig,
        'test-model',
       )
@@ -252,7 +264,7 @@
       ]
       setupMockSdkClient(streamChunks)
 
-      const adapter = new OpenAICompatibleChatCompletionsTextAdapter(
+      const adapter = new TestChatCompletionsAdapter(
        testConfig,
        'test-model',
       )
@@ -322,7 +334,7 @@
       ]
       setupMockSdkClient(streamChunks)
 
-      const adapter = new OpenAICompatibleChatCompletionsTextAdapter(
+      const adapter = new TestChatCompletionsAdapter(
        testConfig,
        'test-model',
       )
@@ -395,7 +407,7 @@
       ]
       setupMockSdkClient(streamChunks)
 
-      const adapter = new OpenAICompatibleChatCompletionsTextAdapter(
+      const adapter = new TestChatCompletionsAdapter(
        testConfig,
        'test-model',
       )
@@ -491,7 +503,7 @@
       ]
       setupMockSdkClient(streamChunks)
 
-      const adapter = new OpenAICompatibleChatCompletionsTextAdapter(
+      const adapter = new TestChatCompletionsAdapter(
        testConfig,
        'test-model',
       )
@@ -565,7 +577,7 @@
 
       mockCreate = vi.fn().mockResolvedValue(errorIterable)
 
-      const adapter = new OpenAICompatibleChatCompletionsTextAdapter(
+      const adapter = new TestChatCompletionsAdapter(
        testConfig,
        'test-model',
       )
@@ -590,7 +602,7 @@
 
    it('emits RUN_STARTED then RUN_ERROR when client.create throws', async () => {
      mockCreate =
vi.fn().mockRejectedValue(new Error('API key invalid')) - const adapter = new OpenAICompatibleChatCompletionsTextAdapter( + const adapter = new TestChatCompletionsAdapter( testConfig, 'test-model', ) @@ -628,7 +640,7 @@ describe('OpenAICompatibleChatCompletionsTextAdapter', () => { setupMockSdkClient([], nonStreamResponse) - const adapter = new OpenAICompatibleChatCompletionsTextAdapter( + const adapter = new TestChatCompletionsAdapter( testConfig, 'test-model', ) @@ -677,7 +689,7 @@ describe('OpenAICompatibleChatCompletionsTextAdapter', () => { setupMockSdkClient([], nonStreamResponse) - const adapter = new OpenAICompatibleChatCompletionsTextAdapter( + const adapter = new TestChatCompletionsAdapter( testConfig, 'test-model', ) @@ -716,7 +728,7 @@ describe('OpenAICompatibleChatCompletionsTextAdapter', () => { setupMockSdkClient([], nonStreamResponse) - const adapter = new OpenAICompatibleChatCompletionsTextAdapter( + const adapter = new TestChatCompletionsAdapter( testConfig, 'test-model', ) @@ -745,7 +757,7 @@ describe('OpenAICompatibleChatCompletionsTextAdapter', () => { } setupMockSdkClient([], nonStreamResponse) - const adapter = new OpenAICompatibleChatCompletionsTextAdapter( + const adapter = new TestChatCompletionsAdapter( testConfig, 'test-model', ) @@ -770,7 +782,7 @@ describe('OpenAICompatibleChatCompletionsTextAdapter', () => { } setupMockSdkClient([], nonStreamResponse) - const adapter = new OpenAICompatibleChatCompletionsTextAdapter( + const adapter = new TestChatCompletionsAdapter( testConfig, 'test-model', ) @@ -820,7 +832,7 @@ describe('OpenAICompatibleChatCompletionsTextAdapter', () => { setupMockSdkClient(streamChunks) const errorsSpy = vi.spyOn(testLogger, 'errors') - const adapter = new OpenAICompatibleChatCompletionsTextAdapter( + const adapter = new TestChatCompletionsAdapter( testConfig, 'test-model', ) @@ -852,12 +864,14 @@ describe('OpenAICompatibleChatCompletionsTextAdapter', () => { describe('subclassing', () => { it('allows subclassing with custom name', () => { class MyProviderAdapter extends OpenAICompatibleChatCompletionsTextAdapter { - constructor(apiKey: string, model: string) { - super( - { apiKey, baseURL: 'https://my-provider.com/v1' }, - model, - 'my-provider', - ) + constructor(_apiKey: string, model: string) { + super(model, 'my-provider') + } + protected async callChatCompletion(): Promise { + throw new Error('not called in this test') + } + protected async callChatCompletionStream(): Promise { + throw new Error('not called in this test') } } @@ -885,7 +899,7 @@ describe('OpenAICompatibleChatCompletionsTextAdapter', () => { ] setupMockSdkClient(streamChunks) - const adapter = new OpenAICompatibleChatCompletionsTextAdapter( + const adapter = new TestChatCompletionsAdapter( testConfig, 'test-model', ) @@ -926,7 +940,7 @@ describe('OpenAICompatibleChatCompletionsTextAdapter', () => { ] setupMockSdkClient(streamChunks) - const adapter = new OpenAICompatibleChatCompletionsTextAdapter( + const adapter = new TestChatCompletionsAdapter( testConfig, 'test-model', ) @@ -957,7 +971,7 @@ describe('OpenAICompatibleChatCompletionsTextAdapter', () => { setupMockSdkClient([], nonStreamResponse) - const adapter = new OpenAICompatibleChatCompletionsTextAdapter( + const adapter = new TestChatCompletionsAdapter( testConfig, 'test-model', ) @@ -997,7 +1011,7 @@ describe('OpenAICompatibleChatCompletionsTextAdapter', () => { ] setupMockSdkClient(streamChunks) - const adapter = new OpenAICompatibleChatCompletionsTextAdapter( + const adapter = new TestChatCompletionsAdapter( 
testConfig, 'test-model', ) diff --git a/packages/typescript/ai-openai-compatible/tests/media-adapters.test.ts b/packages/typescript/ai-openai-compatible/tests/media-adapters.test.ts deleted file mode 100644 index d49dbf222..000000000 --- a/packages/typescript/ai-openai-compatible/tests/media-adapters.test.ts +++ /dev/null @@ -1,367 +0,0 @@ -/** - * Smoke tests for the OpenAI-compatible media adapters (image, summarize, - * transcription, TTS, video). Each test verifies the adapter instantiates, - * forwards arguments to the OpenAI SDK shape we expect, and surfaces errors - * via `logger.errors` / `RUN_ERROR` rather than swallowing them. The mocks - * stand in for the OpenAI SDK; the real SDK is exercised in the e2e suite. - */ -import { describe, expect, it, beforeEach, vi } from 'vitest' -import { resolveDebugOption } from '@tanstack/ai/adapter-internals' -import { OpenAICompatibleImageAdapter } from '../src/adapters/image' -import { OpenAICompatibleSummarizeAdapter } from '../src/adapters/summarize' -import { OpenAICompatibleTranscriptionAdapter } from '../src/adapters/transcription' -import { OpenAICompatibleTTSAdapter } from '../src/adapters/tts' -import { OpenAICompatibleVideoAdapter } from '../src/adapters/video' -import type { ChatStreamCapable } from '../src/adapters/summarize' -import type { StreamChunk } from '@tanstack/ai' - -const testLogger = resolveDebugOption(false) - -let mockImagesGenerate: ReturnType -let mockTranscriptionsCreate: ReturnType -let mockSpeechCreate: ReturnType -let mockVideosCreate: ReturnType -let mockVideosRetrieve: ReturnType - -vi.mock('openai', () => { - return { - default: class { - images = { - generate: (...args: Array) => mockImagesGenerate(...args), - } - audio = { - transcriptions: { - create: (...args: Array) => - mockTranscriptionsCreate(...args), - }, - speech: { - create: (...args: Array) => mockSpeechCreate(...args), - }, - } - videos = { - create: (...args: Array) => mockVideosCreate(...args), - retrieve: (...args: Array) => mockVideosRetrieve(...args), - } - }, - } -}) - -const config = { - apiKey: 'test-key', - baseURL: 'https://api.test-provider.com/v1', -} - -beforeEach(() => { - vi.clearAllMocks() - mockImagesGenerate = vi.fn() - mockTranscriptionsCreate = vi.fn() - mockSpeechCreate = vi.fn() - mockVideosCreate = vi.fn() - mockVideosRetrieve = vi.fn() -}) - -describe('OpenAICompatibleImageAdapter', () => { - it('forwards model, prompt, n, and size to images.generate', async () => { - mockImagesGenerate.mockResolvedValue({ - data: [{ url: 'https://example.com/img.png' }], - }) - - const adapter = new OpenAICompatibleImageAdapter(config, 'test-model') - const result = await adapter.generateImages({ - logger: testLogger, - model: 'test-model', - prompt: 'a cat', - numberOfImages: 2, - size: '1024x1024', - }) - - expect(mockImagesGenerate).toHaveBeenCalledWith( - expect.objectContaining({ - model: 'test-model', - prompt: 'a cat', - n: 2, - size: '1024x1024', - stream: false, - }), - ) - expect(result.images).toHaveLength(1) - expect(result.images[0]).toMatchObject({ - url: 'https://example.com/img.png', - }) - }) - - it('rejects invalid number of images via base validator', async () => { - const adapter = new OpenAICompatibleImageAdapter(config, 'test-model') - await expect( - adapter.generateImages({ - logger: testLogger, - model: 'test-model', - prompt: 'a cat', - numberOfImages: 0, - }), - ).rejects.toThrow('at least 1') - }) - - it('logs to errors and rethrows on SDK failure', async () => { - const errors = vi.fn() - // testLogger is 
a class instance — spreading drops prototype methods, so - // wrap with a Proxy that overrides `errors` and forwards everything else. - const logger = new Proxy(testLogger, { - get(target, key) { - if (key === 'errors') return errors - return Reflect.get(target, key) - }, - }) - mockImagesGenerate.mockRejectedValue(new Error('boom')) - - const adapter = new OpenAICompatibleImageAdapter(config, 'test-model') - await expect( - adapter.generateImages({ - logger, - model: 'test-model', - prompt: 'a cat', - }), - ).rejects.toThrow('boom') - expect(errors).toHaveBeenCalled() - }) -}) - -describe('OpenAICompatibleSummarizeAdapter', () => { - function fakeTextAdapter( - chunks: Array, - ): ChatStreamCapable> { - return { - async *chatStream() { - for (const c of chunks) { - yield c - } - }, - } - } - - it('accumulates content from TEXT_MESSAGE_CONTENT chunks', async () => { - const adapter = new OpenAICompatibleSummarizeAdapter( - fakeTextAdapter([ - { - type: 'TEXT_MESSAGE_CONTENT', - delta: 'Hello ', - messageId: 'm1', - model: 'test-model', - timestamp: 1, - } as unknown as StreamChunk, - { - type: 'TEXT_MESSAGE_CONTENT', - delta: 'world', - messageId: 'm1', - model: 'test-model', - timestamp: 2, - } as unknown as StreamChunk, - { - type: 'RUN_FINISHED', - runId: 'r1', - model: 'test-model', - timestamp: 3, - usage: { promptTokens: 10, completionTokens: 5, totalTokens: 15 }, - finishReason: 'stop', - } as unknown as StreamChunk, - ]), - 'test-model', - 'test-provider', - ) - - const result = await adapter.summarize({ - logger: testLogger, - model: 'test-model', - text: 'Long text to summarise.', - }) - - expect(result.summary).toBe('Hello world') - expect(result.usage).toEqual({ - promptTokens: 10, - completionTokens: 5, - totalTokens: 15, - }) - }) - - it('throws and logs when the underlying chatStream emits RUN_ERROR', async () => { - const errors = vi.fn() - // testLogger is a class instance — spreading drops prototype methods, so - // wrap with a Proxy that overrides `errors` and forwards everything else. 
- const logger = new Proxy(testLogger, { - get(target, key) { - if (key === 'errors') return errors - return Reflect.get(target, key) - }, - }) - - const adapter = new OpenAICompatibleSummarizeAdapter( - { - async *chatStream() { - yield { - type: 'RUN_ERROR', - runId: 'r1', - model: 'test-model', - timestamp: 1, - error: { message: 'upstream rate limit', code: 'rate_limited' }, - } as unknown as StreamChunk - }, - }, - 'test-model', - 'test-provider', - ) - - await expect( - adapter.summarize({ - logger, - model: 'test-model', - text: 'irrelevant', - }), - ).rejects.toThrow('upstream rate limit') - expect(errors).toHaveBeenCalled() - }) -}) - -describe('OpenAICompatibleTranscriptionAdapter', () => { - it('forwards model and language and returns text-only result for non-verbose formats', async () => { - mockTranscriptionsCreate.mockResolvedValue({ text: 'hello world' }) - - const adapter = new OpenAICompatibleTranscriptionAdapter( - config, - 'whisper-1', - ) - const result = await adapter.transcribe({ - logger: testLogger, - model: 'whisper-1', - audio: new Blob([new Uint8Array([1, 2, 3])], { type: 'audio/mpeg' }), - language: 'en', - responseFormat: 'json', - }) - - expect(mockTranscriptionsCreate).toHaveBeenCalledWith( - expect.objectContaining({ - model: 'whisper-1', - language: 'en', - }), - ) - expect(result.text).toBe('hello world') - expect(result.segments).toBeUndefined() - }) - - it('decodes a base64 audio string to a File on the request path', async () => { - mockTranscriptionsCreate.mockResolvedValue({ text: 'decoded' }) - - const adapter = new OpenAICompatibleTranscriptionAdapter( - config, - 'whisper-1', - ) - // 3 raw bytes encoded as base64 - const base64 = 'AQID' - await adapter.transcribe({ - logger: testLogger, - model: 'whisper-1', - audio: base64, - responseFormat: 'json', - }) - - const callArgs = mockTranscriptionsCreate.mock.calls[0]?.[0] - expect(callArgs?.file).toBeDefined() - expect(callArgs?.file).toBeInstanceOf(File) - }) -}) - -describe('OpenAICompatibleTTSAdapter', () => { - it('forwards model/voice/format/speed and returns base64 audio', async () => { - const fakeBuffer = new Uint8Array([1, 2, 3, 4]).buffer - mockSpeechCreate.mockResolvedValue({ - arrayBuffer: () => Promise.resolve(fakeBuffer), - }) - - const adapter = new OpenAICompatibleTTSAdapter(config, 'tts-1') - const result = await adapter.generateSpeech({ - logger: testLogger, - model: 'tts-1', - text: 'Hello', - voice: 'alloy', - format: 'mp3', - speed: 1.0, - }) - - expect(mockSpeechCreate).toHaveBeenCalledWith( - expect.objectContaining({ - model: 'tts-1', - input: 'Hello', - voice: 'alloy', - response_format: 'mp3', - speed: 1.0, - }), - ) - expect(result.audio).toBeTruthy() - expect(result.contentType).toBe('audio/mpeg') - expect(result.format).toBe('mp3') - }) - - it('rejects out-of-range speed via base validator', async () => { - const adapter = new OpenAICompatibleTTSAdapter(config, 'tts-1') - await expect( - adapter.generateSpeech({ - logger: testLogger, - model: 'tts-1', - text: 'Hello', - speed: 5.0, - }), - ).rejects.toThrow('Speed') - }) -}) - -describe('OpenAICompatibleVideoAdapter', () => { - it('createVideoJob forwards model/prompt/size/duration and returns jobId', async () => { - mockVideosCreate.mockResolvedValue({ id: 'job-123' }) - - const adapter = new OpenAICompatibleVideoAdapter(config, 'sora-2') - const result = await adapter.createVideoJob({ - logger: testLogger, - model: 'sora-2', - prompt: 'a sunset', - size: '1080x1920', - duration: 4, - }) - - 
expect(mockVideosCreate).toHaveBeenCalledWith( - expect.objectContaining({ - model: 'sora-2', - prompt: 'a sunset', - size: '1080x1920', - seconds: '4', - }), - ) - expect(result.jobId).toBe('job-123') - }) - - it('getVideoStatus maps SDK status strings to the AG-UI vocabulary', async () => { - mockVideosRetrieve.mockResolvedValue({ - id: 'job-123', - status: 'queued', - progress: 5, - }) - - const adapter = new OpenAICompatibleVideoAdapter(config, 'sora-2') - const status = await adapter.getVideoStatus('job-123') - - expect(status.status).toBe('pending') - expect(status.progress).toBe(5) - }) - - it('getVideoUrl returns the URL directly when retrieve() exposes one', async () => { - mockVideosRetrieve.mockResolvedValue({ - id: 'job-123', - url: 'https://cdn.example.com/job-123.mp4', - expires_at: 1700000000, - }) - - const adapter = new OpenAICompatibleVideoAdapter(config, 'sora-2') - const result = await adapter.getVideoUrl('job-123') - - expect(result.url).toBe('https://cdn.example.com/job-123.mp4') - expect(result.expiresAt).toBeInstanceOf(Date) - }) -}) diff --git a/packages/typescript/ai-openai-compatible/tests/responses-text.test.ts b/packages/typescript/ai-openai-compatible/tests/responses-text.test.ts index abf9729f2..87a25a4d3 100644 --- a/packages/typescript/ai-openai-compatible/tests/responses-text.test.ts +++ b/packages/typescript/ai-openai-compatible/tests/responses-text.test.ts @@ -8,16 +8,28 @@ const testLogger = resolveDebugOption(false) // Declare mockCreate at module level let mockResponsesCreate: ReturnType -// Mock the OpenAI SDK -vi.mock('openai', () => { - return { - default: class { - responses = { - create: (...args: Array) => mockResponsesCreate(...args), - } - }, +/** + * Concrete test subclass — the base is abstract, so tests need a class that + * implements `callResponse*`. Hooks delegate to `mockResponsesCreate` so + * each test can configure the SDK response without spinning up a real client. 
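+ * As with the chat-completions twin above, one mock serves both hooks and is
+ * resolved to an async iterable (stream) or a response object (non-stream).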
+ */
+class TestResponsesAdapter extends OpenAICompatibleResponsesTextAdapter {
+  constructor(_config: unknown, model: string, name?: string) {
+    super(model, name)
   }
-})
+  protected async callResponse(
+    params: any,
+    requestOptions: any,
+  ): Promise<any> {
+    return mockResponsesCreate(params, requestOptions)
+  }
+  protected async callResponseStream(
+    params: any,
+    requestOptions: any,
+  ): Promise<any> {
+    return mockResponsesCreate(params, requestOptions)
+  }
+}
 
 // Helper to create async iterable from chunks
 function createAsyncIterable<T>(chunks: Array<T>): AsyncIterable<T> {
@@ -70,7 +82,7 @@ describe('OpenAICompatibleResponsesTextAdapter', () => {
 
   describe('instantiation', () => {
     it('creates an adapter with default name', () => {
-      const adapter = new OpenAICompatibleResponsesTextAdapter(
+      const adapter = new TestResponsesAdapter(
         testConfig,
         'test-model',
       )
@@ -82,7 +94,7 @@ describe('OpenAICompatibleResponsesTextAdapter', () => {
     })
 
     it('creates an adapter with custom name', () => {
-      const adapter = new OpenAICompatibleResponsesTextAdapter(
+      const adapter = new TestResponsesAdapter(
         testConfig,
         'test-model',
         'my-provider',
@@ -93,7 +105,7 @@ describe('OpenAICompatibleResponsesTextAdapter', () => {
     })
 
     it('creates an adapter with custom baseURL', () => {
-      const adapter = new OpenAICompatibleResponsesTextAdapter(
+      const adapter = new TestResponsesAdapter(
         {
           apiKey: 'test-key',
           baseURL: 'https://custom.api.example.com/v1',
@@ -138,7 +150,7 @@ describe('OpenAICompatibleResponsesTextAdapter', () => {
       ]
       setupMockResponsesClient(streamChunks)
 
-      const adapter = new OpenAICompatibleResponsesTextAdapter(
+      const adapter = new TestResponsesAdapter(
         testConfig,
         'test-model',
       )
@@ -190,7 +202,7 @@ describe('OpenAICompatibleResponsesTextAdapter', () => {
      ]
       setupMockResponsesClient(streamChunks)
 
-      const adapter = new OpenAICompatibleResponsesTextAdapter(
+      const adapter = new TestResponsesAdapter(
         testConfig,
         'test-model',
       )
@@ -253,7 +265,7 @@ describe('OpenAICompatibleResponsesTextAdapter', () => {
       ]
       setupMockResponsesClient(streamChunks)
 
-      const adapter = new OpenAICompatibleResponsesTextAdapter(
+      const adapter = new TestResponsesAdapter(
         testConfig,
         'test-model',
       )
@@ -324,7 +336,7 @@ describe('OpenAICompatibleResponsesTextAdapter', () => {
       ]
       setupMockResponsesClient(streamChunks)
 
-      const adapter = new OpenAICompatibleResponsesTextAdapter(
+      const adapter = new TestResponsesAdapter(
         testConfig,
         'test-model',
       )
@@ -392,7 +404,7 @@ describe('OpenAICompatibleResponsesTextAdapter', () => {
       ]
       setupMockResponsesClient(streamChunks)
 
-      const adapter = new OpenAICompatibleResponsesTextAdapter(
+      const adapter = new TestResponsesAdapter(
         testConfig,
         'test-model',
       )
@@ -466,7 +478,7 @@ describe('OpenAICompatibleResponsesTextAdapter', () => {
       ]
       setupMockResponsesClient(streamChunks)
 
-      const adapter = new OpenAICompatibleResponsesTextAdapter(
+      const adapter = new TestResponsesAdapter(
         testConfig,
         'test-model',
      )
@@ -550,7 +562,7 @@ describe('OpenAICompatibleResponsesTextAdapter', () => {
       ]
       setupMockResponsesClient(streamChunks)
 
-      const adapter = new OpenAICompatibleResponsesTextAdapter(
+      const adapter = new TestResponsesAdapter(
         testConfig,
         'test-model',
       )
@@ -637,7 +649,7 @@ describe('OpenAICompatibleResponsesTextAdapter', () => {
       ]
       setupMockResponsesClient(streamChunks)
 
-      const adapter = new OpenAICompatibleResponsesTextAdapter(
+      const adapter = new TestResponsesAdapter(
         testConfig,
         'test-model',
       )
@@ -763,7 +775,7 @@ describe('OpenAICompatibleResponsesTextAdapter', () => {
       ]
setupMockResponsesClient(streamChunks) - const adapter = new OpenAICompatibleResponsesTextAdapter( + const adapter = new TestResponsesAdapter( testConfig, 'test-model', ) @@ -852,7 +864,7 @@ describe('OpenAICompatibleResponsesTextAdapter', () => { ] setupMockResponsesClient(streamChunks) - const adapter = new OpenAICompatibleResponsesTextAdapter( + const adapter = new TestResponsesAdapter( testConfig, 'test-model', ) @@ -889,6 +901,309 @@ describe('OpenAICompatibleResponsesTextAdapter', () => { expect(toolEnd.toolCallId).toBe('fc_internal_001') } }) + + it('does not emit TOOL_CALL_START until the item carries a name (no empty-name misroute)', async () => { + const streamChunks = [ + { + type: 'response.created', + response: { + id: 'resp-x', + model: 'test-model', + status: 'in_progress', + }, + }, + // First added event lacks a name — should NOT emit TOOL_CALL_START + { + type: 'response.output_item.added', + output_index: 0, + item: { type: 'function_call', id: 'call_late_name' }, + }, + // Second added event for the same id finally carries the name + { + type: 'response.output_item.added', + output_index: 0, + item: { + type: 'function_call', + id: 'call_late_name', + name: 'lookup_weather', + }, + }, + { + type: 'response.function_call_arguments.done', + item_id: 'call_late_name', + arguments: '{"location":"NYC"}', + }, + { + type: 'response.completed', + response: { + id: 'resp-x', + model: 'test-model', + status: 'completed', + output: [ + { + type: 'function_call', + id: 'call_late_name', + name: 'lookup_weather', + arguments: '{"location":"NYC"}', + }, + ], + usage: { input_tokens: 1, output_tokens: 1, total_tokens: 2 }, + }, + }, + ] + setupMockResponsesClient(streamChunks) + const adapter = new TestResponsesAdapter( + testConfig, + 'test-model', + ) + const chunks: Array = [] + for await (const chunk of adapter.chatStream({ + logger: testLogger, + model: 'test-model', + messages: [{ role: 'user', content: 'q' }], + tools: [weatherTool], + })) { + chunks.push(chunk) + } + const starts = chunks.filter((c) => c.type === 'TOOL_CALL_START') + expect(starts.length).toBe(1) + if (starts[0]?.type === 'TOOL_CALL_START') { + expect(starts[0].toolName).toBe('lookup_weather') + expect(starts[0].toolCallName).toBe('lookup_weather') + } + const end = chunks.find((c) => c.type === 'TOOL_CALL_END') + expect(end).toBeDefined() + if (end?.type === 'TOOL_CALL_END') { + expect(end.toolName).toBe('lookup_weather') + } + }) + + it('backfills tool call via output_item.done when name was missing from output_item.added', async () => { + // Some upstreams send `output_item.added` with no `name`, then carry + // the full function_call item (including name and arguments) in + // `output_item.done`. Without a backfill, the tool call would be + // silently dropped from the AG-UI stream. + const streamChunks = [ + { + type: 'response.created', + response: { + id: 'resp-bf', + model: 'test-model', + status: 'in_progress', + }, + }, + { + type: 'response.output_item.added', + output_index: 0, + item: { type: 'function_call', id: 'fc_bf' }, + }, + // Orphan args deltas + done arrive before name is known. + { + type: 'response.function_call_arguments.delta', + item_id: 'fc_bf', + delta: '{"location":', + }, + { + type: 'response.function_call_arguments.done', + item_id: 'fc_bf', + arguments: '{"location":"NYC"}', + }, + // output_item.done finally carries the name + full arguments. 
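+        // Rough sketch of what the backfill should emit once this event
+        // lands, in order (exact payloads are asserted below):
+        //   TOOL_CALL_START { toolCallId: 'fc_bf', toolName: 'lookup_weather' }
+        //   TOOL_CALL_END   { toolCallId: 'fc_bf', input: { location: 'NYC' } }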
+ { + type: 'response.output_item.done', + output_index: 0, + item: { + type: 'function_call', + id: 'fc_bf', + name: 'lookup_weather', + arguments: '{"location":"NYC"}', + }, + }, + { + type: 'response.completed', + response: { + id: 'resp-bf', + model: 'test-model', + status: 'completed', + output: [ + { + type: 'function_call', + id: 'fc_bf', + name: 'lookup_weather', + arguments: '{"location":"NYC"}', + }, + ], + usage: { input_tokens: 1, output_tokens: 1, total_tokens: 2 }, + }, + }, + ] + setupMockResponsesClient(streamChunks) + const adapter = new TestResponsesAdapter( + testConfig, + 'test-model', + ) + const chunks: Array = [] + for await (const chunk of adapter.chatStream({ + logger: testLogger, + model: 'test-model', + messages: [{ role: 'user', content: 'q' }], + tools: [weatherTool], + })) { + chunks.push(chunk) + } + const starts = chunks.filter((c) => c.type === 'TOOL_CALL_START') + const ends = chunks.filter((c) => c.type === 'TOOL_CALL_END') + expect(starts).toHaveLength(1) + expect(ends).toHaveLength(1) + if (starts[0]?.type === 'TOOL_CALL_START') { + expect(starts[0].toolName).toBe('lookup_weather') + } + if (ends[0]?.type === 'TOOL_CALL_END') { + expect(ends[0].toolName).toBe('lookup_weather') + expect(ends[0].input).toEqual({ location: 'NYC' }) + } + }) + + it('backfills tool call from response.completed.output[] when name never arrived mid-stream', async () => { + // Defense-in-depth backstop: if neither output_item.added nor + // output_item.done carried the name (very off-spec upstream), the + // function_call item in response.completed.output[] still has it. + const streamChunks = [ + { + type: 'response.created', + response: { + id: 'resp-final', + model: 'test-model', + status: 'in_progress', + }, + }, + { + type: 'response.output_item.added', + output_index: 0, + item: { type: 'function_call', id: 'fc_final' }, + }, + { + type: 'response.function_call_arguments.done', + item_id: 'fc_final', + arguments: '{"location":"Berlin"}', + }, + { + type: 'response.completed', + response: { + id: 'resp-final', + model: 'test-model', + status: 'completed', + output: [ + { + type: 'function_call', + id: 'fc_final', + name: 'lookup_weather', + arguments: '{"location":"Berlin"}', + }, + ], + usage: { input_tokens: 1, output_tokens: 1, total_tokens: 2 }, + }, + }, + ] + setupMockResponsesClient(streamChunks) + const adapter = new TestResponsesAdapter( + testConfig, + 'test-model', + ) + const chunks: Array = [] + for await (const chunk of adapter.chatStream({ + logger: testLogger, + model: 'test-model', + messages: [{ role: 'user', content: 'q' }], + tools: [weatherTool], + })) { + chunks.push(chunk) + } + const starts = chunks.filter((c) => c.type === 'TOOL_CALL_START') + const ends = chunks.filter((c) => c.type === 'TOOL_CALL_END') + expect(starts).toHaveLength(1) + expect(ends).toHaveLength(1) + if (ends[0]?.type === 'TOOL_CALL_END') { + expect(ends[0].toolName).toBe('lookup_weather') + expect(ends[0].input).toEqual({ location: 'Berlin' }) + } + }) + + it('does not emit duplicate TOOL_CALL_END when output_item.done precedes args.done', async () => { + // Reverse ordering: output_item.done arrives with full arguments and + // emits START + END (sets ended=true), then a late args.done arrives + // for the same id. Without the metadata.ended guard, args.done would + // emit a second TOOL_CALL_END. 
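+      // Sequence under test, roughly:
+      //   output_item.added (name known)     -> TOOL_CALL_START
+      //   output_item.done  (full arguments) -> TOOL_CALL_END, ended = true
+      //   function_call_arguments.done       -> must be swallowed, not re-emitted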
+ const streamChunks = [ + { + type: 'response.created', + response: { + id: 'resp-rev', + model: 'test-model', + status: 'in_progress', + }, + }, + { + type: 'response.output_item.added', + output_index: 0, + item: { + type: 'function_call', + id: 'fc_rev', + name: 'lookup_weather', + }, + }, + { + type: 'response.output_item.done', + output_index: 0, + item: { + type: 'function_call', + id: 'fc_rev', + name: 'lookup_weather', + arguments: '{"location":"Tokyo"}', + }, + }, + { + type: 'response.function_call_arguments.done', + item_id: 'fc_rev', + arguments: '{"location":"Tokyo"}', + }, + { + type: 'response.completed', + response: { + id: 'resp-rev', + model: 'test-model', + status: 'completed', + output: [ + { + type: 'function_call', + id: 'fc_rev', + name: 'lookup_weather', + arguments: '{"location":"Tokyo"}', + }, + ], + usage: { input_tokens: 1, output_tokens: 1, total_tokens: 2 }, + }, + }, + ] + setupMockResponsesClient(streamChunks) + const adapter = new TestResponsesAdapter( + testConfig, + 'test-model', + ) + const chunks: Array = [] + for await (const chunk of adapter.chatStream({ + logger: testLogger, + model: 'test-model', + messages: [{ role: 'user', content: 'q' }], + tools: [weatherTool], + })) { + chunks.push(chunk) + } + const starts = chunks.filter((c) => c.type === 'TOOL_CALL_START') + const ends = chunks.filter((c) => c.type === 'TOOL_CALL_END') + expect(starts).toHaveLength(1) + expect(ends).toHaveLength(1) + }) }) describe('content_part events', () => { @@ -926,7 +1241,7 @@ describe('OpenAICompatibleResponsesTextAdapter', () => { ] setupMockResponsesClient(streamChunks) - const adapter = new OpenAICompatibleResponsesTextAdapter( + const adapter = new TestResponsesAdapter( testConfig, 'test-model', ) @@ -992,7 +1307,7 @@ describe('OpenAICompatibleResponsesTextAdapter', () => { ] setupMockResponsesClient(streamChunks) - const adapter = new OpenAICompatibleResponsesTextAdapter( + const adapter = new TestResponsesAdapter( testConfig, 'test-model', ) @@ -1012,6 +1327,54 @@ describe('OpenAICompatibleResponsesTextAdapter', () => { ) expect(contentChunks.length).toBe(2) }) + + it('emits TEXT_MESSAGE_START before content when only content_part.done fires (no preceding deltas or added)', async () => { + const streamChunks = [ + { + type: 'response.created', + response: { + id: 'resp-d', + model: 'test-model', + status: 'in_progress', + }, + }, + // No deltas, no content_part.added — only the done event. 
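+        // With no prior deltas the adapter has never opened a text message,
+        // so it must synthesize TEXT_MESSAGE_START itself before emitting
+        // this text as a single TEXT_MESSAGE_CONTENT (ordering asserted below).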
+ { + type: 'response.content_part.done', + part: { type: 'output_text', text: 'whole message at once' }, + }, + { + type: 'response.completed', + response: { + id: 'resp-d', + model: 'test-model', + status: 'completed', + output: [], + usage: { input_tokens: 5, output_tokens: 3, total_tokens: 8 }, + }, + }, + ] + setupMockResponsesClient(streamChunks) + const adapter = new TestResponsesAdapter( + testConfig, + 'test-model', + ) + const chunks: Array = [] + for await (const chunk of adapter.chatStream({ + logger: testLogger, + model: 'test-model', + messages: [{ role: 'user', content: 'hi' }], + })) { + chunks.push(chunk) + } + const types = chunks.map((c) => c.type) + const startIdx = types.indexOf('TEXT_MESSAGE_START') + const contentIdx = types.indexOf('TEXT_MESSAGE_CONTENT') + const endIdx = types.indexOf('TEXT_MESSAGE_END') + expect(startIdx).toBeGreaterThanOrEqual(0) + expect(contentIdx).toBeGreaterThan(startIdx) + expect(endIdx).toBeGreaterThan(contentIdx) + }) }) describe('error handling', () => { @@ -1048,7 +1411,7 @@ describe('OpenAICompatibleResponsesTextAdapter', () => { mockResponsesCreate = vi.fn().mockResolvedValue(errorIterable) - const adapter = new OpenAICompatibleResponsesTextAdapter( + const adapter = new TestResponsesAdapter( testConfig, 'test-model', ) @@ -1075,7 +1438,7 @@ describe('OpenAICompatibleResponsesTextAdapter', () => { .fn() .mockRejectedValue(new Error('API key invalid')) - const adapter = new OpenAICompatibleResponsesTextAdapter( + const adapter = new TestResponsesAdapter( testConfig, 'test-model', ) @@ -1115,7 +1478,7 @@ describe('OpenAICompatibleResponsesTextAdapter', () => { ] setupMockResponsesClient(streamChunks) - const adapter = new OpenAICompatibleResponsesTextAdapter( + const adapter = new TestResponsesAdapter( testConfig, 'test-model', ) @@ -1152,7 +1515,7 @@ describe('OpenAICompatibleResponsesTextAdapter', () => { ] setupMockResponsesClient(streamChunks) - const adapter = new OpenAICompatibleResponsesTextAdapter( + const adapter = new TestResponsesAdapter( testConfig, 'test-model', ) @@ -1193,7 +1556,7 @@ describe('OpenAICompatibleResponsesTextAdapter', () => { ] setupMockResponsesClient(streamChunks) - const adapter = new OpenAICompatibleResponsesTextAdapter( + const adapter = new TestResponsesAdapter( testConfig, 'test-model', ) @@ -1236,7 +1599,7 @@ describe('OpenAICompatibleResponsesTextAdapter', () => { setupMockResponsesClient([], nonStreamResponse) - const adapter = new OpenAICompatibleResponsesTextAdapter( + const adapter = new TestResponsesAdapter( testConfig, 'test-model', ) @@ -1293,7 +1656,7 @@ describe('OpenAICompatibleResponsesTextAdapter', () => { setupMockResponsesClient([], nonStreamResponse) - const adapter = new OpenAICompatibleResponsesTextAdapter( + const adapter = new TestResponsesAdapter( testConfig, 'test-model', ) @@ -1336,7 +1699,7 @@ describe('OpenAICompatibleResponsesTextAdapter', () => { setupMockResponsesClient([], nonStreamResponse) - const adapter = new OpenAICompatibleResponsesTextAdapter( + const adapter = new TestResponsesAdapter( testConfig, 'test-model', ) @@ -1358,6 +1721,99 @@ describe('OpenAICompatibleResponsesTextAdapter', () => { }), ).rejects.toThrow('Failed to parse structured output as JSON') }) + + it('fails loud when response has only message items with empty text', async () => { + const nonStreamResponse = { + output: [ + { + type: 'message', + content: [{ type: 'output_text', text: '' }], + }, + ], + } + setupMockResponsesClient([], nonStreamResponse) + const adapter = new TestResponsesAdapter( + 
testConfig, + 'test-model', + ) + await expect( + adapter.structuredOutput({ + chatOptions: { + logger: testLogger, + model: 'test-model', + messages: [{ role: 'user', content: 'q' }], + }, + outputSchema: { + type: 'object', + properties: { name: { type: 'string' } }, + required: ['name'], + }, + }), + ).rejects.toThrow('response contained no content') + }) + + it('fails loud when response.output has only non-message items (function_call, reasoning)', async () => { + const nonStreamResponse = { + output: [ + { + type: 'function_call', + id: 'fc_1', + call_id: 'call_1', + name: 'do_thing', + arguments: '{}', + }, + ], + } + setupMockResponsesClient([], nonStreamResponse) + const adapter = new TestResponsesAdapter( + testConfig, + 'test-model', + ) + await expect( + adapter.structuredOutput({ + chatOptions: { + logger: testLogger, + model: 'test-model', + messages: [{ role: 'user', content: 'q' }], + }, + outputSchema: { + type: 'object', + properties: { name: { type: 'string' } }, + required: ['name'], + }, + }), + ).rejects.toThrow(/function_call/) + }) + + it('throws on unknown message content_part type rather than misreporting as refusal', async () => { + const nonStreamResponse = { + output: [ + { + type: 'message', + content: [{ type: 'output_audio', audio: 'base64data' }], + }, + ], + } + setupMockResponsesClient([], nonStreamResponse) + const adapter = new TestResponsesAdapter( + testConfig, + 'test-model', + ) + await expect( + adapter.structuredOutput({ + chatOptions: { + logger: testLogger, + model: 'test-model', + messages: [{ role: 'user', content: 'q' }], + }, + outputSchema: { + type: 'object', + properties: { name: { type: 'string' } }, + required: ['name'], + }, + }), + ).rejects.toThrow(/unsupported message content part type "output_audio"/) + }) }) describe('request mapping', () => { @@ -1388,7 +1844,7 @@ describe('OpenAICompatibleResponsesTextAdapter', () => { ] setupMockResponsesClient(streamChunks) - const adapter = new OpenAICompatibleResponsesTextAdapter( + const adapter = new TestResponsesAdapter( testConfig, 'test-model', ) @@ -1459,7 +1915,7 @@ describe('OpenAICompatibleResponsesTextAdapter', () => { ] setupMockResponsesClient(streamChunks) - const adapter = new OpenAICompatibleResponsesTextAdapter( + const adapter = new TestResponsesAdapter( testConfig, 'test-model', ) @@ -1510,7 +1966,7 @@ describe('OpenAICompatibleResponsesTextAdapter', () => { ] setupMockResponsesClient(streamChunks) - const adapter = new OpenAICompatibleResponsesTextAdapter( + const adapter = new TestResponsesAdapter( testConfig, 'test-model', ) @@ -1572,12 +2028,14 @@ describe('OpenAICompatibleResponsesTextAdapter', () => { describe('subclassing', () => { it('allows subclassing with custom name', () => { class MyProviderAdapter extends OpenAICompatibleResponsesTextAdapter { - constructor(apiKey: string, model: string) { - super( - { apiKey, baseURL: 'https://my-provider.com/v1' }, - model, - 'my-provider', - ) + constructor(_apiKey: string, model: string) { + super(model, 'my-provider') + } + protected async callResponse(): Promise { + throw new Error('not called in this test') + } + protected async callResponseStream(): Promise { + throw new Error('not called in this test') } } diff --git a/packages/typescript/ai-openai/src/adapters/image.ts b/packages/typescript/ai-openai/src/adapters/image.ts index 293637b91..2a5159e50 100644 --- a/packages/typescript/ai-openai/src/adapters/image.ts +++ b/packages/typescript/ai-openai/src/adapters/image.ts @@ -1,10 +1,19 @@ -import { 
OpenAICompatibleImageAdapter } from '@tanstack/ai-openai-compatible' +import OpenAI from 'openai' +import { BaseImageAdapter } from '@tanstack/ai/adapters' +import { toRunErrorPayload } from '@tanstack/ai/adapter-internals' +import { generateId } from '@tanstack/ai-utils' import { getOpenAIApiKeyFromEnv } from '../utils/client' import { validateImageSize, validateNumberOfImages, validatePrompt, } from '../image/image-provider-options' +import type { + GeneratedImage, + ImageGenerationOptions, + ImageGenerationResult, +} from '@tanstack/ai' +import type OpenAI_SDK from 'openai' import type { OpenAIImageModel } from '../model-meta' import type { OpenAIImageModelProviderOptionsByName, @@ -23,15 +32,10 @@ export interface OpenAIImageConfig extends OpenAIClientConfig {} * * Tree-shakeable adapter for OpenAI image generation functionality. * Supports gpt-image-1, gpt-image-1-mini, dall-e-3, and dall-e-2 models. - * - * Features: - * - Model-specific type-safe provider options - * - Size validation per model - * - Number of images validation */ export class OpenAIImageAdapter< TModel extends OpenAIImageModel, -> extends OpenAICompatibleImageAdapter< +> extends BaseImageAdapter< TModel, OpenAIImageProviderOptions, OpenAIImageModelProviderOptionsByName, @@ -40,51 +44,77 @@ export class OpenAIImageAdapter< readonly kind = 'image' as const readonly name = 'openai' as const + protected client: OpenAI + constructor(config: OpenAIImageConfig, model: TModel) { - super(config, model, 'openai') + super(model, {}) + this.client = new OpenAI(config) } - protected override validatePrompt(options: { - prompt: string - model: string - }): void { - validatePrompt(options) - } + async generateImages( + options: ImageGenerationOptions, + ): Promise { + const { model, prompt, numberOfImages, size, modelOptions } = options - protected override validateImageSize( - model: string, - size: string | undefined, - ): void { + validatePrompt({ prompt, model }) validateImageSize(model, size) - } - - protected override validateNumberOfImages( - model: string, - numberOfImages: number | undefined, - ): void { validateNumberOfImages(model, numberOfImages) + + const request: OpenAI_SDK.Images.ImageGenerateParams = { + model, + prompt, + n: numberOfImages ?? 1, + size: size as OpenAI_SDK.Images.ImageGenerateParams['size'], + ...modelOptions, + } + + try { + options.logger.request( + `activity=image provider=${this.name} model=${model} n=${request.n ?? 1} size=${request.size ?? 'default'}`, + { provider: this.name, model }, + ) + const response = await this.client.images.generate({ + ...request, + stream: false, + }) + + const images: Array = (response.data ?? []).flatMap( + (item): Array => { + const revisedPrompt = item.revised_prompt + if (item.b64_json) { + return [{ b64Json: item.b64_json, revisedPrompt }] + } + if (item.url) { + return [{ url: item.url, revisedPrompt }] + } + return [] + }, + ) + + return { + id: generateId(this.name), + model, + images, + usage: response.usage + ? { + inputTokens: response.usage.input_tokens, + outputTokens: response.usage.output_tokens, + totalTokens: response.usage.total_tokens, + } + : undefined, + } + } catch (error: unknown) { + // Narrow before logging: raw SDK errors can carry request metadata + // (including auth headers) which we must never surface to user loggers. 
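+      // Sketch: an SDK error shaped like { status: 429, headers, message }
+      // is reduced to a plain { message, ... } payload by toRunErrorPayload
+      // before it reaches the logger; the exact payload shape is owned by
+      // @tanstack/ai/adapter-internals.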
+ options.logger.errors(`${this.name}.generateImages fatal`, { + error: toRunErrorPayload(error, `${this.name}.generateImages failed`), + source: `${this.name}.generateImages`, + }) + throw error + } } } -/** - * Creates an OpenAI image adapter with explicit API key. - * Type resolution happens here at the call site. - * - * @param model - The model name (e.g., 'dall-e-3', 'gpt-image-1') - * @param apiKey - Your OpenAI API key - * @param config - Optional additional configuration - * @returns Configured OpenAI image adapter instance with resolved types - * - * @example - * ```typescript - * const adapter = createOpenaiImage('dall-e-3', "sk-..."); - * - * const result = await generateImage({ - * adapter, - * prompt: 'A cute baby sea otter' - * }); - * ``` - */ export function createOpenaiImage( model: TModel, apiKey: string, @@ -93,30 +123,6 @@ export function createOpenaiImage( return new OpenAIImageAdapter({ apiKey, ...config }, model) } -/** - * Creates an OpenAI image adapter with automatic API key detection from environment variables. - * Type resolution happens here at the call site. - * - * Looks for `OPENAI_API_KEY` in: - * - `process.env` (Node.js) - * - `window.env` (Browser with injected env) - * - * @param model - The model name (e.g., 'dall-e-3', 'gpt-image-1') - * @param config - Optional configuration (excluding apiKey which is auto-detected) - * @returns Configured OpenAI image adapter instance with resolved types - * @throws Error if OPENAI_API_KEY is not found in environment - * - * @example - * ```typescript - * // Automatically uses OPENAI_API_KEY from environment - * const adapter = openaiImage('dall-e-3'); - * - * const result = await generateImage({ - * adapter, - * prompt: 'A beautiful sunset over mountains' - * }); - * ``` - */ export function openaiImage( model: TModel, config?: Omit, diff --git a/packages/typescript/ai-openai/src/adapters/summarize.ts b/packages/typescript/ai-openai/src/adapters/summarize.ts index ad1f6c7a3..17b652b7c 100644 --- a/packages/typescript/ai-openai/src/adapters/summarize.ts +++ b/packages/typescript/ai-openai/src/adapters/summarize.ts @@ -1,17 +1,11 @@ -import { OpenAICompatibleSummarizeAdapter } from '@tanstack/ai-openai-compatible' +import { ChatStreamSummarizeAdapter } from '@tanstack/ai/adapters' import { getOpenAIApiKeyFromEnv } from '../utils/client' import { OpenAITextAdapter } from './text' import type { OpenAIChatModel } from '../model-meta' import type { OpenAIClientConfig } from '../utils/client' -/** - * Configuration for OpenAI summarize adapter - */ export interface OpenAISummarizeConfig extends OpenAIClientConfig {} -/** - * OpenAI-specific provider options for summarization - */ export interface OpenAISummarizeProviderOptions { /** Temperature for response generation (0-2) */ temperature?: number @@ -19,34 +13,8 @@ export interface OpenAISummarizeProviderOptions { maxTokens?: number } -/** - * OpenAI Summarize Adapter - * - * A thin wrapper around the text adapter that adds summarization-specific prompting. - * Delegates all API calls to the OpenAITextAdapter. - */ -export class OpenAISummarizeAdapter< - TModel extends OpenAIChatModel, -> extends OpenAICompatibleSummarizeAdapter< - TModel, - OpenAISummarizeProviderOptions -> { - readonly kind = 'summarize' as const - readonly name = 'openai' as const - - constructor(config: OpenAISummarizeConfig, model: TModel) { - super(new OpenAITextAdapter(config, model), model, 'openai') - } -} - /** * Creates an OpenAI summarize adapter with explicit API key. 
- * Type resolution happens here at the call site. - * - * @param model - The model name (e.g., 'gpt-4o-mini', 'gpt-4o') - * @param apiKey - Your OpenAI API key - * @param config - Optional additional configuration - * @returns Configured OpenAI summarize adapter instance with resolved types * * @example * ```typescript @@ -57,38 +25,26 @@ export function createOpenaiSummarize( model: TModel, apiKey: string, config?: Omit, -): OpenAISummarizeAdapter { - return new OpenAISummarizeAdapter({ apiKey, ...config }, model) +): ChatStreamSummarizeAdapter { + return new ChatStreamSummarizeAdapter( + new OpenAITextAdapter({ apiKey, ...config }, model), + model, + 'openai', + ) } /** - * Creates an OpenAI summarize adapter with automatic API key detection from environment variables. - * Type resolution happens here at the call site. - * - * Looks for `OPENAI_API_KEY` in: - * - `process.env` (Node.js) - * - `window.env` (Browser with injected env) - * - * @param model - The model name (e.g., 'gpt-4o-mini', 'gpt-4o') - * @param config - Optional configuration (excluding apiKey which is auto-detected) - * @returns Configured OpenAI summarize adapter instance with resolved types - * @throws Error if OPENAI_API_KEY is not found in environment + * Creates an OpenAI summarize adapter with API key from `OPENAI_API_KEY`. * * @example * ```typescript - * // Automatically uses OPENAI_API_KEY from environment * const adapter = openaiSummarize('gpt-4o-mini'); - * - * await summarize({ - * adapter, - * text: "Long article text..." - * }); + * await summarize({ adapter, text: "Long article text..." }); * ``` */ export function openaiSummarize( model: TModel, config?: Omit, -): OpenAISummarizeAdapter { - const apiKey = getOpenAIApiKeyFromEnv() - return createOpenaiSummarize(model, apiKey, config) +): ChatStreamSummarizeAdapter { + return createOpenaiSummarize(model, getOpenAIApiKeyFromEnv(), config) } diff --git a/packages/typescript/ai-openai/src/adapters/text.ts b/packages/typescript/ai-openai/src/adapters/text.ts index 9646803d2..5854452c1 100644 --- a/packages/typescript/ai-openai/src/adapters/text.ts +++ b/packages/typescript/ai-openai/src/adapters/text.ts @@ -1,3 +1,4 @@ +import OpenAI from 'openai' import { OpenAICompatibleResponsesTextAdapter } from '@tanstack/ai-openai-compatible' import { validateTextProviderOptions } from '../text/text-provider-options' import { convertToolsToProviderFormat } from '../tools' @@ -88,8 +89,25 @@ export class OpenAITextAdapter< readonly kind = 'text' as const readonly name = 'openai' as const + protected client: OpenAI + constructor(config: OpenAITextConfig, model: TModel) { - super(config, model, 'openai') + super(model, 'openai') + this.client = new OpenAI(config) + } + + protected async callResponse( + params: OpenAI_SDK.Responses.ResponseCreateParamsNonStreaming, + requestOptions: { signal?: AbortSignal | null; headers?: HeadersInit }, + ): Promise { + return this.client.responses.create(params, requestOptions) + } + + protected async callResponseStream( + params: OpenAI_SDK.Responses.ResponseCreateParamsStreaming, + requestOptions: { signal?: AbortSignal | null; headers?: HeadersInit }, + ): Promise> { + return this.client.responses.create(params, requestOptions) } /** diff --git a/packages/typescript/ai-openai/src/adapters/transcription.ts b/packages/typescript/ai-openai/src/adapters/transcription.ts index ba54c0e08..556aa0f8a 100644 --- a/packages/typescript/ai-openai/src/adapters/transcription.ts +++ b/packages/typescript/ai-openai/src/adapters/transcription.ts @@ 
-1,5 +1,14 @@ -import { OpenAICompatibleTranscriptionAdapter } from '@tanstack/ai-openai-compatible' +import OpenAI from 'openai' +import { BaseTranscriptionAdapter } from '@tanstack/ai/adapters' +import { toRunErrorPayload } from '@tanstack/ai/adapter-internals' +import { base64ToArrayBuffer, generateId } from '@tanstack/ai-utils' import { getOpenAIApiKeyFromEnv } from '../utils/client' +import type { + TranscriptionOptions, + TranscriptionResult, + TranscriptionSegment, +} from '@tanstack/ai' +import type OpenAI_SDK from 'openai' import type { OpenAITranscriptionModel } from '../model-meta' import type { OpenAITranscriptionProviderOptions } from '../audio/transcription-provider-options' import type { OpenAIClientConfig } from '../utils/client' @@ -10,58 +19,152 @@ import type { OpenAIClientConfig } from '../utils/client' export interface OpenAITranscriptionConfig extends OpenAIClientConfig {} /** - * OpenAI Transcription (Speech-to-Text) Adapter - * - * Tree-shakeable adapter for OpenAI audio transcription functionality. - * Supports whisper-1, gpt-4o-transcribe, gpt-4o-mini-transcribe, and gpt-4o-transcribe-diarize models. - * - * Features: - * - Multiple transcription models with different capabilities - * - Language detection or specification - * - Multiple output formats: json, text, srt, verbose_json, vtt - * - Word and segment-level timestamps (with verbose_json) - * - Speaker diarization (with gpt-4o-transcribe-diarize) + * OpenAI Transcription (Speech-to-Text) Adapter. + * Supports whisper-1 and gpt-4o-transcribe* models. Verbose JSON output + * (timestamps + segments) only available on whisper-1. */ export class OpenAITranscriptionAdapter< TModel extends OpenAITranscriptionModel, -> extends OpenAICompatibleTranscriptionAdapter< - TModel, - OpenAITranscriptionProviderOptions -> { +> extends BaseTranscriptionAdapter { readonly name = 'openai' as const + protected client: OpenAI + constructor(config: OpenAITranscriptionConfig, model: TModel) { - super(config, model, 'openai') + super(model, {}) + this.client = new OpenAI(config) + } + + async transcribe( + options: TranscriptionOptions, + ): Promise { + const { model, audio, language, prompt, responseFormat, modelOptions } = + options + + const file = this.prepareAudioFile(audio) + + const request: OpenAI_SDK.Audio.TranscriptionCreateParams = { + model, + file, + language, + prompt, + response_format: this.mapResponseFormat(responseFormat), + ...modelOptions, + } + + // Only Whisper supports verbose_json. The gpt-4o-* transcribe models + // accept only json/text and reject verbose_json with HTTP 400. + const useVerbose = + responseFormat === 'verbose_json' || + (!responseFormat && model === 'whisper-1') + + try { + options.logger.request( + `activity=transcription provider=${this.name} model=${model} verbose=${useVerbose}`, + { provider: this.name, model }, + ) + if (useVerbose) { + const response = (await this.client.audio.transcriptions.create({ + ...request, + response_format: 'verbose_json', + })) as OpenAI_SDK.Audio.Transcriptions.TranscriptionVerbose + + return { + id: generateId(this.name), + model, + text: response.text, + language: response.language, + duration: response.duration, + segments: response.segments?.map( + (seg): TranscriptionSegment => ({ + id: seg.id, + start: seg.start, + end: seg.end, + text: seg.text, + // The OpenAI SDK types `avg_logprob` as `number`, so call Math.exp + // directly. Guarding with `seg.avg_logprob ?` would treat `0` + // (perfect confidence) as missing. 
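+              // e.g. avg_logprob = -0.105 -> exp(-0.105) ≈ 0.90 confidence,
+              // and avg_logprob = 0 -> exactly 1.0, which a truthiness guard
+              // would have dropped.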
+ confidence: Math.exp(seg.avg_logprob), + }), + ), + words: response.words?.map((w) => ({ + word: w.word, + start: w.start, + end: w.end, + })), + } + } else { + const response = await this.client.audio.transcriptions.create(request) + + return { + id: generateId(this.name), + model, + text: typeof response === 'string' ? response : response.text, + language, + } + } + } catch (error: unknown) { + options.logger.errors(`${this.name}.transcribe fatal`, { + error: toRunErrorPayload(error, `${this.name}.transcribe failed`), + source: `${this.name}.transcribe`, + }) + throw error + } + } + + protected prepareAudioFile(audio: string | File | Blob | ArrayBuffer): File { + if (typeof File !== 'undefined' && audio instanceof File) { + return audio + } + if (typeof Blob !== 'undefined' && audio instanceof Blob) { + this.ensureFileSupport() + return new File([audio], 'audio.mp3', { + type: audio.type || 'audio/mpeg', + }) + } + if (typeof ArrayBuffer !== 'undefined' && audio instanceof ArrayBuffer) { + this.ensureFileSupport() + return new File([audio], 'audio.mp3', { type: 'audio/mpeg' }) + } + if (typeof audio === 'string') { + this.ensureFileSupport() + + if (audio.startsWith('data:')) { + const parts = audio.split(',') + const header = parts[0] + const base64Data = parts[1] || '' + const mimeMatch = header?.match(/data:([^;]+)/) + const mimeType = mimeMatch?.[1] || 'audio/mpeg' + const bytes = base64ToArrayBuffer(base64Data) + const extension = mimeType.split('/')[1] || 'mp3' + return new File([bytes], `audio.${extension}`, { type: mimeType }) + } + + const bytes = base64ToArrayBuffer(audio) + return new File([bytes], 'audio.mp3', { type: 'audio/mpeg' }) + } + + throw new Error('Invalid audio input type') + } + + // Throws on Node < 20 where the global `File` constructor isn't available. + private ensureFileSupport(): void { + if (typeof File === 'undefined') { + throw new Error( + '`File` is not available in this environment. ' + + 'Use Node.js 20 or newer, or pass a File object directly.', + ) + } } - protected override shouldDefaultToVerbose(model: string): boolean { - // Only Whisper supports `verbose_json`. The gpt-4o-* transcribe models - // accept only `json` and `text` and reject `verbose_json` with HTTP 400, - // so they must NOT default to verbose. The previous logic was inverted. - return model === 'whisper-1' + protected mapResponseFormat( + format?: 'json' | 'text' | 'srt' | 'verbose_json' | 'vtt', + ): OpenAI_SDK.Audio.TranscriptionCreateParams['response_format'] { + if (!format) return 'json' + return format as OpenAI_SDK.Audio.TranscriptionCreateParams['response_format'] } } -/** - * Creates an OpenAI transcription adapter with explicit API key. - * Type resolution happens here at the call site. - * - * @param model - The model name (e.g., 'whisper-1') - * @param apiKey - Your OpenAI API key - * @param config - Optional additional configuration - * @returns Configured OpenAI transcription adapter instance with resolved types - * - * @example - * ```typescript - * const adapter = createOpenaiTranscription('whisper-1', "sk-..."); - * - * const result = await generateTranscription({ - * adapter, - * audio: audioFile, - * language: 'en' - * }); - * ``` - */ export function createOpenaiTranscription< TModel extends OpenAITranscriptionModel, >( @@ -72,32 +175,6 @@ export function createOpenaiTranscription< return new OpenAITranscriptionAdapter({ apiKey, ...config }, model) } -/** - * Creates an OpenAI transcription adapter with automatic API key detection from environment variables. 
- * Type resolution happens here at the call site. - * - * Looks for `OPENAI_API_KEY` in: - * - `process.env` (Node.js) - * - `window.env` (Browser with injected env) - * - * @param model - The model name (e.g., 'whisper-1') - * @param config - Optional configuration (excluding apiKey which is auto-detected) - * @returns Configured OpenAI transcription adapter instance with resolved types - * @throws Error if OPENAI_API_KEY is not found in environment - * - * @example - * ```typescript - * // Automatically uses OPENAI_API_KEY from environment - * const adapter = openaiTranscription('whisper-1'); - * - * const result = await generateTranscription({ - * adapter, - * audio: audioFile - * }); - * - * console.log(result.text) - * ``` - */ export function openaiTranscription( model: TModel, config?: Omit, diff --git a/packages/typescript/ai-openai/src/adapters/tts.ts b/packages/typescript/ai-openai/src/adapters/tts.ts index dfb77bddc..d453f4d14 100644 --- a/packages/typescript/ai-openai/src/adapters/tts.ts +++ b/packages/typescript/ai-openai/src/adapters/tts.ts @@ -1,10 +1,15 @@ -import { OpenAICompatibleTTSAdapter } from '@tanstack/ai-openai-compatible' +import OpenAI from 'openai' +import { BaseTTSAdapter } from '@tanstack/ai/adapters' +import { toRunErrorPayload } from '@tanstack/ai/adapter-internals' +import { arrayBufferToBase64, generateId } from '@tanstack/ai-utils' import { getOpenAIApiKeyFromEnv } from '../utils/client' import { validateAudioInput, validateInstructions, validateSpeed, } from '../audio/audio-provider-options' +import type { TTSOptions, TTSResult } from '@tanstack/ai' +import type OpenAI_SDK from 'openai' import type { OpenAITTSModel } from '../model-meta' import type { OpenAITTSProviderOptions } from '../audio/tts-provider-options' import type { OpenAIClientConfig } from '../utils/client' @@ -17,38 +22,32 @@ export interface OpenAITTSConfig extends OpenAIClientConfig {} /** * OpenAI Text-to-Speech Adapter * - * Tree-shakeable adapter for OpenAI TTS functionality. * Supports tts-1, tts-1-hd, and gpt-4o-audio-preview models. - * - * Features: - * - Multiple voice options: alloy, ash, ballad, coral, echo, fable, onyx, nova, sage, shimmer, verse - * - Multiple output formats: mp3, opus, aac, flac, wav, pcm - * - Speed control (0.25 to 4.0) + * Voices: alloy, ash, ballad, coral, echo, fable, onyx, nova, sage, shimmer, verse. + * Formats: mp3, opus, aac, flac, wav, pcm. Speed 0.25 to 4.0. 
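+ *
+ * @example
+ * ```typescript
+ * const adapter = openaiSpeech('tts-1');
+ * const result = await generateSpeech({ adapter, text: 'Hello, world!', voice: 'nova' });
+ * ```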
 */
-export class OpenAITTSAdapter<
-  TModel extends OpenAITTSModel,
-> extends OpenAICompatibleTTSAdapter<TModel, OpenAITTSProviderOptions> {
+export class OpenAITTSAdapter<
+  TModel extends OpenAITTSModel,
+> extends BaseTTSAdapter<
+  TModel,
+  OpenAITTSProviderOptions
+> {
   readonly name = 'openai' as const
 
+  protected client: OpenAI
+
   constructor(config: OpenAITTSConfig, model: TModel) {
-    super(config, model, 'openai')
+    super(model, {})
+    this.client = new OpenAI(config)
   }
 
-  protected override validateAudioInput(text: string): void {
-    // Delegate to OpenAI-specific validation that also validates model/voice/format
-    validateAudioInput({ input: text, model: this.model, voice: 'alloy' })
-  }
+  async generateSpeech(
+    options: TTSOptions,
+  ): Promise<TTSResult> {
+    const { model, text, voice, format, speed, modelOptions } = options
 
-  protected override validateSpeed(speed?: number): void {
+    validateAudioInput({ input: text, model: this.model, voice: 'alloy' })
     if (speed !== undefined) {
       validateSpeed({ speed, model: this.model, input: '', voice: 'alloy' })
     }
-  }
-
-  protected override validateInstructions(
-    model: string,
-    modelOptions?: OpenAITTSProviderOptions,
-  ): void {
     if (modelOptions) {
       validateInstructions({
         ...modelOptions,
@@ -57,29 +56,58 @@ export class OpenAITTSAdapter<
         voice: 'alloy',
       })
     }
+
+    const request: OpenAI_SDK.Audio.SpeechCreateParams = {
+      model,
+      input: text,
+      voice: (voice || 'alloy') as OpenAI_SDK.Audio.SpeechCreateParams['voice'],
+      response_format: format,
+      speed,
+      ...modelOptions,
+    }
+
+    try {
+      options.logger.request(
+        `activity=tts provider=${this.name} model=${model} format=${request.response_format ?? 'default'} voice=${request.voice}`,
+        { provider: this.name, model },
+      )
+      const response = await this.client.audio.speech.create(request)
+
+      // Convert response to base64. Buffer is Node-only; use atob fallback in
+      // browser/edge runtimes where the SDK can run.
+      const arrayBuffer = await response.arrayBuffer()
+      const base64 = arrayBufferToBase64(arrayBuffer)
+
+      const outputFormat = (request.response_format as string) || 'mp3'
+      const contentTypes: Record<string, string> = {
+        mp3: 'audio/mpeg',
+        opus: 'audio/opus',
+        aac: 'audio/aac',
+        flac: 'audio/flac',
+        wav: 'audio/wav',
+        pcm: 'audio/pcm',
+      }
+      const contentType = contentTypes[outputFormat] || 'audio/mpeg'
+
+      return {
+        id: generateId(this.name),
+        model,
+        audio: base64,
+        format: outputFormat,
+        contentType,
+      }
+    } catch (error: unknown) {
+      // Narrow before logging: raw SDK errors can carry request metadata
+      // (including auth headers) which we must never surface to user loggers.
+      options.logger.errors(`${this.name}.generateSpeech fatal`, {
+        error: toRunErrorPayload(error, `${this.name}.generateSpeech failed`),
+        source: `${this.name}.generateSpeech`,
+      })
+      throw error
+    }
   }
 }
 
-/**
- * Creates an OpenAI speech adapter with explicit API key.
- * Type resolution happens here at the call site.
- * - * @param model - The model name (e.g., 'tts-1', 'tts-1-hd') - * @param apiKey - Your OpenAI API key - * @param config - Optional additional configuration - * @returns Configured OpenAI speech adapter instance with resolved types - * - * @example - * ```typescript - * const adapter = createOpenaiSpeech('tts-1-hd', "sk-..."); - * - * const result = await generateSpeech({ - * adapter, - * text: 'Hello, world!', - * voice: 'nova' - * }); - * ``` - */ export function createOpenaiSpeech( model: TModel, apiKey: string, @@ -88,32 +116,6 @@ export function createOpenaiSpeech( return new OpenAITTSAdapter({ apiKey, ...config }, model) } -/** - * Creates an OpenAI speech adapter with automatic API key detection from environment variables. - * Type resolution happens here at the call site. - * - * Looks for `OPENAI_API_KEY` in: - * - `process.env` (Node.js) - * - `window.env` (Browser with injected env) - * - * @param model - The model name (e.g., 'tts-1', 'tts-1-hd') - * @param config - Optional configuration (excluding apiKey which is auto-detected) - * @returns Configured OpenAI speech adapter instance with resolved types - * @throws Error if OPENAI_API_KEY is not found in environment - * - * @example - * ```typescript - * // Automatically uses OPENAI_API_KEY from environment - * const adapter = openaiSpeech('tts-1'); - * - * const result = await generateSpeech({ - * adapter, - * text: 'Welcome to TanStack AI!', - * voice: 'alloy', - * format: 'mp3' - * }); - * ``` - */ export function openaiSpeech( model: TModel, config?: Omit, diff --git a/packages/typescript/ai-openai/src/adapters/video.ts b/packages/typescript/ai-openai/src/adapters/video.ts index 3bc193e41..e219d796c 100644 --- a/packages/typescript/ai-openai/src/adapters/video.ts +++ b/packages/typescript/ai-openai/src/adapters/video.ts @@ -1,4 +1,7 @@ -import { OpenAICompatibleVideoAdapter } from '@tanstack/ai-openai-compatible' +import OpenAI from 'openai' +import { BaseVideoAdapter } from '@tanstack/ai/adapters' +import { toRunErrorPayload } from '@tanstack/ai/adapter-internals' +import { arrayBufferToBase64 } from '@tanstack/ai-utils' import { getOpenAIApiKeyFromEnv } from '../utils/client' import { toApiSeconds, @@ -6,16 +9,38 @@ import { validateVideoSize, } from '../video/video-provider-options' import type { VideoModel } from 'openai/resources' +import type { + VideoGenerationOptions, + VideoJobResult, + VideoStatusResult, + VideoUrlResult, +} from '@tanstack/ai' +import type OpenAI_SDK from 'openai' import type { OpenAIVideoModel } from '../model-meta' import type { OpenAIVideoModelProviderOptionsByName, OpenAIVideoModelSizeByName, OpenAIVideoProviderOptions, } from '../video/video-provider-options' -import type { VideoGenerationOptions } from '@tanstack/ai' -import type OpenAI_SDK from 'openai' import type { OpenAIClientConfig } from '../utils/client' +/** + * Threshold for emitting a "this download will probably OOM serverless + * runtimes" warning. Anything larger than this (in bytes) gets surfaced via + * console.warn — workers and small isolates routinely run out of memory once + * a downloaded video is base64-encoded. + */ +const LARGE_MEDIA_BUFFER_BYTES = 10 * 1024 * 1024 + +function warnIfLargeMediaBuffer(byteLength: number, source: string): void { + if (byteLength <= LARGE_MEDIA_BUFFER_BYTES) return + console.warn( + `[openai.${source}] downloaded ${(byteLength / 1024 / 1024).toFixed(1)} MiB into memory before base64 encoding. ` + + `Workers/serverless runtimes commonly run out of memory above ~10 MiB. 
` +
+      `Consider streaming the video through a CDN or your own storage layer instead.`,
+  )
+}
+
 /**
  * Configuration for OpenAI video adapter.
  *
@@ -24,22 +49,13 @@ import type { OpenAIClientConfig } from '../utils/client'
 export interface OpenAIVideoConfig extends OpenAIClientConfig {}
 
 /**
- * OpenAI Video Generation Adapter
- *
- * Tree-shakeable adapter for OpenAI video generation functionality using Sora-2.
- * Uses a jobs/polling architecture for async video generation.
+ * OpenAI Video Generation Adapter (Sora-2). Job/polling architecture.
  *
  * @experimental Video generation is an experimental feature and may change.
- *
- * Features:
- * - Async job-based video generation
- * - Status polling for job progress
- * - URL retrieval for completed videos
- * - Model-specific type-safe provider options
  */
 export class OpenAIVideoAdapter<
   TModel extends OpenAIVideoModel,
-> extends OpenAICompatibleVideoAdapter<
+> extends BaseVideoAdapter<
   TModel,
   OpenAIVideoProviderOptions,
   OpenAIVideoModelProviderOptionsByName,
@@ -47,71 +63,243 @@ export class OpenAIVideoAdapter<
 > {
   readonly name = 'openai' as const
 
+  protected client: OpenAI
+  protected clientConfig: OpenAIVideoConfig
+
   constructor(config: OpenAIVideoConfig, model: TModel) {
-    super(config, model, 'openai')
+    super(config, model)
+    this.clientConfig = config
+    this.client = new OpenAI(config)
   }
 
-  protected override validateVideoSize(model: string, size?: string): void {
-    validateVideoSize(model, size)
-  }
+  async createVideoJob(
+    options: VideoGenerationOptions,
+  ): Promise<VideoJobResult> {
+    const { model, size, duration, modelOptions } = options
 
-  protected override validateVideoSeconds(
-    model: string,
-    seconds?: number | string,
-  ): void {
+    validateVideoSize(model, size)
+    const seconds = duration ?? modelOptions?.seconds
     validateVideoSeconds(model, seconds)
-  }
-
-  protected override buildRequest(
-    options: VideoGenerationOptions,
-  ): OpenAI_SDK.Videos.VideoCreateParams {
-    const { model, prompt, size, duration, modelOptions } = options
 
     const request: OpenAI_SDK.Videos.VideoCreateParams = {
       model: model as VideoModel,
-      prompt,
+      prompt: options.prompt,
     }
-
-    // Add size/resolution
-    // Supported: '1280x720', '720x1280', '1792x1024', '1024x1792'
     if (size) {
       request.size = size as OpenAI_SDK.Videos.VideoCreateParams['size']
     } else if (modelOptions?.size) {
      request.size = modelOptions.size
     }
-
-    // Add seconds (duration)
-    // Supported: '4', '8', or '12' - yes, the API wants strings
-    const seconds = duration ?? modelOptions?.seconds
     if (seconds !== undefined) {
       request.seconds = toApiSeconds(seconds)
     }
 
-    return request
+    try {
+      options.logger.request(
+        `activity=video.create provider=${this.name} model=${model} size=${request.size ?? 'default'} seconds=${request.seconds ?? 'default'}`,
+        { provider: this.name, model },
+      )
+      const videosClient = this.getVideosClient()
+      const response = await videosClient.create(request)
+      return { jobId: response.id, model }
+    } catch (error: any) {
+      options.logger.errors(`${this.name}.createVideoJob fatal`, {
+        error: toRunErrorPayload(error, `${this.name}.createVideoJob failed`),
+        source: `${this.name}.createVideoJob`,
+      })
+      if (error?.message?.includes('videos') || error?.code === 'invalid_api') {
+        throw new Error(
+          `Video generation API is not available. The API may require special access. ` +
+            `Original error: ${error.message}`,
+        )
+      }
+      throw error
+    }
+  }
+
+  /**
+   * The video API on the OpenAI SDK is still experimental and shipped on some
+   * SDK versions but not others; access through `videosClient` lets us treat
+   * the path uniformly even when the SDK lacks first-class typings here.
+   */
+  private getVideosClient(): {
+    create: (req: Record<string, unknown>) => Promise<{ id: string }>
+    retrieve: (id: string) => Promise<{
+      id: string
+      status: string
+      progress?: number
+      url?: string
+      expires_at?: number
+      error?: { message?: string }
+    }>
+    downloadContent?: (id: string) => Promise<Response>
+    content?: (id: string) => Promise<Response>
+    getContent?: (id: string) => Promise<Response>
+    download?: (id: string) => Promise<Response>
+  } {
+    return (this.client as unknown as { videos: any }).videos
+  }
+
+  async getVideoStatus(jobId: string): Promise<VideoStatusResult> {
+    try {
+      const videosClient = this.getVideosClient()
+      const response = await videosClient.retrieve(jobId)
+      return {
+        jobId,
+        status: this.mapStatus(response.status),
+        progress: response.progress,
+        error: response.error?.message,
+      }
+    } catch (error: any) {
+      if (error.status === 404) {
+        return { jobId, status: 'failed', error: 'Job not found' }
+      }
+      throw error
+    }
+  }
+
+  async getVideoUrl(jobId: string): Promise<VideoUrlResult> {
+    try {
+      const videosClient = this.getVideosClient()
+
+      // Prefer retrieve() because many openai-compatible backends (and the
+      // aimock test harness) return the URL directly on the video resource
+      // and do not implement a separate /content endpoint.
+      const videoInfo = await videosClient.retrieve(jobId)
+      if (videoInfo.url) {
+        return {
+          jobId,
+          url: videoInfo.url,
+          expiresAt: videoInfo.expires_at
+            ? new Date(videoInfo.expires_at * 1000) // expires_at is unix seconds
+            : undefined,
+        }
+      }
+
+      // SDK download fall-through: try the various possible method names.
+      if (typeof videosClient.downloadContent === 'function') {
+        const contentResponse = await videosClient.downloadContent(jobId)
+        const videoBlob = await contentResponse.blob()
+        const buffer = await videoBlob.arrayBuffer()
+        warnIfLargeMediaBuffer(buffer.byteLength, 'video.downloadContent')
+        const base64 = arrayBufferToBase64(buffer)
+        const mimeType =
+          contentResponse.headers.get('content-type') || 'video/mp4'
+        return {
+          jobId,
+          url: `data:${mimeType};base64,${base64}`,
+          expiresAt: undefined,
+        }
+      }
+
+      let response: any
+      if (typeof videosClient.content === 'function') {
+        response = await videosClient.content(jobId)
+      } else if (typeof videosClient.getContent === 'function') {
+        response = await videosClient.getContent(jobId)
+      } else if (typeof videosClient.download === 'function') {
+        response = await videosClient.download(jobId)
+      } else {
+        // Last resort: raw fetch with auth header.
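+        // i.e. GET {baseURL}/videos/{jobId}/content with an
+        // `Authorization: Bearer <apiKey>` header, which is what the absent
+        // SDK helpers are assumed to call under the hood.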
+ const baseUrl = this.clientConfig.baseURL || 'https://api.openai.com/v1' + const apiKey = this.clientConfig.apiKey + + const contentResponse = await fetch( + `${baseUrl}/videos/${jobId}/content`, + { method: 'GET', headers: { Authorization: `Bearer ${apiKey}` } }, + ) + + if (!contentResponse.ok) { + const contentType = contentResponse.headers.get('content-type') + if (contentType?.includes('application/json')) { + const errorData = await contentResponse.json().catch(() => ({})) + throw new Error( + errorData.error?.message || + `Failed to get video content: ${contentResponse.status}`, + ) + } + throw new Error( + `Failed to get video content: ${contentResponse.status}`, + ) + } + + const videoBlob = await contentResponse.blob() + const buffer = await videoBlob.arrayBuffer() + warnIfLargeMediaBuffer(buffer.byteLength, 'video.fetch') + const base64 = arrayBufferToBase64(buffer) + const mimeType = + contentResponse.headers.get('content-type') || 'video/mp4' + return { + jobId, + url: `data:${mimeType};base64,${base64}`, + expiresAt: undefined, + } + } + + // The fall-through SDK methods produce a Blob-ish or fetch-`Response`-ish + // object. Read as bytes + wrap in a data URL so callers see a playable + // URL instead of an endpoint URL. + const fallthroughBlob = + typeof response?.blob === 'function' + ? await response.blob() + : response instanceof Blob + ? response + : null + if (!fallthroughBlob) { + throw new Error( + `Video content download via SDK fall-through returned an unexpected shape (no blob()).`, + ) + } + const fallthroughBuffer = await fallthroughBlob.arrayBuffer() + warnIfLargeMediaBuffer(fallthroughBuffer.byteLength, 'video.sdkFallthrough') + const fallthroughBase64 = arrayBufferToBase64(fallthroughBuffer) + const fallthroughMime = + (typeof response?.headers?.get === 'function' + ? response.headers.get('content-type') + : undefined) || + fallthroughBlob.type || + 'video/mp4' + return { + jobId, + url: `data:${fallthroughMime};base64,${fallthroughBase64}`, + expiresAt: undefined, + } + } catch (error: any) { + if (error.status === 404) { + throw new Error(`Video job not found: ${jobId}`) + } + if (error.status === 400) { + throw new Error( + `Video is not ready for download. Check status first. Job ID: ${jobId}`, + ) + } + throw error + } + } + + protected mapStatus( + apiStatus: string, + ): 'pending' | 'processing' | 'completed' | 'failed' { + switch (apiStatus) { + case 'queued': + case 'pending': + return 'pending' + case 'processing': + case 'in_progress': + return 'processing' + case 'completed': + case 'succeeded': + return 'completed' + case 'failed': + case 'error': + case 'cancelled': + return 'failed' + default: + return 'processing' + } } } -/** - * Creates an OpenAI video adapter with an explicit API key. - * Type resolution happens here at the call site. - * - * @experimental Video generation is an experimental feature and may change. 
- * - * @param model - The model name (e.g., 'sora-2') - * @param apiKey - Your OpenAI API key - * @param config - Optional additional configuration - * @returns Configured OpenAI video adapter instance with resolved types - * - * @example - * ```typescript - * const adapter = createOpenaiVideo('sora-2', 'your-api-key'); - * - * const { jobId } = await generateVideo({ - * adapter, - * prompt: 'A beautiful sunset over the ocean' - * }); - * ``` - */ export function createOpenaiVideo( model: TModel, apiKey: string, @@ -120,39 +308,6 @@ export function createOpenaiVideo( return new OpenAIVideoAdapter({ apiKey, ...config }, model) } -/** - * Creates an OpenAI video adapter with automatic API key detection from environment variables. - * Type resolution happens here at the call site. - * - * Looks for `OPENAI_API_KEY` in: - * - `process.env` (Node.js) - * - `window.env` (Browser with injected env) - * - * @experimental Video generation is an experimental feature and may change. - * - * @param model - The model name (e.g., 'sora-2') - * @param config - Optional configuration (excluding apiKey which is auto-detected) - * @returns Configured OpenAI video adapter instance with resolved types - * @throws Error if OPENAI_API_KEY is not found in environment - * - * @example - * ```typescript - * // Automatically uses OPENAI_API_KEY from environment - * const adapter = openaiVideo('sora-2'); - * - * // Create a video generation job - * const { jobId } = await generateVideo({ - * adapter, - * prompt: 'A cat playing piano' - * }); - * - * // Poll for status - * const status = await getVideoJobStatus({ - * adapter, - * jobId - * }); - * ``` - */ export function openaiVideo( model: TModel, config?: Omit, diff --git a/packages/typescript/ai-openai/src/index.ts b/packages/typescript/ai-openai/src/index.ts index b2d6a1d26..a2ddd7221 100644 --- a/packages/typescript/ai-openai/src/index.ts +++ b/packages/typescript/ai-openai/src/index.ts @@ -11,9 +11,8 @@ export { type OpenAITextProviderOptions, } from './adapters/text' -// Summarize adapter - for text summarization +// Summarize - thin factory functions over @tanstack/ai's ChatStreamSummarizeAdapter export { - OpenAISummarizeAdapter, createOpenaiSummarize, openaiSummarize, type OpenAISummarizeConfig, diff --git a/packages/typescript/ai-openai/src/utils/client.ts b/packages/typescript/ai-openai/src/utils/client.ts index c1f5a1c6b..b8ef4a06c 100644 --- a/packages/typescript/ai-openai/src/utils/client.ts +++ b/packages/typescript/ai-openai/src/utils/client.ts @@ -1,7 +1,14 @@ import { getApiKeyFromEnv } from '@tanstack/ai-utils' -import type { OpenAICompatibleClientConfig } from '@tanstack/ai-openai-compatible' +import type { ClientOptions } from 'openai' -export interface OpenAIClientConfig extends OpenAICompatibleClientConfig {} +/** + * OpenAI client configuration. Pass through to `new OpenAI(...)`. `apiKey` + * is required so the openai-compatible adapters don't need to handle a + * missing-key case at construction time. 
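+ *
+ * @example
+ * ```typescript
+ * const config: OpenAIClientConfig = { apiKey: 'sk-...' }
+ * ```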
+ */
+export interface OpenAIClientConfig extends Omit<ClientOptions, 'apiKey'> {
+  apiKey: string
+}
 
 /**
  * Gets OpenAI API key from environment variables
diff --git a/packages/typescript/ai-openrouter/package.json b/packages/typescript/ai-openrouter/package.json
index 87faf4e9a..2b3fcef6e 100644
--- a/packages/typescript/ai-openrouter/package.json
+++ b/packages/typescript/ai-openrouter/package.json
@@ -50,6 +50,7 @@
   "devDependencies": {
     "@tanstack/ai": "workspace:*",
     "@vitest/coverage-v8": "4.0.14",
+    "openai": "^6.9.1",
     "vite": "^7.2.7",
     "zod": "^4.2.0"
   },
diff --git a/packages/typescript/ai-openrouter/src/adapters/responses-text.ts b/packages/typescript/ai-openrouter/src/adapters/responses-text.ts
index f0e9fdfb0..cbef702a0 100644
--- a/packages/typescript/ai-openrouter/src/adapters/responses-text.ts
+++ b/packages/typescript/ai-openrouter/src/adapters/responses-text.ts
@@ -102,21 +102,10 @@ export class OpenRouterResponsesTextAdapter<
   readonly kind = 'text' as const
   readonly name = 'openrouter-responses' as const
 
-  /** OpenRouter SDK client. The base's `this.client` (an OpenAI client) is
-   * unused because we override the SDK-call hooks below. */
   protected orClient: OpenRouter
 
   constructor(config: OpenRouterResponsesConfig, model: TModel) {
-    // The base needs an OpenAICompatibleClientConfig to construct an OpenAI
-    // client we never use. The OpenRouter SDK supports a Promise-returning
-    // apiKey getter; the OpenAI SDK's constructor here is a no-op for our
-    // purposes, so any string suffices.
-    const apiKey = typeof config.apiKey === 'string' ? config.apiKey : 'unused'
-    super(
-      { apiKey, baseURL: 'https://openrouter.ai/api/v1' },
-      model,
-      'openrouter-responses',
-    )
+    super(model, 'openrouter-responses')
     this.orClient = new OpenRouter(config)
   }
 
@@ -153,7 +142,10 @@
     // structurally so we don't need to depend on the SDK's class export.
     const stream = (await this.orClient.beta.responses.send(
       { responsesRequest: { ...responsesRequest, stream: true } },
-      { signal: requestOptions.signal ?? undefined },
+      {
+        signal: requestOptions.signal ?? undefined,
+        ...(requestOptions.headers && { headers: requestOptions.headers }),
+      },
     )) as unknown as AsyncIterable
     return adaptOpenRouterResponsesStreamEvents(stream)
   }
@@ -168,7 +160,10 @@
 >
     const result = await this.orClient.beta.responses.send(
       { responsesRequest: { ...responsesRequest, stream: false } },
-      { signal: requestOptions.signal ?? undefined },
+      {
+        signal: requestOptions.signal ?? undefined,
+        ...(requestOptions.headers && { headers: requestOptions.headers }),
+      },
     )
     return adaptOpenRouterResponsesResult(result)
   }
@@ -519,13 +514,21 @@
       case 'error': {
-        // Stringify code so provider codes (401/429/500/…) survive
-        // `toRunErrorPayload`, mirroring the chat-completions fix in
-        // commit 0171b18e.
+        // The base reads `chunk.error.code` directly into a string-typed
+        // RUN_ERROR.code slot (no `toRunErrorPayload` narrowing on this path),
+        // so coerce here. Typeof-narrow rather than `!= null` so objects /
+        // symbols / non-finite numbers fall through to undefined instead of
+        // shipping `"[object Object]"`.
+        const code =
+          typeof e.code === 'string'
+            ? e.code
+            : typeof e.code === 'number' && Number.isFinite(e.code)
+              ? String(e.code)
+              : undefined
         yield {
           type: 'error',
           message: e.message,
-          code: e.code != null ? String(e.code) : undefined,
+          code,
           param: e.param,
           sequence_number: e.sequenceNumber,
         } as unknown as ResponseStreamEvent
         break
       }
@@ -578,13 +581,18 @@ function toSnakeResponseResult(r: any): Record {
     ? r.output.map((it: any) => toSnakeOutputItem(it))
     : r.output,
   ...(r.error && {
-    // Stringify provider error codes so they survive `toRunErrorPayload`'s
-    // string-only `code` filter — mirrors the top-level `'error'` event
-    // branch in `adaptOpenRouterResponsesStreamEvents` and the chat-
-    // completions fix in commit 0171b18e.
+    // Typeof-narrow the code (same rule as `normalizeCode` in
+    // `toRunErrorPayload`) — the base reads `chunk.response.error?.code`
+    // directly into a string slot, so object/symbol/NaN must fall through
+    // to undefined rather than ship `"[object Object]"`.
     error: {
       message: r.error.message,
-      code: r.error.code != null ? String(r.error.code) : undefined,
+      code:
+        typeof r.error.code === 'string'
+          ? r.error.code
+          : typeof r.error.code === 'number' && Number.isFinite(r.error.code)
+            ? String(r.error.code)
+            : undefined,
     },
   }),
 }
diff --git a/packages/typescript/ai-openrouter/src/adapters/text.ts b/packages/typescript/ai-openrouter/src/adapters/text.ts
index bf6089479..06c23130a 100644
--- a/packages/typescript/ai-openrouter/src/adapters/text.ts
+++ b/packages/typescript/ai-openrouter/src/adapters/text.ts
@@ -101,21 +101,10 @@ export class OpenRouterTextAdapter<
   readonly kind = 'text' as const
   readonly name = 'openrouter' as const
 
-  /** OpenRouter SDK client. The base's `this.client` (an OpenAI client) is
-   * unused because we override the SDK-call hooks below. */
   protected orClient: OpenRouter
 
   constructor(config: OpenRouterConfig, model: TModel) {
-    // The base needs an OpenAICompatibleClientConfig to construct an OpenAI
-    // client we never use. The OpenRouter SDK supports a Promise-returning
-    // apiKey getter; the OpenAI SDK's constructor here is a no-op for our
-    // purposes, so any string suffices.
-    const apiKey = typeof config.apiKey === 'string' ? config.apiKey : 'unused'
-    super(
-      { apiKey, baseURL: 'https://openrouter.ai/api/v1' },
-      model,
-      'openrouter',
-    )
+    super(model, 'openrouter')
    this.orClient = new OpenRouter(config)
   }
 
@@ -132,7 +121,10 @@
     const chatRequest = toOpenRouterRequest(params, true)
     const stream = (await this.orClient.chat.send(
       { chatRequest: { ...chatRequest, stream: true } },
-      { signal: requestOptions.signal ?? undefined },
+      {
+        signal: requestOptions.signal ?? undefined,
+        ...(requestOptions.headers && { headers: requestOptions.headers }),
+      },
     )) as AsyncIterable
     return adaptOpenRouterStreamChunks(stream)
   }
@@ -144,7 +136,10 @@
     const chatRequest = toOpenRouterRequest(params, false)
     const response = await this.orClient.chat.send(
       { chatRequest: { ...chatRequest, stream: false } },
-      { signal: requestOptions.signal ?? undefined },
+      {
+        signal: requestOptions.signal ?? undefined,
+        ...(requestOptions.headers && { headers: requestOptions.headers }),
+      },
     )
     // The base only reads `response.choices[0]?.message.content`. The SDK's
     // non-streaming response carries that under the same path.
@@ -163,13 +158,12 @@
   }
 
   protected override extractReasoning(
-    chunk: ChatCompletionChunk,
+    chunk: unknown,
   ): { text: string } | undefined {
     // The chunk-adapter stashes the raw reasoning deltas on a non-standard
     // field so we don't need to round-trip them through camelCase ↔
     // snake_case on the OpenAI Chat Completions chunk schema.
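     // (e.g. a converted chunk carrying `_reasoningText: 'Thinking...'` comes
     // out of this hook as `{ text: 'Thinking...' }`.)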
- const reasoning = (chunk as unknown as { _reasoningText?: string }) - ._reasoningText + const reasoning = (chunk as { _reasoningText?: string })._reasoningText return reasoning ? { text: reasoning } : undefined } @@ -515,16 +509,14 @@ async function* adaptOpenRouterStreamChunks( } // Surface upstream errors so the base can route them to RUN_ERROR. - // Stringify code: OpenRouter's chunk error.code is numeric (401, 429, - // 500, …) but `toRunErrorPayload` drops non-string codes, which would - // silently lose provider error codes from the RUN_ERROR payload. + // `toRunErrorPayload` handles both string and finite-number codes; any + // other shape (object/array/symbol/NaN) falls through to undefined + // rather than serialising to "[object Object]". if ((chunk as any).error) { const errObj = (chunk as any).error throw Object.assign( new Error(errObj.message || 'OpenRouter stream error'), - { - code: errObj.code != null ? String(errObj.code) : undefined, - }, + { code: errObj.code }, ) } diff --git a/packages/typescript/ai-openrouter/src/text/responses-provider-options.ts b/packages/typescript/ai-openrouter/src/text/responses-provider-options.ts index 685233f78..af4f9f872 100644 --- a/packages/typescript/ai-openrouter/src/text/responses-provider-options.ts +++ b/packages/typescript/ai-openrouter/src/text/responses-provider-options.ts @@ -17,7 +17,6 @@ export type OpenRouterResponsesCommonOptions = Pick< | 'sessionId' | 'metadata' | 'trace' - | 'parallelToolCalls' | 'modalities' | 'serviceTier' | 'safetyIdentifier' @@ -34,6 +33,10 @@ export type OpenRouterResponsesCommonOptions = Pick< variant?: 'free' | 'nitro' | 'online' | 'exacto' | 'extended' | 'thinking' } +// `parallelToolCalls` lives in BaseOptions alongside `toolChoice` (the other +// tool-related knob). Listing it in both picks would let an SDK rename of +// either pick still type-check through the survivor, defeating the static +// gate the picks exist for. export type OpenRouterResponsesBaseOptions = Pick< ResponsesRequest, | 'maxOutputTokens' diff --git a/packages/typescript/ai-openrouter/tests/openrouter-adapter.test.ts b/packages/typescript/ai-openrouter/tests/openrouter-adapter.test.ts index f6e83df18..5c209836b 100644 --- a/packages/typescript/ai-openrouter/tests/openrouter-adapter.test.ts +++ b/packages/typescript/ai-openrouter/tests/openrouter-adapter.test.ts @@ -1,5 +1,5 @@ import { beforeEach, describe, expect, it, vi } from 'vitest' -import { chat } from '@tanstack/ai' +import { EventType, chat } from '@tanstack/ai' import { resolveDebugOption } from '@tanstack/ai/adapter-internals' import { ChatRequest$outboundSchema } from '@openrouter/sdk/models' import { createOpenRouterText } from '../src/adapters/text' @@ -842,13 +842,44 @@ describe('OpenRouter AG-UI event emission', () => { expect(runErrorChunk).toBeDefined() if (runErrorChunk?.type === 'RUN_ERROR') { expect(runErrorChunk.error?.message).toBe('Rate limit exceeded') - // Provider error codes arrive as numbers (429, 500, etc.) but - // toRunErrorPayload only retains string codes — the chunk adapter - // must stringify before throwing. + // Provider error codes arrive as numbers (429, 500, etc.). The chunk + // adapter passes the raw value through and `toRunErrorPayload` coerces + // finite numbers via `String(...)`. 
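+        // (e.g. 429 → '429'; non-finite numbers and object-shaped codes come
+        // out as undefined instead of '[object Object]'.)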
expect(runErrorChunk.error?.code).toBe('429') } }) + it('drops object-shaped error.code rather than shipping "[object Object]"', async () => { + // A misbehaving upstream sending an object as `error.code` previously + // surfaced as `code: "[object Object]"` in RUN_ERROR because the chunk + // adapter pre-stringified anything non-null. The current code path passes + // the raw value through; `toRunErrorPayload`'s typeof gate drops it. + const streamChunks = [ + { + id: 'chatcmpl-bad', + model: 'openai/gpt-4o-mini', + choices: [] as Array, + error: { message: 'weird', code: { nested: 'oops' } }, + }, + ] + setupMockSdkClient(streamChunks) + const adapter = createAdapter() + const chunks: Array = [] + for await (const chunk of adapter.chatStream({ + model: 'openai/gpt-4o-mini', + messages: [{ role: 'user', content: 'hi' }], + logger: testLogger, + })) { + chunks.push(chunk) + } + const runErr = chunks.find((c) => c.type === 'RUN_ERROR') + expect(runErr).toBeDefined() + if (runErr?.type === 'RUN_ERROR') { + expect(runErr.error?.message).toBe('weird') + expect(runErr.error?.code).toBeUndefined() + } + }) + it('emits STEP_STARTED and STEP_FINISHED for reasoning content', async () => { const streamChunks = [ { @@ -1726,6 +1757,94 @@ describe('OpenRouter STEP event consistency', () => { expect(stepStarted).toHaveLength(1) expect(stepFinished).toHaveLength(1) }) + + it('emits the spec REASONING_* lifecycle alongside the legacy STEP_* events', async () => { + // The base now exposes both the legacy STEP_STARTED/STEP_FINISHED pair + // (kept for backwards compatibility with consumers built against the + // pre-spec stream) AND the spec REASONING_START / REASONING_MESSAGE_* / + // REASONING_END events. Dropping any of the REASONING_* events would + // silently break consumers that migrated to the new shape. + const streamChunks = [ + { + id: 'r-1', + model: 'openai/o1-preview', + choices: [ + { + delta: { + reasoningDetails: [ + { type: 'reasoning.text', text: 'Thinking...' }, + ], + }, + finishReason: null, + }, + ], + }, + { + id: 'r-1', + model: 'openai/o1-preview', + choices: [ + { + delta: { + reasoningDetails: [{ type: 'reasoning.text', text: ' done.' }], + }, + finishReason: null, + }, + ], + }, + { + id: 'r-1', + model: 'openai/o1-preview', + choices: [ + { delta: { content: 'Final answer.' 
}, finishReason: null }, + ], + }, + { + id: 'r-1', + model: 'openai/o1-preview', + choices: [{ delta: {}, finishReason: 'stop' }], + }, + ] + + setupMockSdkClient(streamChunks) + const adapter = createAdapter() + const chunks: Array = [] + for await (const chunk of adapter.chatStream({ + model: 'openai/o1-preview', + messages: [{ role: 'user', content: 'q' }], + logger: testLogger, + })) { + chunks.push(chunk) + } + const types = chunks.map((c) => c.type) + const reasoningStart = types.indexOf(EventType.REASONING_START) + const reasoningMessageStart = types.indexOf( + EventType.REASONING_MESSAGE_START, + ) + const reasoningMessageContent = types.indexOf( + EventType.REASONING_MESSAGE_CONTENT, + ) + const reasoningMessageEnd = types.indexOf(EventType.REASONING_MESSAGE_END) + const reasoningEnd = types.indexOf(EventType.REASONING_END) + expect(reasoningStart).toBeGreaterThanOrEqual(0) + expect(reasoningMessageStart).toBeGreaterThan(reasoningStart) + expect(reasoningMessageContent).toBeGreaterThan(reasoningMessageStart) + expect(reasoningMessageEnd).toBeGreaterThan(reasoningMessageContent) + expect(reasoningEnd).toBeGreaterThan(reasoningMessageEnd) + + // Joining REASONING_MESSAGE_CONTENT deltas reproduces the full reasoning + // text — the migration leaves the new-spec event shape semantically + // equivalent to the legacy STEP_FINISHED accumulator without losing data. + const reasoningDeltas = chunks + .filter( + ( + c, + ): c is Extract => + c.type === 'REASONING_MESSAGE_CONTENT', + ) + .map((c) => c.delta) + .join('') + expect(reasoningDeltas).toBe('Thinking... done.') + }) }) describe('OpenRouter SDK constructor wiring', () => { @@ -1817,7 +1936,37 @@ describe('OpenRouter stream_options conversion', () => { // user-initiated aborts actually reach the SDK rather than letting the // request continue burning tokens silently. const [, options] = mockSend.mock.calls[0]! - expect(options).toEqual({ signal: controller.signal }) + expect(options.signal).toBe(controller.signal) + }) + + it('forwards caller-supplied request headers to the SDK call', async () => { + setupMockSdkClient([ + { + id: 'x', + model: 'openai/gpt-4o-mini', + choices: [{ delta: { content: 'hi' }, finishReason: 'stop' }], + }, + ]) + const adapter = createAdapter() + const headers = { + 'X-Trace-Id': 'trace-123', + 'X-End-User': 'user-abc', + } + + for await (const _ of adapter.chatStream({ + model: 'openai/gpt-4o-mini', + messages: [{ role: 'user', content: 'hi' }], + logger: testLogger, + request: { headers } as any, + })) { + // consume + } + + // Custom tracing / end-user identifiers passed via options.request.headers + // must reach the SDK — otherwise observability tags are silently dropped + // only for OpenRouter while other providers preserve them. + const [, options] = mockSend.mock.calls[0]! + expect(options.headers).toEqual(headers) }) it('maps RequestAbortedError from the SDK to RUN_ERROR with code: aborted', async () => { @@ -1850,43 +1999,54 @@ describe('OpenRouter convertMessage fail-loud guards', () => { vi.clearAllMocks() }) - it('throws when a user message has empty text content', async () => { + it('surfaces empty user-message guard as RUN_ERROR (no paid request)', async () => { setupMockSdkClient([]) const adapter = createAdapter() - // mapOptionsToRequest runs before chatStream's try block, so the - // fail-loud guard surfaces as a synchronous iterator throw — verifies - // we never made a paid request with an empty user message. 
- await expect(async () => { - for await (const _ of adapter.chatStream({ - model: 'openai/gpt-4o-mini', - messages: [{ role: 'user', content: '' }], - logger: testLogger, - })) { - // consume - } - }).rejects.toThrow(/empty text content/i) + // mapOptionsToRequest runs inside chatStream's try block, so the + // fail-loud guard surfaces as a RUN_ERROR event instead of an iterator + // throw — uniform error contract for callers, and we still never make a + // paid request with an empty user message. + const events: Array = [] + for await (const evt of adapter.chatStream({ + model: 'openai/gpt-4o-mini', + messages: [{ role: 'user', content: '' }], + logger: testLogger, + })) { + events.push(evt) + } + const runError = events.find( + (e): e is Extract => + e.type === EventType.RUN_ERROR, + ) + expect(runError).toBeDefined() + expect(runError!.message).toMatch(/empty text content/i) expect(mockSend).not.toHaveBeenCalled() }) - it('throws on unsupported content-part types instead of dropping them', async () => { + it('surfaces unsupported content-part guard as RUN_ERROR (no paid request)', async () => { setupMockSdkClient([]) const adapter = createAdapter() - await expect(async () => { - for await (const _ of adapter.chatStream({ - model: 'openai/gpt-4o-mini', - messages: [ - { - role: 'user', - content: [{ type: 'mystery-type' as any, content: 'x' } as any], - }, - ], - logger: testLogger, - })) { - // consume - } - }).rejects.toThrow(/unsupported content part/i) + const events: Array = [] + for await (const evt of adapter.chatStream({ + model: 'openai/gpt-4o-mini', + messages: [ + { + role: 'user', + content: [{ type: 'mystery-type' as any, content: 'x' } as any], + }, + ], + logger: testLogger, + })) { + events.push(evt) + } + const runError = events.find( + (e): e is Extract => + e.type === EventType.RUN_ERROR, + ) + expect(runError).toBeDefined() + expect(runError!.message).toMatch(/unsupported content part/i) expect(mockSend).not.toHaveBeenCalled() }) diff --git a/packages/typescript/ai-openrouter/tests/openrouter-responses-adapter.test.ts b/packages/typescript/ai-openrouter/tests/openrouter-responses-adapter.test.ts index 0d5817605..318672b5f 100644 --- a/packages/typescript/ai-openrouter/tests/openrouter-responses-adapter.test.ts +++ b/packages/typescript/ai-openrouter/tests/openrouter-responses-adapter.test.ts @@ -1,5 +1,5 @@ import { beforeEach, describe, expect, it, vi } from 'vitest' -import { chat } from '@tanstack/ai' +import { EventType, chat } from '@tanstack/ai' import { resolveDebugOption } from '@tanstack/ai/adapter-internals' import { ResponsesRequest$outboundSchema } from '@openrouter/sdk/models' import { createOpenRouterResponsesText } from '../src/adapters/responses-text' @@ -194,19 +194,314 @@ describe('OpenRouter responses adapter — request shape', () => { expect(params.model).toBe('openai/gpt-4o-mini:thinking') }) - it('rejects webSearchTool() with a clear error pointing at the chat adapter', async () => { + it('rejects webSearchTool() as RUN_ERROR pointing at the chat adapter', async () => { const adapter = createAdapter() const ws = webSearchTool() as unknown as Tool - await expect(async () => { - for await (const _ of adapter.chatStream({ - model: 'openai/gpt-4o-mini' as any, - messages: [{ role: 'user', content: 'hi' }], - tools: [ws], - logger: testLogger, - })) { - // consume - } - }).rejects.toThrow(/openRouterText/) + const events: Array = [] + for await (const evt of adapter.chatStream({ + model: 'openai/gpt-4o-mini' as any, + messages: [{ role: 'user', 
content: 'hi' }], + tools: [ws], + logger: testLogger, + })) { + events.push(evt) + } + const runError = events.find( + (e): e is Extract => + e.type === EventType.RUN_ERROR, + ) + expect(runError).toBeDefined() + expect(runError!.message).toMatch(/openRouterText/) + }) + + it('falls back audio URL → input_file (chat-completions audio input is base64-only)', async () => { + setupMockSdkClient([ + { + type: 'response.completed', + sequenceNumber: 1, + response: { + model: 'm', + output: [], + usage: { inputTokens: 0, outputTokens: 0, totalTokens: 0 }, + }, + }, + ]) + const adapter = createAdapter() + for await (const _ of chat({ + adapter, + messages: [ + { + role: 'user', + content: [ + { + type: 'audio', + source: { + type: 'url', + value: 'https://example.com/clip.mp3', + } as any, + } as any, + ], + }, + ], + })) { + // consume + } + const params = mockSend.mock.calls[0]![0].responsesRequest + const userMsg = params.input.find((i: any) => i.role === 'user') + expect(userMsg).toBeDefined() + const audioPart = userMsg.content.find( + (p: any) => p.type === 'input_file', + ) + expect(audioPart).toBeDefined() + expect(audioPart.fileUrl).toBe('https://example.com/clip.mp3') + }) + + it('builds fileData data URI for inline document parts', async () => { + setupMockSdkClient([ + { + type: 'response.completed', + sequenceNumber: 1, + response: { + model: 'm', + output: [], + usage: { inputTokens: 0, outputTokens: 0, totalTokens: 0 }, + }, + }, + ]) + const adapter = createAdapter() + for await (const _ of chat({ + adapter, + messages: [ + { + role: 'user', + content: [ + { + type: 'document', + source: { + type: 'data', + value: 'aGVsbG8=', + mimeType: 'application/pdf', + } as any, + } as any, + ], + }, + ], + })) { + // consume + } + const params = mockSend.mock.calls[0]![0].responsesRequest + const userMsg = params.input.find((i: any) => i.role === 'user') + const docPart = userMsg.content.find((p: any) => p.type === 'input_file') + expect(docPart).toBeDefined() + expect(docPart.fileData).toBe('data:application/pdf;base64,aGVsbG8=') + // Survives the SDK's outbound Zod schema (key strip would drop fileData) + const serialized = ResponsesRequest$outboundSchema.parse(params) + expect(JSON.stringify(serialized)).toContain( + 'data:application/pdf;base64,aGVsbG8=', + ) + }) + + it('defaults image data-URI mimeType to application/octet-stream when omitted', async () => { + setupMockSdkClient([ + { + type: 'response.completed', + sequenceNumber: 1, + response: { + model: 'm', + output: [], + usage: { inputTokens: 0, outputTokens: 0, totalTokens: 0 }, + }, + }, + ]) + const adapter = createAdapter() + for await (const _ of chat({ + adapter, + messages: [ + { + role: 'user', + content: [ + { + type: 'image', + source: { type: 'data', value: 'aGVsbG8=' } as any, + } as any, + ], + }, + ], + })) { + // consume + } + const params = mockSend.mock.calls[0]![0].responsesRequest + const userMsg = params.input.find((i: any) => i.role === 'user') + const imgPart = userMsg.content.find((p: any) => p.type === 'input_image') + expect(imgPart).toBeDefined() + expect(imgPart.imageUrl).toBe( + 'data:application/octet-stream;base64,aGVsbG8=', + ) + }) + + it('routes video parts as input_video with camelCase videoUrl that survives Zod', async () => { + setupMockSdkClient([ + { + type: 'response.completed', + sequenceNumber: 1, + response: { + model: 'm', + output: [], + usage: { inputTokens: 0, outputTokens: 0, totalTokens: 0 }, + }, + }, + ]) + const adapter = createAdapter() + for await (const _ of chat({ + 
adapter, + messages: [ + { + role: 'user', + content: [ + { + type: 'video', + source: { value: 'https://example.com/v.mp4' } as any, + } as any, + ], + }, + ], + })) { + // consume + } + const params = mockSend.mock.calls[0]![0].responsesRequest + const userMsg = params.input.find((i: any) => i.role === 'user') + const videoPart = userMsg.content.find( + (p: any) => p.type === 'input_video', + ) + expect(videoPart).toBeDefined() + expect(videoPart.videoUrl).toBe('https://example.com/v.mp4') + // The outbound schema would strip the camelCase videoUrl if the converter + // emitted snake_case (or any other key shape). + const serialized = ResponsesRequest$outboundSchema.parse(params) + expect(JSON.stringify(serialized)).toContain('https://example.com/v.mp4') + }) + + it('stringifies object-shaped assistant tool-call arguments for the SDK', async () => { + setupMockSdkClient([ + { + type: 'response.completed', + sequenceNumber: 1, + response: { + model: 'm', + output: [], + usage: { inputTokens: 0, outputTokens: 0, totalTokens: 0 }, + }, + }, + ]) + const adapter = createAdapter() + for await (const _ of chat({ + adapter, + messages: [ + { + role: 'assistant', + content: null, + toolCalls: [ + { + id: 'call_obj', + type: 'function', + function: { + name: 'lookup_weather', + arguments: { location: 'Berlin' } as any, + }, + }, + ], + }, + { role: 'tool', toolCallId: 'call_obj', content: '{"temp":72}' }, + ], + })) { + // consume + } + const params = mockSend.mock.calls[0]![0].responsesRequest + const fnCall = params.input.find( + (i: any) => i.type === 'function_call' && i.callId === 'call_obj', + ) + expect(fnCall).toBeDefined() + expect(typeof fnCall.arguments).toBe('string') + expect(JSON.parse(fnCall.arguments)).toEqual({ location: 'Berlin' }) + }) + + it('extracts text from array-shaped tool message content rather than JSON-stringifying parts', async () => { + setupMockSdkClient([ + { + type: 'response.completed', + sequenceNumber: 1, + response: { + model: 'm', + output: [], + usage: { inputTokens: 0, outputTokens: 0, totalTokens: 0 }, + }, + }, + ]) + const adapter = createAdapter() + for await (const _ of chat({ + adapter, + messages: [ + { + role: 'assistant', + content: null, + toolCalls: [ + { + id: 'call_arr', + type: 'function', + function: { name: 'lookup_weather', arguments: '{}' }, + }, + ], + }, + { + role: 'tool', + toolCallId: 'call_arr', + content: [ + { type: 'text', content: '{"temp":' } as any, + { type: 'text', content: '72}' } as any, + ] as any, + }, + ], + })) { + // consume + } + const params = mockSend.mock.calls[0]![0].responsesRequest + const fcOutput = params.input.find( + (i: any) => i.type === 'function_call_output', + ) + expect(fcOutput).toBeDefined() + expect(fcOutput.output).toBe('{"temp":72}') + expect(fcOutput.output).not.toContain('"type"') + }) + + it('throws on inline document data via chat-completions adapter (rejects base64 PDF inline)', async () => { + // Cross-adapter assertion: the chat-completions sibling must throw on + // inline document data so callers know to use the Responses adapter. 
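+    // (Inline documents are supported on the Responses path instead, as
+    // `input_file` with a base64 `fileData` data URI — see the fileData test
+    // above.)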
+ const { createOpenRouterText } = await import('../src/adapters/text') + const chatAdapter = createOpenRouterText('openai/gpt-4o-mini' as any, 'k') + const events: Array = [] + for await (const evt of chatAdapter.chatStream({ + model: 'openai/gpt-4o-mini', + messages: [ + { + role: 'user', + content: [ + { + type: 'document', + source: { type: 'data', value: 'aGVsbG8=' } as any, + } as any, + ], + }, + ], + logger: testLogger, + })) { + events.push(evt) + } + const runError = events.find( + (e): e is Extract => + e.type === EventType.RUN_ERROR, + ) + expect(runError).toBeDefined() + expect(runError!.message.toLowerCase()).toMatch(/inline.*document|document.*inline|responses adapter/) }) }) @@ -405,6 +700,42 @@ describe('OpenRouter responses adapter — stream event bridge', () => { expect(err.error.code).toBe('429') }) + it('drops object-shaped error.code rather than shipping "[object Object]"', async () => { + setupMockSdkClient([ + { + type: 'response.created', + sequenceNumber: 0, + response: { model: 'm', output: [] }, + }, + { + type: 'response.failed', + sequenceNumber: 1, + response: { + model: 'm', + output: [], + error: { message: 'malformed', code: { nested: 'oops' } as any }, + }, + }, + ]) + const adapter = createAdapter() + const chunks: Array = [] + for await (const c of adapter.chatStream({ + model: 'openai/gpt-4o-mini' as any, + messages: [{ role: 'user', content: 'hi' }], + logger: testLogger, + })) { + chunks.push(c) + } + const err = chunks.find((c) => c.type === 'RUN_ERROR') as any + expect(err).toBeDefined() + expect(err.message).toBe('malformed') + // Object-shaped code must fall through to undefined rather than being + // stringified as "[object Object]" — the typeof narrowing matches + // normalizeCode's contract in toRunErrorPayload. + expect(err.code).toBeUndefined() + expect(err.error.code).toBeUndefined() + }) + it('stringifies non-string error.code on response.failed events', async () => { setupMockSdkClient([ { @@ -616,6 +947,32 @@ describe('OpenRouter responses adapter — SDK constructor wiring', () => { // consume } const [, options] = mockSend.mock.calls[0]! - expect(options).toEqual({ signal: controller.signal }) + expect(options.signal).toBe(controller.signal) + }) + + it('forwards caller-supplied request headers to the SDK call', async () => { + setupMockSdkClient([ + { + type: 'response.completed', + sequenceNumber: 1, + response: { + model: 'm', + output: [], + usage: { inputTokens: 0, outputTokens: 0, totalTokens: 0 }, + }, + }, + ]) + const adapter = createAdapter() + const headers = { 'X-Trace-Id': 'trace-r1' } + for await (const _ of adapter.chatStream({ + model: 'openai/gpt-4o-mini' as any, + messages: [{ role: 'user', content: 'hi' }], + logger: testLogger, + request: { headers } as any, + })) { + // consume + } + const [, options] = mockSend.mock.calls[0]! + expect(options.headers).toEqual(headers) }) }) diff --git a/packages/typescript/ai/src/activities/error-payload.ts b/packages/typescript/ai/src/activities/error-payload.ts index 33a6b8157..3ab7d101d 100644 --- a/packages/typescript/ai/src/activities/error-payload.ts +++ b/packages/typescript/ai/src/activities/error-payload.ts @@ -18,6 +18,17 @@ const ABORT_ERROR_NAMES = new Set([ 'RequestAbortedError', ]) +// HTTP status codes carried as numbers (e.g. `error.status = 429`) are a +// common variant on SDK error classes; coerce so the resulting `code` field +// is stable as a string for downstream consumers. 
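+// e.g. normalizeCode(429) → '429', normalizeCode('429') → '429',
+// normalizeCode(Number.NaN) → undefined, normalizeCode({ nested: 1 }) → undefined.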
+function normalizeCode(codeField: unknown): string | undefined { + if (typeof codeField === 'string') return codeField + if (typeof codeField === 'number' && Number.isFinite(codeField)) { + return String(codeField) + } + return undefined +} + export function toRunErrorPayload( error: unknown, fallbackMessage = 'Unknown error occurred', @@ -32,7 +43,7 @@ export function toRunErrorPayload( const codeField = (error as Error & { code?: unknown }).code return { message: error.message || fallbackMessage, - code: typeof codeField === 'string' ? codeField : undefined, + code: normalizeCode(codeField), } } if (typeof error === 'object' && error !== null) { @@ -43,7 +54,7 @@ export function toRunErrorPayload( typeof messageField === 'string' && messageField.length > 0 ? messageField : fallbackMessage, - code: typeof codeField === 'string' ? codeField : undefined, + code: normalizeCode(codeField), } } if (typeof error === 'string' && error.length > 0) { diff --git a/packages/typescript/ai/src/activities/index.ts b/packages/typescript/ai/src/activities/index.ts index b69e8f423..d3864dd50 100644 --- a/packages/typescript/ai/src/activities/index.ts +++ b/packages/typescript/ai/src/activities/index.ts @@ -60,6 +60,10 @@ export { type SummarizeAdapterConfig, type AnySummarizeAdapter, } from './summarize/adapter' +export { + ChatStreamSummarizeAdapter, + type ChatStreamCapable, +} from './summarize/chat-stream-wrapper' // =========================== // Image Activity diff --git a/packages/typescript/ai-openai-compatible/src/adapters/summarize.ts b/packages/typescript/ai/src/activities/summarize/chat-stream-wrapper.ts similarity index 84% rename from packages/typescript/ai-openai-compatible/src/adapters/summarize.ts rename to packages/typescript/ai/src/activities/summarize/chat-stream-wrapper.ts index fed92b296..c58510774 100644 --- a/packages/typescript/ai-openai-compatible/src/adapters/summarize.ts +++ b/packages/typescript/ai/src/activities/summarize/chat-stream-wrapper.ts @@ -1,17 +1,16 @@ -import { BaseSummarizeAdapter } from '@tanstack/ai/adapters' -import { toRunErrorPayload } from '@tanstack/ai/adapter-internals' -import { generateId } from '@tanstack/ai-utils' +import { toRunErrorPayload } from '../error-payload' +import { BaseSummarizeAdapter } from './adapter' import type { StreamChunk, SummarizationOptions, SummarizationResult, TextOptions, -} from '@tanstack/ai' +} from '../../types' /** - * Minimal interface for a text adapter that supports chatStream. - * This allows the summarize adapter to work with any OpenAI-compatible - * text adapter without tight coupling to a specific implementation. + * Minimal contract for a text adapter that supports `chatStream`. Lets + * `ChatStreamSummarizeAdapter` work with any text adapter without coupling + * to a specific implementation. */ export interface ChatStreamCapable { chatStream: ( @@ -20,16 +19,10 @@ export interface ChatStreamCapable { } /** - * OpenAI-Compatible Summarize Adapter - * - * A thin wrapper around a text adapter that adds summarization-specific prompting. - * Delegates all API calls to the provided text adapter. - * - * Subclasses or instantiators provide a text adapter (or factory) at construction - * time, allowing any OpenAI-compatible provider to get summarization for free by - * reusing its text adapter. + * Summarize adapter that wraps any `ChatStreamCapable` text adapter and + * prompts it for summarization. Not tied to any wire format. 
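+ *
+ * @example
+ * ```typescript
+ * // Sketch — `myTextAdapter` stands in for any chatStream-capable text adapter:
+ * const summarizer = new ChatStreamSummarizeAdapter(myTextAdapter, 'gpt-4o-mini')
+ * ```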
*/ -export class OpenAICompatibleSummarizeAdapter< +export class ChatStreamSummarizeAdapter< TModel extends string, TProviderOptions extends object = Record, > extends BaseSummarizeAdapter { @@ -40,7 +33,7 @@ export class OpenAICompatibleSummarizeAdapter< constructor( textAdapter: ChatStreamCapable, model: TModel, - name: string = 'openai-compatible', + name: string = 'chat-stream-summarize', ) { super({}, model) this.name = name @@ -51,7 +44,7 @@ export class OpenAICompatibleSummarizeAdapter< const systemPrompt = this.buildSummarizationPrompt(options) let summary = '' - const id = generateId(this.name) + const id = this.generateId() let model = options.model let usage = { promptTokens: 0, completionTokens: 0, totalTokens: 0 } diff --git a/packages/typescript/ai/src/activities/summarize/index.ts b/packages/typescript/ai/src/activities/summarize/index.ts index e73bd7532..c638f574a 100644 --- a/packages/typescript/ai/src/activities/summarize/index.ts +++ b/packages/typescript/ai/src/activities/summarize/index.ts @@ -313,3 +313,7 @@ export type { AnySummarizeAdapter, } from './adapter' export { BaseSummarizeAdapter } from './adapter' +export { + ChatStreamSummarizeAdapter, + type ChatStreamCapable, +} from './chat-stream-wrapper' diff --git a/packages/typescript/ai/tests/error-payload.test.ts b/packages/typescript/ai/tests/error-payload.test.ts index 784a4901c..689c3046d 100644 --- a/packages/typescript/ai/tests/error-payload.test.ts +++ b/packages/typescript/ai/tests/error-payload.test.ts @@ -34,17 +34,27 @@ describe('toRunErrorPayload', () => { ) }) - it('ignores non-string code fields (returns undefined)', () => { + it('coerces numeric code fields to strings', () => { expect(toRunErrorPayload({ message: 'x', code: 500 })).toEqual({ message: 'x', - code: undefined, + code: '500', }) }) - it('ignores non-string code fields on Error instances too', () => { - const err = Object.assign(new Error('numeric code'), { code: 500 }) + it('coerces numeric code fields on Error instances too', () => { + const err = Object.assign(new Error('http 429'), { code: 429 }) expect(toRunErrorPayload(err)).toEqual({ - message: 'numeric code', + message: 'http 429', + code: '429', + }) + }) + + it('ignores non-finite or otherwise non-string/non-number codes', () => { + expect( + toRunErrorPayload({ message: 'nan', code: Number.NaN }), + ).toEqual({ message: 'nan', code: undefined }) + expect(toRunErrorPayload({ message: 'sym', code: Symbol('x') })).toEqual({ + message: 'sym', code: undefined, }) }) diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 9162bff0e..734ac81df 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -1207,6 +1207,9 @@ importers: '@tanstack/ai-utils': specifier: workspace:* version: link:../ai-utils + openai: + specifier: ^6.9.1 + version: 6.10.0(ws@8.19.0)(zod@4.3.6) zod: specifier: ^4.0.0 version: 4.3.6 @@ -1232,6 +1235,9 @@ importers: '@tanstack/ai-utils': specifier: workspace:* version: link:../ai-utils + openai: + specifier: ^6.9.1 + version: 6.10.0(ws@8.19.0)(zod@4.3.6) zod: specifier: ^4.0.0 version: 4.3.6 @@ -1340,9 +1346,6 @@ importers: '@tanstack/ai-utils': specifier: workspace:* version: link:../ai-utils - openai: - specifier: ^6.9.1 - version: 6.10.0(ws@8.19.0)(zod@4.3.6) devDependencies: '@tanstack/ai': specifier: workspace:* @@ -1350,6 +1353,9 @@ importers: '@vitest/coverage-v8': specifier: 4.0.14 version: 
4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + openai: + specifier: ^6.9.1 + version: 6.10.0(ws@8.19.0)(zod@4.3.6) vite: specifier: ^7.2.7 version: 7.3.1(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) @@ -1372,6 +1378,9 @@ importers: '@vitest/coverage-v8': specifier: 4.0.14 version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + openai: + specifier: ^6.9.1 + version: 6.10.0(ws@8.19.0)(zod@4.3.6) vite: specifier: ^7.2.7 version: 7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) From 62aad9071bd7a8d771be610bdc3520f1c791dc49 Mon Sep 17 00:00:00 2001 From: "autofix-ci[bot]" <114827586+autofix-ci[bot]@users.noreply.github.com> Date: Wed, 13 May 2026 03:11:03 +0000 Subject: [PATCH 30/49] ci: apply automated fixes --- .../tests/chat-completions-text.test.ts | 95 ++-------- .../tests/responses-text.test.ts | 165 ++++-------------- .../typescript/ai-openai/src/adapters/tts.ts | 7 +- .../ai-openai/src/adapters/video.ts | 5 +- .../tests/openrouter-adapter.test.ts | 8 +- .../openrouter-responses-adapter.test.ts | 12 +- .../typescript/ai/tests/error-payload.test.ts | 7 +- 7 files changed, 70 insertions(+), 229 deletions(-) diff --git a/packages/typescript/ai-openai-compatible/tests/chat-completions-text.test.ts b/packages/typescript/ai-openai-compatible/tests/chat-completions-text.test.ts index 4346e4770..bb8750c2c 100644 --- a/packages/typescript/ai-openai-compatible/tests/chat-completions-text.test.ts +++ b/packages/typescript/ai-openai-compatible/tests/chat-completions-text.test.ts @@ -84,10 +84,7 @@ describe('OpenAICompatibleChatCompletionsTextAdapter', () => { describe('instantiation', () => { it('creates an adapter with default name', () => { - const adapter = new TestChatCompletionsAdapter( - testConfig, - 'test-model', - ) + const adapter = new TestChatCompletionsAdapter(testConfig, 'test-model') expect(adapter).toBeDefined() expect(adapter.kind).toBe('text') @@ -151,10 +148,7 @@ describe('OpenAICompatibleChatCompletionsTextAdapter', () => { ] setupMockSdkClient(streamChunks) - const adapter = new TestChatCompletionsAdapter( - testConfig, - 'test-model', - ) + const adapter = new TestChatCompletionsAdapter(testConfig, 'test-model') const chunks: Array = [] for await (const chunk of adapter.chatStream({ @@ -202,10 +196,7 @@ describe('OpenAICompatibleChatCompletionsTextAdapter', () => { ] setupMockSdkClient(streamChunks) - const adapter = new TestChatCompletionsAdapter( - testConfig, - 'test-model', - ) + const adapter = new TestChatCompletionsAdapter(testConfig, 'test-model') const chunks: Array = [] for await (const chunk of adapter.chatStream({ @@ -264,10 +255,7 @@ describe('OpenAICompatibleChatCompletionsTextAdapter', () => { ] setupMockSdkClient(streamChunks) - const adapter = new TestChatCompletionsAdapter( - testConfig, - 'test-model', - ) + const adapter = new TestChatCompletionsAdapter(testConfig, 'test-model') const chunks: Array = [] for await (const chunk of adapter.chatStream({ @@ -334,10 +322,7 @@ describe('OpenAICompatibleChatCompletionsTextAdapter', () => { ] setupMockSdkClient(streamChunks) - const adapter = new TestChatCompletionsAdapter( - testConfig, - 'test-model', - ) + const adapter = new 
TestChatCompletionsAdapter(testConfig, 'test-model') const chunks: Array = [] for await (const chunk of adapter.chatStream({ @@ -407,10 +392,7 @@ describe('OpenAICompatibleChatCompletionsTextAdapter', () => { ] setupMockSdkClient(streamChunks) - const adapter = new TestChatCompletionsAdapter( - testConfig, - 'test-model', - ) + const adapter = new TestChatCompletionsAdapter(testConfig, 'test-model') const chunks: Array = [] for await (const chunk of adapter.chatStream({ @@ -503,10 +485,7 @@ describe('OpenAICompatibleChatCompletionsTextAdapter', () => { ] setupMockSdkClient(streamChunks) - const adapter = new TestChatCompletionsAdapter( - testConfig, - 'test-model', - ) + const adapter = new TestChatCompletionsAdapter(testConfig, 'test-model') const chunks: Array = [] for await (const chunk of adapter.chatStream({ @@ -577,10 +556,7 @@ describe('OpenAICompatibleChatCompletionsTextAdapter', () => { mockCreate = vi.fn().mockResolvedValue(errorIterable) - const adapter = new TestChatCompletionsAdapter( - testConfig, - 'test-model', - ) + const adapter = new TestChatCompletionsAdapter(testConfig, 'test-model') const chunks: Array = [] for await (const chunk of adapter.chatStream({ @@ -602,10 +578,7 @@ describe('OpenAICompatibleChatCompletionsTextAdapter', () => { it('emits RUN_STARTED then RUN_ERROR when client.create throws', async () => { mockCreate = vi.fn().mockRejectedValue(new Error('API key invalid')) - const adapter = new TestChatCompletionsAdapter( - testConfig, - 'test-model', - ) + const adapter = new TestChatCompletionsAdapter(testConfig, 'test-model') const chunks: Array = [] for await (const chunk of adapter.chatStream({ @@ -640,10 +613,7 @@ describe('OpenAICompatibleChatCompletionsTextAdapter', () => { setupMockSdkClient([], nonStreamResponse) - const adapter = new TestChatCompletionsAdapter( - testConfig, - 'test-model', - ) + const adapter = new TestChatCompletionsAdapter(testConfig, 'test-model') const result = await adapter.structuredOutput({ chatOptions: { @@ -689,10 +659,7 @@ describe('OpenAICompatibleChatCompletionsTextAdapter', () => { setupMockSdkClient([], nonStreamResponse) - const adapter = new TestChatCompletionsAdapter( - testConfig, - 'test-model', - ) + const adapter = new TestChatCompletionsAdapter(testConfig, 'test-model') const result = await adapter.structuredOutput({ chatOptions: { @@ -728,10 +695,7 @@ describe('OpenAICompatibleChatCompletionsTextAdapter', () => { setupMockSdkClient([], nonStreamResponse) - const adapter = new TestChatCompletionsAdapter( - testConfig, - 'test-model', - ) + const adapter = new TestChatCompletionsAdapter(testConfig, 'test-model') await expect( adapter.structuredOutput({ @@ -757,10 +721,7 @@ describe('OpenAICompatibleChatCompletionsTextAdapter', () => { } setupMockSdkClient([], nonStreamResponse) - const adapter = new TestChatCompletionsAdapter( - testConfig, - 'test-model', - ) + const adapter = new TestChatCompletionsAdapter(testConfig, 'test-model') // Empty content must surface as a distinct error rather than masquerade // as a JSON-parse failure on an empty string. 
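       // (JSON.parse('') throws "Unexpected end of JSON input", which would
       // otherwise mask the real cause.)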
@@ -782,10 +743,7 @@ describe('OpenAICompatibleChatCompletionsTextAdapter', () => { } setupMockSdkClient([], nonStreamResponse) - const adapter = new TestChatCompletionsAdapter( - testConfig, - 'test-model', - ) + const adapter = new TestChatCompletionsAdapter(testConfig, 'test-model') await expect( adapter.structuredOutput({ @@ -832,10 +790,7 @@ describe('OpenAICompatibleChatCompletionsTextAdapter', () => { setupMockSdkClient(streamChunks) const errorsSpy = vi.spyOn(testLogger, 'errors') - const adapter = new TestChatCompletionsAdapter( - testConfig, - 'test-model', - ) + const adapter = new TestChatCompletionsAdapter(testConfig, 'test-model') try { for await (const _ of adapter.chatStream({ @@ -899,10 +854,7 @@ describe('OpenAICompatibleChatCompletionsTextAdapter', () => { ] setupMockSdkClient(streamChunks) - const adapter = new TestChatCompletionsAdapter( - testConfig, - 'test-model', - ) + const adapter = new TestChatCompletionsAdapter(testConfig, 'test-model') const chunks: Array = [] for await (const chunk of adapter.chatStream({ @@ -940,10 +892,7 @@ describe('OpenAICompatibleChatCompletionsTextAdapter', () => { ] setupMockSdkClient(streamChunks) - const adapter = new TestChatCompletionsAdapter( - testConfig, - 'test-model', - ) + const adapter = new TestChatCompletionsAdapter(testConfig, 'test-model') const chunks: Array = [] for await (const chunk of adapter.chatStream({ @@ -971,10 +920,7 @@ describe('OpenAICompatibleChatCompletionsTextAdapter', () => { setupMockSdkClient([], nonStreamResponse) - const adapter = new TestChatCompletionsAdapter( - testConfig, - 'test-model', - ) + const adapter = new TestChatCompletionsAdapter(testConfig, 'test-model') await adapter.structuredOutput({ chatOptions: { @@ -1011,10 +957,7 @@ describe('OpenAICompatibleChatCompletionsTextAdapter', () => { ] setupMockSdkClient(streamChunks) - const adapter = new TestChatCompletionsAdapter( - testConfig, - 'test-model', - ) + const adapter = new TestChatCompletionsAdapter(testConfig, 'test-model') const controller = new AbortController() const chunks: Array = [] diff --git a/packages/typescript/ai-openai-compatible/tests/responses-text.test.ts b/packages/typescript/ai-openai-compatible/tests/responses-text.test.ts index 87a25a4d3..fdfa3990a 100644 --- a/packages/typescript/ai-openai-compatible/tests/responses-text.test.ts +++ b/packages/typescript/ai-openai-compatible/tests/responses-text.test.ts @@ -17,10 +17,7 @@ class TestResponsesAdapter extends OpenAICompatibleResponsesTextAdapter constructor(_config: unknown, model: string, name?: string) { super(model, name) } - protected async callResponse( - params: any, - requestOptions: any, - ): Promise { + protected async callResponse(params: any, requestOptions: any): Promise { return mockResponsesCreate(params, requestOptions) } protected async callResponseStream( @@ -82,10 +79,7 @@ describe('OpenAICompatibleResponsesTextAdapter', () => { describe('instantiation', () => { it('creates an adapter with default name', () => { - const adapter = new TestResponsesAdapter( - testConfig, - 'test-model', - ) + const adapter = new TestResponsesAdapter(testConfig, 'test-model') expect(adapter).toBeDefined() expect(adapter.kind).toBe('text') @@ -150,10 +144,7 @@ describe('OpenAICompatibleResponsesTextAdapter', () => { ] setupMockResponsesClient(streamChunks) - const adapter = new TestResponsesAdapter( - testConfig, - 'test-model', - ) + const adapter = new TestResponsesAdapter(testConfig, 'test-model') const chunks: Array = [] for await (const chunk of adapter.chatStream({ 
@@ -202,10 +193,7 @@ describe('OpenAICompatibleResponsesTextAdapter', () => { ] setupMockResponsesClient(streamChunks) - const adapter = new TestResponsesAdapter( - testConfig, - 'test-model', - ) + const adapter = new TestResponsesAdapter(testConfig, 'test-model') const chunks: Array = [] for await (const chunk of adapter.chatStream({ @@ -265,10 +253,7 @@ describe('OpenAICompatibleResponsesTextAdapter', () => { ] setupMockResponsesClient(streamChunks) - const adapter = new TestResponsesAdapter( - testConfig, - 'test-model', - ) + const adapter = new TestResponsesAdapter(testConfig, 'test-model') const chunks: Array = [] for await (const chunk of adapter.chatStream({ @@ -336,10 +321,7 @@ describe('OpenAICompatibleResponsesTextAdapter', () => { ] setupMockResponsesClient(streamChunks) - const adapter = new TestResponsesAdapter( - testConfig, - 'test-model', - ) + const adapter = new TestResponsesAdapter(testConfig, 'test-model') const chunks: Array = [] for await (const chunk of adapter.chatStream({ @@ -404,10 +386,7 @@ describe('OpenAICompatibleResponsesTextAdapter', () => { ] setupMockResponsesClient(streamChunks) - const adapter = new TestResponsesAdapter( - testConfig, - 'test-model', - ) + const adapter = new TestResponsesAdapter(testConfig, 'test-model') const chunks: Array = [] for await (const chunk of adapter.chatStream({ @@ -478,10 +457,7 @@ describe('OpenAICompatibleResponsesTextAdapter', () => { ] setupMockResponsesClient(streamChunks) - const adapter = new TestResponsesAdapter( - testConfig, - 'test-model', - ) + const adapter = new TestResponsesAdapter(testConfig, 'test-model') const chunks: Array = [] for await (const chunk of adapter.chatStream({ @@ -562,10 +538,7 @@ describe('OpenAICompatibleResponsesTextAdapter', () => { ] setupMockResponsesClient(streamChunks) - const adapter = new TestResponsesAdapter( - testConfig, - 'test-model', - ) + const adapter = new TestResponsesAdapter(testConfig, 'test-model') const chunks: Array = [] for await (const chunk of adapter.chatStream({ @@ -649,10 +622,7 @@ describe('OpenAICompatibleResponsesTextAdapter', () => { ] setupMockResponsesClient(streamChunks) - const adapter = new TestResponsesAdapter( - testConfig, - 'test-model', - ) + const adapter = new TestResponsesAdapter(testConfig, 'test-model') const chunks: Array = [] for await (const chunk of adapter.chatStream({ @@ -775,10 +745,7 @@ describe('OpenAICompatibleResponsesTextAdapter', () => { ] setupMockResponsesClient(streamChunks) - const adapter = new TestResponsesAdapter( - testConfig, - 'test-model', - ) + const adapter = new TestResponsesAdapter(testConfig, 'test-model') const chunks: Array = [] for await (const chunk of adapter.chatStream({ @@ -864,10 +831,7 @@ describe('OpenAICompatibleResponsesTextAdapter', () => { ] setupMockResponsesClient(streamChunks) - const adapter = new TestResponsesAdapter( - testConfig, - 'test-model', - ) + const adapter = new TestResponsesAdapter(testConfig, 'test-model') const chunks: Array = [] for await (const chunk of adapter.chatStream({ @@ -952,10 +916,7 @@ describe('OpenAICompatibleResponsesTextAdapter', () => { }, ] setupMockResponsesClient(streamChunks) - const adapter = new TestResponsesAdapter( - testConfig, - 'test-model', - ) + const adapter = new TestResponsesAdapter(testConfig, 'test-model') const chunks: Array = [] for await (const chunk of adapter.chatStream({ logger: testLogger, @@ -1038,10 +999,7 @@ describe('OpenAICompatibleResponsesTextAdapter', () => { }, ] setupMockResponsesClient(streamChunks) - const adapter = new 
TestResponsesAdapter( - testConfig, - 'test-model', - ) + const adapter = new TestResponsesAdapter(testConfig, 'test-model') const chunks: Array = [] for await (const chunk of adapter.chatStream({ logger: testLogger, @@ -1106,10 +1064,7 @@ describe('OpenAICompatibleResponsesTextAdapter', () => { }, ] setupMockResponsesClient(streamChunks) - const adapter = new TestResponsesAdapter( - testConfig, - 'test-model', - ) + const adapter = new TestResponsesAdapter(testConfig, 'test-model') const chunks: Array = [] for await (const chunk of adapter.chatStream({ logger: testLogger, @@ -1186,10 +1141,7 @@ describe('OpenAICompatibleResponsesTextAdapter', () => { }, ] setupMockResponsesClient(streamChunks) - const adapter = new TestResponsesAdapter( - testConfig, - 'test-model', - ) + const adapter = new TestResponsesAdapter(testConfig, 'test-model') const chunks: Array = [] for await (const chunk of adapter.chatStream({ logger: testLogger, @@ -1241,10 +1193,7 @@ describe('OpenAICompatibleResponsesTextAdapter', () => { ] setupMockResponsesClient(streamChunks) - const adapter = new TestResponsesAdapter( - testConfig, - 'test-model', - ) + const adapter = new TestResponsesAdapter(testConfig, 'test-model') const chunks: Array = [] for await (const chunk of adapter.chatStream({ @@ -1307,10 +1256,7 @@ describe('OpenAICompatibleResponsesTextAdapter', () => { ] setupMockResponsesClient(streamChunks) - const adapter = new TestResponsesAdapter( - testConfig, - 'test-model', - ) + const adapter = new TestResponsesAdapter(testConfig, 'test-model') const chunks: Array = [] for await (const chunk of adapter.chatStream({ @@ -1355,10 +1301,7 @@ describe('OpenAICompatibleResponsesTextAdapter', () => { }, ] setupMockResponsesClient(streamChunks) - const adapter = new TestResponsesAdapter( - testConfig, - 'test-model', - ) + const adapter = new TestResponsesAdapter(testConfig, 'test-model') const chunks: Array = [] for await (const chunk of adapter.chatStream({ logger: testLogger, @@ -1411,10 +1354,7 @@ describe('OpenAICompatibleResponsesTextAdapter', () => { mockResponsesCreate = vi.fn().mockResolvedValue(errorIterable) - const adapter = new TestResponsesAdapter( - testConfig, - 'test-model', - ) + const adapter = new TestResponsesAdapter(testConfig, 'test-model') const chunks: Array = [] for await (const chunk of adapter.chatStream({ @@ -1438,10 +1378,7 @@ describe('OpenAICompatibleResponsesTextAdapter', () => { .fn() .mockRejectedValue(new Error('API key invalid')) - const adapter = new TestResponsesAdapter( - testConfig, - 'test-model', - ) + const adapter = new TestResponsesAdapter(testConfig, 'test-model') const chunks: Array = [] for await (const chunk of adapter.chatStream({ @@ -1478,10 +1415,7 @@ describe('OpenAICompatibleResponsesTextAdapter', () => { ] setupMockResponsesClient(streamChunks) - const adapter = new TestResponsesAdapter( - testConfig, - 'test-model', - ) + const adapter = new TestResponsesAdapter(testConfig, 'test-model') const chunks: Array = [] for await (const chunk of adapter.chatStream({ @@ -1515,10 +1449,7 @@ describe('OpenAICompatibleResponsesTextAdapter', () => { ] setupMockResponsesClient(streamChunks) - const adapter = new TestResponsesAdapter( - testConfig, - 'test-model', - ) + const adapter = new TestResponsesAdapter(testConfig, 'test-model') const chunks: Array = [] for await (const chunk of adapter.chatStream({ @@ -1556,10 +1487,7 @@ describe('OpenAICompatibleResponsesTextAdapter', () => { ] setupMockResponsesClient(streamChunks) - const adapter = new TestResponsesAdapter( - 
testConfig, - 'test-model', - ) + const adapter = new TestResponsesAdapter(testConfig, 'test-model') const chunks: Array = [] for await (const chunk of adapter.chatStream({ @@ -1599,10 +1527,7 @@ describe('OpenAICompatibleResponsesTextAdapter', () => { setupMockResponsesClient([], nonStreamResponse) - const adapter = new TestResponsesAdapter( - testConfig, - 'test-model', - ) + const adapter = new TestResponsesAdapter(testConfig, 'test-model') const result = await adapter.structuredOutput({ chatOptions: { @@ -1656,10 +1581,7 @@ describe('OpenAICompatibleResponsesTextAdapter', () => { setupMockResponsesClient([], nonStreamResponse) - const adapter = new TestResponsesAdapter( - testConfig, - 'test-model', - ) + const adapter = new TestResponsesAdapter(testConfig, 'test-model') const result = await adapter.structuredOutput({ chatOptions: { @@ -1699,10 +1621,7 @@ describe('OpenAICompatibleResponsesTextAdapter', () => { setupMockResponsesClient([], nonStreamResponse) - const adapter = new TestResponsesAdapter( - testConfig, - 'test-model', - ) + const adapter = new TestResponsesAdapter(testConfig, 'test-model') await expect( adapter.structuredOutput({ @@ -1732,10 +1651,7 @@ describe('OpenAICompatibleResponsesTextAdapter', () => { ], } setupMockResponsesClient([], nonStreamResponse) - const adapter = new TestResponsesAdapter( - testConfig, - 'test-model', - ) + const adapter = new TestResponsesAdapter(testConfig, 'test-model') await expect( adapter.structuredOutput({ chatOptions: { @@ -1765,10 +1681,7 @@ describe('OpenAICompatibleResponsesTextAdapter', () => { ], } setupMockResponsesClient([], nonStreamResponse) - const adapter = new TestResponsesAdapter( - testConfig, - 'test-model', - ) + const adapter = new TestResponsesAdapter(testConfig, 'test-model') await expect( adapter.structuredOutput({ chatOptions: { @@ -1795,10 +1708,7 @@ describe('OpenAICompatibleResponsesTextAdapter', () => { ], } setupMockResponsesClient([], nonStreamResponse) - const adapter = new TestResponsesAdapter( - testConfig, - 'test-model', - ) + const adapter = new TestResponsesAdapter(testConfig, 'test-model') await expect( adapter.structuredOutput({ chatOptions: { @@ -1844,10 +1754,7 @@ describe('OpenAICompatibleResponsesTextAdapter', () => { ] setupMockResponsesClient(streamChunks) - const adapter = new TestResponsesAdapter( - testConfig, - 'test-model', - ) + const adapter = new TestResponsesAdapter(testConfig, 'test-model') const chunks: Array = [] for await (const chunk of adapter.chatStream({ @@ -1915,10 +1822,7 @@ describe('OpenAICompatibleResponsesTextAdapter', () => { ] setupMockResponsesClient(streamChunks) - const adapter = new TestResponsesAdapter( - testConfig, - 'test-model', - ) + const adapter = new TestResponsesAdapter(testConfig, 'test-model') const chunks: Array = [] for await (const chunk of adapter.chatStream({ @@ -1966,10 +1870,7 @@ describe('OpenAICompatibleResponsesTextAdapter', () => { ] setupMockResponsesClient(streamChunks) - const adapter = new TestResponsesAdapter( - testConfig, - 'test-model', - ) + const adapter = new TestResponsesAdapter(testConfig, 'test-model') const chunks: Array = [] for await (const chunk of adapter.chatStream({ diff --git a/packages/typescript/ai-openai/src/adapters/tts.ts b/packages/typescript/ai-openai/src/adapters/tts.ts index d453f4d14..382d0096c 100644 --- a/packages/typescript/ai-openai/src/adapters/tts.ts +++ b/packages/typescript/ai-openai/src/adapters/tts.ts @@ -26,10 +26,9 @@ export interface OpenAITTSConfig extends OpenAIClientConfig {} * Voices: alloy, 
ash, ballad, coral, echo, fable, onyx, nova, sage, shimmer, verse. * Formats: mp3, opus, aac, flac, wav, pcm. Speed 0.25 to 4.0. */ -export class OpenAITTSAdapter extends BaseTTSAdapter< - TModel, - OpenAITTSProviderOptions -> { +export class OpenAITTSAdapter< + TModel extends OpenAITTSModel, +> extends BaseTTSAdapter { readonly name = 'openai' as const protected client: OpenAI diff --git a/packages/typescript/ai-openai/src/adapters/video.ts b/packages/typescript/ai-openai/src/adapters/video.ts index e219d796c..7b009237d 100644 --- a/packages/typescript/ai-openai/src/adapters/video.ts +++ b/packages/typescript/ai-openai/src/adapters/video.ts @@ -251,7 +251,10 @@ export class OpenAIVideoAdapter< ) } const fallthroughBuffer = await fallthroughBlob.arrayBuffer() - warnIfLargeMediaBuffer(fallthroughBuffer.byteLength, 'video.sdkFallthrough') + warnIfLargeMediaBuffer( + fallthroughBuffer.byteLength, + 'video.sdkFallthrough', + ) const fallthroughBase64 = arrayBufferToBase64(fallthroughBuffer) const fallthroughMime = (typeof response?.headers?.get === 'function' diff --git a/packages/typescript/ai-openrouter/tests/openrouter-adapter.test.ts b/packages/typescript/ai-openrouter/tests/openrouter-adapter.test.ts index 5c209836b..3fc2ba243 100644 --- a/packages/typescript/ai-openrouter/tests/openrouter-adapter.test.ts +++ b/packages/typescript/ai-openrouter/tests/openrouter-adapter.test.ts @@ -1794,9 +1794,7 @@ describe('OpenRouter STEP event consistency', () => { { id: 'r-1', model: 'openai/o1-preview', - choices: [ - { delta: { content: 'Final answer.' }, finishReason: null }, - ], + choices: [{ delta: { content: 'Final answer.' }, finishReason: null }], }, { id: 'r-1', @@ -1836,9 +1834,7 @@ describe('OpenRouter STEP event consistency', () => { // equivalent to the legacy STEP_FINISHED accumulator without losing data. 
const reasoningDeltas = chunks .filter( - ( - c, - ): c is Extract => + (c): c is Extract => c.type === 'REASONING_MESSAGE_CONTENT', ) .map((c) => c.delta) diff --git a/packages/typescript/ai-openrouter/tests/openrouter-responses-adapter.test.ts b/packages/typescript/ai-openrouter/tests/openrouter-responses-adapter.test.ts index 318672b5f..ef214b8ac 100644 --- a/packages/typescript/ai-openrouter/tests/openrouter-responses-adapter.test.ts +++ b/packages/typescript/ai-openrouter/tests/openrouter-responses-adapter.test.ts @@ -249,9 +249,7 @@ describe('OpenRouter responses adapter — request shape', () => { const params = mockSend.mock.calls[0]![0].responsesRequest const userMsg = params.input.find((i: any) => i.role === 'user') expect(userMsg).toBeDefined() - const audioPart = userMsg.content.find( - (p: any) => p.type === 'input_file', - ) + const audioPart = userMsg.content.find((p: any) => p.type === 'input_file') expect(audioPart).toBeDefined() expect(audioPart.fileUrl).toBe('https://example.com/clip.mp3') }) @@ -370,9 +368,7 @@ describe('OpenRouter responses adapter — request shape', () => { } const params = mockSend.mock.calls[0]![0].responsesRequest const userMsg = params.input.find((i: any) => i.role === 'user') - const videoPart = userMsg.content.find( - (p: any) => p.type === 'input_video', - ) + const videoPart = userMsg.content.find((p: any) => p.type === 'input_video') expect(videoPart).toBeDefined() expect(videoPart.videoUrl).toBe('https://example.com/v.mp4') // The outbound schema would strip the camelCase videoUrl if the converter @@ -501,7 +497,9 @@ describe('OpenRouter responses adapter — request shape', () => { e.type === EventType.RUN_ERROR, ) expect(runError).toBeDefined() - expect(runError!.message.toLowerCase()).toMatch(/inline.*document|document.*inline|responses adapter/) + expect(runError!.message.toLowerCase()).toMatch( + /inline.*document|document.*inline|responses adapter/, + ) }) }) diff --git a/packages/typescript/ai/tests/error-payload.test.ts b/packages/typescript/ai/tests/error-payload.test.ts index 689c3046d..75d6bab96 100644 --- a/packages/typescript/ai/tests/error-payload.test.ts +++ b/packages/typescript/ai/tests/error-payload.test.ts @@ -50,9 +50,10 @@ describe('toRunErrorPayload', () => { }) it('ignores non-finite or otherwise non-string/non-number codes', () => { - expect( - toRunErrorPayload({ message: 'nan', code: Number.NaN }), - ).toEqual({ message: 'nan', code: undefined }) + expect(toRunErrorPayload({ message: 'nan', code: Number.NaN })).toEqual({ + message: 'nan', + code: undefined, + }) expect(toRunErrorPayload({ message: 'sym', code: Symbol('x') })).toEqual({ message: 'sym', code: undefined, From ebd62444a0bd113707f119819e69f0121ff7d113 Mon Sep 17 00:00:00 2001 From: Tom Beckenham <34339192+tombeckenham@users.noreply.github.com> Date: Wed, 13 May 2026 13:34:35 +1000 Subject: [PATCH 31/49] refactor(ai): rename chat-stream-wrapper to chat-stream-summarize --- packages/typescript/ai/src/activities/index.ts | 2 +- .../{chat-stream-wrapper.ts => chat-stream-summarize.ts} | 0 packages/typescript/ai/src/activities/summarize/index.ts | 2 +- 3 files changed, 2 insertions(+), 2 deletions(-) rename packages/typescript/ai/src/activities/summarize/{chat-stream-wrapper.ts => chat-stream-summarize.ts} (100%) diff --git a/packages/typescript/ai/src/activities/index.ts b/packages/typescript/ai/src/activities/index.ts index d3864dd50..aaf28459b 100644 --- a/packages/typescript/ai/src/activities/index.ts +++ b/packages/typescript/ai/src/activities/index.ts @@ 
-63,7 +63,7 @@ export { export { ChatStreamSummarizeAdapter, type ChatStreamCapable, -} from './summarize/chat-stream-wrapper' +} from './summarize/chat-stream-summarize' // =========================== // Image Activity diff --git a/packages/typescript/ai/src/activities/summarize/chat-stream-wrapper.ts b/packages/typescript/ai/src/activities/summarize/chat-stream-summarize.ts similarity index 100% rename from packages/typescript/ai/src/activities/summarize/chat-stream-wrapper.ts rename to packages/typescript/ai/src/activities/summarize/chat-stream-summarize.ts diff --git a/packages/typescript/ai/src/activities/summarize/index.ts b/packages/typescript/ai/src/activities/summarize/index.ts index c638f574a..b696735bf 100644 --- a/packages/typescript/ai/src/activities/summarize/index.ts +++ b/packages/typescript/ai/src/activities/summarize/index.ts @@ -316,4 +316,4 @@ export { BaseSummarizeAdapter } from './adapter' export { ChatStreamSummarizeAdapter, type ChatStreamCapable, -} from './chat-stream-wrapper' +} from './chat-stream-summarize' From e0dcb7780eae7913051a9607bf6d614c5b3dc009 Mon Sep 17 00:00:00 2001 From: Tom Beckenham <34339192+tombeckenham@users.noreply.github.com> Date: Wed, 13 May 2026 14:24:33 +1000 Subject: [PATCH 32/49] refactor(summarize): unify provider summarize adapters on chat-stream wrapper - Migrate anthropic, gemini, ollama, openrouter summarize adapters to thin factories over ChatStreamSummarizeAdapter, matching the openai/grok pattern. Drops ~600 lines of duplicated streaming/error/usage handling. - Thread modelOptions from SummarizationOptions through the activity layer and into the wrapped text adapter's chatStream so provider-specific knobs (cache control, plugins, safety settings, tuning params) reach the wire. - Add InferTextProviderOptions helper to extract per-model provider options from a text adapter's ~types. - Drop bespoke XSummarizeProviderOptions interfaces from all 6 providers; provider summarize types now resolve to the text adapter's per-model options shape, giving accurate IntelliSense for modelOptions. 
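A minimal sketch of the resulting call site. Hedged: the `@tanstack/ai` import specifier for the `summarize` activity and the `modelOptions` payload are illustrative assumptions rather than verified API surface; the factory usage itself matches the new JSDoc examples in the diff below.

```typescript
import { summarize } from '@tanstack/ai'; // assumed public entry point
import { anthropicSummarize } from '@tanstack/ai-anthropic';

// The factory returns a ChatStreamSummarizeAdapter typed against the
// wrapped AnthropicTextAdapter, so `modelOptions` gets per-model
// IntelliSense via InferTextProviderOptions.
const adapter = anthropicSummarize('claude-sonnet-4-5');

const result = await summarize({
  adapter,
  text: 'Long article text...',
  style: 'bullet-points',
  // Forwarded as-is through buildTextOptions() into the wrapped
  // chatStream call; the shape is the text adapter's per-model provider
  // options (contents here are hypothetical).
  modelOptions: {},
});

console.log(result.summary);
```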
Co-Authored-By: Claude Opus 4.7 (1M context) --- .../ai-anthropic/src/adapters/summarize.ts | 246 ++------------ packages/typescript/ai-anthropic/src/index.ts | 5 +- .../ai-gemini/src/adapters/summarize.ts | 299 +++--------------- packages/typescript/ai-gemini/src/index.ts | 7 +- .../ai-gemini/tests/gemini-adapter.test.ts | 36 ++- .../ai-grok/src/adapters/summarize.ts | 18 +- .../typescript/ai-grok/src/adapters/text.ts | 4 +- packages/typescript/ai-grok/src/index.ts | 1 - .../ai-ollama/src/adapters/summarize.ts | 258 ++------------- packages/typescript/ai-ollama/src/index.ts | 4 +- .../ai-openai/src/adapters/summarize.ts | 18 +- packages/typescript/ai-openai/src/index.ts | 1 - .../ai-openrouter/src/adapters/summarize.ts | 201 ++---------- .../typescript/ai-openrouter/src/index.ts | 5 +- .../typescript/ai/src/activities/index.ts | 1 + .../ai/src/activities/summarize/adapter.ts | 12 +- .../summarize/chat-stream-summarize.ts | 82 +++-- .../ai/src/activities/summarize/index.ts | 7 +- packages/typescript/ai/src/types.ts | 6 +- testing/e2e/src/routes/api.summarize.ts | 2 +- 20 files changed, 246 insertions(+), 967 deletions(-) diff --git a/packages/typescript/ai-anthropic/src/adapters/summarize.ts b/packages/typescript/ai-anthropic/src/adapters/summarize.ts index 60effa555..b9febd09f 100644 --- a/packages/typescript/ai-anthropic/src/adapters/summarize.ts +++ b/packages/typescript/ai-anthropic/src/adapters/summarize.ts @@ -1,221 +1,21 @@ -import { EventType } from '@tanstack/ai' -import { BaseSummarizeAdapter } from '@tanstack/ai/adapters' -import { - createAnthropicClient, - generateId, - getAnthropicApiKeyFromEnv, -} from '../utils' +import { ChatStreamSummarizeAdapter } from '@tanstack/ai/adapters' +import { getAnthropicApiKeyFromEnv } from '../utils' +import { AnthropicTextAdapter } from './text' +import type { InferTextProviderOptions } from '@tanstack/ai/adapters' import type { ANTHROPIC_MODELS } from '../model-meta' -import type { - StreamChunk, - SummarizationOptions, - SummarizationResult, -} from '@tanstack/ai' import type { AnthropicClientConfig } from '../utils' -/** - * Configuration for Anthropic summarize adapter - */ export interface AnthropicSummarizeConfig extends AnthropicClientConfig {} -/** - * Anthropic-specific provider options for summarization - */ -export interface AnthropicSummarizeProviderOptions { - /** Temperature for response generation (0-1) */ - temperature?: number - /** Maximum tokens in the response */ - maxTokens?: number -} - -/** Model type for Anthropic summarization */ export type AnthropicSummarizeModel = (typeof ANTHROPIC_MODELS)[number] -/** - * Anthropic Summarize Adapter - * - * Tree-shakeable adapter for Anthropic summarization functionality. - * Import only what you need for smaller bundle sizes. 
- */ -export class AnthropicSummarizeAdapter< - TModel extends AnthropicSummarizeModel, -> extends BaseSummarizeAdapter { - readonly kind = 'summarize' as const - readonly name = 'anthropic' as const - - private client: ReturnType - - constructor(config: AnthropicSummarizeConfig, model: TModel) { - super({}, model) - this.client = createAnthropicClient(config) - } - - async summarize(options: SummarizationOptions): Promise { - const { logger } = options - const systemPrompt = this.buildSummarizationPrompt(options) - - logger.request(`activity=summarize provider=anthropic`, { - provider: 'anthropic', - model: options.model, - }) - - try { - const response = await this.client.messages.create({ - model: options.model, - messages: [{ role: 'user', content: options.text }], - system: systemPrompt, - max_tokens: options.maxLength || 500, - temperature: 0.3, - stream: false, - }) - - const content = response.content - .map((c) => (c.type === 'text' ? c.text : '')) - .join('') - - return { - id: response.id, - model: response.model, - summary: content, - usage: { - promptTokens: response.usage.input_tokens, - completionTokens: response.usage.output_tokens, - totalTokens: - response.usage.input_tokens + response.usage.output_tokens, - }, - } - } catch (error) { - logger.errors('anthropic.summarize fatal', { - error, - source: 'anthropic.summarize', - }) - throw error - } - } - - async *summarizeStream( - options: SummarizationOptions, - ): AsyncIterable { - const { logger } = options - const systemPrompt = this.buildSummarizationPrompt(options) - const id = generateId(this.name) - const threadId = generateId('thread') - const model = options.model - let accumulatedContent = '' - let inputTokens = 0 - let outputTokens = 0 - - logger.request(`activity=summarize provider=anthropic`, { - provider: 'anthropic', - model, - stream: true, - }) - - try { - yield { - type: EventType.RUN_STARTED, - runId: id, - threadId, - model, - timestamp: Date.now(), - } satisfies StreamChunk - - const stream = await this.client.messages.create({ - model: options.model, - messages: [{ role: 'user', content: options.text }], - system: systemPrompt, - max_tokens: options.maxLength || 500, - temperature: 0.3, - stream: true, - }) - - for await (const event of stream) { - logger.provider(`provider=anthropic type=${event.type}`, { - chunk: event, - }) - - if (event.type === 'message_start') { - inputTokens = event.message.usage.input_tokens - } else if (event.type === 'content_block_delta') { - if (event.delta.type === 'text_delta') { - const delta = event.delta.text - accumulatedContent += delta - yield { - type: EventType.TEXT_MESSAGE_CONTENT, - messageId: id, - model, - timestamp: Date.now(), - delta, - content: accumulatedContent, - } satisfies StreamChunk - } - } else if (event.type === 'message_delta') { - outputTokens = event.usage.output_tokens - yield { - type: EventType.RUN_FINISHED, - runId: id, - threadId, - model, - timestamp: Date.now(), - finishReason: event.delta.stop_reason as - | 'stop' - | 'length' - | 'content_filter' - | null, - usage: { - promptTokens: inputTokens, - completionTokens: outputTokens, - totalTokens: inputTokens + outputTokens, - }, - } satisfies StreamChunk - } - } - } catch (error) { - logger.errors('anthropic.summarize fatal', { - error, - source: 'anthropic.summarize', - }) - throw error - } - } - - private buildSummarizationPrompt(options: SummarizationOptions): string { - let prompt = 'You are a professional summarizer. 
' - - switch (options.style) { - case 'bullet-points': - prompt += 'Provide a summary in bullet point format. ' - break - case 'paragraph': - prompt += 'Provide a summary in paragraph format. ' - break - case 'concise': - prompt += 'Provide a very concise summary in 1-2 sentences. ' - break - default: - prompt += 'Provide a clear and concise summary. ' - } - - if (options.focus && options.focus.length > 0) { - prompt += `Focus on the following aspects: ${options.focus.join(', ')}. ` - } - - if (options.maxLength) { - prompt += `Keep the summary under ${options.maxLength} tokens. ` - } - - return prompt - } -} - /** * Creates an Anthropic summarize adapter with explicit API key. - * Type resolution happens here at the call site. * - * @param model - The model name (e.g., 'claude-sonnet-4-5', 'claude-3-5-haiku-latest') - * @param apiKey - Your Anthropic API key - * @param config - Optional additional configuration - * @returns Configured Anthropic summarize adapter instance with resolved types + * @example + * ```typescript + * const adapter = createAnthropicSummarize('claude-sonnet-4-5', 'sk-ant-...'); + * ``` */ export function createAnthropicSummarize< TModel extends AnthropicSummarizeModel, @@ -223,22 +23,32 @@ export function createAnthropicSummarize< model: TModel, apiKey: string, config?: Omit, -): AnthropicSummarizeAdapter { - return new AnthropicSummarizeAdapter({ apiKey, ...config }, model) +): ChatStreamSummarizeAdapter< + TModel, + InferTextProviderOptions> +> { + return new ChatStreamSummarizeAdapter( + new AnthropicTextAdapter({ apiKey, ...config }, model), + model, + 'anthropic', + ) } /** - * Creates an Anthropic summarize adapter with automatic API key detection. - * Type resolution happens here at the call site. + * Creates an Anthropic summarize adapter with API key from `ANTHROPIC_API_KEY`. * - * @param model - The model name (e.g., 'claude-sonnet-4-5', 'claude-3-5-haiku-latest') - * @param config - Optional configuration (excluding apiKey which is auto-detected) - * @returns Configured Anthropic summarize adapter instance with resolved types + * @example + * ```typescript + * const adapter = anthropicSummarize('claude-sonnet-4-5'); + * await summarize({ adapter, text: 'Long article text...' 
}); + * ``` */ export function anthropicSummarize( model: TModel, config?: Omit, -): AnthropicSummarizeAdapter { - const apiKey = getAnthropicApiKeyFromEnv() - return createAnthropicSummarize(model, apiKey, config) +): ChatStreamSummarizeAdapter< + TModel, + InferTextProviderOptions> +> { + return createAnthropicSummarize(model, getAnthropicApiKeyFromEnv(), config) } diff --git a/packages/typescript/ai-anthropic/src/index.ts b/packages/typescript/ai-anthropic/src/index.ts index 4100ec183..1ba8e92b8 100644 --- a/packages/typescript/ai-anthropic/src/index.ts +++ b/packages/typescript/ai-anthropic/src/index.ts @@ -11,13 +11,12 @@ export { type AnthropicTextProviderOptions, } from './adapters/text' -// Summarize adapter - for text summarization +// Summarize - thin factory functions over @tanstack/ai's ChatStreamSummarizeAdapter export { - AnthropicSummarizeAdapter, anthropicSummarize, createAnthropicSummarize, type AnthropicSummarizeConfig, - type AnthropicSummarizeProviderOptions, + type AnthropicSummarizeModel, } from './adapters/summarize' // ============================================================================ // Type Exports diff --git a/packages/typescript/ai-gemini/src/adapters/summarize.ts b/packages/typescript/ai-gemini/src/adapters/summarize.ts index e82b3ec29..2fc39a63f 100644 --- a/packages/typescript/ai-gemini/src/adapters/summarize.ts +++ b/packages/typescript/ai-gemini/src/adapters/summarize.ts @@ -1,281 +1,56 @@ -import { FinishReason } from '@google/genai' -import { EventType } from '@tanstack/ai' -import { - createGeminiClient, - generateId, - getGeminiApiKeyFromEnv, -} from '../utils' -import type { GoogleGenAI } from '@google/genai' +import { ChatStreamSummarizeAdapter } from '@tanstack/ai/adapters' +import { getGeminiApiKeyFromEnv } from '../utils' +import { GeminiTextAdapter } from './text' +import type { InferTextProviderOptions } from '@tanstack/ai/adapters' +import type { GEMINI_MODELS } from '../model-meta' import type { GeminiClientConfig } from '../utils' -import type { SummarizeAdapter } from '@tanstack/ai/adapters' -import type { - StreamChunk, - SummarizationOptions, - SummarizationResult, -} from '@tanstack/ai' -/** - * Configuration for Gemini summarize adapter - */ export interface GeminiSummarizeConfig extends GeminiClientConfig {} -/** - * Available Gemini models for summarization - */ -export const GeminiSummarizeModels = [ - 'gemini-3.1-flash-lite-preview', - 'gemini-2.0-flash', - 'gemini-1.5-flash', - 'gemini-1.5-pro', - 'gemini-2.0-flash-lite', -] as const - -export type GeminiSummarizeModel = (typeof GeminiSummarizeModels)[number] - -/** - * Provider-specific options for Gemini summarization - */ -export interface GeminiSummarizeProviderOptions { - /** Generation configuration */ - generationConfig?: { - temperature?: number - topP?: number - topK?: number - maxOutputTokens?: number - stopSequences?: Array - } - /** Safety settings */ - safetySettings?: Array<{ - category: string - threshold: string - }> -} - -export interface GeminiSummarizeAdapterOptions { - // Additional adapter options can be added here -} - -/** - * Gemini Summarize Adapter - * A tree-shakeable summarization adapter for Google Gemini - */ -export class GeminiSummarizeAdapter< - TModel extends GeminiSummarizeModel, -> implements SummarizeAdapter { - readonly kind = 'summarize' as const - readonly name = 'gemini' as const - readonly model: TModel - - // Type-only property - never assigned at runtime - declare '~types': { - providerOptions: GeminiSummarizeProviderOptions - } - 
- private client: GoogleGenAI - - constructor(config: GeminiSummarizeConfig, model: TModel) { - this.client = createGeminiClient(config) - this.model = model - } - async summarize(options: SummarizationOptions): Promise { - const { logger } = options - const model = options.model - - logger.request(`activity=summarize provider=gemini`, { - provider: 'gemini', - model, - }) - - // Build the system prompt based on format - const formatInstructions = this.getFormatInstructions(options.style) - const lengthInstructions = options.maxLength - ? ` Keep the summary under ${options.maxLength} tokens.` - : '' - - const systemPrompt = `You are a helpful assistant that summarizes text. ${formatInstructions}${lengthInstructions}` - - try { - const response = await this.client.models.generateContent({ - model, - contents: [ - { - role: 'user', - parts: [ - { text: `Please summarize the following:\n\n${options.text}` }, - ], - }, - ], - config: { - systemInstruction: systemPrompt, - }, - }) - - const summary = response.text ?? '' - const inputTokens = response.usageMetadata?.promptTokenCount ?? 0 - const outputTokens = response.usageMetadata?.candidatesTokenCount ?? 0 - - return { - id: generateId('sum'), - model, - summary, - usage: { - promptTokens: inputTokens, - completionTokens: outputTokens, - totalTokens: inputTokens + outputTokens, - }, - } - } catch (error) { - logger.errors('gemini.summarize fatal', { - error, - source: 'gemini.summarize', - }) - throw error - } - } - - async *summarizeStream( - options: SummarizationOptions, - ): AsyncIterable { - const { logger } = options - const model = options.model - const id = generateId('sum') - const threadId = generateId('thread') - let accumulatedContent = '' - let inputTokens = 0 - let outputTokens = 0 - - // Build the system prompt based on format - const formatInstructions = this.getFormatInstructions(options.style) - const lengthInstructions = options.maxLength - ? ` Keep the summary under ${options.maxLength} words.` - : '' - - const systemPrompt = `You are a helpful assistant that summarizes text. ${formatInstructions}${lengthInstructions}` - - logger.request(`activity=summarize provider=gemini`, { - provider: 'gemini', - model, - stream: true, - }) - - try { - yield { - type: EventType.RUN_STARTED, - runId: id, - threadId, - model, - timestamp: Date.now(), - } satisfies StreamChunk - - const result = await this.client.models.generateContentStream({ - model, - contents: [ - { - role: 'user', - parts: [ - { text: `Please summarize the following:\n\n${options.text}` }, - ], - }, - ], - config: { - systemInstruction: systemPrompt, - }, - }) - - for await (const chunk of result) { - logger.provider(`provider=gemini`, { chunk }) - // Track usage metadata - if (chunk.usageMetadata) { - inputTokens = chunk.usageMetadata.promptTokenCount ?? inputTokens - outputTokens = - chunk.usageMetadata.candidatesTokenCount ?? 
outputTokens - } - - if (chunk.candidates?.[0]?.content?.parts) { - for (const part of chunk.candidates[0].content.parts) { - if (part.text) { - accumulatedContent += part.text - yield { - type: EventType.TEXT_MESSAGE_CONTENT, - messageId: id, - model, - timestamp: Date.now(), - delta: part.text, - content: accumulatedContent, - } satisfies StreamChunk - } - } - } - - // Check for finish reason - const finishReason = chunk.candidates?.[0]?.finishReason - if ( - finishReason === FinishReason.STOP || - finishReason === FinishReason.MAX_TOKENS || - finishReason === FinishReason.SAFETY - ) { - yield { - type: EventType.RUN_FINISHED, - runId: id, - threadId, - model, - timestamp: Date.now(), - finishReason: - finishReason === FinishReason.STOP - ? 'stop' - : finishReason === FinishReason.MAX_TOKENS - ? 'length' - : 'content_filter', - usage: { - promptTokens: inputTokens, - completionTokens: outputTokens, - totalTokens: inputTokens + outputTokens, - }, - } satisfies StreamChunk - } - } - } catch (error) { - logger.errors('gemini.summarize fatal', { - error, - source: 'gemini.summarize', - }) - throw error - } - } - - private getFormatInstructions( - style?: 'paragraph' | 'bullet-points' | 'concise', - ): string { - switch (style) { - case 'bullet-points': - return 'Provide the summary as bullet points.' - case 'concise': - return 'Provide a very brief one or two sentence summary.' - case 'paragraph': - default: - return 'Provide the summary in paragraph form.' - } - } -} +export type GeminiSummarizeModel = (typeof GEMINI_MODELS)[number] /** - * Creates a Gemini summarize adapter with explicit API key and model + * Creates a Gemini summarize adapter with explicit API key and model. + * + * Note: keeps the historical (apiKey, model, config) argument order to + * avoid breaking existing callers. + * + * @example + * ```typescript + * const adapter = createGeminiSummarize('AIza...', 'gemini-2.0-flash'); + * ``` */ export function createGeminiSummarize( apiKey: string, model: TModel, config?: Omit, -): GeminiSummarizeAdapter { - return new GeminiSummarizeAdapter({ ...config, apiKey }, model) +): ChatStreamSummarizeAdapter< + TModel, + InferTextProviderOptions> +> { + return new ChatStreamSummarizeAdapter( + new GeminiTextAdapter({ ...config, apiKey }, model), + model, + 'gemini', + ) } /** - * Creates a Gemini summarize adapter with API key from environment and required model + * Creates a Gemini summarize adapter with API key from `GOOGLE_API_KEY` / + * `GEMINI_API_KEY` environment variables. + * + * @example + * ```typescript + * const adapter = geminiSummarize('gemini-2.0-flash'); + * await summarize({ adapter, text: 'Long article text...' 
}); + * ``` */ export function geminiSummarize( model: TModel, config?: Omit, -): GeminiSummarizeAdapter { - const apiKey = getGeminiApiKeyFromEnv() - return new GeminiSummarizeAdapter({ ...config, apiKey }, model) +): ChatStreamSummarizeAdapter< + TModel, + InferTextProviderOptions> +> { + return createGeminiSummarize(getGeminiApiKeyFromEnv(), model, config) } diff --git a/packages/typescript/ai-gemini/src/index.ts b/packages/typescript/ai-gemini/src/index.ts index 58c767d9a..a77e542ef 100644 --- a/packages/typescript/ai-gemini/src/index.ts +++ b/packages/typescript/ai-gemini/src/index.ts @@ -11,15 +11,12 @@ export { type GeminiTextProviderOptions, } from './adapters/text' -// Summarize adapter +// Summarize - thin factory functions over @tanstack/ai's ChatStreamSummarizeAdapter export { - GeminiSummarizeAdapter, - GeminiSummarizeModels, createGeminiSummarize, geminiSummarize, - type GeminiSummarizeAdapterOptions, + type GeminiSummarizeConfig, type GeminiSummarizeModel, - type GeminiSummarizeProviderOptions, } from './adapters/summarize' // Image adapter diff --git a/packages/typescript/ai-gemini/tests/gemini-adapter.test.ts b/packages/typescript/ai-gemini/tests/gemini-adapter.test.ts index 3da5d4c20..c412a970d 100644 --- a/packages/typescript/ai-gemini/tests/gemini-adapter.test.ts +++ b/packages/typescript/ai-gemini/tests/gemini-adapter.test.ts @@ -8,7 +8,7 @@ import { type SafetySetting, } from '@google/genai' import { GeminiTextAdapter } from '../src/adapters/text' -import { GeminiSummarizeAdapter } from '../src/adapters/summarize' +import { createGeminiSummarize } from '../src/adapters/summarize' import type { GeminiTextProviderOptions } from '../src/adapters/text' import type { Schema } from '@google/genai' @@ -53,7 +53,7 @@ vi.mock('@google/genai', async () => { const createTextAdapter = () => new GeminiTextAdapter({ apiKey: 'test-key' }, 'gemini-2.5-pro') const createSummarizeAdapter = () => - new GeminiSummarizeAdapter('test-key', 'gemini-2.0-flash') + createGeminiSummarize('test-key', 'gemini-2.0-flash') const weatherTool: Tool = { name: 'lookup_weather', @@ -755,15 +755,26 @@ describe('GeminiAdapter through AI', () => { expect(funcResponsePart.functionResponse.id).toBe('fc_001') }) - it('uses summarize function with models API', async () => { + it('routes summarize() through the gemini chat-stream path', async () => { const summaryText = 'Short and sweet.' 
- mocks.generateContentSpy.mockResolvedValueOnce({ - text: summaryText, - usageMetadata: { - promptTokenCount: 10, - candidatesTokenCount: 5, + const streamChunks = [ + { + candidates: [ + { + content: { parts: [{ text: summaryText }] }, + finishReason: 'STOP', + }, + ], + usageMetadata: { + promptTokenCount: 10, + candidatesTokenCount: 5, + totalTokenCount: 15, + }, }, - }) + ] + mocks.generateContentStreamSpy.mockResolvedValueOnce( + createStream(streamChunks), + ) const adapter = createSummarizeAdapter() const result = await summarize({ @@ -773,10 +784,11 @@ describe('GeminiAdapter through AI', () => { style: 'paragraph', }) - expect(mocks.generateContentSpy).toHaveBeenCalledTimes(1) - const [payload] = mocks.generateContentSpy.mock.calls[0] + expect(mocks.generateContentStreamSpy).toHaveBeenCalledTimes(1) + const [payload] = mocks.generateContentStreamSpy.mock.calls[0] expect(payload.model).toBe('gemini-2.0-flash') - expect(payload.config.systemInstruction).toContain('summarizes text') + expect(payload.config.systemInstruction).toContain('professional summarizer') + expect(payload.config.systemInstruction).toContain('paragraph format') expect(payload.config.systemInstruction).toContain('123 tokens') expect(result.summary).toBe(summaryText) }) diff --git a/packages/typescript/ai-grok/src/adapters/summarize.ts b/packages/typescript/ai-grok/src/adapters/summarize.ts index 301d5e5e7..0177d2983 100644 --- a/packages/typescript/ai-grok/src/adapters/summarize.ts +++ b/packages/typescript/ai-grok/src/adapters/summarize.ts @@ -1,18 +1,12 @@ import { ChatStreamSummarizeAdapter } from '@tanstack/ai/adapters' import { getGrokApiKeyFromEnv } from '../utils' import { GrokTextAdapter } from './text' +import type { InferTextProviderOptions } from '@tanstack/ai/adapters' import type { GROK_CHAT_MODELS } from '../model-meta' import type { GrokClientConfig } from '../utils' export interface GrokSummarizeConfig extends GrokClientConfig {} -export interface GrokSummarizeProviderOptions { - /** Temperature for response generation (0-2) */ - temperature?: number - /** Maximum tokens in the response */ - maxTokens?: number -} - export type GrokSummarizeModel = (typeof GROK_CHAT_MODELS)[number] /** @@ -27,7 +21,10 @@ export function createGrokSummarize( model: TModel, apiKey: string, config?: Omit, -): ChatStreamSummarizeAdapter { +): ChatStreamSummarizeAdapter< + TModel, + InferTextProviderOptions> +> { return new ChatStreamSummarizeAdapter( new GrokTextAdapter({ apiKey, ...config }, model), model, @@ -47,6 +44,9 @@ export function createGrokSummarize( export function grokSummarize( model: TModel, config?: Omit, -): ChatStreamSummarizeAdapter { +): ChatStreamSummarizeAdapter< + TModel, + InferTextProviderOptions> +> { return createGrokSummarize(model, getGrokApiKeyFromEnv(), config) } diff --git a/packages/typescript/ai-grok/src/adapters/text.ts b/packages/typescript/ai-grok/src/adapters/text.ts index c1cbb3521..1324e0556 100644 --- a/packages/typescript/ai-grok/src/adapters/text.ts +++ b/packages/typescript/ai-grok/src/adapters/text.ts @@ -68,14 +68,14 @@ export class GrokTextAdapter< params: OpenAI_SDK.Chat.Completions.ChatCompletionCreateParamsNonStreaming, requestOptions: { signal?: AbortSignal | null; headers?: HeadersInit }, ): Promise { - return this.client.chat.completions.create(params, requestOptions) + return await this.client.chat.completions.create(params, requestOptions) } protected async callChatCompletionStream( params: OpenAI_SDK.Chat.Completions.ChatCompletionCreateParamsStreaming, 
requestOptions: { signal?: AbortSignal | null; headers?: HeadersInit }, ): Promise> { - return this.client.chat.completions.create(params, requestOptions) + return await this.client.chat.completions.create(params, requestOptions) } } diff --git a/packages/typescript/ai-grok/src/index.ts b/packages/typescript/ai-grok/src/index.ts index a313cf06f..142ab3346 100644 --- a/packages/typescript/ai-grok/src/index.ts +++ b/packages/typescript/ai-grok/src/index.ts @@ -16,7 +16,6 @@ export { createGrokSummarize, grokSummarize, type GrokSummarizeConfig, - type GrokSummarizeProviderOptions, type GrokSummarizeModel, } from './adapters/summarize' diff --git a/packages/typescript/ai-ollama/src/adapters/summarize.ts b/packages/typescript/ai-ollama/src/adapters/summarize.ts index b0729c662..f4315cad0 100644 --- a/packages/typescript/ai-ollama/src/adapters/summarize.ts +++ b/packages/typescript/ai-ollama/src/adapters/summarize.ts @@ -1,248 +1,56 @@ -import { EventType } from '@tanstack/ai' -import { - createOllamaClient, - estimateTokens, - generateId, - getOllamaHostFromEnv, -} from '../utils' - +import { ChatStreamSummarizeAdapter } from '@tanstack/ai/adapters' +import { getOllamaHostFromEnv } from '../utils' +import { OllamaTextAdapter } from './text' +import type { InferTextProviderOptions } from '@tanstack/ai/adapters' import type { OLLAMA_TEXT_MODELS as OllamaSummarizeModels } from '../model-meta' -import type { Ollama } from 'ollama' -import type { SummarizeAdapter } from '@tanstack/ai/adapters' -import type { - StreamChunk, - SummarizationOptions, - SummarizationResult, -} from '@tanstack/ai' export type OllamaSummarizeModel = | (typeof OllamaSummarizeModels)[number] | (string & {}) -/** - * Ollama-specific provider options for summarization - */ -export interface OllamaSummarizeProviderOptions { - /** Number of GPU layers to use */ - num_gpu?: number - /** Number of threads to use */ - num_thread?: number - /** Context window size */ - num_ctx?: number - /** Number of tokens to predict */ - num_predict?: number - /** Temperature for sampling */ - temperature?: number - /** Top-p sampling */ - top_p?: number - /** Top-k sampling */ - top_k?: number - /** Repeat penalty */ - repeat_penalty?: number -} - export interface OllamaSummarizeAdapterOptions { host?: string } /** - * Ollama Summarize Adapter - * A tree-shakeable summarization adapter for Ollama - */ -export class OllamaSummarizeAdapter< - TModel extends OllamaSummarizeModel, -> implements SummarizeAdapter { - readonly kind = 'summarize' as const - readonly name = 'ollama' as const - readonly model: TModel - - // Type-only property - never assigned at runtime - declare '~types': { - providerOptions: OllamaSummarizeProviderOptions - } - - private client: Ollama - constructor( - hostOrClient: string | Ollama | undefined, - model: TModel, - _options: OllamaSummarizeAdapterOptions = {}, - ) { - if (typeof hostOrClient === 'string' || hostOrClient === undefined) { - this.client = createOllamaClient({ host: hostOrClient }) - } else { - this.client = hostOrClient - } - this.model = model - } - - async summarize(options: SummarizationOptions): Promise { - const { logger } = options - const model = options.model - - logger.request(`activity=summarize provider=ollama`, { - provider: 'ollama', - model, - }) - - const prompt = this.buildSummarizationPrompt(options) - - try { - const response = await this.client.generate({ - model, - prompt, - options: { - temperature: 0.3, - num_predict: options.maxLength ?? 
500, - }, - stream: false, - }) - - const promptTokens = estimateTokens(prompt) - const completionTokens = estimateTokens(response.response) - - return { - id: generateId('sum'), - model: response.model, - summary: response.response, - usage: { - promptTokens, - completionTokens, - totalTokens: promptTokens + completionTokens, - }, - } - } catch (error) { - logger.errors('ollama.summarize fatal', { - error, - source: 'ollama.summarize', - }) - throw error - } - } - - async *summarizeStream( - options: SummarizationOptions, - ): AsyncIterable { - const { logger } = options - const model = options.model - const id = generateId('sum') - const threadId = generateId('thread') - const prompt = this.buildSummarizationPrompt(options) - let accumulatedContent = '' - - logger.request(`activity=summarize provider=ollama`, { - provider: 'ollama', - model, - stream: true, - }) - - try { - yield { - type: EventType.RUN_STARTED, - runId: id, - threadId, - model, - timestamp: Date.now(), - } satisfies StreamChunk - - const stream = await this.client.generate({ - model, - prompt, - options: { - temperature: 0.3, - num_predict: options.maxLength ?? 500, - }, - stream: true, - }) - - for await (const chunk of stream) { - logger.provider(`provider=ollama`, { chunk }) - - if (chunk.response) { - accumulatedContent += chunk.response - yield { - type: EventType.TEXT_MESSAGE_CONTENT, - messageId: id, - model: chunk.model, - timestamp: Date.now(), - delta: chunk.response, - content: accumulatedContent, - } satisfies StreamChunk - } - - if (chunk.done) { - const promptTokens = estimateTokens(prompt) - const completionTokens = estimateTokens(accumulatedContent) - yield { - type: EventType.RUN_FINISHED, - runId: id, - threadId, - model: chunk.model, - timestamp: Date.now(), - finishReason: 'stop', - usage: { - promptTokens, - completionTokens, - totalTokens: promptTokens + completionTokens, - }, - } satisfies StreamChunk - } - } - } catch (error) { - logger.errors('ollama.summarize fatal', { - error, - source: 'ollama.summarize', - }) - throw error - } - } - - private buildSummarizationPrompt(options: SummarizationOptions): string { - let prompt = 'You are a professional summarizer. ' - - switch (options.style) { - case 'bullet-points': - prompt += 'Provide a summary in bullet point format. ' - break - case 'concise': - prompt += 'Provide a very brief one or two sentence summary. ' - break - case 'paragraph': - default: - prompt += 'Provide a clear and concise summary in paragraph format. ' - } - - if (options.maxLength) { - prompt += `Keep the summary under ${options.maxLength} words. ` - } - - if (options.focus && options.focus.length > 0) { - prompt += `Focus on: ${options.focus.join(', ')}. ` - } - - prompt += `\n\nText to summarize:\n${options.text}\n\nSummary:` - - return prompt - } -} - -/** - * Creates an Ollama summarize adapter with explicit host and model + * Creates an Ollama summarize adapter with explicit host and model. 
+ * + * @example + * ```typescript + * const adapter = createOllamaSummarize('mistral', 'http://localhost:11434'); + * ``` */ export function createOllamaSummarize( model: TModel, host?: string, - options?: OllamaSummarizeAdapterOptions, -): OllamaSummarizeAdapter { - return new OllamaSummarizeAdapter(host, model, options) + _options?: OllamaSummarizeAdapterOptions, +): ChatStreamSummarizeAdapter< + TModel, + InferTextProviderOptions> +> { + return new ChatStreamSummarizeAdapter( + new OllamaTextAdapter(host, model), + model, + 'ollama', + ) } /** - * Creates an Ollama summarize adapter with host from environment and required model + * Creates an Ollama summarize adapter with host from `OLLAMA_HOST` env var + * (falling back to the Ollama default). + * + * @example + * ```typescript + * const adapter = ollamaSummarize('mistral'); + * await summarize({ adapter, text: 'Long article text...' }); + * ``` */ export function ollamaSummarize( model: TModel, options?: OllamaSummarizeAdapterOptions, -): OllamaSummarizeAdapter { - const host = getOllamaHostFromEnv() - return new OllamaSummarizeAdapter(host, model, options) +): ChatStreamSummarizeAdapter< + TModel, + InferTextProviderOptions> +> { + return createOllamaSummarize(model, getOllamaHostFromEnv(), options) } diff --git a/packages/typescript/ai-ollama/src/index.ts b/packages/typescript/ai-ollama/src/index.ts index 781aa7eab..05c8d0e81 100644 --- a/packages/typescript/ai-ollama/src/index.ts +++ b/packages/typescript/ai-ollama/src/index.ts @@ -13,14 +13,12 @@ export { } from './adapters/text' export { OLLAMA_TEXT_MODELS as OllamaTextModels } from './model-meta' -// Summarize adapter +// Summarize - thin factory functions over @tanstack/ai's ChatStreamSummarizeAdapter export { - OllamaSummarizeAdapter, createOllamaSummarize, ollamaSummarize, type OllamaSummarizeAdapterOptions, type OllamaSummarizeModel, - type OllamaSummarizeProviderOptions, } from './adapters/summarize' export { OLLAMA_TEXT_MODELS as OllamaSummarizeModels } from './model-meta' diff --git a/packages/typescript/ai-openai/src/adapters/summarize.ts b/packages/typescript/ai-openai/src/adapters/summarize.ts index 17b652b7c..64dd71a8c 100644 --- a/packages/typescript/ai-openai/src/adapters/summarize.ts +++ b/packages/typescript/ai-openai/src/adapters/summarize.ts @@ -1,18 +1,12 @@ import { ChatStreamSummarizeAdapter } from '@tanstack/ai/adapters' import { getOpenAIApiKeyFromEnv } from '../utils/client' import { OpenAITextAdapter } from './text' +import type { InferTextProviderOptions } from '@tanstack/ai/adapters' import type { OpenAIChatModel } from '../model-meta' import type { OpenAIClientConfig } from '../utils/client' export interface OpenAISummarizeConfig extends OpenAIClientConfig {} -export interface OpenAISummarizeProviderOptions { - /** Temperature for response generation (0-2) */ - temperature?: number - /** Maximum tokens in the response */ - maxTokens?: number -} - /** * Creates an OpenAI summarize adapter with explicit API key. 
* @@ -25,7 +19,10 @@ export function createOpenaiSummarize( model: TModel, apiKey: string, config?: Omit, -): ChatStreamSummarizeAdapter { +): ChatStreamSummarizeAdapter< + TModel, + InferTextProviderOptions> +> { return new ChatStreamSummarizeAdapter( new OpenAITextAdapter({ apiKey, ...config }, model), model, @@ -45,6 +42,9 @@ export function createOpenaiSummarize( export function openaiSummarize( model: TModel, config?: Omit, -): ChatStreamSummarizeAdapter { +): ChatStreamSummarizeAdapter< + TModel, + InferTextProviderOptions> +> { return createOpenaiSummarize(model, getOpenAIApiKeyFromEnv(), config) } diff --git a/packages/typescript/ai-openai/src/index.ts b/packages/typescript/ai-openai/src/index.ts index a2ddd7221..170833aea 100644 --- a/packages/typescript/ai-openai/src/index.ts +++ b/packages/typescript/ai-openai/src/index.ts @@ -16,7 +16,6 @@ export { createOpenaiSummarize, openaiSummarize, type OpenAISummarizeConfig, - type OpenAISummarizeProviderOptions, } from './adapters/summarize' // Image adapter - for image generation diff --git a/packages/typescript/ai-openrouter/src/adapters/summarize.ts b/packages/typescript/ai-openrouter/src/adapters/summarize.ts index aa7513522..21004335d 100644 --- a/packages/typescript/ai-openrouter/src/adapters/summarize.ts +++ b/packages/typescript/ai-openrouter/src/adapters/summarize.ts @@ -1,11 +1,7 @@ -import { BaseSummarizeAdapter } from '@tanstack/ai/adapters' +import { ChatStreamSummarizeAdapter } from '@tanstack/ai/adapters' import { getOpenRouterApiKeyFromEnv } from '../utils' import { OpenRouterTextAdapter } from './text' -import type { - StreamChunk, - SummarizationOptions, - SummarizationResult, -} from '@tanstack/ai' +import type { InferTextProviderOptions } from '@tanstack/ai/adapters' import type { OpenRouterConfig } from './text' import type { OPENROUTER_CHAT_MODELS } from '../model-meta' import type { SDKOptions } from '@openrouter/sdk' @@ -22,202 +18,45 @@ export interface OpenRouterSummarizeConfig extends OpenRouterConfig { maxTokens?: number } -/** - * OpenRouter-specific provider options for summarization - */ -export interface OpenRouterSummarizeProviderOptions { - /** Temperature for response generation (0-2) */ - temperature?: number - /** Maximum tokens in the response */ - maxTokens?: number -} - -/** - * OpenRouter Summarize Adapter - * - * A thin wrapper around the text adapter that adds summarization-specific prompting. - * Delegates all API calls to the OpenRouterTextAdapter. - */ -export class OpenRouterSummarizeAdapter< - TModel extends OpenRouterTextModels, -> extends BaseSummarizeAdapter { - readonly kind = 'summarize' as const - readonly name = 'openrouter' as const - - private textAdapter: OpenRouterTextAdapter - private temperature: number - private maxTokens: number | undefined - - constructor(config: OpenRouterSummarizeConfig, model: TModel) { - super({}, model) - this.textAdapter = new OpenRouterTextAdapter(config, model) - this.temperature = config.temperature ?? 
0.3 - this.maxTokens = config.maxTokens - } - - async summarize(options: SummarizationOptions): Promise { - const { logger } = options - const systemPrompt = this.buildSummarizationPrompt(options) - - logger.request(`activity=summarize provider=openrouter`, { - provider: 'openrouter', - model: options.model, - }) - - let summary = '' - const id = '' - let model = options.model - let usage = { promptTokens: 0, completionTokens: 0, totalTokens: 0 } - - try { - for await (const chunk of this.textAdapter.chatStream({ - model: options.model, - messages: [{ role: 'user', content: options.text }], - systemPrompts: [systemPrompt], - maxTokens: this.maxTokens ?? options.maxLength, - temperature: this.temperature, - logger, - })) { - // AG-UI TEXT_MESSAGE_CONTENT event - if (chunk.type === 'TEXT_MESSAGE_CONTENT') { - if (chunk.content) { - summary = chunk.content - } else { - summary += chunk.delta - } - model = chunk.model || model - } - // AG-UI RUN_FINISHED event - if (chunk.type === 'RUN_FINISHED') { - if (chunk.usage) { - usage = chunk.usage - } - } - // AG-UI RUN_ERROR event - if (chunk.type === 'RUN_ERROR') { - throw new Error(`Error during summarization: ${chunk.error?.message}`) - } - } - } catch (error) { - logger.errors('openrouter.summarize fatal', { - error, - source: 'openrouter.summarize', - }) - throw error - } - - return { id, model, summary, usage } - } - - async *summarizeStream( - options: SummarizationOptions, - ): AsyncIterable { - const { logger } = options - const systemPrompt = this.buildSummarizationPrompt(options) - - logger.request(`activity=summarize provider=openrouter`, { - provider: 'openrouter', - model: options.model, - stream: true, - }) - - try { - yield* this.textAdapter.chatStream({ - model: options.model, - messages: [{ role: 'user', content: options.text }], - systemPrompts: [systemPrompt], - maxTokens: this.maxTokens ?? options.maxLength, - temperature: this.temperature, - logger, - }) - } catch (error) { - logger.errors('openrouter.summarize fatal', { - error, - source: 'openrouter.summarize', - }) - throw error - } - } - - private buildSummarizationPrompt(options: SummarizationOptions): string { - let prompt = 'You are a professional summarizer. ' - - switch (options.style) { - case 'bullet-points': - prompt += 'Provide a summary in bullet point format. ' - break - case 'paragraph': - prompt += 'Provide a summary in paragraph format. ' - break - case 'concise': - prompt += 'Provide a very concise summary in 1-2 sentences. ' - break - default: - prompt += 'Provide a clear and concise summary. ' - } - - if (options.focus && options.focus.length > 0) { - prompt += `Focus on the following aspects: ${options.focus.join(', ')}. ` - } - - if (options.maxLength) { - prompt += `Keep the summary under ${options.maxLength} tokens. ` - } - - return prompt - } -} - /** * Creates an OpenRouter summarize adapter with explicit API key. - * Type resolution happens here at the call site. 
- * - * @param model - The model name (e.g., 'openai/gpt-4o-mini', 'anthropic/claude-3-5-sonnet') - * @param apiKey - Your OpenRouter API key - * @param config - Optional additional configuration - * @returns Configured OpenRouter summarize adapter instance with resolved types * * @example * ```typescript - * const adapter = createOpenRouterSummarize('openai/gpt-4o-mini', "sk-or-..."); + * const adapter = createOpenRouterSummarize('openai/gpt-4o-mini', 'sk-or-...'); * ``` */ export function createOpenRouterSummarize( model: TModel, apiKey: string, config?: Omit, -): OpenRouterSummarizeAdapter { - return new OpenRouterSummarizeAdapter({ apiKey, ...config }, model) +): ChatStreamSummarizeAdapter< + TModel, + InferTextProviderOptions> +> { + return new ChatStreamSummarizeAdapter( + new OpenRouterTextAdapter({ apiKey, ...config }, model), + model, + 'openrouter', + ) } /** - * Creates an OpenRouter summarize adapter with automatic API key detection from environment variables. - * Type resolution happens here at the call site. - * - * Looks for `OPENROUTER_API_KEY` in: - * - `process.env` (Node.js) - * - `window.env` (Browser with injected env) - * - * @param model - The model name (e.g., 'openai/gpt-4o-mini', 'anthropic/claude-3-5-sonnet') - * @param config - Optional configuration (excluding apiKey which is auto-detected) - * @returns Configured OpenRouter summarize adapter instance with resolved types - * @throws Error if OPENROUTER_API_KEY is not found in environment + * Creates an OpenRouter summarize adapter with API key from + * `OPENROUTER_API_KEY` in `process.env` (Node) or `window.env` (browser). * * @example * ```typescript - * // Automatically uses OPENROUTER_API_KEY from environment * const adapter = openRouterSummarize('openai/gpt-4o-mini'); - * - * await summarize({ - * adapter, - * text: "Long article text..." - * }); + * await summarize({ adapter, text: 'Long article text...' 
}); * ``` */ export function openRouterSummarize( model: TModel, config?: Omit, -): OpenRouterSummarizeAdapter { - const apiKey = getOpenRouterApiKeyFromEnv() - return createOpenRouterSummarize(model, apiKey, config) +): ChatStreamSummarizeAdapter< + TModel, + InferTextProviderOptions> +> { + return createOpenRouterSummarize(model, getOpenRouterApiKeyFromEnv(), config) } diff --git a/packages/typescript/ai-openrouter/src/index.ts b/packages/typescript/ai-openrouter/src/index.ts index 0ff7e1432..e883b4323 100644 --- a/packages/typescript/ai-openrouter/src/index.ts +++ b/packages/typescript/ai-openrouter/src/index.ts @@ -20,13 +20,12 @@ export { type OpenRouterResponsesTextProviderOptions, } from './adapters/responses-text' -// Summarize adapter - for text summarization +// Summarize - thin factory functions over @tanstack/ai's ChatStreamSummarizeAdapter export { - OpenRouterSummarizeAdapter, createOpenRouterSummarize, openRouterSummarize, type OpenRouterSummarizeConfig, - type OpenRouterSummarizeProviderOptions, + type OpenRouterTextModels as OpenRouterSummarizeModel, } from './adapters/summarize' // Image adapter - for image generation diff --git a/packages/typescript/ai/src/activities/index.ts b/packages/typescript/ai/src/activities/index.ts index aaf28459b..69d06be22 100644 --- a/packages/typescript/ai/src/activities/index.ts +++ b/packages/typescript/ai/src/activities/index.ts @@ -63,6 +63,7 @@ export { export { ChatStreamSummarizeAdapter, type ChatStreamCapable, + type InferTextProviderOptions, } from './summarize/chat-stream-summarize' // =========================== diff --git a/packages/typescript/ai/src/activities/summarize/adapter.ts b/packages/typescript/ai/src/activities/summarize/adapter.ts index 0c9beed91..2f7cc34f6 100644 --- a/packages/typescript/ai/src/activities/summarize/adapter.ts +++ b/packages/typescript/ai/src/activities/summarize/adapter.ts @@ -46,7 +46,9 @@ export interface SummarizeAdapter< /** * Summarize the given text */ - summarize: (options: SummarizationOptions) => Promise + summarize: ( + options: SummarizationOptions, + ) => Promise /** * Stream summarization of the given text. @@ -54,7 +56,7 @@ export interface SummarizeAdapter< * non-streaming summarize and yield the result as a single chunk. */ summarizeStream?: ( - options: SummarizationOptions, + options: SummarizationOptions, ) => AsyncIterable } @@ -91,7 +93,7 @@ export abstract class BaseSummarizeAdapter< } abstract summarize( - options: SummarizationOptions, + options: SummarizationOptions, ): Promise /** @@ -99,7 +101,9 @@ export abstract class BaseSummarizeAdapter< * Override this method in concrete implementations to enable streaming. * If not overridden, the activity layer will fall back to non-streaming. */ - summarizeStream?(options: SummarizationOptions): AsyncIterable + summarizeStream?( + options: SummarizationOptions, + ): AsyncIterable protected generateId(): string { return `${this.name}-${Date.now()}-${Math.random().toString(36).substring(7)}` diff --git a/packages/typescript/ai/src/activities/summarize/chat-stream-summarize.ts b/packages/typescript/ai/src/activities/summarize/chat-stream-summarize.ts index c58510774..534851824 100644 --- a/packages/typescript/ai/src/activities/summarize/chat-stream-summarize.ts +++ b/packages/typescript/ai/src/activities/summarize/chat-stream-summarize.ts @@ -11,13 +11,30 @@ import type { * Minimal contract for a text adapter that supports `chatStream`. 
Lets * `ChatStreamSummarizeAdapter` work with any text adapter without coupling * to a specific implementation. + * + * The provider-options shape is intentionally `any` here — the wrapper only + * forwards `modelOptions` straight through, so a text adapter with a richer + * per-model options type (e.g. `ResolveProviderOptions`) is still + * acceptable. Summarize-level type safety is enforced via + * `SummarizationOptions` on the wrapper itself. */ -export interface ChatStreamCapable { - chatStream: ( - options: TextOptions, - ) => AsyncIterable +export interface ChatStreamCapable { + chatStream: (options: TextOptions) => AsyncIterable } +/** + * Extract the per-model `modelOptions` type a text adapter accepts. Used by + * provider summarize factories so their `modelOptions` IntelliSense matches + * what the underlying text adapter actually understands. + */ +export type InferTextProviderOptions = TAdapter extends { + '~types': { providerOptions: infer P } +} + ? P extends object + ? P + : object + : object + /** * Summarize adapter that wraps any `ChatStreamCapable` text adapter and * prompts it for summarization. Not tied to any wire format. @@ -28,10 +45,10 @@ export class ChatStreamSummarizeAdapter< > extends BaseSummarizeAdapter { readonly name: string - private textAdapter: ChatStreamCapable + private textAdapter: ChatStreamCapable constructor( - textAdapter: ChatStreamCapable, + textAdapter: ChatStreamCapable, model: TModel, name: string = 'chat-stream-summarize', ) { @@ -40,7 +57,9 @@ export class ChatStreamSummarizeAdapter< this.textAdapter = textAdapter } - async summarize(options: SummarizationOptions): Promise { + async summarize( + options: SummarizationOptions, + ): Promise { const systemPrompt = this.buildSummarizationPrompt(options) let summary = '' @@ -54,14 +73,9 @@ export class ChatStreamSummarizeAdapter< ) try { - for await (const chunk of this.textAdapter.chatStream({ - model: options.model, - messages: [{ role: 'user', content: options.text }], - systemPrompts: [systemPrompt], - maxTokens: options.maxLength, - temperature: 0.3, - logger: options.logger, - } satisfies TextOptions)) { + for await (const chunk of this.textAdapter.chatStream( + this.buildTextOptions(options, systemPrompt), + )) { if (chunk.type === 'TEXT_MESSAGE_CONTENT') { if (chunk.content) { summary = chunk.content @@ -110,7 +124,7 @@ export class ChatStreamSummarizeAdapter< } async *summarizeStream( - options: SummarizationOptions, + options: SummarizationOptions, ): AsyncIterable { const systemPrompt = this.buildSummarizationPrompt(options) @@ -120,14 +134,9 @@ export class ChatStreamSummarizeAdapter< ) try { - yield* this.textAdapter.chatStream({ - model: options.model, - messages: [{ role: 'user', content: options.text }], - systemPrompts: [systemPrompt], - maxTokens: options.maxLength, - temperature: 0.3, - logger: options.logger, - } satisfies TextOptions) + yield* this.textAdapter.chatStream( + this.buildTextOptions(options, systemPrompt), + ) } catch (error: unknown) { options.logger.errors(`${this.name}.summarizeStream fatal`, { error: toRunErrorPayload(error, `${this.name}.summarizeStream failed`), @@ -137,7 +146,30 @@ export class ChatStreamSummarizeAdapter< } } - protected buildSummarizationPrompt(options: SummarizationOptions): string { + /** + * Build the TextOptions passed to the underlying chatStream. 
Provider + * `modelOptions` from the summarize call are forwarded as-is so knobs like + * Anthropic cache headers, Gemini safety settings, or Ollama tuning params + * still reach the wire layer. + */ + protected buildTextOptions( + options: SummarizationOptions, + systemPrompt: string, + ): TextOptions { + return { + model: options.model, + messages: [{ role: 'user', content: options.text }], + systemPrompts: [systemPrompt], + maxTokens: options.maxLength, + temperature: 0.3, + modelOptions: options.modelOptions, + logger: options.logger, + } + } + + protected buildSummarizationPrompt( + options: SummarizationOptions, + ): string { let prompt = 'You are a professional summarizer. ' switch (options.style) { diff --git a/packages/typescript/ai/src/activities/summarize/index.ts b/packages/typescript/ai/src/activities/summarize/index.ts index b696735bf..f454ab6af 100644 --- a/packages/typescript/ai/src/activities/summarize/index.ts +++ b/packages/typescript/ai/src/activities/summarize/index.ts @@ -184,7 +184,7 @@ export function summarize< async function runSummarize( options: SummarizeActivityOptions, false>, ): Promise { - const { adapter, text, maxLength, style, focus } = options + const { adapter, text, maxLength, style, focus, modelOptions } = options const model = adapter.model const requestId = createId('summarize') const inputLength = text.length @@ -211,6 +211,7 @@ async function runSummarize( maxLength, style, focus, + modelOptions, logger, } @@ -253,7 +254,7 @@ async function runSummarize( async function* runStreamingSummarize( options: SummarizeActivityOptions, true>, ): AsyncIterable { - const { adapter, text, maxLength, style, focus } = options + const { adapter, text, maxLength, style, focus, modelOptions } = options const model = adapter.model const logger: InternalLogger = resolveDebugOption(options.debug) @@ -269,6 +270,7 @@ async function* runStreamingSummarize( maxLength, style, focus, + modelOptions, logger, } @@ -316,4 +318,5 @@ export { BaseSummarizeAdapter } from './adapter' export { ChatStreamSummarizeAdapter, type ChatStreamCapable, + type InferTextProviderOptions, } from './chat-stream-summarize' diff --git a/packages/typescript/ai/src/types.ts b/packages/typescript/ai/src/types.ts index 7c34446b4..f66522b3a 100644 --- a/packages/typescript/ai/src/types.ts +++ b/packages/typescript/ai/src/types.ts @@ -1185,12 +1185,16 @@ export interface TextCompletionChunk { } } -export interface SummarizationOptions { +export interface SummarizationOptions< + TProviderOptions extends object = Record, +> { model: string text: string maxLength?: number style?: 'bullet-points' | 'paragraph' | 'concise' focus?: Array + /** Provider-specific options forwarded by the summarize() activity. */ + modelOptions?: TProviderOptions /** * Internal logger threaded from the summarize() entry point. Adapters must * call logger.request() before the SDK call and logger.errors() in catch blocks. 
diff --git a/testing/e2e/src/routes/api.summarize.ts b/testing/e2e/src/routes/api.summarize.ts index 131aedac6..5f65e9884 100644 --- a/testing/e2e/src/routes/api.summarize.ts +++ b/testing/e2e/src/routes/api.summarize.ts @@ -28,7 +28,7 @@ function createSummarizeAdapter(provider: Provider) { grok: () => createGrokSummarize('grok-3', DUMMY_KEY, { baseURL: LLMOCK_OPENAI }), // Both OpenRouter provider rows use the OpenRouter summarize adapter: - // `OpenRouterSummarizeAdapter` wraps the OpenRouter chat-completions + // `createOpenRouterSummarize` wraps the OpenRouter chat-completions // text adapter regardless of whether the caller selected the Chat // Completions or Responses surface, so a single factory backs both // matrix entries. From 0db4c120b06e5b64cbbc4a1153afd7348c030f61 Mon Sep 17 00:00:00 2001 From: "autofix-ci[bot]" <114827586+autofix-ci[bot]@users.noreply.github.com> Date: Wed, 13 May 2026 04:25:42 +0000 Subject: [PATCH 33/49] ci: apply automated fixes --- packages/typescript/ai-gemini/tests/gemini-adapter.test.ts | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/packages/typescript/ai-gemini/tests/gemini-adapter.test.ts b/packages/typescript/ai-gemini/tests/gemini-adapter.test.ts index c412a970d..b894da9f3 100644 --- a/packages/typescript/ai-gemini/tests/gemini-adapter.test.ts +++ b/packages/typescript/ai-gemini/tests/gemini-adapter.test.ts @@ -787,7 +787,9 @@ describe('GeminiAdapter through AI', () => { expect(mocks.generateContentStreamSpy).toHaveBeenCalledTimes(1) const [payload] = mocks.generateContentStreamSpy.mock.calls[0] expect(payload.model).toBe('gemini-2.0-flash') - expect(payload.config.systemInstruction).toContain('professional summarizer') + expect(payload.config.systemInstruction).toContain( + 'professional summarizer', + ) expect(payload.config.systemInstruction).toContain('paragraph format') expect(payload.config.systemInstruction).toContain('123 tokens') expect(result.summary).toBe(summaryText) From a39e2bc15ed59f05734d4d45eb5e8de0f9dca799 Mon Sep 17 00:00:00 2001 From: Tom Beckenham <34339192+tombeckenham@users.noreply.github.com> Date: Wed, 13 May 2026 17:10:06 +1000 Subject: [PATCH 34/49] refactor(ai-openai-compatible): vendor wire types; drop openai dep MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Move the OpenAI wire-format types from `import type ... from 'openai'` to local `src/types/{chat-completions,responses,tools}.ts` — hand-written minimal interfaces covering only the fields the base reads/writes. - Drop `openai` from `@tanstack/ai-openai-compatible`'s peerDependencies, peerDependenciesMeta, and devDependencies. - Drop `openai` from `@tanstack/ai-openrouter`'s devDependencies (it was only there to satisfy type leakage from `@tanstack/ai-openai-compatible`). - Update `@tanstack/ai-openai`'s text adapter overrides to use the local protocol types and cast at the SDK boundary, keeping variance compat with the now-local base. Net effect: `@tanstack/ai-openai-compatible`'s emitted .d.ts contains zero `from 'openai'` references. End-users installing `@tanstack/ai-openrouter` (or any future protocol-compatible adapter that doesn't itself use the openai SDK at runtime) no longer pull `openai` into their dependency tree. 
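To make the "no `openai` in the tree" point concrete: at this commit a subclass can implement its streaming hook against the now-local wire types with plain `fetch`. A sketch only — the endpoint URL and the SSE framing details are illustrative, not part of the package:

```ts
import type {
  ChatCompletionChunk,
  ChatCompletionCreateParamsStreaming,
} from '@tanstack/ai-openai-compatible'

// Hypothetical transport: POST the params to any Chat-Completions-shaped
// SSE endpoint and parse `data:` lines into ChatCompletionChunk objects.
async function* streamChunks(
  url: string,
  apiKey: string,
  params: ChatCompletionCreateParamsStreaming,
): AsyncIterable<ChatCompletionChunk> {
  const res = await fetch(url, {
    method: 'POST',
    headers: {
      'content-type': 'application/json',
      authorization: `Bearer ${apiKey}`,
    },
    body: JSON.stringify(params),
  })
  if (!res.ok || !res.body) throw new Error(`HTTP ${res.status}`)
  const reader = res.body.pipeThrough(new TextDecoderStream()).getReader()
  let buffer = ''
  while (true) {
    const { value, done } = await reader.read()
    if (done) break
    buffer += value
    let nl: number
    while ((nl = buffer.indexOf('\n')) !== -1) {
      const line = buffer.slice(0, nl).trim()
      buffer = buffer.slice(nl + 1)
      if (!line.startsWith('data:')) continue
      const payload = line.slice(5).trim()
      if (payload === '[DONE]') return
      yield JSON.parse(payload) as ChatCompletionChunk
    }
  }
}
```

No `openai` import appears anywhere in the sketch; the param and chunk types come entirely from the package's local `src/types/chat-completions.ts`.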
Co-Authored-By: Claude Opus 4.7 (1M context) --- .../ai-openai-compatible/package.json | 9 +- .../src/adapters/chat-completions-text.ts | 34 ++- .../chat-completions-tool-converter.ts | 5 +- .../src/adapters/responses-text.ts | 37 ++- .../ai-openai-compatible/src/index.ts | 16 +- .../src/tools/apply-patch-tool.ts | 4 +- .../src/tools/code-interpreter-tool.ts | 4 +- .../src/tools/computer-use-tool.ts | 4 +- .../src/tools/custom-tool.ts | 4 +- .../src/tools/file-search-tool.ts | 6 +- .../src/tools/function-tool.ts | 4 +- .../src/tools/image-generation-tool.ts | 4 +- .../src/tools/local-shell-tool.ts | 4 +- .../src/tools/mcp-tool.ts | 4 +- .../src/tools/shell-tool.ts | 4 +- .../src/tools/web-search-preview-tool.ts | 4 +- .../src/tools/web-search-tool.ts | 4 +- .../src/types/chat-completions.ts | 239 +++++++++++++++ .../src/types/responses.ts | 282 ++++++++++++++++++ .../ai-openai-compatible/src/types/tools.ts | 166 +++++++++++ .../typescript/ai-openai/src/adapters/text.ts | 40 ++- .../src/text/text-provider-options.ts | 3 +- .../typescript/ai-openrouter/package.json | 1 - pnpm-lock.yaml | 6 - 24 files changed, 798 insertions(+), 90 deletions(-) create mode 100644 packages/typescript/ai-openai-compatible/src/types/chat-completions.ts create mode 100644 packages/typescript/ai-openai-compatible/src/types/responses.ts create mode 100644 packages/typescript/ai-openai-compatible/src/types/tools.ts diff --git a/packages/typescript/ai-openai-compatible/package.json b/packages/typescript/ai-openai-compatible/package.json index e66ee09eb..7a8dd8520 100644 --- a/packages/typescript/ai-openai-compatible/package.json +++ b/packages/typescript/ai-openai-compatible/package.json @@ -44,18 +44,11 @@ "@tanstack/ai-utils": "workspace:*" }, "peerDependencies": { - "@tanstack/ai": "workspace:^", - "openai": "^6.9.1" - }, - "peerDependenciesMeta": { - "openai": { - "optional": true - } + "@tanstack/ai": "workspace:^" }, "devDependencies": { "@tanstack/ai": "workspace:*", "@vitest/coverage-v8": "4.0.14", - "openai": "^6.9.1", "vite": "^7.2.7" } } diff --git a/packages/typescript/ai-openai-compatible/src/adapters/chat-completions-text.ts b/packages/typescript/ai-openai-compatible/src/adapters/chat-completions-text.ts index ee3a39c69..ef5142a0a 100644 --- a/packages/typescript/ai-openai-compatible/src/adapters/chat-completions-text.ts +++ b/packages/typescript/ai-openai-compatible/src/adapters/chat-completions-text.ts @@ -9,7 +9,15 @@ import type { StructuredOutputOptions, StructuredOutputResult, } from '@tanstack/ai/adapters' -import type OpenAI_SDK from 'openai' +import type { + ChatCompletion, + ChatCompletionChunk, + ChatCompletionChunkChoice, + ChatCompletionContentPart, + ChatCompletionCreateParamsNonStreaming, + ChatCompletionCreateParamsStreaming, + ChatCompletionMessageParam, +} from '../types/chat-completions' import type { ContentPart, DefaultMessageMetadataByModality, @@ -228,9 +236,9 @@ export abstract class OpenAICompatibleChatCompletionsTextAdapter< * reads documented fields like `response.choices[0].message.content`. */ protected abstract callChatCompletion( - params: OpenAI_SDK.Chat.Completions.ChatCompletionCreateParamsNonStreaming, + params: ChatCompletionCreateParamsNonStreaming, requestOptions: ReturnType, - ): Promise + ): Promise /** * Performs the streaming Chat Completions network call. Returns an @@ -238,9 +246,9 @@ export abstract class OpenAICompatibleChatCompletionsTextAdapter< * only needs structural iteration over chunks. 
*/ protected abstract callChatCompletionStream( - params: OpenAI_SDK.Chat.Completions.ChatCompletionCreateParamsStreaming, + params: ChatCompletionCreateParamsStreaming, requestOptions: ReturnType, - ): Promise> + ): Promise> /** * Extract reasoning content from a stream chunk. Default returns @@ -269,7 +277,7 @@ export abstract class OpenAICompatibleChatCompletionsTextAdapter< * Override this in subclasses to handle provider-specific stream behavior. */ protected async *processStreamChunks( - stream: AsyncIterable, + stream: AsyncIterable, options: TextOptions, aguiState: { runId: string @@ -288,10 +296,10 @@ export abstract class OpenAICompatibleChatCompletionsTextAdapter< // therefore defer RUN_FINISHED until the iterator is exhausted so we can // pick up usage from the trailing chunk regardless of arrival order. let lastUsage: - | OpenAI_SDK.Chat.Completions.ChatCompletionChunk['usage'] + | ChatCompletionChunk['usage'] | undefined let pendingFinishReason: - | OpenAI_SDK.Chat.Completions.ChatCompletionChunk.Choice['finish_reason'] + | ChatCompletionChunkChoice['finish_reason'] | undefined // Track tool calls being streamed (arguments come in chunks) @@ -753,7 +761,7 @@ export abstract class OpenAICompatibleChatCompletionsTextAdapter< */ protected mapOptionsToRequest( options: TextOptions, - ): OpenAI_SDK.Chat.Completions.ChatCompletionCreateParamsStreaming { + ): ChatCompletionCreateParamsStreaming { const tools = options.tools ? convertToolsToChatCompletionsFormat( options.tools, @@ -762,7 +770,7 @@ export abstract class OpenAICompatibleChatCompletionsTextAdapter< : undefined // Build messages array with system prompts - const messages: Array = + const messages: Array = [] // Add system prompts first @@ -812,7 +820,7 @@ export abstract class OpenAICompatibleChatCompletionsTextAdapter< */ protected convertMessage( message: ModelMessage, - ): OpenAI_SDK.Chat.Completions.ChatCompletionMessageParam { + ): ChatCompletionMessageParam { // Handle tool messages if (message.role === 'tool') { return { @@ -878,7 +886,7 @@ export abstract class OpenAICompatibleChatCompletionsTextAdapter< // content parts rather than silently dropping them — a message of all // unsupported parts would otherwise turn into an empty user prompt and // mask a real capability mismatch. - const parts: Array = + const parts: Array = [] for (const part of contentParts) { const converted = this.convertContentPart(part) @@ -915,7 +923,7 @@ export abstract class OpenAICompatibleChatCompletionsTextAdapter< */ protected convertContentPart( part: ContentPart, - ): OpenAI_SDK.Chat.Completions.ChatCompletionContentPart | null { + ): ChatCompletionContentPart | null { if (part.type === 'text') { return { type: 'text', text: part.content } } diff --git a/packages/typescript/ai-openai-compatible/src/adapters/chat-completions-tool-converter.ts b/packages/typescript/ai-openai-compatible/src/adapters/chat-completions-tool-converter.ts index 2a83eaae3..03b910d0b 100644 --- a/packages/typescript/ai-openai-compatible/src/adapters/chat-completions-tool-converter.ts +++ b/packages/typescript/ai-openai-compatible/src/adapters/chat-completions-tool-converter.ts @@ -1,13 +1,12 @@ import { makeStructuredOutputCompatible } from '../utils/schema-converter' +import type { ChatCompletionTool } from '../types/chat-completions' import type { JSONSchema, Tool } from '@tanstack/ai' -import type OpenAI from 'openai' /** * Chat Completions API tool format. * This is distinct from the Responses API tool format. 
*/ -export type ChatCompletionFunctionTool = - OpenAI.Chat.Completions.ChatCompletionTool +export type ChatCompletionFunctionTool = ChatCompletionTool /** * Converts a standard Tool to OpenAI Chat Completions ChatCompletionTool format. diff --git a/packages/typescript/ai-openai-compatible/src/adapters/responses-text.ts b/packages/typescript/ai-openai-compatible/src/adapters/responses-text.ts index 61a48bf2b..f1fc41213 100644 --- a/packages/typescript/ai-openai-compatible/src/adapters/responses-text.ts +++ b/packages/typescript/ai-openai-compatible/src/adapters/responses-text.ts @@ -9,8 +9,15 @@ import type { StructuredOutputOptions, StructuredOutputResult, } from '@tanstack/ai/adapters' -import type OpenAI_SDK from 'openai' -import type { Responses } from 'openai/resources' +import type { + Response, + ResponseCreateParams, + ResponseCreateParamsNonStreaming, + ResponseCreateParamsStreaming, + ResponseInput, + ResponseInputContent, + ResponseStreamEvent, +} from '../types/responses' import type { ContentPart, DefaultMessageMetadataByModality, @@ -182,7 +189,7 @@ export abstract class OpenAICompatibleResponsesTextAdapter< const response = await this.callResponse( { ...(cleanParams as Omit< - OpenAI_SDK.Responses.ResponseCreateParams, + ResponseCreateParams, 'stream' >), stream: false, @@ -204,7 +211,7 @@ export abstract class OpenAICompatibleResponsesTextAdapter< // that contract local rather than relying on inference through the // overloaded `client.responses.create` signature. const rawText = this.extractTextFromResponse( - response satisfies OpenAI_SDK.Responses.Response, + response satisfies Response, ) // Fail loud on empty content rather than letting it cascade into a @@ -281,9 +288,9 @@ export abstract class OpenAICompatibleResponsesTextAdapter< * documented fields like `response.output[...]`. */ protected abstract callResponse( - params: OpenAI_SDK.Responses.ResponseCreateParamsNonStreaming, + params: ResponseCreateParamsNonStreaming, requestOptions: ReturnType, - ): Promise + ): Promise /** * Performs the streaming Responses API network call. Returns an @@ -291,16 +298,16 @@ export abstract class OpenAICompatibleResponsesTextAdapter< * only needs structural iteration over events. */ protected abstract callResponseStream( - params: OpenAI_SDK.Responses.ResponseCreateParamsStreaming, + params: ResponseCreateParamsStreaming, requestOptions: ReturnType, - ): Promise> + ): Promise> /** * Extract text content from a non-streaming Responses API response. * Override this in subclasses for provider-specific response shapes. 
*/ protected extractTextFromResponse( - response: OpenAI_SDK.Responses.Response, + response: Response, ): string { let textContent = '' let refusal: string | undefined @@ -370,7 +377,7 @@ export abstract class OpenAICompatibleResponsesTextAdapter< * - error */ protected async *processStreamChunks( - stream: AsyncIterable, + stream: AsyncIterable, toolCallMetadata: Map< string, { @@ -1204,7 +1211,7 @@ export abstract class OpenAICompatibleResponsesTextAdapter< */ protected mapOptionsToRequest( options: TextOptions, - ): Omit { + ): Omit { const input = this.convertMessagesToInput(options.messages) const tools = options.tools @@ -1257,8 +1264,8 @@ export abstract class OpenAICompatibleResponsesTextAdapter< */ protected convertMessagesToInput( messages: Array, - ): Responses.ResponseInput { - const result: Responses.ResponseInput = [] + ): ResponseInput { + const result: ResponseInput = [] for (const message of messages) { // Handle tool messages - convert to FunctionToolCallOutput @@ -1312,7 +1319,7 @@ export abstract class OpenAICompatibleResponsesTextAdapter< // Handle user messages (default case) — support multimodal content const contentParts = this.normalizeContent(message.content) - const inputContent: Array = [] + const inputContent: Array = [] for (const part of contentParts) { inputContent.push(this.convertContentPartToInput(part)) @@ -1347,7 +1354,7 @@ export abstract class OpenAICompatibleResponsesTextAdapter< */ protected convertContentPartToInput( part: ContentPart, - ): Responses.ResponseInputContent { + ): ResponseInputContent { switch (part.type) { case 'text': return { diff --git a/packages/typescript/ai-openai-compatible/src/index.ts b/packages/typescript/ai-openai-compatible/src/index.ts index 371bbc0b7..98aa8065b 100644 --- a/packages/typescript/ai-openai-compatible/src/index.ts +++ b/packages/typescript/ai-openai-compatible/src/index.ts @@ -1,31 +1,33 @@ export { makeStructuredOutputCompatible } from './utils/schema-converter' export * from './tools/index' export { OpenAICompatibleChatCompletionsTextAdapter } from './adapters/chat-completions-text' -// Re-export the OpenAI SDK types subclasses need when implementing the -// `callChatCompletion*` hooks. Type-only — `openai` is an optional peer in -// this package, so consumers that use these types must declare `openai` -// in their own deps (or devDeps if they only need types). +// Wire-format types for subclasses implementing the `callChatCompletion*` +// hooks. Locally defined (see ./types/chat-completions) so this package's +// emitted .d.ts has zero `from 'openai'` references — downstream adapters +// (e.g. ai-openrouter) can implement the contract without installing the +// openai SDK. export type { ChatCompletion, ChatCompletionChunk, ChatCompletionCreateParamsNonStreaming, ChatCompletionCreateParamsStreaming, -} from 'openai/resources/chat/completions' +} from './types/chat-completions' export { convertFunctionToolToChatCompletionsFormat, convertToolsToChatCompletionsFormat, type ChatCompletionFunctionTool, } from './adapters/chat-completions-tool-converter' export { OpenAICompatibleResponsesTextAdapter } from './adapters/responses-text' -// Type-only re-exports for subclasses implementing the `callResponse*` hooks. +// Wire-format types for subclasses implementing the `callResponse*` hooks. 
export type { Response as ResponsesResponse, ResponseCreateParams, ResponseCreateParamsNonStreaming, ResponseCreateParamsStreaming, + ResponseInput, ResponseInputContent, ResponseStreamEvent, -} from 'openai/resources/responses/responses' +} from './types/responses' export { convertFunctionToolToResponsesFormat, convertToolsToResponsesFormat, diff --git a/packages/typescript/ai-openai-compatible/src/tools/apply-patch-tool.ts b/packages/typescript/ai-openai-compatible/src/tools/apply-patch-tool.ts index 6bc157aa4..e26bec30c 100644 --- a/packages/typescript/ai-openai-compatible/src/tools/apply-patch-tool.ts +++ b/packages/typescript/ai-openai-compatible/src/tools/apply-patch-tool.ts @@ -1,7 +1,7 @@ -import type OpenAI from 'openai' +import type { ApplyPatchToolConfig } from '../types/tools' import type { Tool } from '@tanstack/ai' -export type ApplyPatchToolConfig = OpenAI.Responses.ApplyPatchTool +export type { ApplyPatchToolConfig } /** @deprecated Renamed to `ApplyPatchToolConfig`. Will be removed in a future release. */ export type ApplyPatchTool = ApplyPatchToolConfig diff --git a/packages/typescript/ai-openai-compatible/src/tools/code-interpreter-tool.ts b/packages/typescript/ai-openai-compatible/src/tools/code-interpreter-tool.ts index 53f130588..7aa265f87 100644 --- a/packages/typescript/ai-openai-compatible/src/tools/code-interpreter-tool.ts +++ b/packages/typescript/ai-openai-compatible/src/tools/code-interpreter-tool.ts @@ -1,7 +1,7 @@ +import type { CodeInterpreterToolConfig } from '../types/tools' import type { Tool } from '@tanstack/ai' -import type OpenAI from 'openai' -export type CodeInterpreterToolConfig = OpenAI.Responses.Tool.CodeInterpreter +export type { CodeInterpreterToolConfig } /** @deprecated Renamed to `CodeInterpreterToolConfig`. Will be removed in a future release. */ export type CodeInterpreterTool = CodeInterpreterToolConfig diff --git a/packages/typescript/ai-openai-compatible/src/tools/computer-use-tool.ts b/packages/typescript/ai-openai-compatible/src/tools/computer-use-tool.ts index 487e6486c..d839f3756 100644 --- a/packages/typescript/ai-openai-compatible/src/tools/computer-use-tool.ts +++ b/packages/typescript/ai-openai-compatible/src/tools/computer-use-tool.ts @@ -1,7 +1,7 @@ -import type OpenAI from 'openai' +import type { ComputerUseToolConfig } from '../types/tools' import type { Tool } from '@tanstack/ai' -export type ComputerUseToolConfig = OpenAI.Responses.ComputerTool +export type { ComputerUseToolConfig } /** @deprecated Renamed to `ComputerUseToolConfig`. Will be removed in a future release. */ export type ComputerUseTool = ComputerUseToolConfig diff --git a/packages/typescript/ai-openai-compatible/src/tools/custom-tool.ts b/packages/typescript/ai-openai-compatible/src/tools/custom-tool.ts index 6e0cb8e5f..818eed4fa 100644 --- a/packages/typescript/ai-openai-compatible/src/tools/custom-tool.ts +++ b/packages/typescript/ai-openai-compatible/src/tools/custom-tool.ts @@ -1,7 +1,7 @@ -import type OpenAI from 'openai' +import type { CustomToolConfig } from '../types/tools' import type { Tool } from '@tanstack/ai' -export type CustomToolConfig = OpenAI.Responses.CustomTool +export type { CustomToolConfig } /** @deprecated Renamed to `CustomToolConfig`. Will be removed in a future release. 
*/ export type CustomTool = CustomToolConfig diff --git a/packages/typescript/ai-openai-compatible/src/tools/file-search-tool.ts b/packages/typescript/ai-openai-compatible/src/tools/file-search-tool.ts index 82eb472aa..5749a8329 100644 --- a/packages/typescript/ai-openai-compatible/src/tools/file-search-tool.ts +++ b/packages/typescript/ai-openai-compatible/src/tools/file-search-tool.ts @@ -1,6 +1,8 @@ -import type OpenAI from 'openai' +import type { FileSearchToolConfig } from '../types/tools' import type { Tool } from '@tanstack/ai' +export type { FileSearchToolConfig } + const validateMaxNumResults = (maxNumResults: number | undefined) => { if ( maxNumResults !== undefined && @@ -10,8 +12,6 @@ const validateMaxNumResults = (maxNumResults: number | undefined) => { } } -export type FileSearchToolConfig = OpenAI.Responses.FileSearchTool - /** @deprecated Renamed to `FileSearchToolConfig`. Will be removed in a future release. */ export type FileSearchTool = FileSearchToolConfig diff --git a/packages/typescript/ai-openai-compatible/src/tools/function-tool.ts b/packages/typescript/ai-openai-compatible/src/tools/function-tool.ts index bf06804c6..28a9e7e88 100644 --- a/packages/typescript/ai-openai-compatible/src/tools/function-tool.ts +++ b/packages/typescript/ai-openai-compatible/src/tools/function-tool.ts @@ -1,8 +1,8 @@ import { makeStructuredOutputCompatible } from '../utils/schema-converter' +import type { FunctionToolConfig } from '../types/tools' import type { JSONSchema, Tool } from '@tanstack/ai' -import type OpenAI from 'openai' -export type FunctionToolConfig = OpenAI.Responses.FunctionTool +export type { FunctionToolConfig } /** @deprecated Renamed to `FunctionToolConfig`. Will be removed in a future release. */ export type FunctionTool = FunctionToolConfig diff --git a/packages/typescript/ai-openai-compatible/src/tools/image-generation-tool.ts b/packages/typescript/ai-openai-compatible/src/tools/image-generation-tool.ts index f81fee40f..bb372fa10 100644 --- a/packages/typescript/ai-openai-compatible/src/tools/image-generation-tool.ts +++ b/packages/typescript/ai-openai-compatible/src/tools/image-generation-tool.ts @@ -1,7 +1,7 @@ -import type OpenAI from 'openai' +import type { ImageGenerationToolConfig } from '../types/tools' import type { Tool } from '@tanstack/ai' -export type ImageGenerationToolConfig = OpenAI.Responses.Tool.ImageGeneration +export type { ImageGenerationToolConfig } /** @deprecated Renamed to `ImageGenerationToolConfig`. Will be removed in a future release. */ export type ImageGenerationTool = ImageGenerationToolConfig diff --git a/packages/typescript/ai-openai-compatible/src/tools/local-shell-tool.ts b/packages/typescript/ai-openai-compatible/src/tools/local-shell-tool.ts index dc15f46c5..4c1aa3f57 100644 --- a/packages/typescript/ai-openai-compatible/src/tools/local-shell-tool.ts +++ b/packages/typescript/ai-openai-compatible/src/tools/local-shell-tool.ts @@ -1,7 +1,7 @@ -import type OpenAI from 'openai' +import type { LocalShellToolConfig } from '../types/tools' import type { Tool } from '@tanstack/ai' -export type LocalShellToolConfig = OpenAI.Responses.Tool.LocalShell +export type { LocalShellToolConfig } /** @deprecated Renamed to `LocalShellToolConfig`. Will be removed in a future release. 
*/ export type LocalShellTool = LocalShellToolConfig diff --git a/packages/typescript/ai-openai-compatible/src/tools/mcp-tool.ts b/packages/typescript/ai-openai-compatible/src/tools/mcp-tool.ts index 6693a466b..5d1be45e1 100644 --- a/packages/typescript/ai-openai-compatible/src/tools/mcp-tool.ts +++ b/packages/typescript/ai-openai-compatible/src/tools/mcp-tool.ts @@ -1,7 +1,7 @@ -import type OpenAI from 'openai' +import type { MCPToolConfig } from '../types/tools' import type { Tool } from '@tanstack/ai' -export type MCPToolConfig = OpenAI.Responses.Tool.Mcp +export type { MCPToolConfig } /** @deprecated Renamed to `MCPToolConfig`. Will be removed in a future release. */ export type MCPTool = MCPToolConfig diff --git a/packages/typescript/ai-openai-compatible/src/tools/shell-tool.ts b/packages/typescript/ai-openai-compatible/src/tools/shell-tool.ts index 4912a33c6..dfa0d8143 100644 --- a/packages/typescript/ai-openai-compatible/src/tools/shell-tool.ts +++ b/packages/typescript/ai-openai-compatible/src/tools/shell-tool.ts @@ -1,7 +1,7 @@ -import type OpenAI from 'openai' +import type { ShellToolConfig } from '../types/tools' import type { Tool } from '@tanstack/ai' -export type ShellToolConfig = OpenAI.Responses.FunctionShellTool +export type { ShellToolConfig } /** @deprecated Renamed to `ShellToolConfig`. Will be removed in a future release. */ export type ShellTool = ShellToolConfig diff --git a/packages/typescript/ai-openai-compatible/src/tools/web-search-preview-tool.ts b/packages/typescript/ai-openai-compatible/src/tools/web-search-preview-tool.ts index 0f020fde4..6310afeb8 100644 --- a/packages/typescript/ai-openai-compatible/src/tools/web-search-preview-tool.ts +++ b/packages/typescript/ai-openai-compatible/src/tools/web-search-preview-tool.ts @@ -1,7 +1,7 @@ -import type OpenAI from 'openai' +import type { WebSearchPreviewToolConfig } from '../types/tools' import type { Tool } from '@tanstack/ai' -export type WebSearchPreviewToolConfig = OpenAI.Responses.WebSearchPreviewTool +export type { WebSearchPreviewToolConfig } /** @deprecated Renamed to `WebSearchPreviewToolConfig`. Will be removed in a future release. */ export type WebSearchPreviewTool = WebSearchPreviewToolConfig diff --git a/packages/typescript/ai-openai-compatible/src/tools/web-search-tool.ts b/packages/typescript/ai-openai-compatible/src/tools/web-search-tool.ts index ac5bfdfc9..43bfa8a6b 100644 --- a/packages/typescript/ai-openai-compatible/src/tools/web-search-tool.ts +++ b/packages/typescript/ai-openai-compatible/src/tools/web-search-tool.ts @@ -1,7 +1,7 @@ -import type OpenAI from 'openai' +import type { WebSearchToolConfig } from '../types/tools' import type { Tool } from '@tanstack/ai' -export type WebSearchToolConfig = OpenAI.Responses.WebSearchTool +export type { WebSearchToolConfig } /** @deprecated Renamed to `WebSearchToolConfig`. Will be removed in a future release. */ export type WebSearchTool = WebSearchToolConfig diff --git a/packages/typescript/ai-openai-compatible/src/types/chat-completions.ts b/packages/typescript/ai-openai-compatible/src/types/chat-completions.ts new file mode 100644 index 000000000..1354c1840 --- /dev/null +++ b/packages/typescript/ai-openai-compatible/src/types/chat-completions.ts @@ -0,0 +1,239 @@ +/** + * Local wire-format types for the OpenAI Chat Completions API. + * + * These are not a full clone of `openai`'s types — only the fields the base + * adapter reads/writes and the public abstract-method surface subclasses + * must satisfy. 
Structural compatibility means subclasses can still hand the + * openai SDK's own `ChatCompletion` / `ChatCompletionChunk` objects to the + * base without conversion: the SDK shapes are supersets of ours. + * + * Open-ended index signatures (`[key: string]: unknown`) on request and + * message shapes let subclasses spread their own SDK params / message shapes + * through `modelOptions` without TS complaining about unknown fields. + */ + +export interface ChatCompletionTool { + type: 'function' + function: { + name: string + description?: string + parameters?: Record + strict?: boolean | null + } +} + +export interface ChatCompletionContentPartText { + type: 'text' + text: string +} + +export interface ChatCompletionContentPartImage { + type: 'image_url' + image_url: { + url: string + detail?: 'auto' | 'low' | 'high' + } +} + +export interface ChatCompletionContentPartInputAudio { + type: 'input_audio' + input_audio: { + data: string + format: 'wav' | 'mp3' + } +} + +export interface ChatCompletionContentPartFile { + type: 'file' + file: { + file_data?: string + file_id?: string + filename?: string + } +} + +export type ChatCompletionContentPart = + | ChatCompletionContentPartText + | ChatCompletionContentPartImage + | ChatCompletionContentPartInputAudio + | ChatCompletionContentPartFile + +export interface ChatCompletionSystemMessageParam { + role: 'system' + content: string | Array + name?: string +} + +export interface ChatCompletionUserMessageParam { + role: 'user' + content: string | Array + name?: string +} + +export interface ChatCompletionMessageFunctionToolCall { + id: string + type: 'function' + function: { + name: string + arguments: string + } +} + +export interface ChatCompletionMessageCustomToolCall { + id: string + type: 'custom' + custom: { + name: string + input: string + } +} + +export type ChatCompletionMessageToolCall = + | ChatCompletionMessageFunctionToolCall + | ChatCompletionMessageCustomToolCall + +export interface ChatCompletionAssistantMessageParam { + role: 'assistant' + content?: string | Array | null + name?: string + tool_calls?: Array + refusal?: string | null +} + +export interface ChatCompletionToolMessageParam { + role: 'tool' + content: string | Array + tool_call_id: string +} + +export interface ChatCompletionDeveloperMessageParam { + role: 'developer' + content: string | Array + name?: string +} + +export type ChatCompletionMessageParam = + | ChatCompletionSystemMessageParam + | ChatCompletionUserMessageParam + | ChatCompletionAssistantMessageParam + | ChatCompletionToolMessageParam + | ChatCompletionDeveloperMessageParam + +export type ChatCompletionToolChoiceOption = + | 'none' + | 'auto' + | 'required' + | { + type: 'function' + function: { name: string } + } + +export interface ChatCompletionStreamOptions { + include_usage?: boolean +} + +export interface ChatCompletionCreateParamsBase { + model: string + messages: Array + temperature?: number | null + top_p?: number | null + max_tokens?: number | null + max_completion_tokens?: number | null + n?: number | null + tools?: Array + tool_choice?: ChatCompletionToolChoiceOption + response_format?: + | { type: 'text' } + | { type: 'json_object' } + | { + type: 'json_schema' + json_schema: { + name: string + description?: string + schema?: Record + strict?: boolean | null + } + } + stream_options?: ChatCompletionStreamOptions | null + user?: string + metadata?: Record | null +} + +export interface ChatCompletionCreateParamsNonStreaming + extends ChatCompletionCreateParamsBase { + stream?: false | null +} + 
+export interface ChatCompletionCreateParamsStreaming + extends ChatCompletionCreateParamsBase { + stream: true +} + +export interface CompletionUsage { + prompt_tokens: number + completion_tokens: number + total_tokens: number +} + +export interface ChatCompletionMessage { + role: 'assistant' + content: string | null + refusal?: string | null + tool_calls?: Array +} + +export interface ChatCompletion { + id: string + object: 'chat.completion' + created: number + model: string + choices: Array<{ + index: number + message: ChatCompletionMessage + finish_reason: ChatCompletionFinishReason | null + logprobs?: unknown + }> + usage?: CompletionUsage | null + system_fingerprint?: string +} + +export type ChatCompletionFinishReason = + | 'stop' + | 'length' + | 'tool_calls' + | 'content_filter' + | 'function_call' + +export interface ChatCompletionChunkChoiceDeltaToolCall { + index: number + id?: string + type?: 'function' + function?: { + name?: string + arguments?: string + } +} + +export interface ChatCompletionChunkChoiceDelta { + role?: 'system' | 'user' | 'assistant' | 'tool' | 'developer' + content?: string | null + tool_calls?: Array + refusal?: string | null +} + +export interface ChatCompletionChunkChoice { + index: number + delta: ChatCompletionChunkChoiceDelta + finish_reason: ChatCompletionFinishReason | null + logprobs?: unknown +} + +export interface ChatCompletionChunk { + id: string + object: 'chat.completion.chunk' + created: number + model: string + choices: Array + usage?: CompletionUsage | null + system_fingerprint?: string +} diff --git a/packages/typescript/ai-openai-compatible/src/types/responses.ts b/packages/typescript/ai-openai-compatible/src/types/responses.ts new file mode 100644 index 000000000..4eefc581e --- /dev/null +++ b/packages/typescript/ai-openai-compatible/src/types/responses.ts @@ -0,0 +1,282 @@ +/** + * Local wire-format types for the OpenAI Responses API. + * + * Same philosophy as `chat-completions.ts`: model only the fields the base + * adapter reads/writes plus the abstract surface subclasses must satisfy. + * Structural compatibility means subclasses can still hand the openai SDK's + * `ResponseStreamEvent` discriminated union into the base — each openai + * variant is a subtype of one of ours, or falls through into the catch-all + * `{ type: string }` arm we never narrow on. 
+ */ + +import type { Tool } from '@tanstack/ai' + +export interface ResponseInputText { + type: 'input_text' + text: string +} + +export interface ResponseInputImage { + type: 'input_image' + image_url?: string + file_id?: string + detail?: 'auto' | 'low' | 'high' +} + +export interface ResponseInputFile { + type: 'input_file' + file_id?: string + file_url?: string + file_data?: string + filename?: string +} + +export type ResponseInputContent = + | ResponseInputText + | ResponseInputImage + | ResponseInputFile + +export interface ResponseInputMessage { + type?: 'message' + role: 'user' | 'assistant' | 'system' | 'developer' + content: string | Array +} + +export interface ResponseFunctionToolCallInput { + type: 'function_call' + call_id: string + name: string + arguments: string +} + +export interface ResponseFunctionCallOutput { + type: 'function_call_output' + call_id: string + output: string +} + +export type ResponseInputItem = + | ResponseInputMessage + | ResponseFunctionToolCallInput + | ResponseFunctionCallOutput + +export type ResponseInput = Array + +export interface ResponseOutputText { + type: 'output_text' + text: string + annotations?: Array +} + +export interface ResponseOutputRefusal { + type: 'refusal' + refusal: string +} + +export interface ResponseOutputReasoningText { + type: 'reasoning_text' + text: string +} + +export type ResponseOutputContent = + | ResponseOutputText + | ResponseOutputRefusal + | ResponseOutputReasoningText + +/** + * A single item in `Response.output`. The Responses API returns ~12 variants + * (message, function_call, reasoning, file_search_call, web_search_call, + * computer_call, image_generation_call, code_interpreter_call, mcp_call, + * local_shell_call, etc.); we model it as a loose interface because the + * base reads heterogeneous optional fields (`id`, `name`, `arguments`, + * `content`) across multiple variants without discriminated narrowing. + * + * Subclasses passing the openai SDK's richer discriminated union satisfy + * this shape structurally. + */ +export interface ResponseOutputItem { + type: string + id?: string + name?: string + arguments?: string + call_id?: string + status?: string + role?: 'assistant' + // The base only iterates `content` after narrowing on `type === 'message'`, + // where the wire format guarantees it. Modelling it as required keeps the + // narrowed access free of `if (content)` guards in the base; non-message + // items just won't carry it at runtime — the base doesn't reach for it. + content: Array +} + +export interface ResponseUsage { + input_tokens: number + output_tokens: number + total_tokens: number +} + +export interface Response { + id: string + object: 'response' + created_at: number + model: string + status?: string + output: Array + usage?: ResponseUsage + error: { message: string; code?: string } | null + incomplete_details: { reason: string } | null +} + +export interface ResponseCreateParamsBase { + model: string + input: string | ResponseInput + instructions?: string | null + temperature?: number | null + top_p?: number | null + max_output_tokens?: number | null + metadata?: Record | null + tools?: Array + tool_choice?: unknown + /** Response-format config (json_schema lives here on the Responses API, + * unlike `response_format` on Chat Completions). 
*/ + text?: { + format?: + | { type: 'text' } + | { type: 'json_object' } + | { + type: 'json_schema' + name: string + description?: string + schema?: Record + strict?: boolean | null + } + } +} + +export interface ResponseCreateParamsNonStreaming + extends ResponseCreateParamsBase { + stream?: false | null +} + +export interface ResponseCreateParamsStreaming extends ResponseCreateParamsBase { + stream: true +} + +export type ResponseCreateParams = + | ResponseCreateParamsNonStreaming + | ResponseCreateParamsStreaming + +/** + * Streamed events from the Responses API. Modelled as a discriminated union + * over the `type` literal: variants the base narrows on declare every field + * it requires for that branch, so accessing `chunk.response.model` after a + * `chunk.type === 'response.failed'` check typechecks without a guard. + * + * The trailing `{ type: string }` arm is the catch-all for event types the + * base never tests — openai's SDK union has many we ignore (web_search_call.*, + * file_search_call.*, mcp_call.*, etc.). Subclasses can still pass openai's + * full union here; each openai variant is structurally assignable to one of + * ours or the catch-all. + */ +export type ResponseStreamEvent = + | { + type: 'response.created' + response: Response + sequence_number?: number + } + | { + type: 'response.in_progress' + response: Response + sequence_number?: number + } + | { + type: 'response.failed' + response: Response + sequence_number?: number + } + | { + type: 'response.incomplete' + response: Response + sequence_number?: number + } + | { + type: 'response.completed' + response: Response + sequence_number?: number + } + | { + type: 'response.output_text.delta' + delta: string | Array + item_id: string + output_index: number + content_index: number + sequence_number?: number + } + | { + type: 'response.reasoning_text.delta' + delta: string | Array + item_id: string + output_index: number + content_index: number + sequence_number?: number + } + | { + type: 'response.reasoning_summary_text.delta' + delta: string + item_id?: string + output_index?: number + summary_index?: number + sequence_number?: number + } + | { + type: 'response.content_part.added' + part: ResponseOutputContent + item_id: string + output_index: number + content_index: number + sequence_number?: number + } + | { + type: 'response.content_part.done' + part: ResponseOutputContent + item_id: string + output_index: number + content_index: number + sequence_number?: number + } + | { + type: 'response.output_item.added' + item: ResponseOutputItem + output_index: number + sequence_number?: number + } + | { + type: 'response.output_item.done' + item: ResponseOutputItem + output_index: number + sequence_number?: number + } + | { + type: 'response.function_call_arguments.delta' + delta: string + item_id: string + output_index: number + sequence_number?: number + } + | { + type: 'response.function_call_arguments.done' + arguments: string + item_id: string + output_index: number + sequence_number?: number + } + | { + type: 'error' + message: string + code?: string + sequence_number?: number + } + +// Re-export the framework `Tool` only because subclass call sites +// frequently import it alongside Responses types — no semantic dependency. 
+export type { Tool } diff --git a/packages/typescript/ai-openai-compatible/src/types/tools.ts b/packages/typescript/ai-openai-compatible/src/types/tools.ts new file mode 100644 index 000000000..7d8bebc64 --- /dev/null +++ b/packages/typescript/ai-openai-compatible/src/types/tools.ts @@ -0,0 +1,166 @@ +/** + * Local tool-config wire shapes for the OpenAI Responses API tool catalogue. + * + * Each interface here matches the OpenAI Responses API tool wire format + * (the `type` discriminator + the documented config fields). Don't add + * `[key: string]: unknown` to these — it makes `keyof T` resolve to `string`, + * so `Omit` drops every named required field along with + * `type`. The convert-from-metadata helpers in `src/tools/*.ts` rely on that + * `Omit` pattern. + */ + +import type { Tool } from '@tanstack/ai' + +export type { Tool } + +// ───────────────────────────────────────────────────────────────────────── +// Apply Patch +// ───────────────────────────────────────────────────────────────────────── + +export interface ApplyPatchToolConfig { + type: 'apply_patch' +} + +// ───────────────────────────────────────────────────────────────────────── +// Code Interpreter +// ───────────────────────────────────────────────────────────────────────── + +export interface CodeInterpreterToolConfig { + type: 'code_interpreter' + container: string | { type: 'auto'; file_ids?: Array } +} + +// ───────────────────────────────────────────────────────────────────────── +// Computer Use +// ───────────────────────────────────────────────────────────────────────── + +export interface ComputerUseToolConfig { + type: 'computer_use_preview' + display_width: number + display_height: number + environment: 'mac' | 'windows' | 'ubuntu' | 'linux' | 'browser' +} + +// ───────────────────────────────────────────────────────────────────────── +// Custom (free-form tool with grammar/format) +// ───────────────────────────────────────────────────────────────────────── + +export interface CustomToolConfig { + type: 'custom' + name: string + description?: string + format?: + | { type: 'text' } + | { + type: 'grammar' + grammar: { definition: string; syntax: 'lark' | 'regex' } + } +} + +// ───────────────────────────────────────────────────────────────────────── +// File Search +// ───────────────────────────────────────────────────────────────────────── + +export interface FileSearchToolConfig { + type: 'file_search' + vector_store_ids: Array + max_num_results?: number + ranking_options?: { + ranker?: string + score_threshold?: number + } + filters?: unknown +} + +// ───────────────────────────────────────────────────────────────────────── +// Function (Responses-flavoured; flatter than Chat Completions) +// ───────────────────────────────────────────────────────────────────────── + +export interface FunctionToolConfig { + type: 'function' + name: string + description?: string | null + parameters: Record | null + strict: boolean | null +} + +// ───────────────────────────────────────────────────────────────────────── +// Image Generation +// ───────────────────────────────────────────────────────────────────────── + +export interface ImageGenerationToolConfig { + type: 'image_generation' + background?: 'transparent' | 'opaque' | 'auto' + model?: string + moderation?: 'auto' | 'low' + output_compression?: number + output_format?: 'png' | 'webp' | 'jpeg' + partial_images?: number + quality?: 'low' | 'medium' | 'high' | 'auto' + size?: '1024x1024' | '1024x1536' | '1536x1024' | 'auto' +} + +// 
───────────────────────────────────────────────────────────────────────── +// Local Shell +// ───────────────────────────────────────────────────────────────────────── + +export interface LocalShellToolConfig { + type: 'local_shell' +} + +// ───────────────────────────────────────────────────────────────────────── +// MCP +// ───────────────────────────────────────────────────────────────────────── + +export interface MCPToolConfig { + type: 'mcp' + server_label: string + server_description?: string + server_url?: string + connector_id?: string + authorization?: string + headers?: Record | null + require_approval?: unknown + allowed_tools?: unknown +} + +// ───────────────────────────────────────────────────────────────────────── +// Shell (function-shaped shell — distinct from local_shell) +// ───────────────────────────────────────────────────────────────────────── + +export interface ShellToolConfig { + type: 'shell' +} + +// ───────────────────────────────────────────────────────────────────────── +// Web Search (branded) +// ───────────────────────────────────────────────────────────────────────── + +export interface WebSearchToolConfig { + type: 'web_search' + filters?: { allowed_domains?: Array } | null + user_location?: { + type: 'approximate' + city?: string + country?: string + region?: string + timezone?: string + } | null + search_context_size?: 'low' | 'medium' | 'high' +} + +// ───────────────────────────────────────────────────────────────────────── +// Web Search Preview +// ───────────────────────────────────────────────────────────────────────── + +export interface WebSearchPreviewToolConfig { + type: 'web_search_preview' + search_context_size?: 'low' | 'medium' | 'high' + user_location?: { + type: 'approximate' + city?: string + country?: string + region?: string + timezone?: string + } | null +} diff --git a/packages/typescript/ai-openai/src/adapters/text.ts b/packages/typescript/ai-openai/src/adapters/text.ts index 5854452c1..6e37bd0fd 100644 --- a/packages/typescript/ai-openai/src/adapters/text.ts +++ b/packages/typescript/ai-openai/src/adapters/text.ts @@ -10,6 +10,13 @@ import type { OpenAIChatModelToolCapabilitiesByName, OpenAIModelInputModalitiesByName, } from '../model-meta' +import type { + ResponseCreateParams, + ResponseCreateParamsNonStreaming, + ResponseCreateParamsStreaming, + ResponseStreamEvent, + ResponsesResponse, +} from '@tanstack/ai-openai-compatible' import type OpenAI_SDK from 'openai' import type { Modality, TextOptions } from '@tanstack/ai' import type { @@ -96,18 +103,32 @@ export class OpenAITextAdapter< this.client = new OpenAI(config) } + // The override signatures use the local protocol types from + // `@tanstack/ai-openai-compatible` so we stay variance-compatible with the + // base. Inside the body we still call the openai SDK; casting at the SDK + // boundary (where we already own the runtime contract) is the cleanest + // place to land the two-type-name reality. 
+ protected async callResponse( - params: OpenAI_SDK.Responses.ResponseCreateParamsNonStreaming, + params: ResponseCreateParamsNonStreaming, requestOptions: { signal?: AbortSignal | null; headers?: HeadersInit }, - ): Promise { - return this.client.responses.create(params, requestOptions) + ): Promise { + const response = await this.client.responses.create( + params as unknown as OpenAI_SDK.Responses.ResponseCreateParamsNonStreaming, + requestOptions, + ) + return response as unknown as ResponsesResponse } protected async callResponseStream( - params: OpenAI_SDK.Responses.ResponseCreateParamsStreaming, + params: ResponseCreateParamsStreaming, requestOptions: { signal?: AbortSignal | null; headers?: HeadersInit }, - ): Promise> { - return this.client.responses.create(params, requestOptions) + ): Promise> { + const stream = await this.client.responses.create( + params as unknown as OpenAI_SDK.Responses.ResponseCreateParamsStreaming, + requestOptions, + ) + return stream as unknown as AsyncIterable } /** @@ -118,7 +139,7 @@ export class OpenAITextAdapter< */ protected override mapOptionsToRequest( options: TextOptions, - ): Omit { + ): Omit { // The structural type the validator expects is broader than what // `TProviderOptions` is bound to per-model, so narrow via the internal // shape rather than re-exposing it on the public override signature. @@ -143,10 +164,7 @@ export class OpenAITextAdapter< // previous override spread `...modelOptions` LAST and wrote // `temperature: options.temperature` unconditionally — re-introducing the // exact regression the base class's nullish-aware merge fixes. - const requestParams: Omit< - OpenAI_SDK.Responses.ResponseCreateParams, - 'stream' - > = { + const requestParams: Omit = { ...modelOptions, model: options.model, ...(options.temperature !== undefined && { diff --git a/packages/typescript/ai-openai/src/text/text-provider-options.ts b/packages/typescript/ai-openai/src/text/text-provider-options.ts index ba9d60498..5df05d315 100644 --- a/packages/typescript/ai-openai/src/text/text-provider-options.ts +++ b/packages/typescript/ai-openai/src/text/text-provider-options.ts @@ -1,3 +1,4 @@ +import type { ResponseInput } from '@tanstack/ai-openai-compatible' import type OpenAI from 'openai' import type { ApplyPatchTool } from '../tools/apply-patch-tool' import type { CodeInterpreterTool } from '../tools/code-interpreter-tool' @@ -246,7 +247,7 @@ export type ExternalTextProviderOptions = OpenAIBaseOptions & * Tip: gate these by model capability in your SDK, not just by presence. */ export interface InternalTextProviderOptions extends ExternalTextProviderOptions { - input: string | OpenAI.Responses.ResponseInput + input: string | ResponseInput /** * A system (or developer) message inserted into the model's context. 
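With `input` now typed against the local `ResponseInput` (per the `text-provider-options.ts` hunk above), a caller can pre-seed Responses input items using only the wire shapes defined in `types/responses.ts` earlier in this patch. A sketch — the IDs, text, and output payload are illustrative:

```ts
import type { ResponseInput } from '@tanstack/ai-openai-compatible'

// Message item + a function_call_output item, built from the local
// ResponseInputMessage / ResponseFunctionCallOutput shapes.
export const seededInput: ResponseInput = [
  {
    role: 'user',
    content: [{ type: 'input_text', text: 'Summarize the attached report.' }],
  },
  { type: 'function_call_output', call_id: 'call_123', output: '{"ok":true}' },
]
```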
diff --git a/packages/typescript/ai-openrouter/package.json b/packages/typescript/ai-openrouter/package.json index 2b3fcef6e..87faf4e9a 100644 --- a/packages/typescript/ai-openrouter/package.json +++ b/packages/typescript/ai-openrouter/package.json @@ -50,7 +50,6 @@ "devDependencies": { "@tanstack/ai": "workspace:*", "@vitest/coverage-v8": "4.0.14", - "openai": "^6.9.1", "vite": "^7.2.7", "zod": "^4.2.0" }, diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 734ac81df..3d589e600 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -1353,9 +1353,6 @@ importers: '@vitest/coverage-v8': specifier: 4.0.14 version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) - openai: - specifier: ^6.9.1 - version: 6.10.0(ws@8.19.0)(zod@4.3.6) vite: specifier: ^7.2.7 version: 7.3.1(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) @@ -1378,9 +1375,6 @@ importers: '@vitest/coverage-v8': specifier: 4.0.14 version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) - openai: - specifier: ^6.9.1 - version: 6.10.0(ws@8.19.0)(zod@4.3.6) vite: specifier: ^7.2.7 version: 7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) From 71cf0f4f43ccb9177fb2b98c2f8a95e18f6b5314 Mon Sep 17 00:00:00 2001 From: "autofix-ci[bot]" <114827586+autofix-ci[bot]@users.noreply.github.com> Date: Wed, 13 May 2026 07:11:17 +0000 Subject: [PATCH 35/49] ci: apply automated fixes --- .../src/adapters/chat-completions-text.ts | 14 ++++---------- .../src/adapters/responses-text.ts | 17 ++++------------- .../src/types/chat-completions.ts | 6 ++---- .../ai-openai-compatible/src/types/responses.ts | 3 +-- 4 files changed, 11 insertions(+), 29 deletions(-) diff --git a/packages/typescript/ai-openai-compatible/src/adapters/chat-completions-text.ts b/packages/typescript/ai-openai-compatible/src/adapters/chat-completions-text.ts index ef5142a0a..5ef9a4b05 100644 --- a/packages/typescript/ai-openai-compatible/src/adapters/chat-completions-text.ts +++ b/packages/typescript/ai-openai-compatible/src/adapters/chat-completions-text.ts @@ -295,9 +295,7 @@ export abstract class OpenAICompatibleChatCompletionsTextAdapter< // earlier `finish_reason` chunk does NOT include token counts. We must // therefore defer RUN_FINISHED until the iterator is exhausted so we can // pick up usage from the trailing chunk regardless of arrival order. - let lastUsage: - | ChatCompletionChunk['usage'] - | undefined + let lastUsage: ChatCompletionChunk['usage'] | undefined let pendingFinishReason: | ChatCompletionChunkChoice['finish_reason'] | undefined @@ -770,8 +768,7 @@ export abstract class OpenAICompatibleChatCompletionsTextAdapter< : undefined // Build messages array with system prompts - const messages: Array = - [] + const messages: Array = [] // Add system prompts first if (options.systemPrompts && options.systemPrompts.length > 0) { @@ -818,9 +815,7 @@ export abstract class OpenAICompatibleChatCompletionsTextAdapter< * Converts a single ModelMessage to the Chat Completions API message format. * Override this in subclasses to handle provider-specific message formats. 
*/ - protected convertMessage( - message: ModelMessage, - ): ChatCompletionMessageParam { + protected convertMessage(message: ModelMessage): ChatCompletionMessageParam { // Handle tool messages if (message.role === 'tool') { return { @@ -886,8 +881,7 @@ export abstract class OpenAICompatibleChatCompletionsTextAdapter< // content parts rather than silently dropping them — a message of all // unsupported parts would otherwise turn into an empty user prompt and // mask a real capability mismatch. - const parts: Array = - [] + const parts: Array = [] for (const part of contentParts) { const converted = this.convertContentPart(part) if (!converted) { diff --git a/packages/typescript/ai-openai-compatible/src/adapters/responses-text.ts b/packages/typescript/ai-openai-compatible/src/adapters/responses-text.ts index f1fc41213..ef3a7fccd 100644 --- a/packages/typescript/ai-openai-compatible/src/adapters/responses-text.ts +++ b/packages/typescript/ai-openai-compatible/src/adapters/responses-text.ts @@ -188,10 +188,7 @@ export abstract class OpenAICompatibleResponsesTextAdapter< ) const response = await this.callResponse( { - ...(cleanParams as Omit< - ResponseCreateParams, - 'stream' - >), + ...(cleanParams as Omit), stream: false, // Configure structured output via text.format text: { @@ -210,9 +207,7 @@ export abstract class OpenAICompatibleResponsesTextAdapter< // SDK return type to `Response`, but the explicit annotation makes // that contract local rather than relying on inference through the // overloaded `client.responses.create` signature. - const rawText = this.extractTextFromResponse( - response satisfies Response, - ) + const rawText = this.extractTextFromResponse(response satisfies Response) // Fail loud on empty content rather than letting it cascade into a // confusing "Failed to parse JSON. Content: " error — the root cause @@ -306,9 +301,7 @@ export abstract class OpenAICompatibleResponsesTextAdapter< * Extract text content from a non-streaming Responses API response. * Override this in subclasses for provider-specific response shapes. */ - protected extractTextFromResponse( - response: Response, - ): string { + protected extractTextFromResponse(response: Response): string { let textContent = '' let refusal: string | undefined let sawMessageItem = false @@ -1352,9 +1345,7 @@ export abstract class OpenAICompatibleResponsesTextAdapter< * Handles text, image, and audio content parts. * Override this in subclasses for additional content types or provider-specific metadata. 
*/ - protected convertContentPartToInput( - part: ContentPart, - ): ResponseInputContent { + protected convertContentPartToInput(part: ContentPart): ResponseInputContent { switch (part.type) { case 'text': return { diff --git a/packages/typescript/ai-openai-compatible/src/types/chat-completions.ts b/packages/typescript/ai-openai-compatible/src/types/chat-completions.ts index 1354c1840..04cdf54ab 100644 --- a/packages/typescript/ai-openai-compatible/src/types/chat-completions.ts +++ b/packages/typescript/ai-openai-compatible/src/types/chat-completions.ts @@ -159,13 +159,11 @@ export interface ChatCompletionCreateParamsBase { metadata?: Record | null } -export interface ChatCompletionCreateParamsNonStreaming - extends ChatCompletionCreateParamsBase { +export interface ChatCompletionCreateParamsNonStreaming extends ChatCompletionCreateParamsBase { stream?: false | null } -export interface ChatCompletionCreateParamsStreaming - extends ChatCompletionCreateParamsBase { +export interface ChatCompletionCreateParamsStreaming extends ChatCompletionCreateParamsBase { stream: true } diff --git a/packages/typescript/ai-openai-compatible/src/types/responses.ts b/packages/typescript/ai-openai-compatible/src/types/responses.ts index 4eefc581e..048d73e77 100644 --- a/packages/typescript/ai-openai-compatible/src/types/responses.ts +++ b/packages/typescript/ai-openai-compatible/src/types/responses.ts @@ -153,8 +153,7 @@ export interface ResponseCreateParamsBase { } } -export interface ResponseCreateParamsNonStreaming - extends ResponseCreateParamsBase { +export interface ResponseCreateParamsNonStreaming extends ResponseCreateParamsBase { stream?: false | null } From 7aff8b1811390af93145c3b3b64a44fedb3a9529 Mon Sep 17 00:00:00 2001 From: Tom Beckenham <34339192+tombeckenham@users.noreply.github.com> Date: Wed, 13 May 2026 20:33:44 +1000 Subject: [PATCH 36/49] refactor(openai-base): rename, adopt openai SDK, decouple ai-openrouter MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Rename @tanstack/ai-openai-compatible → @tanstack/openai-base. Vendored wire types deleted; the base now imports from openai/resources/* directly. - Drop the abstract callChatCompletion* / callResponse* hooks. Base takes a pre-built OpenAI client in its constructor and calls the SDK itself; subclasses (ai-openai, ai-grok, ai-groq) just pass a configured client. - Decouple @tanstack/ai-openrouter from openai-base entirely. The two adapters now extend BaseTextAdapter directly, inline their stream processors, and read OpenRouter's camelCase types natively — dropping ~300 LOC of snake↔camel reshaping that existed only to satisfy the inherited base. 
Co-Authored-By: Claude Opus 4.7 (1M context) --- ...ecouple-openrouter-collapse-openai-base.md | 35 + .../migrate-groq-openrouter-to-openai-base.md | 20 - ...ame-openai-base-to-ai-openai-compatible.md | 27 - packages/typescript/ai-grok/package.json | 2 +- .../typescript/ai-grok/src/adapters/text.ts | 30 +- .../typescript/ai-grok/src/tools/index.ts | 2 +- .../ai-grok/src/utils/schema-converter.ts | 2 +- packages/typescript/ai-groq/package.json | 2 +- .../typescript/ai-groq/src/adapters/text.ts | 26 +- .../ai-groq/src/utils/schema-converter.ts | 6 +- .../ai-groq/tests/groq-adapter.test.ts | 2 +- .../ai-groq/tests/schema-converter.test.ts | 4 +- .../ai-openai-compatible/src/index.ts | 35 - .../src/types/chat-completions.ts | 237 --- .../src/types/responses.ts | 281 --- .../ai-openai-compatible/src/types/tools.ts | 166 -- packages/typescript/ai-openai/package.json | 4 +- .../typescript/ai-openai/src/adapters/text.ts | 55 +- .../ai-openai/src/adapters/video.ts | 2 +- .../src/text/text-provider-options.ts | 2 +- .../ai-openai/src/tools/apply-patch-tool.ts | 4 +- .../src/tools/code-interpreter-tool.ts | 6 +- .../ai-openai/src/tools/computer-use-tool.ts | 6 +- .../ai-openai/src/tools/custom-tool.ts | 2 +- .../ai-openai/src/tools/file-search-tool.ts | 6 +- .../ai-openai/src/tools/function-tool.ts | 2 +- .../src/tools/image-generation-tool.ts | 6 +- .../typescript/ai-openai/src/tools/index.ts | 2 +- .../ai-openai/src/tools/local-shell-tool.ts | 4 +- .../ai-openai/src/tools/mcp-tool.ts | 6 +- .../ai-openai/src/tools/shell-tool.ts | 4 +- .../ai-openai/src/tools/tool-choice.ts | 2 +- .../ai-openai/src/tools/tool-converter.ts | 2 +- .../src/tools/web-search-preview-tool.ts | 6 +- .../ai-openai/src/tools/web-search-tool.ts | 6 +- .../ai-openai/src/utils/schema-converter.ts | 2 +- .../typescript/ai-openrouter/package.json | 1 - .../src/adapters/responses-text.ts | 1550 +++++++++++++---- .../ai-openrouter/src/adapters/text.ts | 1112 ++++++++---- .../src/internal/request-options.ts | 17 + .../src/internal/responses-tool-converter.ts | 57 + .../src/internal/schema-converter.ts | 90 + .../openrouter-responses-adapter.test.ts | 2 +- .../CHANGELOG.md | 2 +- .../README.md | 14 +- .../package.json | 16 +- .../src/adapters/chat-completions-text.ts | 48 +- .../chat-completions-tool-converter.ts | 13 +- .../src/adapters/responses-text.ts | 44 +- .../src/adapters/responses-tool-converter.ts | 0 packages/typescript/openai-base/src/index.ts | 14 + .../src/tools/apply-patch-tool.ts | 2 +- .../src/tools/code-interpreter-tool.ts | 4 +- .../src/tools/computer-use-tool.ts | 2 +- .../src/tools/custom-tool.ts | 2 +- .../src/tools/file-search-tool.ts | 2 +- .../src/tools/function-tool.ts | 2 +- .../src/tools/image-generation-tool.ts | 4 +- .../src/tools/index.ts | 0 .../src/tools/local-shell-tool.ts | 4 +- .../src/tools/mcp-tool.ts | 4 +- .../src/tools/shell-tool.ts | 2 +- .../src/tools/tool-choice.ts | 0 .../src/tools/tool-converter.ts | 0 .../src/tools/web-search-preview-tool.ts | 2 +- .../src/tools/web-search-tool.ts | 2 +- .../src/utils/request-options.ts | 0 .../src/utils/schema-converter.ts | 0 .../tests/chat-completions-text.test.ts | 57 +- .../tests/mcp-tool.test.ts | 0 .../tests/responses-text.test.ts | 48 +- .../tests/schema-converter.test.ts | 0 .../tsconfig.json | 0 .../vite.config.ts | 0 pnpm-lock.yaml | 70 +- 75 files changed, 2451 insertions(+), 1742 deletions(-) create mode 100644 .changeset/decouple-openrouter-collapse-openai-base.md delete mode 100644 .changeset/migrate-groq-openrouter-to-openai-base.md 
delete mode 100644 .changeset/rename-openai-base-to-ai-openai-compatible.md delete mode 100644 packages/typescript/ai-openai-compatible/src/index.ts delete mode 100644 packages/typescript/ai-openai-compatible/src/types/chat-completions.ts delete mode 100644 packages/typescript/ai-openai-compatible/src/types/responses.ts delete mode 100644 packages/typescript/ai-openai-compatible/src/types/tools.ts create mode 100644 packages/typescript/ai-openrouter/src/internal/request-options.ts create mode 100644 packages/typescript/ai-openrouter/src/internal/responses-tool-converter.ts create mode 100644 packages/typescript/ai-openrouter/src/internal/schema-converter.ts rename packages/typescript/{ai-openai-compatible => openai-base}/CHANGELOG.md (96%) rename packages/typescript/{ai-openai-compatible => openai-base}/README.md (91%) rename packages/typescript/{ai-openai-compatible => openai-base}/package.json (71%) rename packages/typescript/{ai-openai-compatible => openai-base}/src/adapters/chat-completions-text.ts (95%) rename packages/typescript/{ai-openai-compatible => openai-base}/src/adapters/chat-completions-tool-converter.ts (80%) rename packages/typescript/{ai-openai-compatible => openai-base}/src/adapters/responses-text.ts (97%) rename packages/typescript/{ai-openai-compatible => openai-base}/src/adapters/responses-tool-converter.ts (100%) create mode 100644 packages/typescript/openai-base/src/index.ts rename packages/typescript/{ai-openai-compatible => openai-base}/src/tools/apply-patch-tool.ts (88%) rename packages/typescript/{ai-openai-compatible => openai-base}/src/tools/code-interpreter-tool.ts (88%) rename packages/typescript/{ai-openai-compatible => openai-base}/src/tools/computer-use-tool.ts (91%) rename packages/typescript/{ai-openai-compatible => openai-base}/src/tools/custom-tool.ts (89%) rename packages/typescript/{ai-openai-compatible => openai-base}/src/tools/file-search-tool.ts (93%) rename packages/typescript/{ai-openai-compatible => openai-base}/src/tools/function-tool.ts (92%) rename packages/typescript/{ai-openai-compatible => openai-base}/src/tools/image-generation-tool.ts (92%) rename packages/typescript/{ai-openai-compatible => openai-base}/src/tools/index.ts (100%) rename packages/typescript/{ai-openai-compatible => openai-base}/src/tools/local-shell-tool.ts (86%) rename packages/typescript/{ai-openai-compatible => openai-base}/src/tools/mcp-tool.ts (91%) rename packages/typescript/{ai-openai-compatible => openai-base}/src/tools/shell-tool.ts (87%) rename packages/typescript/{ai-openai-compatible => openai-base}/src/tools/tool-choice.ts (100%) rename packages/typescript/{ai-openai-compatible => openai-base}/src/tools/tool-converter.ts (100%) rename packages/typescript/{ai-openai-compatible => openai-base}/src/tools/web-search-preview-tool.ts (91%) rename packages/typescript/{ai-openai-compatible => openai-base}/src/tools/web-search-tool.ts (92%) rename packages/typescript/{ai-openai-compatible => openai-base}/src/utils/request-options.ts (100%) rename packages/typescript/{ai-openai-compatible => openai-base}/src/utils/schema-converter.ts (100%) rename packages/typescript/{ai-openai-compatible => openai-base}/tests/chat-completions-text.test.ts (95%) rename packages/typescript/{ai-openai-compatible => openai-base}/tests/mcp-tool.test.ts (100%) rename packages/typescript/{ai-openai-compatible => openai-base}/tests/responses-text.test.ts (97%) rename packages/typescript/{ai-openai-compatible => openai-base}/tests/schema-converter.test.ts (100%) rename 
packages/typescript/{ai-openai-compatible => openai-base}/tsconfig.json (100%) rename packages/typescript/{ai-openai-compatible => openai-base}/vite.config.ts (100%) diff --git a/.changeset/decouple-openrouter-collapse-openai-base.md b/.changeset/decouple-openrouter-collapse-openai-base.md new file mode 100644 index 000000000..3dab48e95 --- /dev/null +++ b/.changeset/decouple-openrouter-collapse-openai-base.md @@ -0,0 +1,35 @@ +--- +'@tanstack/openai-base': minor +'@tanstack/ai-openai': patch +'@tanstack/ai-grok': patch +'@tanstack/ai-groq': patch +'@tanstack/ai-openrouter': patch +--- + +Decouple `@tanstack/ai-openrouter` from the shared OpenAI base, and collapse the base into a thinner shim over the `openai` SDK. + +Three changes that ship together: + +**1. Rename `@tanstack/ai-openai-compatible` → `@tanstack/openai-base`.** The previous name implied a multi-vendor protocol surface. After ai-openrouter is decoupled (see below), the only remaining consumers (`ai-openai`, `ai-grok`, `ai-groq`) all back onto the `openai` SDK with a different `baseURL` — "base" describes that role accurately. Imports change: + +```diff +- import { OpenAICompatibleChatCompletionsTextAdapter } from '@tanstack/ai-openai-compatible' ++ import { OpenAIBaseChatCompletionsTextAdapter } from '@tanstack/openai-base' +- import { OpenAICompatibleResponsesTextAdapter } from '@tanstack/ai-openai-compatible' ++ import { OpenAIBaseResponsesTextAdapter } from '@tanstack/openai-base' +``` + +`@tanstack/ai-openai-compatible@0.2.x` remains published for anyone with a pinned lockfile reference but will receive no further updates. + +**2. `@tanstack/openai-base` adopts the `openai` SDK directly.** The previous package vendored ~720 LOC of hand-written wire-format types (`ChatCompletion`, `ResponseStreamEvent`, etc.) and exposed abstract `callChatCompletion*` / `callResponse*` hooks subclasses had to implement. Both are gone: + +- The base now depends on `openai` again and imports types directly from `openai/resources/...`. The vendored `src/types/` directory is removed; consumers that imported wire types from the package (e.g. `import type { ResponseInput } from '@tanstack/ai-openai-compatible'`) should now import from the openai SDK. +- The abstract SDK-call methods are removed. The base constructor takes a pre-built `OpenAI` client (`new OpenAIBaseChatCompletionsTextAdapter(model, name, openaiClient)`) and calls `client.chat.completions.create` / `client.responses.create` itself. Subclasses (`ai-openai`, `ai-grok`, `ai-groq`) now just construct the SDK with their provider-specific `baseURL` and pass it to `super` — `callChatCompletion*` / `callResponse*` overrides go away. + +The other extension hooks (`extractReasoning`, `extractTextFromResponse`, `processStreamChunks`, `makeStructuredOutputCompatible`, `transformStructuredOutput`, `mapOptionsToRequest`, `convertMessage`) remain. Groq's `processStreamChunks` and `makeStructuredOutputCompatible` overrides (for `x_groq.usage` promotion and Groq's structured-output schema quirks) are unchanged. + +**3. Decouple `@tanstack/ai-openrouter` from the OpenAI base entirely.** OpenRouter ships its own SDK (`@openrouter/sdk`) with a camelCase shape, so inheriting from the OpenAI-shaped base forced a snake_case ↔ camelCase round-trip on every request and stream event. 
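+As a hedged illustration of that round-trip (field names simplified; the real
+converters, named below, also covered requests, full responses, and usage):
+
+```ts
+// What @openrouter/sdk streams: camelCase deltas.
+type OpenRouterDelta = {
+  content?: string | null
+  toolCalls?: Array<{
+    index: number
+    id?: string
+    function?: { name?: string; arguments?: string }
+  }>
+}
+
+// What the OpenAI-shaped base reader expected: snake_case. Every chunk paid
+// a conversion of roughly this shape before it could be accumulated.
+// (`toSnakeDelta` is a hypothetical name for the sketch.)
+function toSnakeDelta(delta: OpenRouterDelta) {
+  return { content: delta.content, tool_calls: delta.toolCalls }
+}
+```
+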
ai-openrouter now extends `BaseTextAdapter` directly and inlines its own stream processors (`OpenRouterTextAdapter` for chat-completions, `OpenRouterResponsesTextAdapter` for the Responses beta), reading OpenRouter's camelCase types natively. The `@tanstack/openai-base` and `openai` dependencies are removed from ai-openrouter; only `@openrouter/sdk`, `@tanstack/ai`, and `@tanstack/ai-utils` remain. + +Public API is unchanged: `openRouterText`, `openRouterResponsesText`, `createOpenRouterText`, `createOpenRouterResponsesText`, the OpenRouter tool factories, provider routing surface (`provider`, `models`, `plugins`, `variant`, `transforms`), app attribution headers (`httpReferer`, `appTitle`), `:variant` model suffixing, `RequestAbortedError` propagation, and the OpenRouter-specific structured-output null-preservation all behave the same. The ~300 LOC of inbound/outbound shape converters (`toOpenRouterRequest`, `toChatCompletion`, `adaptOpenRouterStreamChunks`, `toSnakeResponseResult`, …) are gone. + +`ai-ollama` remains on `BaseTextAdapter` directly — its native API uses a different wire format from Chat Completions and was never on the shared base. diff --git a/.changeset/migrate-groq-openrouter-to-openai-base.md b/.changeset/migrate-groq-openrouter-to-openai-base.md deleted file mode 100644 index 3ec7467bd..000000000 --- a/.changeset/migrate-groq-openrouter-to-openai-base.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -'@tanstack/ai-openai-compatible': minor -'@tanstack/ai-groq': patch -'@tanstack/ai-openrouter': patch -'@tanstack/ai': patch ---- - -Migrate `ai-groq` and `ai-openrouter` onto `OpenAICompatibleChatCompletionsTextAdapter` so they share the stream accumulator, partial-JSON tool-call buffer, RUN_ERROR taxonomy, and lifecycle gates with `ai-openai` / `ai-grok`. Removes ~1k LOC of duplicated stream processing. - -`@tanstack/ai-openai-compatible` adds four protected hooks on `OpenAICompatibleChatCompletionsTextAdapter` so providers with non-OpenAI SDK shapes can reuse the base: `callChatCompletion` and `callChatCompletionStream` (SDK call sites for non-streaming and streaming Chat Completions), `extractReasoning` (surface reasoning content from chunk shapes that carry it, e.g. OpenRouter's `delta.reasoningDetails`, into the base's REASONING\_\* + legacy STEP_STARTED/STEP_FINISHED lifecycle), and `transformStructuredOutput` (subclasses like OpenRouter can preserve nulls in structured output instead of converting them to undefined). - -`@tanstack/ai-openai-compatible` fixes two error-handling regressions in the shared base: `structuredOutput` now throws a distinct `"response contained no content"` error rather than letting empty content cascade into a misleading JSON-parse error, and the post-loop tool-args drain block now logs malformed JSON via `logger.errors` (matching the in-loop finish_reason path) so truncated streams emitting partial tool args are debuggable instead of silently invoking the tool with `{}`. - -`@tanstack/ai` normalizes abort-shaped errors (`AbortError`, `APIUserAbortError`, `RequestAbortedError`) to a stable `{ message: 'Request aborted', code: 'aborted' }` payload in `toRunErrorPayload`, so consumers can discriminate user-initiated cancellation from other failures without matching on provider-specific message strings. - -`@tanstack/ai-groq` drops the `groq-sdk` dependency in favour of the OpenAI SDK pointed at `https://api.groq.com/openai/v1` (the same pattern as `ai-grok` against xAI). 
The Groq-specific quirk where streaming usage arrives under `chunk.x_groq.usage` is preserved via a small `processStreamChunks` wrapper that promotes it to the standard `chunk.usage` slot. - -`@tanstack/ai-openrouter` keeps `@openrouter/sdk` (the source of truth for OpenRouter's typed provider routing, plugins, and metadata) but routes the SDK call through the base via overridden hooks. A small request shape converter (`max_tokens` → `maxCompletionTokens`, etc.) and chunk shape adapter (camelCase → snake_case for the base's reader) bridge the SDKs. No public API changes; provider routing, app attribution headers (`httpReferer`, `appTitle`), reasoning variants (`:thinking`), and `RequestAbortedError` handling are preserved. Fixes: `stream_options.include_usage` is now correctly camelCased to `includeUsage` so streaming `RUN_FINISHED.usage` is populated (previously silently dropped by the SDK Zod schema); mid-stream `chunk.error.code` is stringified so provider error codes (401, 429, 500, …) survive the `toRunErrorPayload` narrow; assistant `toolCalls[].function.arguments` is stringified to match the SDK's `string` contract; and `convertMessage` now mirrors the base's fail-loud guards (throws on empty user content and unsupported content parts) instead of silently sending empty paid requests. - -`ai-ollama` remains on `BaseTextAdapter` — its native API uses a different wire format from Chat Completions (different chunk shape, request shape, tool-call streaming, and reasoning surface) and doesn't fit the OpenAI base without rebuilding most of the processing it would otherwise inherit. Migrating it remains a separate effort. diff --git a/.changeset/rename-openai-base-to-ai-openai-compatible.md b/.changeset/rename-openai-base-to-ai-openai-compatible.md deleted file mode 100644 index 851100706..000000000 --- a/.changeset/rename-openai-base-to-ai-openai-compatible.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -'@tanstack/ai-openai-compatible': minor -'@tanstack/ai-openai': patch -'@tanstack/ai-openrouter': patch -'@tanstack/ai-groq': patch -'@tanstack/ai-grok': patch ---- - -Rename `@tanstack/openai-base` → `@tanstack/ai-openai-compatible`. - -The previous "base" name implied this package tracked OpenAI's product roadmap. In reality it implements two OpenAI-shaped _wire-format protocols_ that multiple providers ship: - -- **Chat Completions** (`/v1/chat/completions`) — natively implemented by OpenAI, Groq, Grok, OpenRouter, vLLM, SGLang, Together, etc. -- **Responses** (`/v1/responses`) — OpenAI's reference implementation plus OpenRouter's beta routing implementation (which fans out to Anthropic, Google, and other underlying models). - -"OpenAI-compatible" is the actual industry term for this family of wire formats (cf. Vercel's `@ai-sdk/openai-compatible`, LiteLLM's "OpenAI-compatible endpoint", BentoML / Lightning AI docs). The renamed package makes the boundary explicit: it holds the protocol, while OpenAI-specific tools, models, and behaviors continue to live in `@tanstack/ai-openai`. - -No runtime behavior changes. Class names (`OpenAICompatibleChatCompletionsTextAdapter`, `OpenAICompatibleResponsesTextAdapter`, …) and protected hook contracts are unchanged. Consumer packages (`ai-openai`, `ai-openrouter`, `ai-groq`, `ai-grok`) only update their internal import paths — public API is unchanged. 
- -If you were importing from `@tanstack/openai-base` directly (uncommon — the package was not yet documented as a public extension point), update your imports: - -```diff -- import { OpenAICompatibleChatCompletionsTextAdapter } from '@tanstack/openai-base' -+ import { OpenAICompatibleChatCompletionsTextAdapter } from '@tanstack/ai-openai-compatible' -``` - -`@tanstack/openai-base@0.2.x` remains published on npm for anyone with a pinned lockfile reference but will receive no further updates. diff --git a/packages/typescript/ai-grok/package.json b/packages/typescript/ai-grok/package.json index 254d7c8e7..46f1a7b6f 100644 --- a/packages/typescript/ai-grok/package.json +++ b/packages/typescript/ai-grok/package.json @@ -44,7 +44,7 @@ "adapter" ], "dependencies": { - "@tanstack/ai-openai-compatible": "workspace:*", + "@tanstack/openai-base": "workspace:*", "@tanstack/ai-utils": "workspace:*", "openai": "^6.9.1" }, diff --git a/packages/typescript/ai-grok/src/adapters/text.ts b/packages/typescript/ai-grok/src/adapters/text.ts index 1324e0556..0a25fa3fb 100644 --- a/packages/typescript/ai-grok/src/adapters/text.ts +++ b/packages/typescript/ai-grok/src/adapters/text.ts @@ -1,5 +1,5 @@ import OpenAI from 'openai' -import { OpenAICompatibleChatCompletionsTextAdapter } from '@tanstack/ai-openai-compatible' +import { OpenAIBaseChatCompletionsTextAdapter } from '@tanstack/openai-base' import { getGrokApiKeyFromEnv, withGrokDefaults } from '../utils/client' import type { GROK_CHAT_MODELS, @@ -7,7 +7,6 @@ import type { ResolveInputModalities, ResolveProviderOptions, } from '../model-meta' -import type OpenAI_SDK from 'openai' import type { Modality } from '@tanstack/ai' import type { GrokMessageMetadataByModality } from '../message-types' import type { GrokClientConfig } from '../utils' @@ -36,9 +35,9 @@ export type { ExternalTextProviderOptions as GrokTextProviderOptions } from '../ * Tree-shakeable adapter for Grok chat/text completion functionality. * Uses OpenAI-compatible Chat Completions API (not Responses API). * - * Delegates implementation to {@link OpenAICompatibleChatCompletionsTextAdapter} - * from `@tanstack/ai-openai-compatible` and threads Grok-specific tool-capability typing - * through the 5th generic of the base class. + * Delegates implementation to {@link OpenAIBaseChatCompletionsTextAdapter} + * from `@tanstack/openai-base` and threads Grok-specific tool-capability + * typing through the 5th generic of the base class. 
*/ export class GrokTextAdapter< TModel extends (typeof GROK_CHAT_MODELS)[number], @@ -47,7 +46,7 @@ export class GrokTextAdapter< ResolveInputModalities, TToolCapabilities extends ReadonlyArray = ResolveToolCapabilities, -> extends OpenAICompatibleChatCompletionsTextAdapter< +> extends OpenAIBaseChatCompletionsTextAdapter< TModel, TProviderOptions, TInputModalities, @@ -57,25 +56,8 @@ export class GrokTextAdapter< readonly kind = 'text' as const readonly name = 'grok' as const - protected client: OpenAI - constructor(config: GrokTextConfig, model: TModel) { - super(model, 'grok') - this.client = new OpenAI(withGrokDefaults(config)) - } - - protected async callChatCompletion( - params: OpenAI_SDK.Chat.Completions.ChatCompletionCreateParamsNonStreaming, - requestOptions: { signal?: AbortSignal | null; headers?: HeadersInit }, - ): Promise { - return await this.client.chat.completions.create(params, requestOptions) - } - - protected async callChatCompletionStream( - params: OpenAI_SDK.Chat.Completions.ChatCompletionCreateParamsStreaming, - requestOptions: { signal?: AbortSignal | null; headers?: HeadersInit }, - ): Promise> { - return await this.client.chat.completions.create(params, requestOptions) + super(model, 'grok', new OpenAI(withGrokDefaults(config))) } } diff --git a/packages/typescript/ai-grok/src/tools/index.ts b/packages/typescript/ai-grok/src/tools/index.ts index 6f39deef9..95a570117 100644 --- a/packages/typescript/ai-grok/src/tools/index.ts +++ b/packages/typescript/ai-grok/src/tools/index.ts @@ -2,4 +2,4 @@ export { type ChatCompletionFunctionTool as FunctionTool, convertFunctionToolToChatCompletionsFormat as convertFunctionToolToAdapterFormat, convertToolsToChatCompletionsFormat as convertToolsToProviderFormat, -} from '@tanstack/ai-openai-compatible' +} from '@tanstack/openai-base' diff --git a/packages/typescript/ai-grok/src/utils/schema-converter.ts b/packages/typescript/ai-grok/src/utils/schema-converter.ts index 0d1c7aa5b..20c2d36d3 100644 --- a/packages/typescript/ai-grok/src/utils/schema-converter.ts +++ b/packages/typescript/ai-grok/src/utils/schema-converter.ts @@ -1,2 +1,2 @@ export { transformNullsToUndefined } from '@tanstack/ai-utils' -export { makeStructuredOutputCompatible as makeGrokStructuredOutputCompatible } from '@tanstack/ai-openai-compatible' +export { makeStructuredOutputCompatible as makeGrokStructuredOutputCompatible } from '@tanstack/openai-base' diff --git a/packages/typescript/ai-groq/package.json b/packages/typescript/ai-groq/package.json index a16486515..408e44e17 100644 --- a/packages/typescript/ai-groq/package.json +++ b/packages/typescript/ai-groq/package.json @@ -52,7 +52,7 @@ "zod": "^4.0.0" }, "dependencies": { - "@tanstack/ai-openai-compatible": "workspace:*", + "@tanstack/openai-base": "workspace:*", "@tanstack/ai-utils": "workspace:*", "openai": "^6.9.1" } diff --git a/packages/typescript/ai-groq/src/adapters/text.ts b/packages/typescript/ai-groq/src/adapters/text.ts index c92e52e93..879342187 100644 --- a/packages/typescript/ai-groq/src/adapters/text.ts +++ b/packages/typescript/ai-groq/src/adapters/text.ts @@ -1,10 +1,9 @@ import OpenAI from 'openai' -import { OpenAICompatibleChatCompletionsTextAdapter } from '@tanstack/ai-openai-compatible' +import { OpenAIBaseChatCompletionsTextAdapter } from '@tanstack/openai-base' import { getGroqApiKeyFromEnv, withGroqDefaults } from '../utils/client' import { makeGroqStructuredOutputCompatible } from '../utils/schema-converter' -import type OpenAI_SDK from 'openai' import type { Modality, 
TextOptions } from '@tanstack/ai' -import type { ChatCompletionChunk } from '@tanstack/ai-openai-compatible' +import type { ChatCompletionChunk } from 'openai/resources/chat/completions/completions' import type { GROQ_CHAT_MODELS, GroqChatModelToolCapabilitiesByName, @@ -49,7 +48,7 @@ export class GroqTextAdapter< ResolveInputModalities, TToolCapabilities extends ReadonlyArray = ResolveToolCapabilities, -> extends OpenAICompatibleChatCompletionsTextAdapter< +> extends OpenAIBaseChatCompletionsTextAdapter< TModel, TProviderOptions, TInputModalities, @@ -59,25 +58,8 @@ readonly kind = 'text' as const readonly name = 'groq' as const - protected client: OpenAI - constructor(config: GroqTextConfig, model: TModel) { - super(model, 'groq') - this.client = new OpenAI(withGroqDefaults(config)) - } - - protected async callChatCompletion( - params: OpenAI_SDK.Chat.Completions.ChatCompletionCreateParamsNonStreaming, - requestOptions: { signal?: AbortSignal | null; headers?: HeadersInit }, - ): Promise { - return this.client.chat.completions.create(params, requestOptions) - } - - protected async callChatCompletionStream( - params: OpenAI_SDK.Chat.Completions.ChatCompletionCreateParamsStreaming, - requestOptions: { signal?: AbortSignal | null; headers?: HeadersInit }, - ): Promise> { - return this.client.chat.completions.create(params, requestOptions) + super(model, 'groq', new OpenAI(withGroqDefaults(config))) } protected override makeStructuredOutputCompatible( diff --git a/packages/typescript/ai-groq/src/utils/schema-converter.ts b/packages/typescript/ai-groq/src/utils/schema-converter.ts index 814e05303..d5d5e01b9 100644 --- a/packages/typescript/ai-groq/src/utils/schema-converter.ts +++ b/packages/typescript/ai-groq/src/utils/schema-converter.ts @@ -1,4 +1,4 @@ -import { makeStructuredOutputCompatible } from '@tanstack/ai-openai-compatible' +import { makeStructuredOutputCompatible } from '@tanstack/openai-base' import { transformNullsToUndefined } from '@tanstack/ai-utils' export { transformNullsToUndefined } @@ -62,7 +62,7 @@ function removeEmptyRequired(schema: Record): Record { /** * Recursively normalise object schemas so any `{ type: 'object' }` node * without `properties` gets an empty `properties: {}` object. The - * ai-openai-compatible transformer only descends into objects that already have + * openai-base transformer only descends into objects that already have * `properties` set, so a Zod `z.object({})` nested inside `properties`, * `items`, `additionalProperties`, or a combinator branch would otherwise * skip the strict-mode rewrite and fail Groq validation. @@ -140,7 +140,7 @@ export function makeGroqStructuredOutputCompatible( schema: Record, originalRequired: Array = [], ): Record { - // Recursively patch every `{ type: 'object' }` node so the ai-openai-compatible + // Recursively patch every `{ type: 'object' }` node so the openai-base // transformer descends into nested empty objects too. const normalised = normalizeObjectSchemas(schema) diff --git a/packages/typescript/ai-groq/tests/groq-adapter.test.ts b/packages/typescript/ai-groq/tests/groq-adapter.test.ts index c058d621f..f09944346 100644 --- a/packages/typescript/ai-groq/tests/groq-adapter.test.ts +++ b/packages/typescript/ai-groq/tests/groq-adapter.test.ts @@ -20,7 +20,7 @@ const testLogger = resolveDebugOption(false) // Stub the OpenAI SDK so adapter construction doesn't open a real network // handle.
The per-test mock client is injected post-construction via // `setupMockSdkClient` (mirrors the ai-grok pattern). We avoid relying on -// vi.mock to intercept transitive openai imports — the built ai-openai-compatible +// vi.mock to intercept transitive openai imports — the built openai-base // dist resolves `openai` independently and is unaffected by vi.mock here. vi.mock('openai', () => { return { diff --git a/packages/typescript/ai-groq/tests/schema-converter.test.ts b/packages/typescript/ai-groq/tests/schema-converter.test.ts index 38a74021a..d05fcdb15 100644 --- a/packages/typescript/ai-groq/tests/schema-converter.test.ts +++ b/packages/typescript/ai-groq/tests/schema-converter.test.ts @@ -65,7 +65,7 @@ describe('makeGroqStructuredOutputCompatible', () => { it('should normalise nested empty-object schemas in properties', () => { // Reproduces the bug where a nested `{ type: 'object' }` without - // `properties` slipped past the ai-openai-compatible transformer because the + // `properties` slipped past the openai-base transformer because the // ai-groq layer only normalised the top-level node. const schema = { type: 'object', @@ -79,7 +79,7 @@ expect(result.properties.child.type).toBe('object') expect(result.properties.child.properties).toEqual({}) - // ai-openai-compatible sets additionalProperties: false on every rewritten object + // openai-base sets additionalProperties: false on every rewritten object expect(result.properties.child.additionalProperties).toBe(false) }) diff --git a/packages/typescript/ai-openai-compatible/src/index.ts b/packages/typescript/ai-openai-compatible/src/index.ts deleted file mode 100644 index 98aa8065b..000000000 --- a/packages/typescript/ai-openai-compatible/src/index.ts +++ /dev/null @@ -1,35 +0,0 @@ -export { makeStructuredOutputCompatible } from './utils/schema-converter' -export * from './tools/index' -export { OpenAICompatibleChatCompletionsTextAdapter } from './adapters/chat-completions-text' -// Wire-format types for subclasses implementing the `callChatCompletion*` -// hooks. Locally defined (see ./types/chat-completions) so this package's -// emitted .d.ts has zero `from 'openai'` references — downstream adapters -// (e.g. ai-openrouter) can implement the contract without installing the -// openai SDK. -export type { - ChatCompletion, - ChatCompletionChunk, - ChatCompletionCreateParamsNonStreaming, - ChatCompletionCreateParamsStreaming, -} from './types/chat-completions' -export { - convertFunctionToolToChatCompletionsFormat, - convertToolsToChatCompletionsFormat, - type ChatCompletionFunctionTool, -} from './adapters/chat-completions-tool-converter' -export { OpenAICompatibleResponsesTextAdapter } from './adapters/responses-text' -// Wire-format types for subclasses implementing the `callResponse*` hooks.
-export type { - Response as ResponsesResponse, - ResponseCreateParams, - ResponseCreateParamsNonStreaming, - ResponseCreateParamsStreaming, - ResponseInput, - ResponseInputContent, - ResponseStreamEvent, -} from './types/responses' -export { - convertFunctionToolToResponsesFormat, - convertToolsToResponsesFormat, - type ResponsesFunctionTool, -} from './adapters/responses-tool-converter' diff --git a/packages/typescript/ai-openai-compatible/src/types/chat-completions.ts b/packages/typescript/ai-openai-compatible/src/types/chat-completions.ts deleted file mode 100644 index 04cdf54ab..000000000 --- a/packages/typescript/ai-openai-compatible/src/types/chat-completions.ts +++ /dev/null @@ -1,237 +0,0 @@ -/** - * Local wire-format types for the OpenAI Chat Completions API. - * - * These are not a full clone of `openai`'s types — only the fields the base - * adapter reads/writes and the public abstract-method surface subclasses - * must satisfy. Structural compatibility means subclasses can still hand the - * openai SDK's own `ChatCompletion` / `ChatCompletionChunk` objects to the - * base without conversion: the SDK shapes are supersets of ours. - * - * Open-ended index signatures (`[key: string]: unknown`) on request and - * message shapes let subclasses spread their own SDK params / message shapes - * through `modelOptions` without TS complaining about unknown fields. - */ - -export interface ChatCompletionTool { - type: 'function' - function: { - name: string - description?: string - parameters?: Record - strict?: boolean | null - } -} - -export interface ChatCompletionContentPartText { - type: 'text' - text: string -} - -export interface ChatCompletionContentPartImage { - type: 'image_url' - image_url: { - url: string - detail?: 'auto' | 'low' | 'high' - } -} - -export interface ChatCompletionContentPartInputAudio { - type: 'input_audio' - input_audio: { - data: string - format: 'wav' | 'mp3' - } -} - -export interface ChatCompletionContentPartFile { - type: 'file' - file: { - file_data?: string - file_id?: string - filename?: string - } -} - -export type ChatCompletionContentPart = - | ChatCompletionContentPartText - | ChatCompletionContentPartImage - | ChatCompletionContentPartInputAudio - | ChatCompletionContentPartFile - -export interface ChatCompletionSystemMessageParam { - role: 'system' - content: string | Array - name?: string -} - -export interface ChatCompletionUserMessageParam { - role: 'user' - content: string | Array - name?: string -} - -export interface ChatCompletionMessageFunctionToolCall { - id: string - type: 'function' - function: { - name: string - arguments: string - } -} - -export interface ChatCompletionMessageCustomToolCall { - id: string - type: 'custom' - custom: { - name: string - input: string - } -} - -export type ChatCompletionMessageToolCall = - | ChatCompletionMessageFunctionToolCall - | ChatCompletionMessageCustomToolCall - -export interface ChatCompletionAssistantMessageParam { - role: 'assistant' - content?: string | Array | null - name?: string - tool_calls?: Array - refusal?: string | null -} - -export interface ChatCompletionToolMessageParam { - role: 'tool' - content: string | Array - tool_call_id: string -} - -export interface ChatCompletionDeveloperMessageParam { - role: 'developer' - content: string | Array - name?: string -} - -export type ChatCompletionMessageParam = - | ChatCompletionSystemMessageParam - | ChatCompletionUserMessageParam - | ChatCompletionAssistantMessageParam - | ChatCompletionToolMessageParam - | 
ChatCompletionDeveloperMessageParam - -export type ChatCompletionToolChoiceOption = - | 'none' - | 'auto' - | 'required' - | { - type: 'function' - function: { name: string } - } - -export interface ChatCompletionStreamOptions { - include_usage?: boolean -} - -export interface ChatCompletionCreateParamsBase { - model: string - messages: Array - temperature?: number | null - top_p?: number | null - max_tokens?: number | null - max_completion_tokens?: number | null - n?: number | null - tools?: Array - tool_choice?: ChatCompletionToolChoiceOption - response_format?: - | { type: 'text' } - | { type: 'json_object' } - | { - type: 'json_schema' - json_schema: { - name: string - description?: string - schema?: Record - strict?: boolean | null - } - } - stream_options?: ChatCompletionStreamOptions | null - user?: string - metadata?: Record | null -} - -export interface ChatCompletionCreateParamsNonStreaming extends ChatCompletionCreateParamsBase { - stream?: false | null -} - -export interface ChatCompletionCreateParamsStreaming extends ChatCompletionCreateParamsBase { - stream: true -} - -export interface CompletionUsage { - prompt_tokens: number - completion_tokens: number - total_tokens: number -} - -export interface ChatCompletionMessage { - role: 'assistant' - content: string | null - refusal?: string | null - tool_calls?: Array -} - -export interface ChatCompletion { - id: string - object: 'chat.completion' - created: number - model: string - choices: Array<{ - index: number - message: ChatCompletionMessage - finish_reason: ChatCompletionFinishReason | null - logprobs?: unknown - }> - usage?: CompletionUsage | null - system_fingerprint?: string -} - -export type ChatCompletionFinishReason = - | 'stop' - | 'length' - | 'tool_calls' - | 'content_filter' - | 'function_call' - -export interface ChatCompletionChunkChoiceDeltaToolCall { - index: number - id?: string - type?: 'function' - function?: { - name?: string - arguments?: string - } -} - -export interface ChatCompletionChunkChoiceDelta { - role?: 'system' | 'user' | 'assistant' | 'tool' | 'developer' - content?: string | null - tool_calls?: Array - refusal?: string | null -} - -export interface ChatCompletionChunkChoice { - index: number - delta: ChatCompletionChunkChoiceDelta - finish_reason: ChatCompletionFinishReason | null - logprobs?: unknown -} - -export interface ChatCompletionChunk { - id: string - object: 'chat.completion.chunk' - created: number - model: string - choices: Array - usage?: CompletionUsage | null - system_fingerprint?: string -} diff --git a/packages/typescript/ai-openai-compatible/src/types/responses.ts b/packages/typescript/ai-openai-compatible/src/types/responses.ts deleted file mode 100644 index 048d73e77..000000000 --- a/packages/typescript/ai-openai-compatible/src/types/responses.ts +++ /dev/null @@ -1,281 +0,0 @@ -/** - * Local wire-format types for the OpenAI Responses API. - * - * Same philosophy as `chat-completions.ts`: model only the fields the base - * adapter reads/writes plus the abstract surface subclasses must satisfy. - * Structural compatibility means subclasses can still hand the openai SDK's - * `ResponseStreamEvent` discriminated union into the base — each openai - * variant is a subtype of one of ours, or falls through into the catch-all - * `{ type: string }` arm we never narrow on. 
- */ - -import type { Tool } from '@tanstack/ai' - -export interface ResponseInputText { - type: 'input_text' - text: string -} - -export interface ResponseInputImage { - type: 'input_image' - image_url?: string - file_id?: string - detail?: 'auto' | 'low' | 'high' -} - -export interface ResponseInputFile { - type: 'input_file' - file_id?: string - file_url?: string - file_data?: string - filename?: string -} - -export type ResponseInputContent = - | ResponseInputText - | ResponseInputImage - | ResponseInputFile - -export interface ResponseInputMessage { - type?: 'message' - role: 'user' | 'assistant' | 'system' | 'developer' - content: string | Array -} - -export interface ResponseFunctionToolCallInput { - type: 'function_call' - call_id: string - name: string - arguments: string -} - -export interface ResponseFunctionCallOutput { - type: 'function_call_output' - call_id: string - output: string -} - -export type ResponseInputItem = - | ResponseInputMessage - | ResponseFunctionToolCallInput - | ResponseFunctionCallOutput - -export type ResponseInput = Array - -export interface ResponseOutputText { - type: 'output_text' - text: string - annotations?: Array -} - -export interface ResponseOutputRefusal { - type: 'refusal' - refusal: string -} - -export interface ResponseOutputReasoningText { - type: 'reasoning_text' - text: string -} - -export type ResponseOutputContent = - | ResponseOutputText - | ResponseOutputRefusal - | ResponseOutputReasoningText - -/** - * A single item in `Response.output`. The Responses API returns ~12 variants - * (message, function_call, reasoning, file_search_call, web_search_call, - * computer_call, image_generation_call, code_interpreter_call, mcp_call, - * local_shell_call, etc.); we model it as a loose interface because the - * base reads heterogeneous optional fields (`id`, `name`, `arguments`, - * `content`) across multiple variants without discriminated narrowing. - * - * Subclasses passing the openai SDK's richer discriminated union satisfy - * this shape structurally. - */ -export interface ResponseOutputItem { - type: string - id?: string - name?: string - arguments?: string - call_id?: string - status?: string - role?: 'assistant' - // The base only iterates `content` after narrowing on `type === 'message'`, - // where the wire format guarantees it. Modelling it as required keeps the - // narrowed access free of `if (content)` guards in the base; non-message - // items just won't carry it at runtime — the base doesn't reach for it. - content: Array -} - -export interface ResponseUsage { - input_tokens: number - output_tokens: number - total_tokens: number -} - -export interface Response { - id: string - object: 'response' - created_at: number - model: string - status?: string - output: Array - usage?: ResponseUsage - error: { message: string; code?: string } | null - incomplete_details: { reason: string } | null -} - -export interface ResponseCreateParamsBase { - model: string - input: string | ResponseInput - instructions?: string | null - temperature?: number | null - top_p?: number | null - max_output_tokens?: number | null - metadata?: Record | null - tools?: Array - tool_choice?: unknown - /** Response-format config (json_schema lives here on the Responses API, - * unlike `response_format` on Chat Completions). 
*/ - text?: { - format?: - | { type: 'text' } - | { type: 'json_object' } - | { - type: 'json_schema' - name: string - description?: string - schema?: Record - strict?: boolean | null - } - } -} - -export interface ResponseCreateParamsNonStreaming extends ResponseCreateParamsBase { - stream?: false | null -} - -export interface ResponseCreateParamsStreaming extends ResponseCreateParamsBase { - stream: true -} - -export type ResponseCreateParams = - | ResponseCreateParamsNonStreaming - | ResponseCreateParamsStreaming - -/** - * Streamed events from the Responses API. Modelled as a discriminated union - * over the `type` literal: variants the base narrows on declare every field - * it requires for that branch, so accessing `chunk.response.model` after a - * `chunk.type === 'response.failed'` check typechecks without a guard. - * - * The trailing `{ type: string }` arm is the catch-all for event types the - * base never tests — openai's SDK union has many we ignore (web_search_call.*, - * file_search_call.*, mcp_call.*, etc.). Subclasses can still pass openai's - * full union here; each openai variant is structurally assignable to one of - * ours or the catch-all. - */ -export type ResponseStreamEvent = - | { - type: 'response.created' - response: Response - sequence_number?: number - } - | { - type: 'response.in_progress' - response: Response - sequence_number?: number - } - | { - type: 'response.failed' - response: Response - sequence_number?: number - } - | { - type: 'response.incomplete' - response: Response - sequence_number?: number - } - | { - type: 'response.completed' - response: Response - sequence_number?: number - } - | { - type: 'response.output_text.delta' - delta: string | Array - item_id: string - output_index: number - content_index: number - sequence_number?: number - } - | { - type: 'response.reasoning_text.delta' - delta: string | Array - item_id: string - output_index: number - content_index: number - sequence_number?: number - } - | { - type: 'response.reasoning_summary_text.delta' - delta: string - item_id?: string - output_index?: number - summary_index?: number - sequence_number?: number - } - | { - type: 'response.content_part.added' - part: ResponseOutputContent - item_id: string - output_index: number - content_index: number - sequence_number?: number - } - | { - type: 'response.content_part.done' - part: ResponseOutputContent - item_id: string - output_index: number - content_index: number - sequence_number?: number - } - | { - type: 'response.output_item.added' - item: ResponseOutputItem - output_index: number - sequence_number?: number - } - | { - type: 'response.output_item.done' - item: ResponseOutputItem - output_index: number - sequence_number?: number - } - | { - type: 'response.function_call_arguments.delta' - delta: string - item_id: string - output_index: number - sequence_number?: number - } - | { - type: 'response.function_call_arguments.done' - arguments: string - item_id: string - output_index: number - sequence_number?: number - } - | { - type: 'error' - message: string - code?: string - sequence_number?: number - } - -// Re-export the framework `Tool` only because subclass call sites -// frequently import it alongside Responses types — no semantic dependency. 
-export type { Tool } diff --git a/packages/typescript/ai-openai-compatible/src/types/tools.ts b/packages/typescript/ai-openai-compatible/src/types/tools.ts deleted file mode 100644 index 7d8bebc64..000000000 --- a/packages/typescript/ai-openai-compatible/src/types/tools.ts +++ /dev/null @@ -1,166 +0,0 @@ -/** - * Local tool-config wire shapes for the OpenAI Responses API tool catalogue. - * - * Each interface here matches the OpenAI Responses API tool wire format - * (the `type` discriminator + the documented config fields). Don't add - * `[key: string]: unknown` to these — it makes `keyof T` resolve to `string`, - * so `Omit` drops every named required field along with - * `type`. The convert-from-metadata helpers in `src/tools/*.ts` rely on that - * `Omit` pattern. - */ - -import type { Tool } from '@tanstack/ai' - -export type { Tool } - -// ───────────────────────────────────────────────────────────────────────── -// Apply Patch -// ───────────────────────────────────────────────────────────────────────── - -export interface ApplyPatchToolConfig { - type: 'apply_patch' -} - -// ───────────────────────────────────────────────────────────────────────── -// Code Interpreter -// ───────────────────────────────────────────────────────────────────────── - -export interface CodeInterpreterToolConfig { - type: 'code_interpreter' - container: string | { type: 'auto'; file_ids?: Array } -} - -// ───────────────────────────────────────────────────────────────────────── -// Computer Use -// ───────────────────────────────────────────────────────────────────────── - -export interface ComputerUseToolConfig { - type: 'computer_use_preview' - display_width: number - display_height: number - environment: 'mac' | 'windows' | 'ubuntu' | 'linux' | 'browser' -} - -// ───────────────────────────────────────────────────────────────────────── -// Custom (free-form tool with grammar/format) -// ───────────────────────────────────────────────────────────────────────── - -export interface CustomToolConfig { - type: 'custom' - name: string - description?: string - format?: - | { type: 'text' } - | { - type: 'grammar' - grammar: { definition: string; syntax: 'lark' | 'regex' } - } -} - -// ───────────────────────────────────────────────────────────────────────── -// File Search -// ───────────────────────────────────────────────────────────────────────── - -export interface FileSearchToolConfig { - type: 'file_search' - vector_store_ids: Array - max_num_results?: number - ranking_options?: { - ranker?: string - score_threshold?: number - } - filters?: unknown -} - -// ───────────────────────────────────────────────────────────────────────── -// Function (Responses-flavoured; flatter than Chat Completions) -// ───────────────────────────────────────────────────────────────────────── - -export interface FunctionToolConfig { - type: 'function' - name: string - description?: string | null - parameters: Record | null - strict: boolean | null -} - -// ───────────────────────────────────────────────────────────────────────── -// Image Generation -// ───────────────────────────────────────────────────────────────────────── - -export interface ImageGenerationToolConfig { - type: 'image_generation' - background?: 'transparent' | 'opaque' | 'auto' - model?: string - moderation?: 'auto' | 'low' - output_compression?: number - output_format?: 'png' | 'webp' | 'jpeg' - partial_images?: number - quality?: 'low' | 'medium' | 'high' | 'auto' - size?: '1024x1024' | '1024x1536' | '1536x1024' | 'auto' -} - -// 
───────────────────────────────────────────────────────────────────────── -// Local Shell -// ───────────────────────────────────────────────────────────────────────── - -export interface LocalShellToolConfig { - type: 'local_shell' -} - -// ───────────────────────────────────────────────────────────────────────── -// MCP -// ───────────────────────────────────────────────────────────────────────── - -export interface MCPToolConfig { - type: 'mcp' - server_label: string - server_description?: string - server_url?: string - connector_id?: string - authorization?: string - headers?: Record | null - require_approval?: unknown - allowed_tools?: unknown -} - -// ───────────────────────────────────────────────────────────────────────── -// Shell (function-shaped shell — distinct from local_shell) -// ───────────────────────────────────────────────────────────────────────── - -export interface ShellToolConfig { - type: 'shell' -} - -// ───────────────────────────────────────────────────────────────────────── -// Web Search (branded) -// ───────────────────────────────────────────────────────────────────────── - -export interface WebSearchToolConfig { - type: 'web_search' - filters?: { allowed_domains?: Array } | null - user_location?: { - type: 'approximate' - city?: string - country?: string - region?: string - timezone?: string - } | null - search_context_size?: 'low' | 'medium' | 'high' -} - -// ───────────────────────────────────────────────────────────────────────── -// Web Search Preview -// ───────────────────────────────────────────────────────────────────────── - -export interface WebSearchPreviewToolConfig { - type: 'web_search_preview' - search_context_size?: 'low' | 'medium' | 'high' - user_location?: { - type: 'approximate' - city?: string - country?: string - region?: string - timezone?: string - } | null -} diff --git a/packages/typescript/ai-openai/package.json b/packages/typescript/ai-openai/package.json index d0ccb241b..d81885287 100644 --- a/packages/typescript/ai-openai/package.json +++ b/packages/typescript/ai-openai/package.json @@ -44,7 +44,7 @@ "adapter" ], "dependencies": { - "@tanstack/ai-openai-compatible": "workspace:*", + "@tanstack/openai-base": "workspace:*", "@tanstack/ai-utils": "workspace:*", "openai": "^6.9.1" }, @@ -58,6 +58,6 @@ "@tanstack/ai-client": "workspace:*", "@vitest/coverage-v8": "4.0.14", "vite": "^7.2.7", - "zod": "^4.2.0" + "zod": "^4.3.0" } } diff --git a/packages/typescript/ai-openai/src/adapters/text.ts b/packages/typescript/ai-openai/src/adapters/text.ts index 6e37bd0fd..a17b95267 100644 --- a/packages/typescript/ai-openai/src/adapters/text.ts +++ b/packages/typescript/ai-openai/src/adapters/text.ts @@ -1,5 +1,5 @@ import OpenAI from 'openai' -import { OpenAICompatibleResponsesTextAdapter } from '@tanstack/ai-openai-compatible' +import { OpenAIBaseResponsesTextAdapter } from '@tanstack/openai-base' import { validateTextProviderOptions } from '../text/text-provider-options' import { convertToolsToProviderFormat } from '../tools' import { getOpenAIApiKeyFromEnv } from '../utils/client' @@ -10,14 +10,7 @@ import type { OpenAIChatModelToolCapabilitiesByName, OpenAIModelInputModalitiesByName, } from '../model-meta' -import type { - ResponseCreateParams, - ResponseCreateParamsNonStreaming, - ResponseCreateParamsStreaming, - ResponseStreamEvent, - ResponsesResponse, -} from '@tanstack/ai-openai-compatible' -import type OpenAI_SDK from 'openai' +import type { ResponseCreateParams } from 'openai/resources/responses/responses' import type { Modality, 
TextOptions } from '@tanstack/ai' import type { ExternalTextProviderOptions, @@ -75,9 +68,12 @@ type ResolveToolCapabilities = * OpenAI Text (Chat) Adapter * * Tree-shakeable adapter for OpenAI chat/text completion functionality. - * Delegates implementation to {@link OpenAICompatibleResponsesTextAdapter} from - * `@tanstack/ai-openai-compatible` and threads OpenAI-specific tool-capability typing - * through the 5th generic of the base class. + * Delegates implementation to {@link OpenAIBaseResponsesTextAdapter} from + * `@tanstack/openai-base`. The base calls `openai.responses.create` + * directly; this subclass just hands it a configured client and overrides + * `mapOptionsToRequest` to route through OpenAI's full tool converter + * (supporting file_search, web_search, etc.) and to apply provider option + * validation. */ export class OpenAITextAdapter< TModel extends OpenAIChatModel, @@ -86,7 +82,7 @@ export class OpenAITextAdapter< ResolveInputModalities, TToolCapabilities extends ReadonlyArray = ResolveToolCapabilities, -> extends OpenAICompatibleResponsesTextAdapter< +> extends OpenAIBaseResponsesTextAdapter< TModel, TProviderOptions, TInputModalities, @@ -96,39 +92,8 @@ export class OpenAITextAdapter< readonly kind = 'text' as const readonly name = 'openai' as const - protected client: OpenAI - constructor(config: OpenAITextConfig, model: TModel) { - super(model, 'openai') - this.client = new OpenAI(config) - } - - // The override signatures use the local protocol types from - // `@tanstack/ai-openai-compatible` so we stay variance-compatible with the - // base. Inside the body we still call the openai SDK; casting at the SDK - // boundary (where we already own the runtime contract) is the cleanest - // place to land the two-type-name reality. 
- - protected async callResponse( - params: ResponseCreateParamsNonStreaming, - requestOptions: { signal?: AbortSignal | null; headers?: HeadersInit }, - ): Promise { - const response = await this.client.responses.create( - params as unknown as OpenAI_SDK.Responses.ResponseCreateParamsNonStreaming, - requestOptions, - ) - return response as unknown as ResponsesResponse - } - - protected async callResponseStream( - params: ResponseCreateParamsStreaming, - requestOptions: { signal?: AbortSignal | null; headers?: HeadersInit }, - ): Promise> { - const stream = await this.client.responses.create( - params as unknown as OpenAI_SDK.Responses.ResponseCreateParamsStreaming, - requestOptions, - ) - return stream as unknown as AsyncIterable + super(model, 'openai', new OpenAI(config)) } /** diff --git a/packages/typescript/ai-openai/src/adapters/video.ts b/packages/typescript/ai-openai/src/adapters/video.ts index 7b009237d..133f204f9 100644 --- a/packages/typescript/ai-openai/src/adapters/video.ts +++ b/packages/typescript/ai-openai/src/adapters/video.ts @@ -137,7 +137,7 @@ export class OpenAIVideoAdapter< getContent?: (id: string) => Promise download?: (id: string) => Promise } { - return (this.client as unknown as { videos: any }).videos + return (this.client as { videos: any }).videos } async getVideoStatus(jobId: string): Promise { diff --git a/packages/typescript/ai-openai/src/text/text-provider-options.ts b/packages/typescript/ai-openai/src/text/text-provider-options.ts index 5df05d315..e3e8be740 100644 --- a/packages/typescript/ai-openai/src/text/text-provider-options.ts +++ b/packages/typescript/ai-openai/src/text/text-provider-options.ts @@ -1,5 +1,5 @@ -import type { ResponseInput } from '@tanstack/ai-openai-compatible' import type OpenAI from 'openai' +import type { ResponseInput } from 'openai/resources/responses/responses' import type { ApplyPatchTool } from '../tools/apply-patch-tool' import type { CodeInterpreterTool } from '../tools/code-interpreter-tool' import type { ComputerUseTool } from '../tools/computer-use-tool' diff --git a/packages/typescript/ai-openai/src/tools/apply-patch-tool.ts b/packages/typescript/ai-openai/src/tools/apply-patch-tool.ts index 0cf7ee8c8..ab4ed63df 100644 --- a/packages/typescript/ai-openai/src/tools/apply-patch-tool.ts +++ b/packages/typescript/ai-openai/src/tools/apply-patch-tool.ts @@ -1,11 +1,11 @@ -import { applyPatchTool as baseApplyPatchTool } from '@tanstack/ai-openai-compatible' +import { applyPatchTool as baseApplyPatchTool } from '@tanstack/openai-base' import type { ProviderTool } from '@tanstack/ai' export { type ApplyPatchToolConfig, type ApplyPatchTool, convertApplyPatchToolToAdapterFormat, -} from '@tanstack/ai-openai-compatible' +} from '@tanstack/openai-base' export type OpenAIApplyPatchTool = ProviderTool<'openai', 'apply_patch'> diff --git a/packages/typescript/ai-openai/src/tools/code-interpreter-tool.ts b/packages/typescript/ai-openai/src/tools/code-interpreter-tool.ts index f4d0cd5f0..52c43d89f 100644 --- a/packages/typescript/ai-openai/src/tools/code-interpreter-tool.ts +++ b/packages/typescript/ai-openai/src/tools/code-interpreter-tool.ts @@ -1,12 +1,12 @@ -import { codeInterpreterTool as baseCodeInterpreterTool } from '@tanstack/ai-openai-compatible' +import { codeInterpreterTool as baseCodeInterpreterTool } from '@tanstack/openai-base' import type { ProviderTool } from '@tanstack/ai' -import type { CodeInterpreterToolConfig } from '@tanstack/ai-openai-compatible' +import type { CodeInterpreterToolConfig } from 
'@tanstack/openai-base' export { type CodeInterpreterToolConfig, type CodeInterpreterTool, convertCodeInterpreterToolToAdapterFormat, -} from '@tanstack/ai-openai-compatible' +} from '@tanstack/openai-base' export type OpenAICodeInterpreterTool = ProviderTool< 'openai', diff --git a/packages/typescript/ai-openai/src/tools/computer-use-tool.ts b/packages/typescript/ai-openai/src/tools/computer-use-tool.ts index 9035d698c..8226c7acd 100644 --- a/packages/typescript/ai-openai/src/tools/computer-use-tool.ts +++ b/packages/typescript/ai-openai/src/tools/computer-use-tool.ts @@ -1,12 +1,12 @@ -import { computerUseTool as baseComputerUseTool } from '@tanstack/ai-openai-compatible' +import { computerUseTool as baseComputerUseTool } from '@tanstack/openai-base' import type { ProviderTool } from '@tanstack/ai' -import type { ComputerUseToolConfig } from '@tanstack/ai-openai-compatible' +import type { ComputerUseToolConfig } from '@tanstack/openai-base' export { type ComputerUseToolConfig, type ComputerUseTool, convertComputerUseToolToAdapterFormat, -} from '@tanstack/ai-openai-compatible' +} from '@tanstack/openai-base' // The brand discriminator (`computer_use`) intentionally differs from the // runtime tool name (`computer_use_preview`). The brand matches the model-meta diff --git a/packages/typescript/ai-openai/src/tools/custom-tool.ts b/packages/typescript/ai-openai/src/tools/custom-tool.ts index 7067ec552..9d898a897 100644 --- a/packages/typescript/ai-openai/src/tools/custom-tool.ts +++ b/packages/typescript/ai-openai/src/tools/custom-tool.ts @@ -3,4 +3,4 @@ export { type CustomTool, convertCustomToolToAdapterFormat, customTool, -} from '@tanstack/ai-openai-compatible' +} from '@tanstack/openai-base' diff --git a/packages/typescript/ai-openai/src/tools/file-search-tool.ts b/packages/typescript/ai-openai/src/tools/file-search-tool.ts index 84cedb669..c90af1011 100644 --- a/packages/typescript/ai-openai/src/tools/file-search-tool.ts +++ b/packages/typescript/ai-openai/src/tools/file-search-tool.ts @@ -1,12 +1,12 @@ -import { fileSearchTool as baseFileSearchTool } from '@tanstack/ai-openai-compatible' +import { fileSearchTool as baseFileSearchTool } from '@tanstack/openai-base' import type { ProviderTool } from '@tanstack/ai' -import type { FileSearchToolConfig } from '@tanstack/ai-openai-compatible' +import type { FileSearchToolConfig } from '@tanstack/openai-base' export { type FileSearchToolConfig, type FileSearchTool, convertFileSearchToolToAdapterFormat, -} from '@tanstack/ai-openai-compatible' +} from '@tanstack/openai-base' export type OpenAIFileSearchTool = ProviderTool<'openai', 'file_search'> diff --git a/packages/typescript/ai-openai/src/tools/function-tool.ts b/packages/typescript/ai-openai/src/tools/function-tool.ts index ae94e03d3..fefd46433 100644 --- a/packages/typescript/ai-openai/src/tools/function-tool.ts +++ b/packages/typescript/ai-openai/src/tools/function-tool.ts @@ -2,4 +2,4 @@ export { type FunctionToolConfig, type FunctionTool, convertFunctionToolToAdapterFormat, -} from '@tanstack/ai-openai-compatible' +} from '@tanstack/openai-base' diff --git a/packages/typescript/ai-openai/src/tools/image-generation-tool.ts b/packages/typescript/ai-openai/src/tools/image-generation-tool.ts index 47e92d5a9..d621889f9 100644 --- a/packages/typescript/ai-openai/src/tools/image-generation-tool.ts +++ b/packages/typescript/ai-openai/src/tools/image-generation-tool.ts @@ -1,12 +1,12 @@ -import { imageGenerationTool as baseImageGenerationTool } from '@tanstack/ai-openai-compatible' +import { 
imageGenerationTool as baseImageGenerationTool } from '@tanstack/openai-base' import type { ProviderTool } from '@tanstack/ai' -import type { ImageGenerationToolConfig } from '@tanstack/ai-openai-compatible' +import type { ImageGenerationToolConfig } from '@tanstack/openai-base' export { type ImageGenerationToolConfig, type ImageGenerationTool, convertImageGenerationToolToAdapterFormat, -} from '@tanstack/ai-openai-compatible' +} from '@tanstack/openai-base' export type OpenAIImageGenerationTool = ProviderTool< 'openai', diff --git a/packages/typescript/ai-openai/src/tools/index.ts b/packages/typescript/ai-openai/src/tools/index.ts index e9ffb399f..7eff9fc69 100644 --- a/packages/typescript/ai-openai/src/tools/index.ts +++ b/packages/typescript/ai-openai/src/tools/index.ts @@ -1,4 +1,4 @@ -export { type OpenAITool } from '@tanstack/ai-openai-compatible' +export { type OpenAITool } from '@tanstack/openai-base' export { applyPatchTool, diff --git a/packages/typescript/ai-openai/src/tools/local-shell-tool.ts b/packages/typescript/ai-openai/src/tools/local-shell-tool.ts index 140f7ae1b..f49850b84 100644 --- a/packages/typescript/ai-openai/src/tools/local-shell-tool.ts +++ b/packages/typescript/ai-openai/src/tools/local-shell-tool.ts @@ -1,11 +1,11 @@ -import { localShellTool as baseLocalShellTool } from '@tanstack/ai-openai-compatible' +import { localShellTool as baseLocalShellTool } from '@tanstack/openai-base' import type { ProviderTool } from '@tanstack/ai' export { type LocalShellToolConfig, type LocalShellTool, convertLocalShellToolToAdapterFormat, -} from '@tanstack/ai-openai-compatible' +} from '@tanstack/openai-base' export type OpenAILocalShellTool = ProviderTool<'openai', 'local_shell'> diff --git a/packages/typescript/ai-openai/src/tools/mcp-tool.ts b/packages/typescript/ai-openai/src/tools/mcp-tool.ts index aebe919fb..73c6b95b7 100644 --- a/packages/typescript/ai-openai/src/tools/mcp-tool.ts +++ b/packages/typescript/ai-openai/src/tools/mcp-tool.ts @@ -1,13 +1,13 @@ -import { mcpTool as baseMcpTool } from '@tanstack/ai-openai-compatible' +import { mcpTool as baseMcpTool } from '@tanstack/openai-base' import type { ProviderTool } from '@tanstack/ai' -import type { MCPToolConfig } from '@tanstack/ai-openai-compatible' +import type { MCPToolConfig } from '@tanstack/openai-base' export { type MCPToolConfig, type MCPTool, validateMCPtool, convertMCPToolToAdapterFormat, -} from '@tanstack/ai-openai-compatible' +} from '@tanstack/openai-base' export type OpenAIMCPTool = ProviderTool<'openai', 'mcp'> diff --git a/packages/typescript/ai-openai/src/tools/shell-tool.ts b/packages/typescript/ai-openai/src/tools/shell-tool.ts index 1ca64208c..9f48503a4 100644 --- a/packages/typescript/ai-openai/src/tools/shell-tool.ts +++ b/packages/typescript/ai-openai/src/tools/shell-tool.ts @@ -1,11 +1,11 @@ -import { shellTool as baseShellTool } from '@tanstack/ai-openai-compatible' +import { shellTool as baseShellTool } from '@tanstack/openai-base' import type { ProviderTool } from '@tanstack/ai' export { type ShellToolConfig, type ShellTool, convertShellToolToAdapterFormat, -} from '@tanstack/ai-openai-compatible' +} from '@tanstack/openai-base' export type OpenAIShellTool = ProviderTool<'openai', 'shell'> diff --git a/packages/typescript/ai-openai/src/tools/tool-choice.ts b/packages/typescript/ai-openai/src/tools/tool-choice.ts index 04e2e7bb8..99df1824f 100644 --- a/packages/typescript/ai-openai/src/tools/tool-choice.ts +++ b/packages/typescript/ai-openai/src/tools/tool-choice.ts @@ -1 +1 @@ -export { 
type ToolChoice } from '@tanstack/ai-openai-compatible' +export { type ToolChoice } from '@tanstack/openai-base' diff --git a/packages/typescript/ai-openai/src/tools/tool-converter.ts b/packages/typescript/ai-openai/src/tools/tool-converter.ts index acc35f22f..3d78a1b18 100644 --- a/packages/typescript/ai-openai/src/tools/tool-converter.ts +++ b/packages/typescript/ai-openai/src/tools/tool-converter.ts @@ -1 +1 @@ -export { convertToolsToProviderFormat } from '@tanstack/ai-openai-compatible' +export { convertToolsToProviderFormat } from '@tanstack/openai-base' diff --git a/packages/typescript/ai-openai/src/tools/web-search-preview-tool.ts b/packages/typescript/ai-openai/src/tools/web-search-preview-tool.ts index 0fb9e4ff3..b822bafbf 100644 --- a/packages/typescript/ai-openai/src/tools/web-search-preview-tool.ts +++ b/packages/typescript/ai-openai/src/tools/web-search-preview-tool.ts @@ -1,12 +1,12 @@ -import { webSearchPreviewTool as baseWebSearchPreviewTool } from '@tanstack/ai-openai-compatible' +import { webSearchPreviewTool as baseWebSearchPreviewTool } from '@tanstack/openai-base' import type { ProviderTool } from '@tanstack/ai' -import type { WebSearchPreviewToolConfig } from '@tanstack/ai-openai-compatible' +import type { WebSearchPreviewToolConfig } from '@tanstack/openai-base' export { type WebSearchPreviewToolConfig, type WebSearchPreviewTool, convertWebSearchPreviewToolToAdapterFormat, -} from '@tanstack/ai-openai-compatible' +} from '@tanstack/openai-base' export type OpenAIWebSearchPreviewTool = ProviderTool< 'openai', diff --git a/packages/typescript/ai-openai/src/tools/web-search-tool.ts b/packages/typescript/ai-openai/src/tools/web-search-tool.ts index a0e42fb66..bdb39c944 100644 --- a/packages/typescript/ai-openai/src/tools/web-search-tool.ts +++ b/packages/typescript/ai-openai/src/tools/web-search-tool.ts @@ -1,12 +1,12 @@ -import { webSearchTool as baseWebSearchTool } from '@tanstack/ai-openai-compatible' +import { webSearchTool as baseWebSearchTool } from '@tanstack/openai-base' import type { ProviderTool } from '@tanstack/ai' -import type { WebSearchToolConfig } from '@tanstack/ai-openai-compatible' +import type { WebSearchToolConfig } from '@tanstack/openai-base' export { type WebSearchToolConfig, type WebSearchTool, convertWebSearchToolToAdapterFormat, -} from '@tanstack/ai-openai-compatible' +} from '@tanstack/openai-base' export type OpenAIWebSearchTool = ProviderTool<'openai', 'web_search'> diff --git a/packages/typescript/ai-openai/src/utils/schema-converter.ts b/packages/typescript/ai-openai/src/utils/schema-converter.ts index d85cc81d0..fb9ee165e 100644 --- a/packages/typescript/ai-openai/src/utils/schema-converter.ts +++ b/packages/typescript/ai-openai/src/utils/schema-converter.ts @@ -1,5 +1,5 @@ import { transformNullsToUndefined } from '@tanstack/ai-utils' -import { makeStructuredOutputCompatible } from '@tanstack/ai-openai-compatible' +import { makeStructuredOutputCompatible } from '@tanstack/openai-base' export { transformNullsToUndefined } diff --git a/packages/typescript/ai-openrouter/package.json b/packages/typescript/ai-openrouter/package.json index 87faf4e9a..892a80cbd 100644 --- a/packages/typescript/ai-openrouter/package.json +++ b/packages/typescript/ai-openrouter/package.json @@ -44,7 +44,6 @@ ], "dependencies": { "@openrouter/sdk": "0.12.14", - "@tanstack/ai-openai-compatible": "workspace:*", "@tanstack/ai-utils": "workspace:*" }, "devDependencies": { diff --git a/packages/typescript/ai-openrouter/src/adapters/responses-text.ts 
b/packages/typescript/ai-openrouter/src/adapters/responses-text.ts index cbef702a0..1e791747e 100644 --- a/packages/typescript/ai-openrouter/src/adapters/responses-text.ts +++ b/packages/typescript/ai-openrouter/src/adapters/responses-text.ts @@ -1,26 +1,32 @@ import { OpenRouter } from '@openrouter/sdk' -import { - OpenAICompatibleResponsesTextAdapter, - convertFunctionToolToResponsesFormat, -} from '@tanstack/ai-openai-compatible' +import { EventType } from '@tanstack/ai' +import { BaseTextAdapter } from '@tanstack/ai/adapters' +import { toRunErrorPayload } from '@tanstack/ai/adapter-internals' +import { generateId, transformNullsToUndefined } from '@tanstack/ai-utils' +import { extractRequestOptions } from '../internal/request-options' +import { makeStructuredOutputCompatible } from '../internal/schema-converter' +import { convertFunctionToolToResponsesFormat } from '../internal/responses-tool-converter' import { isWebSearchTool } from '../tools/web-search-tool' import { getOpenRouterApiKeyFromEnv } from '../utils' import type { SDKOptions } from '@openrouter/sdk' +import type { ResponsesFunctionTool } from '../internal/responses-tool-converter' import type { InputsUnion, + OpenResponsesResult, ResponsesRequest, StreamEvents, } from '@openrouter/sdk/models' import type { - ResponseCreateParams, - ResponseCreateParamsNonStreaming, - ResponseCreateParamsStreaming, - ResponseInputContent, - ResponseStreamEvent, - ResponsesFunctionTool, - ResponsesResponse, -} from '@tanstack/ai-openai-compatible' -import type { ContentPart, ModelMessage, TextOptions, Tool } from '@tanstack/ai' + StructuredOutputOptions, + StructuredOutputResult, +} from '@tanstack/ai/adapters' +import type { + ContentPart, + ModelMessage, + StreamChunk, + TextOptions, + Tool, +} from '@tanstack/ai' import type { ExternalResponsesProviderOptions } from '../text/responses-provider-options' import type { OPENROUTER_CHAT_MODELS, @@ -31,9 +37,11 @@ import type { OpenRouterMessageMetadataByModality } from '../message-types' /** Element type of `ResponsesRequest.input` when it's the array form (the * SDK union also allows a bare string). Pinning to the array element lets - * the convertMessagesToInput override narrow to the per-item discriminated + * the convertMessagesToInput logic narrow to the per-item discriminated * union so a TS rename surfaces here. */ type InputsItem = Extract>[number] +/** ResponsesRequest input content part shape (per-content-part discriminated union). */ +type ResponsesInputContent = unknown export interface OpenRouterResponsesConfig extends SDKOptions {} export type OpenRouterResponsesTextModels = @@ -52,36 +60,16 @@ type ResolveToolCapabilities = : readonly [] /** - * OpenRouter Responses (beta) Adapter. - * - * Why this extends `OpenAICompatibleResponsesTextAdapter` from - * `@tanstack/ai-openai-compatible`: - * - * OpenRouter's `/v1/responses` (beta) endpoint accepts OpenAI's Responses - * wire format and fans out to any underlying model — including Anthropic - * Claude and Google Gemini, neither of which has a native Responses - * endpoint. That makes Responses a multi-vendor protocol from OpenRouter's - * perspective, not an OpenAI-only product, and the shared compatible base - * is the right place for the streaming event lifecycle, structured-output - * flow, tool-call accumulator, and RUN_ERROR taxonomy that any Responses - * implementer needs. If we duplicated that here we'd ship the same ~1.2k - * LOC in OpenRouter and OpenAI separately and have to keep them in sync. 
- * - * What's different about OpenRouter (and why we still need overrides): + * OpenRouter Responses (beta) Adapter — standalone implementation that talks + * to OpenRouter's `/v1/responses` (beta) endpoint via the `@openrouter/sdk` + * SDK. * - * The wire format is OpenAI-Responses-compatible, but the `@openrouter/sdk` - * SDK exposes a different call shape — `client.beta.responses.send - * ({ responsesRequest })` with camelCase fields. We override the two - * SDK-call hooks (`callResponse` / `callResponseStream`) to bridge that, - * plus chunk and result shape adapters on the way back. - * - * Behaviour preserved from the chat-completions migration: - * - Provider routing surface (`provider`, `models`, `plugins`, - * `variant`) passes through `modelOptions`. - * - App attribution headers (`httpReferer`, `appTitle`) and base URL - * overrides flow through the SDK `SDKOptions` constructor. - * - Model variant suffixing (e.g. `:thinking`, `:free`) via - * `modelOptions.variant`. + * The wire format is OpenAI-Responses-compatible (so OpenRouter can route + * Responses requests to GPT, Claude, Gemini, etc.) but the SDK exposes the + * request/response in camelCase TS shapes (`callId`, `imageUrl`, + * `fileData`, `outputIndex`, `itemId`, `inputTokens`, `incompleteDetails`, + * etc.). This adapter operates directly in those camelCase shapes — there's + * no snake_case ↔ camelCase round-trip. * * v1 routes function tools only. Passing a `webSearchTool()` brand throws * — OpenRouter's Responses API exposes richer server-tool variants @@ -92,7 +80,7 @@ export class OpenRouterResponsesTextAdapter< TModel extends OpenRouterResponsesTextModels, TToolCapabilities extends ReadonlyArray = ResolveToolCapabilities, -> extends OpenAICompatibleResponsesTextAdapter< +> extends BaseTextAdapter< TModel, OpenRouterResponsesTextProviderOptions, ResolveInputModalities, @@ -105,80 +93,1027 @@ export class OpenRouterResponsesTextAdapter< protected orClient: OpenRouter constructor(config: OpenRouterResponsesConfig, model: TModel) { - super(model, 'openrouter-responses') + super({}, model) this.orClient = new OpenRouter(config) } + async *chatStream( + options: TextOptions, + ): AsyncIterable { + // Track tool call metadata by unique ID. The Responses API streams tool + // calls with deltas — first chunk has ID/name, subsequent chunks only + // have args. We assign our own indices as we encounter unique ids. + const toolCallMetadata = new Map< + string, + { + index: number + name: string + started: boolean + ended?: boolean + pendingArguments?: string + } + >() + + // AG-UI lifecycle tracking + const aguiState = { + runId: generateId(this.name), + threadId: options.threadId ?? generateId(this.name), + messageId: generateId(this.name), + hasEmittedRunStarted: false, + } + + try { + // mapOptionsToRequest can throw on caller-side validation failures + // (empty user content, unsupported parts, webSearchTool() rejection). + // Keep it inside the try so those failures surface as RUN_ERROR events + // instead of iterator throws. + const responsesRequest = this.mapOptionsToRequest(options) + options.logger.request( + `activity=chat provider=${this.name} model=${this.model} messages=${options.messages.length} tools=${options.tools?.length ?? 0} stream=true`, + { provider: this.name, model: this.model }, + ) + const reqOptions = extractRequestOptions(options.request) + const response = (await this.orClient.beta.responses.send( + { responsesRequest: { ...responsesRequest, stream: true } }, + { + signal: reqOptions.signal ?? 
undefined, + ...(reqOptions.headers && { headers: reqOptions.headers }), + }, + )) as AsyncIterable + + yield* this.processStreamChunks( + response, + toolCallMetadata, + options, + aguiState, + ) + } catch (error: unknown) { + // Narrow before logging: raw SDK errors can carry request metadata + // (including auth headers) which we must never surface to user loggers. + const errorPayload = toRunErrorPayload( + error, + `${this.name}.chatStream failed`, + ) + + // Emit RUN_STARTED if not yet emitted + if (!aguiState.hasEmittedRunStarted) { + aguiState.hasEmittedRunStarted = true + yield { + type: EventType.RUN_STARTED, + runId: aguiState.runId, + threadId: aguiState.threadId, + model: options.model, + timestamp: Date.now(), + } satisfies StreamChunk + } + + yield { + type: EventType.RUN_ERROR, + model: options.model, + timestamp: Date.now(), + message: errorPayload.message, + code: errorPayload.code, + error: errorPayload, + } satisfies StreamChunk + + options.logger.errors(`${this.name}.chatStream fatal`, { + error: errorPayload, + source: `${this.name}.chatStream`, + }) + } + } + /** - * Preserve nulls in structured-output results. OpenRouter routes through - * a wide variety of upstream providers; some of them return `null` as a - * distinct sentinel ("the field exists, the value is null") rather than - * collapsing it to absent. Stripping nulls here would erase that - * distinction. Mirrors the chat-completions adapter override. + * Generate structured output via OpenRouter's Responses API + * `text.format: { type: 'json_schema', ... }`. Uses stream: false. */ - protected override transformStructuredOutput(parsed: unknown): unknown { + async structuredOutput( + options: StructuredOutputOptions, + ): Promise> { + const { chatOptions, outputSchema } = options + const responsesRequest = this.mapOptionsToRequest(chatOptions) + + const jsonSchema = this.makeStructuredOutputCompatible( + outputSchema, + outputSchema.required, + ) + + try { + chatOptions.logger.request( + `activity=structuredOutput provider=${this.name} model=${this.model} messages=${chatOptions.messages.length}`, + { provider: this.name, model: this.model }, + ) + const reqOptions = extractRequestOptions(chatOptions.request) + const response = await this.orClient.beta.responses.send( + { + responsesRequest: { + ...responsesRequest, + stream: false, + text: { + format: { + type: 'json_schema', + name: 'structured_output', + schema: jsonSchema, + strict: true, + }, + } as ResponsesRequest['text'], + }, + }, + { + signal: reqOptions.signal ?? undefined, + ...(reqOptions.headers && { headers: reqOptions.headers }), + }, + ) + + const rawText = this.extractTextFromResponse(response) + + if (rawText.length === 0) { + throw new Error( + `${this.name}.structuredOutput: response contained no content`, + ) + } + + let parsed: unknown + try { + parsed = JSON.parse(rawText) + } catch { + throw new Error( + `Failed to parse structured output as JSON. Content: ${rawText.slice(0, 200)}${rawText.length > 200 ? '...' : ''}`, + ) + } + + // OpenRouter override: pass nulls through unchanged. 
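+      // e.g. a parsed payload of { "middleName": null } is returned with the
+      // null intact rather than being rewritten to undefined.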
+ const transformed = this.transformStructuredOutput(parsed) + + return { + data: transformed, + rawText, + } + } catch (error: unknown) { + chatOptions.logger.errors(`${this.name}.structuredOutput fatal`, { + error: toRunErrorPayload(error, `${this.name}.structuredOutput failed`), + source: `${this.name}.structuredOutput`, + }) + throw error + } + } + + protected makeStructuredOutputCompatible( + schema: Record, + originalRequired?: Array, + ): Record { + return makeStructuredOutputCompatible(schema, originalRequired) + } + + /** + * OpenRouter routes through a wide variety of upstream providers; some + * return `null` as a distinct sentinel rather than collapsing it to absent. + * Stripping nulls would erase that distinction, so we passthrough. + * + * `transformNullsToUndefined` is imported for parity with the other + * provider adapters but intentionally not invoked here. + */ + protected transformStructuredOutput(parsed: unknown): unknown { + void transformNullsToUndefined return parsed } - // ──────────────────────────────────────────────────────────────────────── - // SDK call hooks — the params we get here were built by our overridden - // mapOptionsToRequest / convertMessagesToInput / convertContentPartToInput - // already in OpenRouter's camelCase TS shape, so only a type cast bridges - // the base's static snake_case signature. The inbound result/stream still - // needs camel → snake reshaping because the base's processStreamChunks / - // extractTextFromResponse read documented snake_case fields like - // `response.usage.input_tokens` and `chunk.item_id`. - // ──────────────────────────────────────────────────────────────────────── - - protected override async callResponseStream( - params: ResponseCreateParamsStreaming, - requestOptions: { signal?: AbortSignal | null; headers?: HeadersInit }, - ): Promise> { - const responsesRequest = params as unknown as Omit< - ResponsesRequest, - 'stream' - > - // The SDK's EventStream is an AsyncIterable; treat it - // structurally so we don't need to depend on the SDK's class export. - const stream = (await this.orClient.beta.responses.send( - { responsesRequest: { ...responsesRequest, stream: true } }, - { - signal: requestOptions.signal ?? undefined, - ...(requestOptions.headers && { headers: requestOptions.headers }), - }, - )) as unknown as AsyncIterable - return adaptOpenRouterResponsesStreamEvents(stream) + /** + * Extract text content from a non-streaming Responses API response. + * Reads OpenRouter's camelCase `OpenResponsesResult` shape directly. + */ + protected extractTextFromResponse(response: OpenResponsesResult): string { + let textContent = '' + let refusal: string | undefined + let sawMessageItem = false + const observedItemTypes = new Set() + + for (const rawItem of response.output) { + const item = rawItem as { type: string; content?: ReadonlyArray } + observedItemTypes.add(item.type) + if (item.type === 'message') { + sawMessageItem = true + for (const part of item.content ?? []) { + // Cast off the discriminated union before the type discrimination + // so future SDK variants (e.g. `output_audio`, `output_image`) hit + // the explicit error path rather than being misreported as refusals + // when they get added to the union. + const partType = (part as { type: string }).type + if (partType === 'output_text') { + textContent += (part as { text?: string }).text ?? 
'' + } else if (partType === 'refusal') { + const refusalText = (part as { refusal?: string }).refusal + refusal = refusalText || refusal || 'Refused without explanation' + } else { + throw new Error( + `${this.name}.extractTextFromResponse: unsupported message content part type "${partType}"`, + ) + } + } + } + } + + // Surface refusals as an explicit error so callers don't see a generic + // "Failed to parse structured output as JSON. Content: " when the model + // refused for safety / content-policy reasons. + if (!textContent && refusal !== undefined) { + const err = new Error(`Model refused to respond: ${refusal}`) + ;(err as Error & { code?: string }).code = 'refusal' + throw err + } + + // Response had items but none carried message text (e.g. only + // function_call or reasoning items). Surface that explicitly so a + // downstream structured-output caller doesn't see a misleading + // "Failed to parse JSON. Content: " from an empty string. + if (!textContent && response.output.length > 0 && !sawMessageItem) { + throw new Error( + `${this.name}.extractTextFromResponse: response.output contained items of type(s) [${[...observedItemTypes].sort().join(', ')}] but no message text — the model returned a non-text response`, + ) + } + + return textContent } - protected override async callResponse( - params: ResponseCreateParamsNonStreaming, - requestOptions: { signal?: AbortSignal | null; headers?: HeadersInit }, - ): Promise { - const responsesRequest = params as unknown as Omit< - ResponsesRequest, - 'stream' - > - const result = await this.orClient.beta.responses.send( - { responsesRequest: { ...responsesRequest, stream: false } }, + /** + * Processes streamed events from the OpenRouter Responses API and yields + * AG-UI events. Reads the SDK's camelCase event shape directly + * (`itemId`, `outputIndex`, `incompleteDetails`, `inputTokens`, etc.). + * + * Speakeasy's discriminated-union parser falls back to + * `{ raw, type: 'UNKNOWN', isUnknown: true }` when an event's strict + * per-variant schema rejects (missing optional fields like `sequenceNumber` + * that some upstreams omit). The `raw` payload is the original wire-shape + * event in snake_case. We translate snake_case keys to camelCase for those + * unknown events so the rest of the processor reads a uniform shape. + */ + protected async *processStreamChunks( + stream: AsyncIterable, + toolCallMetadata: Map< + string, { - signal: requestOptions.signal ?? 
undefined, - ...(requestOptions.headers && { headers: requestOptions.headers }), - }, - ) - return adaptOpenRouterResponsesResult(result) - } + index: number + name: string + started: boolean + ended?: boolean + pendingArguments?: string + } + >, + options: TextOptions, + aguiState: { + runId: string + threadId: string + messageId: string + hasEmittedRunStarted: boolean + }, + ): AsyncIterable { + let accumulatedContent = '' + let accumulatedReasoning = '' + + let hasStreamedContentDeltas = false + let hasStreamedReasoningDeltas = false + + let model: string = options.model + + let stepId: string | null = null + let hasEmittedTextMessageStart = false + let hasEmittedStepStarted = false + let runFinishedEmitted = false + + try { + for await (const rawEvent of stream) { + const chunk = normalizeStreamEvent(rawEvent) + options.logger.provider(`provider=${this.name} type=${chunk.type}`, { + provider: this.name, + type: chunk.type, + }) + + // Emit RUN_STARTED on first chunk + if (!aguiState.hasEmittedRunStarted) { + aguiState.hasEmittedRunStarted = true + yield { + type: EventType.RUN_STARTED, + runId: aguiState.runId, + threadId: aguiState.threadId, + model: model || options.model, + timestamp: Date.now(), + } satisfies StreamChunk + } + + const handleContentPart = (contentPart: { + type: string + text?: string + refusal?: string + }): StreamChunk => { + if (contentPart.type === 'output_text') { + accumulatedContent += contentPart.text || '' + return { + type: EventType.TEXT_MESSAGE_CONTENT, + messageId: aguiState.messageId, + model: model || options.model, + timestamp: Date.now(), + delta: contentPart.text || '', + content: accumulatedContent, + } satisfies StreamChunk + } + + if (contentPart.type === 'reasoning_text') { + accumulatedReasoning += contentPart.text || '' + // Cache the fallback stepId rather than generating a fresh one + // on every call. + if (!stepId) { + stepId = generateId(this.name) + } + return { + type: EventType.STEP_FINISHED, + stepName: stepId, + stepId, + model: model || options.model, + timestamp: Date.now(), + delta: contentPart.text || '', + content: accumulatedReasoning, + } satisfies StreamChunk + } + // Either a real refusal or an unknown content_part type. Surface + // the part type in the error so unknown parts are debuggable + // instead of being misreported as "Unknown refusal". + const isRefusal = contentPart.type === 'refusal' + const message = isRefusal + ? contentPart.refusal || 'Refused without explanation' + : `Unsupported response content_part type: ${contentPart.type}` + const code = isRefusal ? 'refusal' : contentPart.type + return { + type: EventType.RUN_ERROR, + model: model || options.model, + timestamp: Date.now(), + message, + code, + error: { message, code }, + } satisfies StreamChunk + } + + // Capture model metadata from any of these events. + if ( + chunk.type === 'response.created' || + chunk.type === 'response.in_progress' || + chunk.type === 'response.incomplete' || + chunk.type === 'response.failed' + ) { + const r = chunk.response as { model?: string } | undefined + if (r?.model) model = r.model + } + + // response.created marks the start of a fresh run — safe to reset + // the per-run accumulators here. + if (chunk.type === 'response.created') { + hasStreamedContentDeltas = false + hasStreamedReasoningDeltas = false + hasEmittedTextMessageStart = false + hasEmittedStepStarted = false + accumulatedContent = '' + accumulatedReasoning = '' + } + + // response.failed and response.incomplete are TERMINAL events. 
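+        // Close any open text message, emit RUN_ERROR, and return; no
+        // RUN_FINISHED follows a terminal failure.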
+ if ( + chunk.type === 'response.failed' || + chunk.type === 'response.incomplete' + ) { + if (hasEmittedTextMessageStart) { + yield { + type: EventType.TEXT_MESSAGE_END, + messageId: aguiState.messageId, + model, + timestamp: Date.now(), + } satisfies StreamChunk + hasEmittedTextMessageStart = false + } + const r = (chunk.response ?? {}) as { + error?: { message?: string; code?: unknown } | null + incompleteDetails?: { reason?: string } | null + } + const errorMessage = + r.error?.message || + r.incompleteDetails?.reason || + (chunk.type === 'response.failed' + ? 'Response failed' + : 'Response ended incomplete') + const errorCode = + normalizeCode(r.error?.code) ?? + (r.incompleteDetails ? 'incomplete' : undefined) ?? + undefined + yield { + type: EventType.RUN_ERROR, + model, + timestamp: Date.now(), + message: errorMessage, + ...(errorCode !== undefined && { code: errorCode }), + error: { + message: errorMessage, + ...(errorCode !== undefined && { code: errorCode }), + }, + } satisfies StreamChunk + runFinishedEmitted = true + return + } + + // Handle output text deltas (token-by-token streaming) + if (chunk.type === 'response.output_text.delta' && chunk.delta) { + const textDelta = Array.isArray(chunk.delta) + ? chunk.delta.join('') + : typeof chunk.delta === 'string' + ? chunk.delta + : '' + + if (textDelta) { + if (!hasEmittedTextMessageStart) { + hasEmittedTextMessageStart = true + yield { + type: EventType.TEXT_MESSAGE_START, + messageId: aguiState.messageId, + model: model || options.model, + timestamp: Date.now(), + role: 'assistant', + } satisfies StreamChunk + } + + accumulatedContent += textDelta + hasStreamedContentDeltas = true + yield { + type: EventType.TEXT_MESSAGE_CONTENT, + messageId: aguiState.messageId, + model: model || options.model, + timestamp: Date.now(), + delta: textDelta, + content: accumulatedContent, + } satisfies StreamChunk + } + } + + // Handle reasoning deltas + if (chunk.type === 'response.reasoning_text.delta' && chunk.delta) { + const reasoningDelta = Array.isArray(chunk.delta) + ? chunk.delta.join('') + : typeof chunk.delta === 'string' + ? chunk.delta + : '' + + if (reasoningDelta) { + if (!hasEmittedStepStarted) { + hasEmittedStepStarted = true + stepId = generateId(this.name) + yield { + type: EventType.STEP_STARTED, + stepName: stepId, + stepId, + model: model || options.model, + timestamp: Date.now(), + stepType: 'thinking', + } satisfies StreamChunk + } + + accumulatedReasoning += reasoningDelta + hasStreamedReasoningDeltas = true + const fallbackStepId = stepId || generateId(this.name) + yield { + type: EventType.STEP_FINISHED, + stepName: fallbackStepId, + stepId: fallbackStepId, + model: model || options.model, + timestamp: Date.now(), + delta: reasoningDelta, + content: accumulatedReasoning, + } satisfies StreamChunk + } + } - // ──────────────────────────────────────────────────────────────────────── - // Request construction — emit OpenRouter's camelCase TS shape directly so - // a `Pick` annotation catches any field-name drift at - // compile time. Returned via `unknown as Omit` - // because the base's signature is the OpenAI snake_case type; the SDK call - // hooks above just pass the value through. - // ──────────────────────────────────────────────────────────────────────── + // Handle reasoning summary deltas + if ( + chunk.type === 'response.reasoning_summary_text.delta' && + chunk.delta + ) { + const summaryDelta = + typeof chunk.delta === 'string' ? 
chunk.delta : '' - protected override mapOptionsToRequest( + if (summaryDelta) { + if (!hasEmittedStepStarted) { + hasEmittedStepStarted = true + stepId = generateId(this.name) + yield { + type: EventType.STEP_STARTED, + stepName: stepId, + stepId, + model: model || options.model, + timestamp: Date.now(), + stepType: 'thinking', + } satisfies StreamChunk + } + + accumulatedReasoning += summaryDelta + hasStreamedReasoningDeltas = true + const fallbackStepId = stepId || generateId(this.name) + yield { + type: EventType.STEP_FINISHED, + stepName: fallbackStepId, + stepId: fallbackStepId, + model: model || options.model, + timestamp: Date.now(), + delta: summaryDelta, + content: accumulatedReasoning, + } satisfies StreamChunk + } + } + + // handle content_part added events for text, reasoning and refusals + if (chunk.type === 'response.content_part.added') { + const contentPart = chunk.part as { + type: string + text?: string + refusal?: string + } + if ( + contentPart.type === 'output_text' && + !hasEmittedTextMessageStart + ) { + hasEmittedTextMessageStart = true + yield { + type: EventType.TEXT_MESSAGE_START, + messageId: aguiState.messageId, + model: model || options.model, + timestamp: Date.now(), + role: 'assistant', + } satisfies StreamChunk + } + if (contentPart.type === 'reasoning_text' && !hasEmittedStepStarted) { + hasEmittedStepStarted = true + stepId = generateId(this.name) + yield { + type: EventType.STEP_STARTED, + stepName: stepId, + stepId, + model: model || options.model, + timestamp: Date.now(), + stepType: 'thinking', + } satisfies StreamChunk + } + if (contentPart.type === 'output_text') { + hasStreamedContentDeltas = true + } else if (contentPart.type === 'reasoning_text') { + hasStreamedReasoningDeltas = true + } + const partChunk = handleContentPart(contentPart) + yield partChunk + if (partChunk.type === 'RUN_ERROR') { + runFinishedEmitted = true + return + } + } + + if (chunk.type === 'response.content_part.done') { + const contentPart = chunk.part as { + type: string + text?: string + refusal?: string + } + + // Skip emitting chunks for content parts that we've already streamed via deltas + if (contentPart.type === 'output_text' && hasStreamedContentDeltas) { + continue + } + if ( + contentPart.type === 'reasoning_text' && + hasStreamedReasoningDeltas + ) { + continue + } + + // Upstreams that emit `content_part.done` without any preceding + // deltas (or `content_part.added`) still need a START event before + // CONTENT. 
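+          // e.g. an upstream that delivers a whole part as a single
+          // content_part.done (no deltas) still gets a START before the
+          // CONTENT chunk below.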
+ if ( + contentPart.type === 'output_text' && + !hasEmittedTextMessageStart + ) { + hasEmittedTextMessageStart = true + yield { + type: EventType.TEXT_MESSAGE_START, + messageId: aguiState.messageId, + model: model || options.model, + timestamp: Date.now(), + role: 'assistant', + } satisfies StreamChunk + } else if ( + contentPart.type === 'reasoning_text' && + !hasEmittedStepStarted + ) { + hasEmittedStepStarted = true + stepId = generateId(this.name) + yield { + type: EventType.STEP_STARTED, + stepName: stepId, + stepId, + model: model || options.model, + timestamp: Date.now(), + stepType: 'thinking', + } satisfies StreamChunk + } + + const doneChunk = handleContentPart(contentPart) + yield doneChunk + if (doneChunk.type === 'RUN_ERROR') { + runFinishedEmitted = true + return + } + } + + // handle output_item.added to capture function call metadata (name) + if (chunk.type === 'response.output_item.added') { + const item = chunk.item as { + type: string + id?: string + name?: string + } + if (item.type === 'function_call' && item.id) { + const existing = toolCallMetadata.get(item.id) + if (!existing) { + toolCallMetadata.set(item.id, { + index: chunk.outputIndex ?? 0, + name: item.name || '', + started: false, + }) + } else if (!existing.name && item.name) { + existing.name = item.name + } + const metadata = toolCallMetadata.get(item.id)! + if (!metadata.started && metadata.name) { + yield { + type: EventType.TOOL_CALL_START, + toolCallId: item.id, + toolCallName: metadata.name, + toolName: metadata.name, + model: model || options.model, + timestamp: Date.now(), + index: chunk.outputIndex ?? 0, + } satisfies StreamChunk + metadata.started = true + } + } + } + + // Handle function call arguments delta (streaming). + if ( + chunk.type === 'response.function_call_arguments.delta' && + chunk.delta + ) { + const itemId = chunk.itemId ?? '' + const metadata = toolCallMetadata.get(itemId) + if (!metadata?.started) { + options.logger.errors( + `${this.name}.processStreamChunks orphan function_call_arguments.delta`, + { + source: `${this.name}.processStreamChunks`, + toolCallId: itemId, + rawDelta: chunk.delta, + }, + ) + continue + } + yield { + type: EventType.TOOL_CALL_ARGS, + toolCallId: itemId, + model: model || options.model, + timestamp: Date.now(), + delta: typeof chunk.delta === 'string' ? chunk.delta : '', + } satisfies StreamChunk + } + + if (chunk.type === 'response.function_call_arguments.done') { + const itemId = chunk.itemId ?? '' + + const metadata = toolCallMetadata.get(itemId) + if (!metadata?.started) { + if (metadata) { + metadata.pendingArguments = chunk.arguments + } + options.logger.errors( + `${this.name}.processStreamChunks deferring function_call_arguments.done — TOOL_CALL_START not yet emitted (waiting for name)`, + { + source: `${this.name}.processStreamChunks`, + toolCallId: itemId, + rawArguments: chunk.arguments, + }, + ) + continue + } + if (metadata.ended) continue + const name = metadata.name || '' + metadata.ended = true + + let parsedInput: unknown = {} + if (chunk.arguments) { + try { + const parsed = JSON.parse(chunk.arguments) + parsedInput = parsed && typeof parsed === 'object' ? 
parsed : {} + } catch (parseError) { + options.logger.errors( + `${this.name}.processStreamChunks tool-args JSON parse failed`, + { + error: toRunErrorPayload( + parseError, + `tool ${name} (${itemId}) returned malformed JSON arguments`, + ), + source: `${this.name}.processStreamChunks`, + toolCallId: itemId, + toolName: name, + rawArguments: chunk.arguments, + }, + ) + parsedInput = {} + } + } + + yield { + type: EventType.TOOL_CALL_END, + toolCallId: itemId, + toolCallName: name, + toolName: name, + model: model || options.model, + timestamp: Date.now(), + input: parsedInput, + } satisfies StreamChunk + } + + // `output_item.done` is the last point at which a function_call's + // name is guaranteed to be on the wire. + if (chunk.type === 'response.output_item.done') { + const item = chunk.item as { + type: string + id?: string + name?: string + arguments?: string + } + if (item.type === 'function_call' && item.id) { + const metadata = toolCallMetadata.get(item.id) ?? { + index: chunk.outputIndex ?? 0, + name: item.name || '', + started: false, + } + if (!toolCallMetadata.has(item.id)) { + toolCallMetadata.set(item.id, metadata) + } else if (!metadata.name && item.name) { + metadata.name = item.name + } + if (!metadata.started && metadata.name) { + yield { + type: EventType.TOOL_CALL_START, + toolCallId: item.id, + toolCallName: metadata.name, + toolName: metadata.name, + model: model || options.model, + timestamp: Date.now(), + index: metadata.index, + } satisfies StreamChunk + metadata.started = true + } + const rawArgs = + typeof item.arguments === 'string' && item.arguments.length > 0 + ? item.arguments + : metadata.pendingArguments + if (metadata.started && !metadata.ended && rawArgs !== undefined) { + const name = metadata.name || '' + let parsedInput: unknown = {} + if (rawArgs) { + try { + const parsed = JSON.parse(rawArgs) + parsedInput = + parsed && typeof parsed === 'object' ? parsed : {} + } catch (parseError) { + options.logger.errors( + `${this.name}.processStreamChunks tool-args JSON parse failed (output_item.done backfill)`, + { + error: toRunErrorPayload( + parseError, + `tool ${name} (${item.id}) returned malformed JSON arguments`, + ), + source: `${this.name}.processStreamChunks`, + toolCallId: item.id, + toolName: name, + rawArguments: rawArgs, + }, + ) + parsedInput = {} + } + } + yield { + type: EventType.TOOL_CALL_END, + toolCallId: item.id, + toolCallName: name, + toolName: name, + model: model || options.model, + timestamp: Date.now(), + input: parsedInput, + } satisfies StreamChunk + metadata.ended = true + metadata.pendingArguments = undefined + } + } + } + + if (chunk.type === 'response.completed') { + const responseObj = (chunk.response ?? {}) as { + output?: ReadonlyArray + usage?: { + inputTokens?: number + outputTokens?: number + totalTokens?: number + } | null + incompleteDetails?: { reason?: string } | null + } + const outputItems = Array.isArray(responseObj.output) + ? responseObj.output + : [] + + // Final backstop for function_call lifecycle. + for (const rawItem of outputItems) { + const item = rawItem as { + type?: string + id?: string + name?: string + arguments?: string + } + if (item.type !== 'function_call' || !item.id) continue + const metadata = toolCallMetadata.get(item.id) ?? 
{ + index: 0, + name: item.name || '', + started: false, + } + if (!toolCallMetadata.has(item.id)) { + toolCallMetadata.set(item.id, metadata) + } else if (!metadata.name && item.name) { + metadata.name = item.name + } + if (!metadata.started && metadata.name) { + yield { + type: EventType.TOOL_CALL_START, + toolCallId: item.id, + toolCallName: metadata.name, + toolName: metadata.name, + model: model || options.model, + timestamp: Date.now(), + index: metadata.index, + } satisfies StreamChunk + metadata.started = true + } + const rawArgs = + typeof item.arguments === 'string' && item.arguments.length > 0 + ? item.arguments + : metadata.pendingArguments + if (metadata.started && !metadata.ended) { + const name = metadata.name || '' + let parsedInput: unknown = {} + if (rawArgs) { + try { + const parsed = JSON.parse(rawArgs) + parsedInput = + parsed && typeof parsed === 'object' ? parsed : {} + } catch (parseError) { + options.logger.errors( + `${this.name}.processStreamChunks tool-args JSON parse failed (response.completed backfill)`, + { + error: toRunErrorPayload( + parseError, + `tool ${name} (${item.id}) returned malformed JSON arguments`, + ), + source: `${this.name}.processStreamChunks`, + toolCallId: item.id, + toolName: name, + rawArguments: rawArgs, + }, + ) + parsedInput = {} + } + } + yield { + type: EventType.TOOL_CALL_END, + toolCallId: item.id, + toolCallName: name, + toolName: name, + model: model || options.model, + timestamp: Date.now(), + input: parsedInput, + } satisfies StreamChunk + metadata.ended = true + metadata.pendingArguments = undefined + } + } + + if (hasEmittedTextMessageStart) { + yield { + type: EventType.TEXT_MESSAGE_END, + messageId: aguiState.messageId, + model: model || options.model, + timestamp: Date.now(), + } satisfies StreamChunk + hasEmittedTextMessageStart = false + } + + const hasFunctionCalls = outputItems.some( + (item) => (item as { type?: string }).type === 'function_call', + ) + const incompleteReason = responseObj.incompleteDetails?.reason + const finishReason: + | 'tool_calls' + | 'length' + | 'content_filter' + | 'stop' = hasFunctionCalls + ? 'tool_calls' + : incompleteReason === 'max_output_tokens' + ? 'length' + : incompleteReason === 'content_filter' + ? 'content_filter' + : 'stop' + + yield { + type: EventType.RUN_FINISHED, + runId: aguiState.runId, + threadId: aguiState.threadId, + model: model || options.model, + timestamp: Date.now(), + usage: { + promptTokens: responseObj.usage?.inputTokens || 0, + completionTokens: responseObj.usage?.outputTokens || 0, + totalTokens: responseObj.usage?.totalTokens || 0, + }, + finishReason, + } satisfies StreamChunk + runFinishedEmitted = true + } + + if (chunk.type === 'error') { + const code = normalizeCode(chunk.code) + yield { + type: EventType.RUN_ERROR, + model: model || options.model, + timestamp: Date.now(), + message: chunk.message ?? '', + ...(code !== undefined && { code }), + error: { + message: chunk.message ?? '', + ...(code !== undefined && { code }), + }, + } satisfies StreamChunk + runFinishedEmitted = true + return + } + } + + // Synthetic terminal RUN_FINISHED if the stream ended without a + // response.completed event. 
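+      // Usage is unknown on this path, so it stays undefined; finishReason
+      // falls back to 'tool_calls' if any tool call was seen, else 'stop'.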
+ if (!runFinishedEmitted && aguiState.hasEmittedRunStarted) { + if (hasEmittedTextMessageStart) { + yield { + type: EventType.TEXT_MESSAGE_END, + messageId: aguiState.messageId, + model: model || options.model, + timestamp: Date.now(), + } satisfies StreamChunk + } + yield { + type: EventType.RUN_FINISHED, + runId: aguiState.runId, + threadId: aguiState.threadId, + model: model || options.model, + timestamp: Date.now(), + usage: undefined, + finishReason: toolCallMetadata.size > 0 ? 'tool_calls' : 'stop', + } satisfies StreamChunk + } + } catch (error: unknown) { + const errorPayload = toRunErrorPayload( + error, + `${this.name}.processStreamChunks failed`, + ) + options.logger.errors(`${this.name}.processStreamChunks fatal`, { + error: errorPayload, + source: `${this.name}.processStreamChunks`, + }) + yield { + type: EventType.RUN_ERROR, + model: options.model, + timestamp: Date.now(), + message: errorPayload.message, + code: errorPayload.code, + error: errorPayload, + } satisfies StreamChunk + } + } + + /** + * Build an OpenRouter `ResponsesRequest` (camelCase) from `TextOptions`. + */ + protected mapOptionsToRequest( options: TextOptions, - ): Omit { + ): Omit { // Fail loud on webSearchTool() — v1 only routes function tools. if (options.tools) { for (const tool of options.tools) { @@ -192,7 +1127,6 @@ export class OpenRouterResponsesTextAdapter< } } - // Apply the same modelOptions/variant precedence as the chat adapter. const modelOptions = options.modelOptions as | (Partial & { variant?: string }) | undefined @@ -200,14 +1134,10 @@ export class OpenRouterResponsesTextAdapter< ? `:${modelOptions.variant}` : '' - // The override below returns Array — re-cast through the - // base's documented shape so this local has the type a Pick<…> expects. - const input = this.convertMessagesToInput(options.messages) as unknown as - | ResponsesRequest['input'] - | undefined + const input = this.convertMessagesToInput(options.messages) - // Reuse the ai-openai-compatible function-tool converter. ResponsesFunctionTool - // already matches OpenRouter's ResponsesRequestToolFunction shape: + // ResponsesFunctionTool already matches OpenRouter's + // ResponsesRequestToolFunction shape: // `{ type:'function', name, parameters, description, strict }`. const tools: Array | undefined = options.tools ? options.tools.map((tool) => @@ -218,9 +1148,6 @@ export class OpenRouterResponsesTextAdapter< ) : undefined - // `Pick` is the static gate — if the SDK renames any - // of these keys in a future version this annotation breaks the build - // instead of silently producing a request the wire schema drops. const built: Pick< ResponsesRequest, | 'model' @@ -248,37 +1175,28 @@ export class OpenRouterResponsesTextAdapter< options.systemPrompts.length > 0 && { instructions: options.systemPrompts.join('\n'), }), - input, + input: input as ResponsesRequest['input'], ...(tools && tools.length > 0 && { - tools: tools as unknown as ResponsesRequest['tools'], + tools: tools as ResponsesRequest['tools'], }), } - return built as unknown as Omit + return built } - // ──────────────────────────────────────────────────────────────────────── - // Message + content converters — emit OpenRouter's camelCase TS shape - // (`callId`, `imageUrl`, `inputAudio`, `videoUrl`, `fileData`, `fileUrl`) - // directly. The return-type cast through `unknown` bridges to the base's - // signature without giving up the OpenRouter-shape return inside. 
- // ──────────────────────────────────────────────────────────────────────── - - protected override convertMessagesToInput( + /** + * Convert a list of ModelMessage to OpenRouter's `InputsUnion` array form. + * Emits camelCase shapes (`callId`, `imageUrl`, `videoUrl`, `fileData`, + * `fileUrl`). + */ + protected convertMessagesToInput( messages: Array, - ): ReturnType< - OpenAICompatibleResponsesTextAdapter['convertMessagesToInput'] - > { + ): Array { const result: Array = [] for (const message of messages) { if (message.role === 'tool') { - // For structured (Array) tool results, extract the text - // content rather than JSON-stringifying the parts — sending the raw - // ContentPart shape (e.g. `[{"type":"text","content":"…"}]`) into the - // `output` field would feed the literal JSON of the parts back to the - // model instead of the tool's textual result. result.push({ type: 'function_call_output', callId: message.toolCallId || '', @@ -286,16 +1204,13 @@ export class OpenRouterResponsesTextAdapter< typeof message.content === 'string' ? message.content : this.extractTextContent(message.content), - } as unknown as InputsItem) + } as InputsItem) continue } if (message.role === 'assistant') { if (message.toolCalls && message.toolCalls.length > 0) { for (const toolCall of message.toolCalls) { - // Stringify object-shaped args to match the SDK's `arguments: - // string` contract — mirrors the chat adapter's fix (see - // commit 0171b18e). const argumentsString = typeof toolCall.function.arguments === 'string' ? toolCall.function.arguments @@ -306,7 +1221,7 @@ export class OpenRouterResponsesTextAdapter< id: toolCall.id, name: toolCall.function.name, arguments: argumentsString, - } as unknown as InputsItem) + } as InputsItem) } } @@ -317,15 +1232,15 @@ export class OpenRouterResponsesTextAdapter< type: 'message', role: 'assistant', content: contentStr, - } as unknown as InputsItem) + } as InputsItem) } } continue } - // user — fail loud on empty / unsupported content (mirrors the base). + // user — fail loud on empty / unsupported content. 
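+      // normalizeContent maps null content to an empty array and wraps a
+      // bare string as a single text part, so the loop below always sees a
+      // uniform Array<ContentPart>.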
const contentParts = this.normalizeContent(message.content) - const inputContent: Array = [] + const inputContent: Array = [] for (const part of contentParts) { inputContent.push(this.convertContentPartToInput(part)) } @@ -340,23 +1255,19 @@ export class OpenRouterResponsesTextAdapter< type: 'message', role: 'user', content: inputContent, - } as unknown as InputsItem) + } as InputsItem) } - return result as unknown as ReturnType< - OpenAICompatibleResponsesTextAdapter['convertMessagesToInput'] - > + return result } - protected override convertContentPartToInput( - part: ContentPart, - ): ResponseInputContent { + protected convertContentPartToInput(part: ContentPart): ResponsesInputContent { switch (part.type) { case 'text': return { type: 'input_text', text: part.content, - } as ResponseInputContent + } case 'image': { const meta = part.metadata as | { detail?: 'auto' | 'low' | 'high' } @@ -370,7 +1281,7 @@ export class OpenRouterResponsesTextAdapter< type: 'input_image', imageUrl, detail: meta?.detail || 'auto', - } as unknown as ResponseInputContent + } } case 'audio': { if (part.source.type === 'url') { @@ -380,24 +1291,24 @@ export class OpenRouterResponsesTextAdapter< return { type: 'input_file', fileUrl: part.source.value, - } as unknown as ResponseInputContent + } } return { type: 'input_audio', inputAudio: { data: part.source.value, format: 'mp3' }, - } as unknown as ResponseInputContent + } } case 'video': return { type: 'input_video', videoUrl: part.source.value, - } as unknown as ResponseInputContent + } case 'document': { if (part.source.type === 'url') { return { type: 'input_file', fileUrl: part.source.value, - } as unknown as ResponseInputContent + } } const mime = part.source.mimeType || 'application/octet-stream' const data = part.source.value.startsWith('data:') @@ -406,7 +1317,7 @@ export class OpenRouterResponsesTextAdapter< return { type: 'input_file', fileData: data, - } as unknown as ResponseInputContent + } } default: throw new Error( @@ -414,218 +1325,153 @@ export class OpenRouterResponsesTextAdapter< ) } } + + protected normalizeContent( + content: string | null | Array, + ): Array { + if (content === null) { + return [] + } + if (typeof content === 'string') { + return [{ type: 'text', content: content }] + } + return content + } + + protected extractTextContent( + content: string | null | Array, + ): string { + if (content === null) { + return '' + } + if (typeof content === 'string') { + return content + } + return content + .filter((p) => p.type === 'text') + .map((p) => p.content) + .join('') + } } -// ────────────────────────────────────────────────────────────────────────── -// Inbound stream-event bridge: OpenRouter SDK camelCase → OpenAI snake_case -// so the base's `processStreamChunks` reads documented fields unchanged. -// (Outbound conversion is no longer needed — the adapter overrides above -// emit OpenRouter camelCase directly.) -// ────────────────────────────────────────────────────────────────────────── +/** + * Normalised event shape we read off each OpenRouter SDK stream event after + * camel-case translation. Models the loose superset of fields we consult + * across all event-type branches; specific branches narrow further inline. 
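+ * e.g. an output-text delta normalizes to
+ * `{ type: 'response.output_text.delta', itemId, outputIndex, delta }`.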
+ */ +interface NormalizedStreamEvent { + type: string + itemId?: string + outputIndex?: number + contentIndex?: number + delta?: string | Array + text?: string + arguments?: string + message?: string + code?: unknown + param?: string | null + sequenceNumber?: number + response?: unknown + item?: unknown + part?: unknown +} /** - * Adapt OpenRouter's streaming events (camelCase, with extended event types) - * into the OpenAI Responses event shape the base's `processStreamChunks` - * reads. Reshapes the nested `response` payload for terminal events - * (`response.completed`, `response.failed`, `response.incomplete`, - * `response.created`) into snake_case so reads like - * `chunk.response.incomplete_details?.reason` and - * `chunk.response.usage.input_tokens` work unchanged. + * Translate the SDK's discriminated-union event into a uniform camelCase + * shape our processor reads. + * + * The SDK's discriminated-union parser falls back to + * `{ raw, type: 'UNKNOWN', isUnknown: true }` when an event's strict per- + * variant schema rejects (missing optional-ish fields like `sequenceNumber`/ + * `logprobs` that some upstreams — including aimock — omit). The `raw` + * payload is the original wire-shape event in snake_case. We translate + * snake_case keys to camelCase for those unknown events so the rest of the + * processor reads a uniform shape. + * + * Known events already have camelCase fields and are passed through. */ -async function* adaptOpenRouterResponsesStreamEvents( - stream: AsyncIterable, -): AsyncIterable { - for await (const event of stream) { - const e = event as Record - - // Speakeasy's discriminated-union parser falls back to `{ raw, type: - // 'UNKNOWN', isUnknown: true }` when an event's strict per-variant schema - // rejects (missing optional-ish fields like `sequence_number`/`logprobs` - // that some upstreams — including aimock — omit). The `raw` payload is - // the original wire-shape event in snake_case, which is exactly what the - // base's `processStreamChunks` reads. Re-emit it verbatim. 
- if (e.isUnknown && e.raw && typeof e.raw === 'object') { - yield e.raw as ResponseStreamEvent - continue - } +function normalizeStreamEvent(event: StreamEvents): NormalizedStreamEvent { + const e = event as { + isUnknown?: boolean + raw?: unknown + type?: string + [k: string]: unknown + } - switch (e.type) { - case 'response.created': - case 'response.in_progress': - case 'response.completed': - case 'response.failed': - case 'response.incomplete': { - yield { - type: e.type, - response: toSnakeResponseResult(e.response), - sequence_number: e.sequenceNumber, - } as unknown as ResponseStreamEvent - break - } - case 'response.output_text.delta': - case 'response.output_text.done': - case 'response.reasoning_text.delta': - case 'response.reasoning_text.done': - case 'response.reasoning_summary_text.delta': - case 'response.reasoning_summary_text.done': { - yield { - type: e.type, - item_id: e.itemId, - output_index: e.outputIndex, - content_index: e.contentIndex, - delta: e.delta, - text: e.text, - sequence_number: e.sequenceNumber, - } as unknown as ResponseStreamEvent - break - } - case 'response.content_part.added': - case 'response.content_part.done': { - yield { - type: e.type, - item_id: e.itemId, - output_index: e.outputIndex, - content_index: e.contentIndex, - part: toSnakeContentPart(e.part), - sequence_number: e.sequenceNumber, - } as unknown as ResponseStreamEvent - break - } - case 'response.output_item.added': - case 'response.output_item.done': { - yield { - type: e.type, - item: toSnakeOutputItem(e.item), - output_index: e.outputIndex, - sequence_number: e.sequenceNumber, - } as unknown as ResponseStreamEvent - break - } - case 'response.function_call_arguments.delta': - case 'response.function_call_arguments.done': { - yield { - type: e.type, - item_id: e.itemId, - output_index: e.outputIndex, - delta: e.delta, - arguments: e.arguments, - sequence_number: e.sequenceNumber, - } as unknown as ResponseStreamEvent - break - } - case 'error': { - // The base reads `chunk.error.code` directly into a string-typed - // RUN_ERROR.code slot (no `toRunErrorPayload` narrowing on this path), - // so coerce here. Typeof-narrow rather than `!= null` so objects / - // symbols / non-finite numbers fall through to undefined instead of - // shipping `"[object Object]"`. - const code = - typeof e.code === 'string' - ? e.code - : typeof e.code === 'number' && Number.isFinite(e.code) - ? String(e.code) - : undefined - yield { - type: 'error', - message: e.message, - code, - param: e.param, - sequence_number: e.sequenceNumber, - } as unknown as ResponseStreamEvent - break - } - default: { - // Pass through unknown event types with sequenceNumber renamed so - // the base's debug logging still sees a usable `type`. Forwarding - // verbatim is safer than dropping silently — a new event type - // OpenRouter ships shouldn't be discarded by us. - const { sequenceNumber, ...rest } = e - yield { - ...rest, - ...(sequenceNumber !== undefined && { - sequence_number: sequenceNumber, - }), - } as unknown as ResponseStreamEvent - } + if (e.isUnknown && e.raw && typeof e.raw === 'object') { + const raw = e.raw as Record + // Translate the snake_case wire-shape fields we need into camelCase. The + // adapter only consults the fields below; any others are passed through + // verbatim so downstream extraction (e.g. for unknown event types) still + // sees them. 
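+    // e.g. { item_id: 'msg_1', output_index: 0, sequence_number: 3 } reads
+    // back as { itemId: 'msg_1', outputIndex: 0, sequenceNumber: 3 }.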
+    const out: Record<string, unknown> = { ...raw }
+    if ('item_id' in raw) out.itemId = raw.item_id
+    if ('output_index' in raw) out.outputIndex = raw.output_index
+    if ('content_index' in raw) out.contentIndex = raw.content_index
+    if ('sequence_number' in raw) out.sequenceNumber = raw.sequence_number
+    if ('summary_index' in raw) out.summaryIndex = raw.summary_index
+    if (
+      'response' in raw &&
+      raw.response &&
+      typeof raw.response === 'object'
+    ) {
+      out.response = camelCaseResponseShape(raw.response as Record<string, unknown>)
+    }
+    if ('item' in raw && raw.item && typeof raw.item === 'object') {
+      out.item = camelCaseOutputItem(raw.item as Record<string, unknown>)
+    }
+    if ('part' in raw) out.part = raw.part
+    out.type =
+      typeof raw.type === 'string' ? raw.type : (e.type as string) || 'unknown'
+    return out as unknown as NormalizedStreamEvent
+  }
+
+  return event as unknown as NormalizedStreamEvent
+}
+
+/** Translate snake_case keys in a `response` payload to camelCase for the
+ * fields our terminal-event handlers read. Unknown keys pass through. */
+function camelCaseResponseShape(
+  src: Record<string, unknown>,
+): Record<string, unknown> {
+  const out: Record<string, unknown> = { ...src }
+  if ('incomplete_details' in src) out.incompleteDetails = src.incomplete_details
+  // Rebuild `usage` with camelCase token counts (copy into `out`, never
+  // mutate `src`).
+  if (src.usage && typeof src.usage === 'object') {
+    const u = src.usage as Record<string, unknown>
+    out.usage = {
+      ...u,
+      ...(('input_tokens' in u) && { inputTokens: u.input_tokens }),
+      ...(('output_tokens' in u) && { outputTokens: u.output_tokens }),
+      ...(('total_tokens' in u) && { totalTokens: u.total_tokens }),
+    }
+  }
+  if (Array.isArray(src.output)) {
+    out.output = src.output.map((item) =>
+      item && typeof item === 'object'
+        ? 
camelCaseOutputItem(item as Record) + : item, + ) + } + return out } -function toSnakeOutputItem(item: any): any { - if (!item || typeof item !== 'object') return item - switch (item.type) { - case 'function_call': - return { - type: 'function_call', - id: item.id, - call_id: item.callId, - name: item.name, - arguments: item.arguments, - ...(item.status !== undefined && { status: item.status }), - } - case 'message': - return { - ...item, - // content parts already use { type:'output_text', text } — no rename - // needed; refusal has `refusal` either way. - } - default: - return item - } +/** Translate snake_case keys in an output item to camelCase. */ +function camelCaseOutputItem( + src: Record, +): Record { + const out: Record = { ...src } + if ('call_id' in src) out.callId = src.call_id + return out } -function toSnakeContentPart(part: any): any { - if (!part || typeof part !== 'object') return part - // Both output_text and refusal already share the same key names across - // SDKs (`text`, `refusal`, `type`). Pass through. - return part +/** Normalize an `error.code` to the string slot our RUN_ERROR event reads. */ +function normalizeCode(code: unknown): string | undefined { + if (typeof code === 'string') return code + if (typeof code === 'number' && Number.isFinite(code)) return String(code) + return undefined } export function createOpenRouterResponsesText< diff --git a/packages/typescript/ai-openrouter/src/adapters/text.ts b/packages/typescript/ai-openrouter/src/adapters/text.ts index 06c23130a..50ba6a5a2 100644 --- a/packages/typescript/ai-openrouter/src/adapters/text.ts +++ b/packages/typescript/ai-openrouter/src/adapters/text.ts @@ -1,14 +1,13 @@ import { OpenRouter } from '@openrouter/sdk' -import { OpenAICompatibleChatCompletionsTextAdapter } from '@tanstack/ai-openai-compatible' +import { EventType } from '@tanstack/ai' +import { BaseTextAdapter } from '@tanstack/ai/adapters' +import { toRunErrorPayload } from '@tanstack/ai/adapter-internals' +import { generateId, transformNullsToUndefined } from '@tanstack/ai-utils' +import { extractRequestOptions } from '../internal/request-options' +import { makeStructuredOutputCompatible } from '../internal/schema-converter' import { convertToolsToProviderFormat } from '../tools' import { getOpenRouterApiKeyFromEnv } from '../utils' import type { SDKOptions } from '@openrouter/sdk' -import type { - ChatCompletion, - ChatCompletionChunk, - ChatCompletionCreateParamsNonStreaming, - ChatCompletionCreateParamsStreaming, -} from '@tanstack/ai-openai-compatible' import type { ChatContentItems, ChatMessages, @@ -16,13 +15,22 @@ import type { ChatStreamChoice, ChatStreamChunk, } from '@openrouter/sdk/models' +import type { + StructuredOutputOptions, + StructuredOutputResult, +} from '@tanstack/ai/adapters' +import type { + ContentPart, + ModelMessage, + StreamChunk, + TextOptions, +} from '@tanstack/ai' import type { OPENROUTER_CHAT_MODELS, OpenRouterChatModelToolCapabilitiesByName, OpenRouterModelInputModalitiesByName, OpenRouterModelOptionsByName, } from '../model-meta' -import type { ContentPart, ModelMessage, TextOptions } from '@tanstack/ai' import type { ExternalTextProviderOptions } from '../text/text-provider-options' import type { OpenRouterImageMetadata, @@ -50,48 +58,33 @@ type ResolveToolCapabilities = : readonly [] /** - * OpenRouter Text (Chat) Adapter. 
- * - * Why this extends `OpenAICompatibleChatCompletionsTextAdapter` from - * `@tanstack/ai-openai-compatible`: - * - * OpenRouter's `/v1/chat/completions` endpoint implements OpenAI's Chat - * Completions wire format verbatim (it's how OpenRouter routes a single - * client request to GPT, Claude, Gemini, Llama, etc.). Extending the shared - * compatible base means we inherit ~1k LOC of stream accumulation, - * partial-JSON tool-call buffering, AG-UI lifecycle emission, RUN_ERROR - * taxonomy, and structured-output coercion that every OpenAI-compatible - * provider needs — without copy-pasting it. The compatible package is - * deliberately not "the OpenAI adapter"; it is the shared implementation of - * the wire-format protocol that OpenAI, OpenRouter, Groq, Grok, vLLM, - * SGLang, and others all speak. - * - * What's different about OpenRouter (and why we still need overrides): + * OpenRouter Text (Chat) Adapter — standalone implementation that talks to + * OpenRouter's `/v1/chat/completions` endpoint via the `@openrouter/sdk` SDK. * - * The wire format is identical to OpenAI's Chat Completions, but the - * `@openrouter/sdk` SDK exposes a different call shape — `client.chat.send - * ({ chatRequest })` with camelCase fields. We override the two SDK-call - * hooks (`callChatCompletion` / `callChatCompletionStream`) to bridge that, - * plus a small chunk-shape adapter on the way back, and `extractReasoning` - * to surface OpenRouter's reasoning deltas through the shared REASONING_* - * lifecycle. + * The wire format is OpenAI-Chat-Completions-compatible, but the SDK exposes + * the request/response in camelCase TS shapes (`toolCalls`, `finishReason`, + * `maxCompletionTokens`, `responseFormat: { jsonSchema: ... }`, etc.). This + * adapter operates directly in those camelCase shapes — there's no + * snake_case ↔ camelCase round-trip. * - * Behaviour preserved from the pre-migration implementation: + * Behaviour preserved from the pre-decoupling implementation: * - Provider routing surface (`provider`, `models`, `plugins`, `variant`, * `transforms`) passes through `modelOptions`. * - App attribution headers (`httpReferer`, `appTitle`) and base URL * overrides flow through the SDK `SDKOptions` constructor. - * - `RequestAbortedError` from the SDK propagates up — the base's - * `chatStream` wraps unknown errors into a single RUN_ERROR event via - * `toRunErrorPayload`, so the abort lifecycle is unchanged. + * - `RequestAbortedError` from the SDK propagates up — `chatStream` wraps + * unknown errors into a single RUN_ERROR event via `toRunErrorPayload`. * - Model variant suffixing (e.g. `:thinking`, `:free`) via * `modelOptions.variant`. + * - OpenRouter-specific reasoning extraction (`delta.reasoningDetails`). + * - OpenRouter preserves nulls in structured-output results + * (`transformStructuredOutput` is a passthrough). 
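+ *
+ * Minimal usage sketch — the config fields, model id, and env var below are
+ * illustrative, not part of this change; the constructor shape is:
+ *
+ *   const adapter = new OpenRouterTextAdapter(
+ *     { apiKey: process.env.OPENROUTER_API_KEY },
+ *     'openai/gpt-4o-mini',
+ *   )
+ *   for await (const event of adapter.chatStream(options)) {
+ *     // AG-UI lifecycle: RUN_STARTED → TEXT_MESSAGE_* / TOOL_CALL_* /
+ *     // REASONING_* → RUN_FINISHED, or a single RUN_ERROR on failure
+ *   }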
*/ export class OpenRouterTextAdapter< TModel extends OpenRouterTextModels, TToolCapabilities extends ReadonlyArray = ResolveToolCapabilities, -> extends OpenAICompatibleChatCompletionsTextAdapter< +> extends BaseTextAdapter< TModel, ResolveProviderOptions, ResolveInputModalities, @@ -104,77 +97,733 @@ export class OpenRouterTextAdapter< protected orClient: OpenRouter constructor(config: OpenRouterConfig, model: TModel) { - super(model, 'openrouter') + super({}, model) this.orClient = new OpenRouter(config) } - // ──────────────────────────────────────────────────────────────────────── - // SDK call hooks — adapt OpenAI snake_case params to OpenRouter camelCase - // and adapt the returned shape back to the OpenAI structural contract the - // base's processStreamChunks reads. - // ──────────────────────────────────────────────────────────────────────── - - protected override async callChatCompletionStream( - params: ChatCompletionCreateParamsStreaming, - requestOptions: { signal?: AbortSignal | null; headers?: HeadersInit }, - ): Promise> { - const chatRequest = toOpenRouterRequest(params, true) - const stream = (await this.orClient.chat.send( - { chatRequest: { ...chatRequest, stream: true } }, - { - signal: requestOptions.signal ?? undefined, - ...(requestOptions.headers && { headers: requestOptions.headers }), - }, - )) as AsyncIterable - return adaptOpenRouterStreamChunks(stream) + async *chatStream( + options: TextOptions>, + ): AsyncIterable { + // AG-UI lifecycle tracking (mutable state object for ESLint compatibility) + const aguiState = { + runId: generateId(this.name), + threadId: options.threadId ?? generateId(this.name), + messageId: generateId(this.name), + hasEmittedRunStarted: false, + } + + try { + // mapOptionsToRequest can throw (e.g. fail-loud guards in convertMessage + // for empty content or unsupported parts). Keep it inside the try so + // those failures surface as a single RUN_ERROR event, matching every + // other failure mode here — callers iterating chatStream then only need + // one error-handling path. + const chatRequest = this.mapOptionsToRequest(options) + options.logger.request( + `activity=chat provider=${this.name} model=${this.model} messages=${options.messages.length} tools=${options.tools?.length ?? 0} stream=true`, + { provider: this.name, model: this.model }, + ) + const reqOptions = extractRequestOptions(options.request) + const stream = (await this.orClient.chat.send( + { + chatRequest: { + ...chatRequest, + stream: true, + streamOptions: { + ...(chatRequest.streamOptions ?? {}), + includeUsage: true, + }, + }, + }, + { + signal: reqOptions.signal ?? undefined, + ...(reqOptions.headers && { headers: reqOptions.headers }), + }, + )) as AsyncIterable + + yield* this.processStreamChunks(stream, options, aguiState) + } catch (error: unknown) { + // Narrow before logging: raw SDK errors can carry request metadata + // (including auth headers) which we must never surface to user loggers. 
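+      // Only the narrowed payload (message, code, and related fields) is
+      // logged and emitted below — never the raw `error` object itself.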
+ const errorPayload = toRunErrorPayload( + error, + `${this.name}.chatStream failed`, + ) + + // Emit RUN_STARTED if not yet emitted + if (!aguiState.hasEmittedRunStarted) { + aguiState.hasEmittedRunStarted = true + yield { + type: EventType.RUN_STARTED, + runId: aguiState.runId, + threadId: aguiState.threadId, + model: options.model, + timestamp: Date.now(), + } satisfies StreamChunk + } + + // Emit AG-UI RUN_ERROR + yield { + type: EventType.RUN_ERROR, + model: options.model, + timestamp: Date.now(), + message: errorPayload.message, + code: errorPayload.code, + error: errorPayload, + } satisfies StreamChunk + + options.logger.errors(`${this.name}.chatStream fatal`, { + error: errorPayload, + source: `${this.name}.chatStream`, + }) + } } - protected override async callChatCompletion( - params: ChatCompletionCreateParamsNonStreaming, - requestOptions: { signal?: AbortSignal | null; headers?: HeadersInit }, - ): Promise { - const chatRequest = toOpenRouterRequest(params, false) - const response = await this.orClient.chat.send( - { chatRequest: { ...chatRequest, stream: false } }, - { - signal: requestOptions.signal ?? undefined, - ...(requestOptions.headers && { headers: requestOptions.headers }), - }, + /** + * Generate structured output via OpenRouter's `responseFormat: { type: + * 'json_schema', jsonSchema: ... }` (camelCase). Uses stream: false to get + * the complete response in one call. + * + * The outputSchema is already JSON Schema (converted in the ai layer). + * We apply OpenAI-strict transformations for cross-provider compatibility. + */ + async structuredOutput( + options: StructuredOutputOptions>, + ): Promise> { + const { chatOptions, outputSchema } = options + const chatRequest = this.mapOptionsToRequest(chatOptions) + + const jsonSchema = this.makeStructuredOutputCompatible( + outputSchema, + outputSchema.required, ) - // The base only reads `response.choices[0]?.message.content`. The SDK's - // non-streaming response carries that under the same path. - return response as unknown as ChatCompletion + + try { + // Strip streamOptions which is only valid for streaming calls + const { streamOptions: _streamOptions, ...cleanParams } = chatRequest + void _streamOptions + chatOptions.logger.request( + `activity=structuredOutput provider=${this.name} model=${this.model} messages=${chatOptions.messages.length}`, + { provider: this.name, model: this.model }, + ) + const reqOptions = extractRequestOptions(chatOptions.request) + const response = await this.orClient.chat.send( + { + chatRequest: { + ...cleanParams, + stream: false, + responseFormat: { + type: 'json_schema', + jsonSchema: { + name: 'structured_output', + schema: jsonSchema, + strict: true, + }, + }, + }, + }, + { + signal: reqOptions.signal ?? undefined, + ...(reqOptions.headers && { headers: reqOptions.headers }), + }, + ) + + // Extract text content from the response. Fail loud on empty content + // rather than letting it cascade into a JSON-parse error on '' — the + // root cause (the model returned no content for the structured request) + // is then visible in logs. + const message = response.choices[0]?.message + const rawText = + typeof message?.content === 'string' ? message.content : '' + if (rawText.length === 0) { + throw new Error( + `${this.name}.structuredOutput: response contained no content`, + ) + } + + // Parse the JSON response + let parsed: unknown + try { + parsed = JSON.parse(rawText) + } catch { + throw new Error( + `Failed to parse structured output as JSON. 
Content: ${rawText.slice(0, 200)}${rawText.length > 200 ? '...' : ''}`, + ) + } + + // OpenRouter override: pass nulls through unchanged (consumers that + // discriminate "field present but null" from "field absent" rely on + // this). + const transformed = this.transformStructuredOutput(parsed) + + return { + data: transformed, + rawText, + } + } catch (error: unknown) { + // Narrow before logging: raw SDK errors can carry request metadata + // (including auth headers) which we must never surface to user loggers. + chatOptions.logger.errors(`${this.name}.structuredOutput fatal`, { + error: toRunErrorPayload(error, `${this.name}.structuredOutput failed`), + source: `${this.name}.structuredOutput`, + }) + throw error + } } - // ──────────────────────────────────────────────────────────────────────── - // Reasoning hook — surface OpenRouter's `delta.reasoningDetails` through - // the base's REASONING_* lifecycle. - // ──────────────────────────────────────────────────────────────────────── + /** + * Applies provider-specific transformations for structured output compatibility. + */ + protected makeStructuredOutputCompatible( + schema: Record, + originalRequired?: Array, + ): Record { + return makeStructuredOutputCompatible(schema, originalRequired) + } - /** OpenRouter historically returns nulls in structured-output results as - * literal nulls rather than absent fields; preserve that behaviour. */ - protected override transformStructuredOutput(parsed: unknown): unknown { + /** + * Final shaping pass applied to parsed structured-output JSON before it is + * returned to the caller. OpenRouter routes through a wide variety of + * upstream providers; some return `null` as a distinct sentinel ("the field + * exists, the value is null") rather than collapsing it to absent. Stripping + * nulls would erase that distinction, so we passthrough. + * + * `transformNullsToUndefined` is imported for parity with the other + * provider adapters but intentionally not invoked here. + */ + protected transformStructuredOutput(parsed: unknown): unknown { + void transformNullsToUndefined return parsed } - protected override extractReasoning( - chunk: unknown, - ): { text: string } | undefined { - // The chunk-adapter stashes the raw reasoning deltas on a non-standard - // field so we don't need to round-trip them through camelCase ↔ - // snake_case on the OpenAI Chat Completions chunk schema. - const reasoning = (chunk as { _reasoningText?: string })._reasoningText - return reasoning ? { text: reasoning } : undefined + /** + * Processes streamed chunks from OpenRouter's chat-completions API and + * yields AG-UI events. Reads the SDK's camelCase chunk shape directly + * (`delta.toolCalls`, `delta.reasoningDetails`, `chunk.usage.promptTokens`, + * `choice.finishReason`, etc.). + */ + protected async *processStreamChunks( + stream: AsyncIterable, + options: TextOptions>, + aguiState: { + runId: string + threadId: string + messageId: string + hasEmittedRunStarted: boolean + }, + ): AsyncIterable { + let accumulatedContent = '' + let hasEmittedTextMessageStart = false + let lastModel: string | undefined + // Track usage from any chunk that carries it. With + // `streamOptions: { includeUsage: true }` OpenRouter emits a terminal + // chunk whose `choices` is `[]` and only the `usage` field is populated; + // the earlier `finishReason` chunk does NOT include token counts. 
We must + // therefore defer RUN_FINISHED until the iterator is exhausted so we can + // pick up usage from the trailing chunk regardless of arrival order. + let lastUsage: ChatStreamChunk['usage'] | undefined + let pendingFinishReason: ChatStreamChoice['finishReason'] | undefined + + // Track tool calls being streamed (arguments come in chunks). + const toolCallsInProgress = new Map< + number, + { + id: string + name: string + arguments: string + started: boolean // Track if TOOL_CALL_START has been emitted + } + >() + + // Reasoning lifecycle (driven by inline reasoning extraction below). + let reasoningMessageId: string | undefined + let hasClosedReasoning = false + // Legacy STEP_STARTED/STEP_FINISHED pair emitted alongside REASONING_* + // for back-compat with consumers (UI, devtools) that haven't migrated + // to the spec REASONING_* events yet. + let stepId: string | undefined + let accumulatedReasoning = '' + // Track whether ANY tool call lifecycle was actually completed across the + // entire stream. Lets us downgrade a `tool_calls` finishReason to `stop` + // when the upstream signalled tool calls but never produced a complete + // start/end pair — emitting RUN_FINISHED { finishReason: 'tool_calls' } + // with no matching TOOL_CALL_END would leave consumers waiting for tool + // results that never arrive. + let emittedAnyToolCallEnd = false + + try { + for await (const chunk of stream) { + const choiceForLog = chunk.choices[0] + options.logger.provider( + `provider=${this.name} finishReason=${choiceForLog?.finishReason ?? 'none'} hasContent=${!!choiceForLog?.delta.content} hasToolCalls=${!!choiceForLog?.delta.toolCalls} hasUsage=${!!chunk.usage}`, + { provider: this.name, model: chunk.model }, + ) + + // Surface upstream errors so they can be routed to RUN_ERROR. Stream + // chunks may carry an `error` field (provider-side failures that + // happen mid-stream rather than as an SDK throw). + if (chunk.error) { + throw Object.assign( + new Error(chunk.error.message || 'OpenRouter stream error'), + { code: chunk.error.code }, + ) + } + + // Capture usage from any chunk (including the terminal usage-only + // chunk emitted when `streamOptions.includeUsage` is on). + if (chunk.usage) { + lastUsage = chunk.usage + } + if (chunk.model) { + lastModel = chunk.model + } + + // Emit RUN_STARTED on the first chunk of any kind so callers see a + // run lifecycle even on streams that arrive entirely as usage-only + // (no choices). Without this, a usage-first stream would skip + // RUN_STARTED via `if (!choice) continue` below and the post-loop + // synthetic block would also skip RUN_FINISHED (it gates on + // `hasEmittedRunStarted`). + if (!aguiState.hasEmittedRunStarted) { + aguiState.hasEmittedRunStarted = true + yield { + type: EventType.RUN_STARTED, + runId: aguiState.runId, + threadId: aguiState.threadId, + model: chunk.model || options.model, + timestamp: Date.now(), + } satisfies StreamChunk + } + + // Reasoning content (OpenRouter emits this as `delta.reasoningDetails`). + // Run before reading choice/delta so reasoning-only chunks (no `choices`) + // still drive the REASONING_* lifecycle. 
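+        // Illustrative chunk shape (reconstructed from `extractReasoningText`
+        // below — not captured wire traffic):
+        //   { choices: [{ delta: { reasoningDetails: [
+        //       { type: 'reasoning.text', text: '…' },
+        //   ] } }] }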
+ const reasoningText = extractReasoningText(chunk) + if (reasoningText) { + if (!reasoningMessageId) { + reasoningMessageId = generateId(this.name) + stepId = generateId(this.name) + yield { + type: EventType.REASONING_START, + messageId: reasoningMessageId, + model: chunk.model || options.model, + timestamp: Date.now(), + } satisfies StreamChunk + yield { + type: EventType.REASONING_MESSAGE_START, + messageId: reasoningMessageId, + role: 'reasoning' as const, + model: chunk.model || options.model, + timestamp: Date.now(), + } satisfies StreamChunk + // Legacy STEP_STARTED (single emission, paired with the + // STEP_FINISHED below when reasoning closes). + yield { + type: EventType.STEP_STARTED, + stepName: stepId, + stepId, + model: chunk.model || options.model, + timestamp: Date.now(), + stepType: 'thinking', + } satisfies StreamChunk + } + accumulatedReasoning += reasoningText + yield { + type: EventType.REASONING_MESSAGE_CONTENT, + messageId: reasoningMessageId, + delta: reasoningText, + model: chunk.model || options.model, + timestamp: Date.now(), + } satisfies StreamChunk + } + + const choice = chunk.choices[0] + + if (!choice) continue + + const delta = choice.delta + const deltaContent = delta.content + const deltaToolCalls = delta.toolCalls + + // Handle content delta + if (deltaContent) { + // Close reasoning before text starts so consumers see a clean + // REASONING_END before any TEXT_MESSAGE_START. + if (reasoningMessageId && !hasClosedReasoning) { + hasClosedReasoning = true + yield { + type: EventType.REASONING_MESSAGE_END, + messageId: reasoningMessageId, + model: chunk.model || options.model, + timestamp: Date.now(), + } satisfies StreamChunk + yield { + type: EventType.REASONING_END, + messageId: reasoningMessageId, + model: chunk.model || options.model, + timestamp: Date.now(), + } satisfies StreamChunk + if (stepId) { + yield { + type: EventType.STEP_FINISHED, + stepName: stepId, + stepId, + model: chunk.model || options.model, + timestamp: Date.now(), + content: accumulatedReasoning, + } satisfies StreamChunk + } + } + + // Emit TEXT_MESSAGE_START on first text content + if (!hasEmittedTextMessageStart) { + hasEmittedTextMessageStart = true + yield { + type: EventType.TEXT_MESSAGE_START, + messageId: aguiState.messageId, + model: chunk.model || options.model, + timestamp: Date.now(), + role: 'assistant', + } satisfies StreamChunk + } + + accumulatedContent += deltaContent + + // Emit AG-UI TEXT_MESSAGE_CONTENT + yield { + type: EventType.TEXT_MESSAGE_CONTENT, + messageId: aguiState.messageId, + model: chunk.model || options.model, + timestamp: Date.now(), + delta: deltaContent, + content: accumulatedContent, + } satisfies StreamChunk + } + + // Handle tool calls - they come in as deltas (camelCase toolCalls) + if (deltaToolCalls) { + for (const toolCallDelta of deltaToolCalls) { + const index = toolCallDelta.index + + // Initialize or update the tool call in progress + if (!toolCallsInProgress.has(index)) { + toolCallsInProgress.set(index, { + id: toolCallDelta.id || '', + name: toolCallDelta.function?.name || '', + arguments: '', + started: false, + }) + } + + const toolCall = toolCallsInProgress.get(index)! 
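+            // Non-null assertion is safe here: the entry was created just
+            // above when missing.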
+ + // Update with any new data from the delta + if (toolCallDelta.id) { + toolCall.id = toolCallDelta.id + } + if (toolCallDelta.function?.name) { + toolCall.name = toolCallDelta.function.name + } + if (toolCallDelta.function?.arguments) { + toolCall.arguments += toolCallDelta.function.arguments + } + + // Emit TOOL_CALL_START when we have id and name + if (toolCall.id && toolCall.name && !toolCall.started) { + toolCall.started = true + yield { + type: EventType.TOOL_CALL_START, + toolCallId: toolCall.id, + toolCallName: toolCall.name, + toolName: toolCall.name, + model: chunk.model || options.model, + timestamp: Date.now(), + index, + } satisfies StreamChunk + } + + // Emit TOOL_CALL_ARGS for argument deltas + if (toolCallDelta.function?.arguments && toolCall.started) { + yield { + type: EventType.TOOL_CALL_ARGS, + toolCallId: toolCall.id, + model: chunk.model || options.model, + timestamp: Date.now(), + delta: toolCallDelta.function.arguments, + } satisfies StreamChunk + } + } + } + + // Handle finishReason. We DO emit TOOL_CALL_END and TEXT_MESSAGE_END + // here because the corresponding _START events have already fired, + // and tool execution downstream wants to begin as soon as possible. + // RUN_FINISHED is deferred until the iterator is fully exhausted so + // we can capture the trailing usage chunk that arrives AFTER this + // chunk when streamOptions.includeUsage is on. + if (choice.finishReason) { + if ( + choice.finishReason === 'tool_calls' || + toolCallsInProgress.size > 0 + ) { + for (const [, toolCall] of toolCallsInProgress) { + // Skip tool calls that never emitted TOOL_CALL_START — emitting + // a stray TOOL_CALL_END here would violate AG-UI lifecycle + // (END without matching START) for partial deltas where the + // upstream never sent both id and name. + if (!toolCall.started) continue + + // Parse arguments for TOOL_CALL_END. Surface parse failures via + // the logger so a model emitting malformed JSON for tool args + // is debuggable instead of silently invoking the tool with {}. + let parsedInput: unknown = {} + if (toolCall.arguments) { + try { + const parsed: unknown = JSON.parse(toolCall.arguments) + parsedInput = + parsed && typeof parsed === 'object' ? parsed : {} + } catch (parseError) { + options.logger.errors( + `${this.name}.processStreamChunks tool-args JSON parse failed`, + { + error: toRunErrorPayload( + parseError, + `tool ${toolCall.name} (${toolCall.id}) returned malformed JSON arguments`, + ), + source: `${this.name}.processStreamChunks`, + toolCallId: toolCall.id, + toolName: toolCall.name, + rawArguments: toolCall.arguments, + }, + ) + parsedInput = {} + } + } + + // Emit AG-UI TOOL_CALL_END + yield { + type: EventType.TOOL_CALL_END, + toolCallId: toolCall.id, + toolCallName: toolCall.name, + toolName: toolCall.name, + model: chunk.model || options.model, + timestamp: Date.now(), + input: parsedInput, + } satisfies StreamChunk + emittedAnyToolCallEnd = true + } + // Clear tool-call state after emission so a subsequent + // `finishReason: 'stop'` chunk (or the post-loop synthetic + // block) doesn't see lingering entries and misreport the finish. 
+            toolCallsInProgress.clear()
+          }
+
+          // Emit TEXT_MESSAGE_END if we had text content
+          if (hasEmittedTextMessageStart) {
+            yield {
+              type: EventType.TEXT_MESSAGE_END,
+              messageId: aguiState.messageId,
+              model: chunk.model || options.model,
+              timestamp: Date.now(),
+            } satisfies StreamChunk
+            hasEmittedTextMessageStart = false
+          }
+
+          // Remember the upstream finishReason; RUN_FINISHED is emitted at
+          // end-of-stream so we pick up the trailing usage-only chunk too.
+          pendingFinishReason = choice.finishReason
+        }
+      }
+
+      // Emit a single terminal RUN_FINISHED after the iterator is exhausted.
+      if (aguiState.hasEmittedRunStarted) {
+        // Close any started tool calls that never got finishReason.
+        for (const [, toolCall] of toolCallsInProgress) {
+          if (!toolCall.started) continue
+          let parsedInput: unknown = {}
+          if (toolCall.arguments) {
+            try {
+              const parsed: unknown = JSON.parse(toolCall.arguments)
+              parsedInput = parsed && typeof parsed === 'object' ? parsed : {}
+            } catch (parseError) {
+              options.logger.errors(
+                `${this.name}.processStreamChunks tool-args JSON parse failed (drain)`,
+                {
+                  error: toRunErrorPayload(
+                    parseError,
+                    `tool ${toolCall.name} (${toolCall.id}) returned malformed JSON arguments`,
+                  ),
+                  source: `${this.name}.processStreamChunks`,
+                  toolCallId: toolCall.id,
+                  toolName: toolCall.name,
+                  rawArguments: toolCall.arguments,
+                },
+              )
+              parsedInput = {}
+            }
+          }
+          yield {
+            type: EventType.TOOL_CALL_END,
+            toolCallId: toolCall.id,
+            toolCallName: toolCall.name,
+            toolName: toolCall.name,
+            model: lastModel || options.model,
+            timestamp: Date.now(),
+            input: parsedInput,
+          } satisfies StreamChunk
+          emittedAnyToolCallEnd = true
+        }
+        toolCallsInProgress.clear()
+
+        // Make sure the text message lifecycle is closed even on early
+        // termination paths where finishReason never arrives.
+        if (hasEmittedTextMessageStart) {
+          yield {
+            type: EventType.TEXT_MESSAGE_END,
+            messageId: aguiState.messageId,
+            model: lastModel || options.model,
+            timestamp: Date.now(),
+          } satisfies StreamChunk
+        }
+
+        // Close any reasoning lifecycle that text never closed (no text
+        // content arrived, or the stream cut off before text started).
+        if (reasoningMessageId && !hasClosedReasoning) {
+          hasClosedReasoning = true
+          yield {
+            type: EventType.REASONING_MESSAGE_END,
+            messageId: reasoningMessageId,
+            model: lastModel || options.model,
+            timestamp: Date.now(),
+          } satisfies StreamChunk
+          yield {
+            type: EventType.REASONING_END,
+            messageId: reasoningMessageId,
+            model: lastModel || options.model,
+            timestamp: Date.now(),
+          } satisfies StreamChunk
+          if (stepId) {
+            yield {
+              type: EventType.STEP_FINISHED,
+              stepName: stepId,
+              stepId,
+              model: lastModel || options.model,
+              timestamp: Date.now(),
+              content: accumulatedReasoning,
+            } satisfies StreamChunk
+          }
+        }
+
+        // Map upstream finishReason onto AG-UI's narrower vocabulary,
+        // collapsing values that fall outside the AG-UI set to the closest
+        // equivalent. Use `tool_calls` only when a TOOL_CALL_END was actually
+        // emitted. OpenRouter emits 'error' as a finish reason for upstream
+        // errors; collapse it to 'content_filter' (the closest AG-UI
+        // equivalent).
+        const finishReason: 'tool_calls' | 'length' | 'content_filter' | 'stop' =
+          emittedAnyToolCallEnd
+            ? 'tool_calls'
+            : pendingFinishReason === 'tool_calls'
+              ? 'stop'
+              : pendingFinishReason === 'length'
+                ? 'length'
+                : pendingFinishReason === 'content_filter' ||
+                    pendingFinishReason === 'error'
+                  ?
'content_filter' + : 'stop' + + yield { + type: EventType.RUN_FINISHED, + runId: aguiState.runId, + threadId: aguiState.threadId, + model: lastModel || options.model, + timestamp: Date.now(), + usage: lastUsage + ? { + promptTokens: lastUsage.promptTokens || 0, + completionTokens: lastUsage.completionTokens || 0, + totalTokens: lastUsage.totalTokens || 0, + } + : undefined, + finishReason, + } satisfies StreamChunk + } + } catch (error: unknown) { + // Narrow before logging: raw SDK errors can carry request metadata + // (including auth headers) which we must never surface to user loggers. + const errorPayload = toRunErrorPayload( + error, + `${this.name}.processStreamChunks failed`, + ) + options.logger.errors(`${this.name}.processStreamChunks fatal`, { + error: errorPayload, + source: `${this.name}.processStreamChunks`, + }) + + // Emit AG-UI RUN_ERROR + yield { + type: EventType.RUN_ERROR, + model: options.model, + timestamp: Date.now(), + message: errorPayload.message, + code: errorPayload.code, + error: errorPayload, + } satisfies StreamChunk + } } - // ──────────────────────────────────────────────────────────────────────── - // Message conversion — OpenRouter uses camelCase (`toolCallId`, - // `toolCalls`, `imageUrl`, `inputAudio`, `videoUrl`). We override - // `convertMessage` and `convertContentPart` so the base's - // `mapOptionsToRequest` flows through to the SDK without a second pass. - // ──────────────────────────────────────────────────────────────────────── + /** + * Build an OpenRouter `ChatRequest` (camelCase) from `TextOptions`. Applies + * `:variant` model suffixing and routes tools through OpenRouter's + * converter (function tools + branded web_search tool). + */ + protected mapOptionsToRequest( + options: TextOptions>, + ): Omit { + const modelOptions = options.modelOptions as + | (Record & { variant?: string }) + | undefined + const variantSuffix = modelOptions?.variant + ? `:${modelOptions.variant}` + : '' + + const messages: Array = [] + if (options.systemPrompts?.length) { + messages.push({ + role: 'system', + content: options.systemPrompts.join('\n'), + } as ChatMessages) + } + for (const m of options.messages) { + messages.push(this.convertMessage(m)) + } + + const tools = options.tools + ? convertToolsToProviderFormat(options.tools) + : undefined + + // Spread modelOptions first so explicit top-level options (set below) win + // when defined but `undefined` doesn't clobber values the caller set in + // modelOptions. + return { + ...(modelOptions as Record), + model: options.model + variantSuffix, + messages, + ...(options.temperature !== undefined && { + temperature: options.temperature, + }), + ...(options.maxTokens !== undefined && { + maxCompletionTokens: options.maxTokens, + }), + ...(options.topP !== undefined && { topP: options.topP }), + ...(tools && tools.length > 0 && { tools: tools as ChatRequest['tools'] }), + } as Omit + } - protected override convertMessage(message: ModelMessage): any { + /** + * Convert a ModelMessage to OpenRouter's ChatMessages discriminated union + * (camelCase: `toolCallId`, `toolCalls`). + */ + protected convertMessage(message: ModelMessage): ChatMessages { if (message.role === 'tool') { // For structured (Array) tool results, extract the text // content rather than JSON-stringifying the parts — sending the raw @@ -188,7 +837,7 @@ export class OpenRouterTextAdapter< ? 
message.content : this.extractTextContent(message.content), toolCallId: message.toolCallId || '', - } satisfies ChatMessages + } as ChatMessages } if (message.role === 'assistant') { @@ -219,12 +868,12 @@ export class OpenRouterTextAdapter< role: 'assistant', content: hasToolCalls && !textContent ? null : textContent, toolCalls, - } satisfies ChatMessages + } as ChatMessages } - // user — mirror the base's fail-loud behaviour on empty and unsupported - // content. Silently sending an empty string would mask a real caller bug - // and produce a paid request with no input. + // user — fail loud on empty and unsupported content. Silently sending an + // empty string would mask a real caller bug and produce a paid request + // with no input. const contentParts = this.normalizeContent(message.content) if (contentParts.length === 1 && contentParts[0]?.type === 'text') { const text = contentParts[0].content @@ -238,16 +887,16 @@ export class OpenRouterTextAdapter< return { role: 'user', content: text, - } satisfies ChatMessages + } as ChatMessages } const parts: Array = [] for (const part of contentParts) { - const converted = this.convertContentPartToOpenRouter(part) + const converted = this.convertContentPart(part) if (!converted) { throw new Error( `Unsupported content part type for ${this.name}: ${part.type}. ` + - `Override convertContentPartToOpenRouter to handle this type, ` + + `Override convertContentPart to handle this type, ` + `or remove it from the message.`, ) } @@ -263,24 +912,21 @@ export class OpenRouterTextAdapter< return { role: 'user', content: parts, - } satisfies ChatMessages + } as ChatMessages } /** OpenRouter content-part converter (camelCase imageUrl/inputAudio/videoUrl). */ - private convertContentPartToOpenRouter( - part: ContentPart, - ): ChatContentItems | null { + protected convertContentPart(part: ContentPart): ChatContentItems | null { switch (part.type) { case 'text': - return { type: 'text', text: part.content } + return { type: 'text', text: part.content } as ChatContentItems case 'image': { const meta = part.metadata as OpenRouterImageMetadata | undefined const value = part.source.value // Default to `application/octet-stream` when the source didn't // provide a MIME type — interpolating `undefined` into the URI // ("data:undefined;base64,...") produces an invalid data URI the - // API rejects. Mirrors the base's defaulting in - // `OpenAICompatibleChatCompletionsTextAdapter.convertContentPart`. + // API rejects. const imageMime = part.source.mimeType || 'application/octet-stream' const url = part.source.type === 'data' && !value.startsWith('data:') @@ -289,26 +935,31 @@ export class OpenRouterTextAdapter< return { type: 'image_url', imageUrl: { url, detail: meta?.detail || 'auto' }, - } + } as ChatContentItems } case 'audio': // OpenRouter's chat-completions `input_audio` shape carries // `{ data, format }` where `data` is base64 — there's no URL // variant on this wire. For URL-sourced audio, fall back to a // text reference rather than feeding the literal URL into the - // base64 slot (which would either be rejected upstream or - // silently misinterpreted as garbage audio bytes). The - // Responses adapter does have an `input_file` URL variant and - // routes URLs there directly — see `responses-text.ts`. + // base64 slot. The Responses adapter does have an `input_file` + // URL variant and routes URLs there directly — see + // `responses-text.ts`. 
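+        // e.g. a URL-sourced audio part becomes
+        //   { type: 'text', text: '[Audio: https://example.com/clip.mp3]' }
+        // (URL illustrative); data-sourced audio flows into `inputAudio` below.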
if (part.source.type === 'url') { - return { type: 'text', text: `[Audio: ${part.source.value}]` } + return { + type: 'text', + text: `[Audio: ${part.source.value}]`, + } as ChatContentItems } return { type: 'input_audio', inputAudio: { data: part.source.value, format: 'mp3' }, - } + } as ChatContentItems case 'video': - return { type: 'video_url', videoUrl: { url: part.source.value } } + return { + type: 'video_url', + videoUrl: { url: part.source.value }, + } as ChatContentItems case 'document': // The chat-completions SDK has no document_url type. For URL // sources, surface a text reference so the model at least sees @@ -326,202 +977,75 @@ export class OpenRouterTextAdapter< `or pass the document as a URL.`, ) } - return { type: 'text', text: `[Document: ${part.source.value}]` } + return { + type: 'text', + text: `[Document: ${part.source.value}]`, + } as ChatContentItems default: return null } } - /** Override request mapping to apply OpenRouter's `:variant` model suffix - * and route tools through OpenRouter's converter (function tools + - * branded web_search tool). The base writes snake_case fields here; the - * SDK-call hooks convert them just before sending. */ - protected override mapOptionsToRequest( - options: TextOptions, - ): ChatCompletionCreateParamsStreaming { - const modelOptions = options.modelOptions as - | (Record & { variant?: string }) - | undefined - const variantSuffix = modelOptions?.variant - ? `:${modelOptions.variant}` - : '' - - const messages: Array = [] - if (options.systemPrompts?.length) { - messages.push({ - role: 'system', - content: options.systemPrompts.join('\n'), - }) + /** + * Normalizes message content to an array of ContentPart. + * Handles backward compatibility with string content. + */ + protected normalizeContent( + content: string | null | Array, + ): Array { + if (content === null) { + return [] } - for (const m of options.messages) { - messages.push(this.convertMessage(m)) + if (typeof content === 'string') { + return [{ type: 'text', content: content }] } - - const tools = options.tools - ? convertToolsToProviderFormat(options.tools) - : undefined - - // Keep modelOptions first so explicit top-level options (set below) win - // when defined but `undefined` doesn't clobber values the caller set in - // modelOptions. Fixes the same merge-order regression openai/grok handle. - return { - ...(modelOptions as Record), - model: options.model + variantSuffix, - messages, - ...(options.temperature !== undefined && { - temperature: options.temperature, - }), - ...(options.maxTokens !== undefined && { - max_tokens: options.maxTokens, - }), - ...(options.topP !== undefined && { top_p: options.topP }), - ...(tools && tools.length > 0 && { tools }), - stream: true, - } as ChatCompletionCreateParamsStreaming + return content } -} -// ────────────────────────────────────────────────────────────────────────── -// Helpers: convert OpenAI Chat Completions params ↔ OpenRouter ChatRequest -// ────────────────────────────────────────────────────────────────────────── - -/** - * Convert the base's snake_case params shape to the OpenRouter SDK's - * camelCase ChatRequest. Only the fields the base actually writes need - * mapping — modelOptions already flows through in OpenRouter (camelCase) - * shape because the public option types derive from `ChatRequest`. 
- */ -function toOpenRouterRequest( - params: - | ChatCompletionCreateParamsStreaming - | ChatCompletionCreateParamsNonStreaming, - isStreaming: boolean, -): ChatRequest { - const p = params as Record - const out: Record = { ...p } - - // The base injects these snake_case fields. Rewrite to camelCase. - if ('max_tokens' in p) { - out.maxCompletionTokens = p.max_tokens - delete out.max_tokens - } - if ('top_p' in p) { - out.topP = p.top_p - delete out.top_p - } - if ('stream_options' in p) { - const so = p.stream_options as Record | undefined - if (so && typeof so === 'object') { - // The SDK's ChatStreamOptions schema uses camelCase keys and Zod - // strips unknowns at parse time — without this rename the base's - // include_usage flag would be silently dropped and RUN_FINISHED.usage - // would always be undefined for streaming OpenRouter calls. - const { include_usage, ...rest } = so - out.streamOptions = { - ...rest, - ...(include_usage !== undefined && { includeUsage: include_usage }), - } - } else { - out.streamOptions = so + /** + * Extracts text content from a content value that may be string, null, or ContentPart array. + */ + protected extractTextContent( + content: string | null | Array, + ): string { + if (content === null) { + return '' } - delete out.stream_options - } - if ('response_format' in p && p.response_format) { - const rf = p.response_format - out.responseFormat = - rf.type === 'json_schema' && rf.json_schema - ? { - type: 'json_schema', - jsonSchema: rf.json_schema, - } - : rf - delete out.response_format + if (typeof content === 'string') { + return content + } + return content + .filter((p) => p.type === 'text') + .map((p) => p.content) + .join('') } - - // Streaming flag is set per-call by the SDK call hook, not here. - delete out.stream - if (!isStreaming) delete out.streamOptions - - return out as ChatRequest } /** - * Adapt OpenRouter's stream chunks (camelCase, with `reasoningDetails`) into - * the OpenAI Chat Completions chunk shape the base's `processStreamChunks` - * reads. Reasoning text is stashed on `_reasoningText` for the - * `extractReasoning` override to consume. + * Flatten any reasoning deltas in a stream chunk into a single string. + * OpenRouter emits reasoning content via `delta.reasoningDetails`, a union of + * variants including `{ type: 'reasoning.text', text }` and + * `{ type: 'reasoning.summary', summary }`. */ -async function* adaptOpenRouterStreamChunks( - stream: AsyncIterable, -): AsyncIterable { - for await (const chunk of stream) { - // Flatten any reasoning deltas in the chunk into a single string. - let reasoningText = '' - const adaptedChoices = chunk.choices.map((c: ChatStreamChoice) => { - const delta = c.delta as Record - if (Array.isArray(delta.reasoningDetails)) { - for (const d of delta.reasoningDetails) { - if (d?.type === 'reasoning.text' && typeof d.text === 'string') { - reasoningText += d.text - } else if ( - d?.type === 'reasoning.summary' && - typeof d.summary === 'string' - ) { - reasoningText += d.summary - } - } - } - return { - index: (c as { index?: number }).index ?? 0, - delta: { - content: delta.content, - tool_calls: delta.toolCalls?.map((tc: any) => ({ - index: tc.index, - id: tc.id, - type: tc.type ?? 
'function', - function: tc.function, - })), - refusal: delta.refusal, - role: delta.role, - }, - finish_reason: c.finishReason, +function extractReasoningText(chunk: ChatStreamChunk): string { + let text = '' + for (const choice of chunk.choices) { + const details = (choice.delta as { reasoningDetails?: Array }) + .reasoningDetails + if (!Array.isArray(details)) continue + for (const detail of details) { + const d = detail as { type?: string; text?: unknown; summary?: unknown } + if (d.type === 'reasoning.text' && typeof d.text === 'string') { + text += d.text + } else if ( + d.type === 'reasoning.summary' && + typeof d.summary === 'string' + ) { + text += d.summary } - }) - - const usage = (chunk as any).usage - const adapted: any = { - id: chunk.id || '', - object: 'chat.completion.chunk', - created: 0, - model: chunk.model || '', - choices: adaptedChoices, - ...(usage && { - usage: { - prompt_tokens: usage.promptTokens || 0, - completion_tokens: usage.completionTokens || 0, - total_tokens: usage.totalTokens || 0, - }, - }), - // Stash reasoning text for the extractReasoning hook. The base only - // reads documented Chat Completions fields, so an additional field is - // safe to pass alongside. - _reasoningText: reasoningText || undefined, } - - // Surface upstream errors so the base can route them to RUN_ERROR. - // `toRunErrorPayload` handles both string and finite-number codes; any - // other shape (object/array/symbol/NaN) falls through to undefined - // rather than serialising to "[object Object]". - if ((chunk as any).error) { - const errObj = (chunk as any).error - throw Object.assign( - new Error(errObj.message || 'OpenRouter stream error'), - { code: errObj.code }, - ) - } - - yield adapted as ChatCompletionChunk } + return text } export function createOpenRouterText( diff --git a/packages/typescript/ai-openrouter/src/internal/request-options.ts b/packages/typescript/ai-openrouter/src/internal/request-options.ts new file mode 100644 index 000000000..72cb5c0b1 --- /dev/null +++ b/packages/typescript/ai-openrouter/src/internal/request-options.ts @@ -0,0 +1,17 @@ +/** + * Extract `headers` and `signal` from a `Request | RequestInit` for the + * OpenRouter SDK's per-call request-options. `Request` exposes `headers` as a + * `Headers` instance (HeadersInit-compatible) while `RequestInit` exposes + * `HeadersInit` directly — this helper accepts either shape so callers don't + * need to cast. + * + * Always returns an object (possibly empty) rather than `undefined` so test + * assertions that match the second argument shape via `expect.anything()` / + * `expect.objectContaining()` keep working when no request override was set. + */ +export function extractRequestOptions( + request: Request | RequestInit | undefined, +): { headers?: HeadersInit; signal?: AbortSignal | null } { + if (!request) return {} + return { headers: request.headers, signal: request.signal ?? undefined } +} diff --git a/packages/typescript/ai-openrouter/src/internal/responses-tool-converter.ts b/packages/typescript/ai-openrouter/src/internal/responses-tool-converter.ts new file mode 100644 index 000000000..5df88fa41 --- /dev/null +++ b/packages/typescript/ai-openrouter/src/internal/responses-tool-converter.ts @@ -0,0 +1,57 @@ +import { makeStructuredOutputCompatible } from './schema-converter' +import type { JSONSchema, Tool } from '@tanstack/ai' + +/** + * Responses API function tool format. 
+ * + * Matches OpenRouter's `ResponsesRequestToolFunction` shape exactly: + * { type: 'function', name: string, description?: string, parameters: object, strict?: boolean } + */ +export interface ResponsesFunctionTool { + type: 'function' + name: string + description?: string | null + parameters: Record | null + strict: boolean | null +} + +/** + * Converts a standard Tool to the Responses API FunctionTool format. + * + * Tool schemas are already converted to JSON Schema in the ai layer. + * We apply OpenAI-compatible transformations for strict mode: + * - All properties in required array + * - Optional fields made nullable + * - additionalProperties: false + * + * This enables strict mode for all tools automatically. + */ +export function convertFunctionToolToResponsesFormat( + tool: Tool, + schemaConverter: ( + schema: Record, + required: Array, + ) => Record = makeStructuredOutputCompatible, +): ResponsesFunctionTool { + const inputSchema = (tool.inputSchema ?? { + type: 'object', + properties: {}, + required: [], + }) as JSONSchema + + // Shallow-copy the converter's result before mutating — a subclass-supplied + // schemaConverter has no contract requirement to return a fresh object; + // mutating in place could corrupt the caller's tool definition. + const jsonSchema = { + ...schemaConverter(inputSchema, inputSchema.required || []), + } + jsonSchema.additionalProperties = false + + return { + type: 'function', + name: tool.name, + description: tool.description, + parameters: jsonSchema, + strict: true, + } +} diff --git a/packages/typescript/ai-openrouter/src/internal/schema-converter.ts b/packages/typescript/ai-openrouter/src/internal/schema-converter.ts new file mode 100644 index 000000000..3338770e2 --- /dev/null +++ b/packages/typescript/ai-openrouter/src/internal/schema-converter.ts @@ -0,0 +1,90 @@ +/** + * Transform a JSON schema to be compatible with OpenAI-style structured output requirements. + * The base requirements (which OpenRouter inherits because it routes to upstream OpenAI-compatible + * structured-output backends) are: + * - All properties must be in the `required` array + * - Optional fields should have null added to their type union + * - additionalProperties must be false for objects + * + * @param schema - JSON schema to transform + * @param originalRequired - Original required array (to know which fields were optional) + * @returns Transformed schema compatible with strict structured output + */ +export function makeStructuredOutputCompatible( + schema: Record, + originalRequired?: Array, +): Record { + const result = { ...schema } + const required = + originalRequired ?? (Array.isArray(result.required) ? result.required : []) + + if (result.type === 'object' && result.properties) { + const properties = { ...result.properties } + const allPropertyNames = Object.keys(properties) + + for (const propName of allPropertyNames) { + let prop = properties[propName] + const wasOptional = !required.includes(propName) + + // Step 1: Recurse into nested structures + if (prop.type === 'object' && prop.properties) { + prop = makeStructuredOutputCompatible(prop, prop.required || []) + } else if (prop.type === 'array' && prop.items) { + prop = { + ...prop, + items: makeStructuredOutputCompatible( + prop.items, + prop.items.required || [], + ), + } + } else if (prop.anyOf) { + prop = makeStructuredOutputCompatible(prop, prop.required || []) + } else if (prop.oneOf) { + throw new Error( + 'oneOf is not supported in OpenAI structured output schemas. 
Check the supported outputs here: https://platform.openai.com/docs/guides/structured-outputs#supported-types', + ) + } + + // Step 2: Apply null-widening for optional properties (after recursion) + if (wasOptional) { + if (prop.anyOf) { + // For anyOf, add a null variant if not already present + if (!prop.anyOf.some((v: any) => v.type === 'null')) { + prop = { ...prop, anyOf: [...prop.anyOf, { type: 'null' }] } + } + } else if (prop.type && !Array.isArray(prop.type)) { + prop = { ...prop, type: [prop.type, 'null'] } + } else if (Array.isArray(prop.type) && !prop.type.includes('null')) { + prop = { ...prop, type: [...prop.type, 'null'] } + } + } + + properties[propName] = prop + } + + result.properties = properties + result.required = allPropertyNames + result.additionalProperties = false + } + + if (result.type === 'array' && result.items) { + result.items = makeStructuredOutputCompatible( + result.items, + result.items.required || [], + ) + } + + if (result.anyOf && Array.isArray(result.anyOf)) { + result.anyOf = result.anyOf.map((variant) => + makeStructuredOutputCompatible(variant, variant.required || []), + ) + } + + if (result.oneOf) { + throw new Error( + 'oneOf is not supported in OpenAI structured output schemas. Check the supported outputs here: https://platform.openai.com/docs/guides/structured-outputs#supported-types', + ) + } + + return result +} diff --git a/packages/typescript/ai-openrouter/tests/openrouter-responses-adapter.test.ts b/packages/typescript/ai-openrouter/tests/openrouter-responses-adapter.test.ts index ef214b8ac..b30e83c3c 100644 --- a/packages/typescript/ai-openrouter/tests/openrouter-responses-adapter.test.ts +++ b/packages/typescript/ai-openrouter/tests/openrouter-responses-adapter.test.ts @@ -196,7 +196,7 @@ describe('OpenRouter responses adapter — request shape', () => { it('rejects webSearchTool() as RUN_ERROR pointing at the chat adapter', async () => { const adapter = createAdapter() - const ws = webSearchTool() as unknown as Tool + const ws = webSearchTool() as Tool const events: Array = [] for await (const evt of adapter.chatStream({ model: 'openai/gpt-4o-mini' as any, diff --git a/packages/typescript/ai-openai-compatible/CHANGELOG.md b/packages/typescript/openai-base/CHANGELOG.md similarity index 96% rename from packages/typescript/ai-openai-compatible/CHANGELOG.md rename to packages/typescript/openai-base/CHANGELOG.md index 2350bbf06..e6aaa3e75 100644 --- a/packages/typescript/ai-openai-compatible/CHANGELOG.md +++ b/packages/typescript/openai-base/CHANGELOG.md @@ -1,4 +1,4 @@ -# @tanstack/ai-openai-compatible +# @tanstack/openai-base > Renamed from `@tanstack/openai-base` in 0.3.0. See the [README](./README.md) for context. diff --git a/packages/typescript/ai-openai-compatible/README.md b/packages/typescript/openai-base/README.md similarity index 91% rename from packages/typescript/ai-openai-compatible/README.md rename to packages/typescript/openai-base/README.md index 65d2f8db7..d981c5ec6 100644 --- a/packages/typescript/ai-openai-compatible/README.md +++ b/packages/typescript/openai-base/README.md @@ -1,4 +1,4 @@ -# @tanstack/ai-openai-compatible +# @tanstack/openai-base Shared adapters for providers that implement OpenAI's wire-format protocols. @@ -15,8 +15,8 @@ Together, Ollama's compat layer, Fireworks, and others are too. 
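+
+For example, the same Chat Completions request body works against any of
+these providers — only the base URL and API key change (the model id below is
+illustrative):
+
+```ts
+const res = await fetch('https://api.groq.com/openai/v1/chat/completions', {
+  method: 'POST',
+  headers: {
+    'content-type': 'application/json',
+    authorization: `Bearer ${process.env.GROQ_API_KEY}`,
+  },
+  body: JSON.stringify({
+    model: 'llama-3.3-70b-versatile',
+    messages: [{ role: 'user', content: 'Hello' }],
+  }),
+})
+const { choices } = await res.json()
+```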
The package holds two shared base classes: -- `OpenAICompatibleChatCompletionsTextAdapter` -- `OpenAICompatibleResponsesTextAdapter` +- `OpenAIBaseChatCompletionsTextAdapter` +- `OpenAIBaseResponsesTextAdapter` Provider packages (`@tanstack/ai-openai`, `@tanstack/ai-openrouter`, `@tanstack/ai-groq`, `@tanstack/ai-grok`) subclass these and override a small @@ -46,7 +46,7 @@ originally shipped it. ## What goes here vs. in `@tanstack/ai-openai` -| Belongs in `@tanstack/ai-openai-compatible` | Belongs in `@tanstack/ai-openai` | +| Belongs in `@tanstack/openai-base` | Belongs in `@tanstack/ai-openai` | | --------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------- | | Logic for the Chat Completions wire format | OpenAI-specific tool types (`web_search_preview`, `code_interpreter`, `local_shell`, `apply_patch`, `computer_use`, `mcp`, …) | | Logic for the Responses wire format | OpenAI model metadata, model lists, capability matrices | @@ -93,12 +93,12 @@ because no compatible family exists. @tanstack/ai └── BaseTextAdapter (abstract — emits AG-UI events) │ - ├── @tanstack/ai-openai-compatible::OpenAICompatibleChatCompletionsTextAdapter + ├── @tanstack/openai-base::OpenAIBaseChatCompletionsTextAdapter │ ├── ai-openrouter │ ├── ai-groq │ └── ai-grok │ - ├── @tanstack/ai-openai-compatible::OpenAICompatibleResponsesTextAdapter + ├── @tanstack/openai-base::OpenAIBaseResponsesTextAdapter │ ├── ai-openai (primary text adapter — Responses is OpenAI's preferred API) │ └── ai-openrouter (beta — routes to any underlying model) │ @@ -110,7 +110,7 @@ because no compatible family exists. Note: `ai-openai` ships only the Responses-based adapter. For pure Chat Completions use cases without OpenAI-specific behaviour, use `ai-grok` (xAI's API is a direct OpenAI Chat Completions clone) or build a new -provider package extending `OpenAICompatibleChatCompletionsTextAdapter`. +provider package extending `OpenAIBaseChatCompletionsTextAdapter`. 
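+
+A minimal sketch of such a package (the package/provider names, env var, and
+base URL are illustrative; the constructor shape matches the base class):
+
+```ts
+import OpenAI from 'openai'
+import { OpenAIBaseChatCompletionsTextAdapter } from '@tanstack/openai-base'
+
+class AcmeTextAdapter extends OpenAIBaseChatCompletionsTextAdapter<string> {
+  constructor(model: string) {
+    super(
+      model,
+      'acme',
+      new OpenAI({
+        apiKey: process.env.ACME_API_KEY,
+        baseURL: 'https://api.acme.example/v1',
+      }),
+    )
+  }
+  // A real package would also wire model metadata, provider options, and any
+  // protected hook overrides (e.g. `extractReasoning`) here.
+}
+```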
## Direct use diff --git a/packages/typescript/ai-openai-compatible/package.json b/packages/typescript/openai-base/package.json similarity index 71% rename from packages/typescript/ai-openai-compatible/package.json rename to packages/typescript/openai-base/package.json index 7a8dd8520..8318848d9 100644 --- a/packages/typescript/ai-openai-compatible/package.json +++ b/packages/typescript/openai-base/package.json @@ -1,13 +1,13 @@ { - "name": "@tanstack/ai-openai-compatible", + "name": "@tanstack/openai-base", "version": "0.2.1", - "description": "Shared protocol adapters and utilities for OpenAI-compatible providers in TanStack AI (Chat Completions and Responses wire formats)", + "description": "Shared base adapters for OpenAI-SDK-backed providers in TanStack AI (Chat Completions and Responses)", "author": "", "license": "MIT", "repository": { "type": "git", "url": "git+https://github.com/TanStack/ai.git", - "directory": "packages/typescript/ai-openai-compatible" + "directory": "packages/typescript/openai-base" }, "type": "module", "module": "./dist/esm/index.js", @@ -35,13 +35,12 @@ "keywords": [ "ai", "openai", - "openai-compatible", "tanstack", - "adapter", - "protocol" + "adapter" ], "dependencies": { - "@tanstack/ai-utils": "workspace:*" + "@tanstack/ai-utils": "workspace:*", + "openai": "^6.9.1" }, "peerDependencies": { "@tanstack/ai": "workspace:^" @@ -49,6 +48,7 @@ "devDependencies": { "@tanstack/ai": "workspace:*", "@vitest/coverage-v8": "4.0.14", - "vite": "^7.2.7" + "vite": "^7.2.7", + "zod": "^4.3.0" } } diff --git a/packages/typescript/ai-openai-compatible/src/adapters/chat-completions-text.ts b/packages/typescript/openai-base/src/adapters/chat-completions-text.ts similarity index 95% rename from packages/typescript/ai-openai-compatible/src/adapters/chat-completions-text.ts rename to packages/typescript/openai-base/src/adapters/chat-completions-text.ts index 5ef9a4b05..83b73b191 100644 --- a/packages/typescript/ai-openai-compatible/src/adapters/chat-completions-text.ts +++ b/packages/typescript/openai-base/src/adapters/chat-completions-text.ts @@ -5,19 +5,17 @@ import { generateId, transformNullsToUndefined } from '@tanstack/ai-utils' import { extractRequestOptions } from '../utils/request-options' import { makeStructuredOutputCompatible } from '../utils/schema-converter' import { convertToolsToChatCompletionsFormat } from './chat-completions-tool-converter' +import type OpenAI from 'openai' import type { StructuredOutputOptions, StructuredOutputResult, } from '@tanstack/ai/adapters' import type { - ChatCompletion, ChatCompletionChunk, - ChatCompletionChunkChoice, ChatCompletionContentPart, - ChatCompletionCreateParamsNonStreaming, ChatCompletionCreateParamsStreaming, ChatCompletionMessageParam, -} from '../types/chat-completions' +} from 'openai/resources/chat/completions/completions' import type { ContentPart, DefaultMessageMetadataByModality, @@ -28,13 +26,12 @@ import type { } from '@tanstack/ai' /** - * Shared implementation of the OpenAI Chat Completions wire format. Holds the - * stream-accumulator + AG-UI lifecycle logic; subclasses provide the actual - * SDK calls via the abstract `callChatCompletion*` hooks. The base never - * imports the OpenAI SDK at runtime — it only borrows the SDK's TypeScript - * shapes as the canonical reference for the protocol. + * Shared implementation of the OpenAI Chat Completions API. Holds the + * stream-accumulator + AG-UI lifecycle logic and calls the OpenAI SDK + * directly. 
Subclasses (ai-openai, ai-grok, ai-groq) construct an OpenAI + * client with their provider-specific `baseURL` / headers and pass it in. */ -export abstract class OpenAICompatibleChatCompletionsTextAdapter< +export abstract class OpenAIBaseChatCompletionsTextAdapter< TModel extends string, TProviderOptions extends Record = Record, TInputModalities extends ReadonlyArray = ReadonlyArray, @@ -50,10 +47,12 @@ export abstract class OpenAICompatibleChatCompletionsTextAdapter< > { readonly kind = 'text' as const readonly name: string + protected client: OpenAI - constructor(model: TModel, name: string = 'openai-compatible') { + constructor(model: TModel, name: string, client: OpenAI) { super({}, model) this.name = name + this.client = client } async *chatStream( @@ -79,7 +78,7 @@ export abstract class OpenAICompatibleChatCompletionsTextAdapter< `activity=chat provider=${this.name} model=${this.model} messages=${options.messages.length} tools=${options.tools?.length ?? 0} stream=true`, { provider: this.name, model: this.model }, ) - const stream = await this.callChatCompletionStream( + const stream = await this.client.chat.completions.create( { ...requestParams, stream: true, @@ -160,7 +159,7 @@ export abstract class OpenAICompatibleChatCompletionsTextAdapter< `activity=structuredOutput provider=${this.name} model=${this.model} messages=${chatOptions.messages.length}`, { provider: this.name, model: this.model }, ) - const response = await this.callChatCompletion( + const response = await this.client.chat.completions.create( { ...cleanParams, stream: false, @@ -229,27 +228,6 @@ export abstract class OpenAICompatibleChatCompletionsTextAdapter< return makeStructuredOutputCompatible(schema, originalRequired) } - /** - * Performs the non-streaming Chat Completions network call. Subclasses - * implement against whatever SDK or HTTP client they bridge to. Must - * return a value structurally compatible with `ChatCompletion` — the base - * reads documented fields like `response.choices[0].message.content`. - */ - protected abstract callChatCompletion( - params: ChatCompletionCreateParamsNonStreaming, - requestOptions: ReturnType, - ): Promise - - /** - * Performs the streaming Chat Completions network call. Returns an - * `AsyncIterable`; the base's `processStreamChunks` - * only needs structural iteration over chunks. - */ - protected abstract callChatCompletionStream( - params: ChatCompletionCreateParamsStreaming, - requestOptions: ReturnType, - ): Promise> - /** * Extract reasoning content from a stream chunk. Default returns * `undefined` because the OpenAI Chat Completions chunk shape doesn't @@ -297,7 +275,7 @@ export abstract class OpenAICompatibleChatCompletionsTextAdapter< // pick up usage from the trailing chunk regardless of arrival order. 
let lastUsage: ChatCompletionChunk['usage'] | undefined let pendingFinishReason: - | ChatCompletionChunkChoice['finish_reason'] + | ChatCompletionChunk['choices'][number]['finish_reason'] | undefined // Track tool calls being streamed (arguments come in chunks) diff --git a/packages/typescript/ai-openai-compatible/src/adapters/chat-completions-tool-converter.ts b/packages/typescript/openai-base/src/adapters/chat-completions-tool-converter.ts similarity index 80% rename from packages/typescript/ai-openai-compatible/src/adapters/chat-completions-tool-converter.ts rename to packages/typescript/openai-base/src/adapters/chat-completions-tool-converter.ts index 03b910d0b..bb691443d 100644 --- a/packages/typescript/ai-openai-compatible/src/adapters/chat-completions-tool-converter.ts +++ b/packages/typescript/openai-base/src/adapters/chat-completions-tool-converter.ts @@ -1,12 +1,17 @@ import { makeStructuredOutputCompatible } from '../utils/schema-converter' -import type { ChatCompletionTool } from '../types/chat-completions' +import type { ChatCompletionTool } from 'openai/resources/chat/completions/completions' import type { JSONSchema, Tool } from '@tanstack/ai' /** - * Chat Completions API tool format. - * This is distinct from the Responses API tool format. + * Chat Completions API tool format. The SDK's `ChatCompletionTool` is the + * union `ChatCompletionFunctionTool | ChatCompletionCustomTool`; we only + * emit the function variant here. Re-exported as our own alias so consumers + * importing the converter's output don't have to reach into the SDK. */ -export type ChatCompletionFunctionTool = ChatCompletionTool +export type ChatCompletionFunctionTool = Extract< + ChatCompletionTool, + { type: 'function' } +> /** * Converts a standard Tool to OpenAI Chat Completions ChatCompletionTool format. diff --git a/packages/typescript/ai-openai-compatible/src/adapters/responses-text.ts b/packages/typescript/openai-base/src/adapters/responses-text.ts similarity index 97% rename from packages/typescript/ai-openai-compatible/src/adapters/responses-text.ts rename to packages/typescript/openai-base/src/adapters/responses-text.ts index ef3a7fccd..609cf8087 100644 --- a/packages/typescript/ai-openai-compatible/src/adapters/responses-text.ts +++ b/packages/typescript/openai-base/src/adapters/responses-text.ts @@ -5,6 +5,7 @@ import { generateId, transformNullsToUndefined } from '@tanstack/ai-utils' import { extractRequestOptions } from '../utils/request-options' import { makeStructuredOutputCompatible } from '../utils/schema-converter' import { convertToolsToResponsesFormat } from './responses-tool-converter' +import type OpenAI from 'openai' import type { StructuredOutputOptions, StructuredOutputResult, @@ -12,12 +13,10 @@ import type { import type { Response, ResponseCreateParams, - ResponseCreateParamsNonStreaming, - ResponseCreateParamsStreaming, ResponseInput, ResponseInputContent, ResponseStreamEvent, -} from '../types/responses' +} from 'openai/resources/responses/responses' import type { ContentPart, DefaultMessageMetadataByModality, @@ -28,12 +27,12 @@ import type { } from '@tanstack/ai' /** - * Shared implementation of the OpenAI Responses wire format. Holds the - * stream-event accumulator + AG-UI lifecycle; subclasses provide the actual - * SDK calls via the abstract `callResponse*` hooks. The base never imports - * the OpenAI SDK at runtime — it only borrows the SDK's TypeScript shapes. + * Shared implementation of the OpenAI Responses API. 
Holds the stream-event + * accumulator + AG-UI lifecycle and calls the OpenAI SDK directly. Subclasses + * (today: ai-openai) construct an OpenAI client with their provider-specific + * `baseURL` / headers and pass it in. */ -export abstract class OpenAICompatibleResponsesTextAdapter< +export abstract class OpenAIBaseResponsesTextAdapter< TModel extends string, TProviderOptions extends Record = Record, TInputModalities extends ReadonlyArray = ReadonlyArray, @@ -49,10 +48,12 @@ export abstract class OpenAICompatibleResponsesTextAdapter< > { readonly kind = 'text' as const readonly name: string + protected client: OpenAI - constructor(model: TModel, name: string = 'openai-compatible-responses') { + constructor(model: TModel, name: string, client: OpenAI) { super({}, model) this.name = name + this.client = client } async *chatStream( @@ -96,7 +97,7 @@ export abstract class OpenAICompatibleResponsesTextAdapter< `activity=chat provider=${this.name} model=${this.model} messages=${options.messages.length} tools=${options.tools?.length ?? 0} stream=true`, { provider: this.name, model: this.model }, ) - const response = await this.callResponseStream( + const response = await this.client.responses.create( { ...requestParams, stream: true, @@ -186,7 +187,7 @@ export abstract class OpenAICompatibleResponsesTextAdapter< `activity=structuredOutput provider=${this.name} model=${this.model} messages=${chatOptions.messages.length}`, { provider: this.name, model: this.model }, ) - const response = await this.callResponse( + const response = await this.client.responses.create( { ...(cleanParams as Omit), stream: false, @@ -276,27 +277,6 @@ export abstract class OpenAICompatibleResponsesTextAdapter< return transformNullsToUndefined(parsed) } - /** - * Performs the non-streaming Responses API network call. Subclasses - * implement against whatever SDK or HTTP client they bridge to. Must - * return a value structurally compatible with `Response` — the base reads - * documented fields like `response.output[...]`. - */ - protected abstract callResponse( - params: ResponseCreateParamsNonStreaming, - requestOptions: ReturnType, - ): Promise - - /** - * Performs the streaming Responses API network call. Returns an - * `AsyncIterable`; the base's `processStreamChunks` - * only needs structural iteration over events. - */ - protected abstract callResponseStream( - params: ResponseCreateParamsStreaming, - requestOptions: ReturnType, - ): Promise> - /** * Extract text content from a non-streaming Responses API response. * Override this in subclasses for provider-specific response shapes. 
diff --git a/packages/typescript/ai-openai-compatible/src/adapters/responses-tool-converter.ts b/packages/typescript/openai-base/src/adapters/responses-tool-converter.ts similarity index 100% rename from packages/typescript/ai-openai-compatible/src/adapters/responses-tool-converter.ts rename to packages/typescript/openai-base/src/adapters/responses-tool-converter.ts diff --git a/packages/typescript/openai-base/src/index.ts b/packages/typescript/openai-base/src/index.ts new file mode 100644 index 000000000..43e149148 --- /dev/null +++ b/packages/typescript/openai-base/src/index.ts @@ -0,0 +1,14 @@ +export { makeStructuredOutputCompatible } from './utils/schema-converter' +export * from './tools/index' +export { OpenAIBaseChatCompletionsTextAdapter } from './adapters/chat-completions-text' +export { + convertFunctionToolToChatCompletionsFormat, + convertToolsToChatCompletionsFormat, + type ChatCompletionFunctionTool, +} from './adapters/chat-completions-tool-converter' +export { OpenAIBaseResponsesTextAdapter } from './adapters/responses-text' +export { + convertFunctionToolToResponsesFormat, + convertToolsToResponsesFormat, + type ResponsesFunctionTool, +} from './adapters/responses-tool-converter' diff --git a/packages/typescript/ai-openai-compatible/src/tools/apply-patch-tool.ts b/packages/typescript/openai-base/src/tools/apply-patch-tool.ts similarity index 88% rename from packages/typescript/ai-openai-compatible/src/tools/apply-patch-tool.ts rename to packages/typescript/openai-base/src/tools/apply-patch-tool.ts index e26bec30c..d97594d6a 100644 --- a/packages/typescript/ai-openai-compatible/src/tools/apply-patch-tool.ts +++ b/packages/typescript/openai-base/src/tools/apply-patch-tool.ts @@ -1,4 +1,4 @@ -import type { ApplyPatchToolConfig } from '../types/tools' +import type { ApplyPatchTool as ApplyPatchToolConfig } from 'openai/resources/responses/responses' import type { Tool } from '@tanstack/ai' export type { ApplyPatchToolConfig } diff --git a/packages/typescript/ai-openai-compatible/src/tools/code-interpreter-tool.ts b/packages/typescript/openai-base/src/tools/code-interpreter-tool.ts similarity index 88% rename from packages/typescript/ai-openai-compatible/src/tools/code-interpreter-tool.ts rename to packages/typescript/openai-base/src/tools/code-interpreter-tool.ts index 7aa265f87..448680da7 100644 --- a/packages/typescript/ai-openai-compatible/src/tools/code-interpreter-tool.ts +++ b/packages/typescript/openai-base/src/tools/code-interpreter-tool.ts @@ -1,6 +1,8 @@ -import type { CodeInterpreterToolConfig } from '../types/tools' +import type { Tool as SDKTool } from 'openai/resources/responses/responses' import type { Tool } from '@tanstack/ai' +type CodeInterpreterToolConfig = SDKTool.CodeInterpreter + export type { CodeInterpreterToolConfig } /** @deprecated Renamed to `CodeInterpreterToolConfig`. Will be removed in a future release. 
*/ diff --git a/packages/typescript/ai-openai-compatible/src/tools/computer-use-tool.ts b/packages/typescript/openai-base/src/tools/computer-use-tool.ts similarity index 91% rename from packages/typescript/ai-openai-compatible/src/tools/computer-use-tool.ts rename to packages/typescript/openai-base/src/tools/computer-use-tool.ts index d839f3756..c79481a03 100644 --- a/packages/typescript/ai-openai-compatible/src/tools/computer-use-tool.ts +++ b/packages/typescript/openai-base/src/tools/computer-use-tool.ts @@ -1,4 +1,4 @@ -import type { ComputerUseToolConfig } from '../types/tools' +import type { ComputerTool as ComputerUseToolConfig } from 'openai/resources/responses/responses' import type { Tool } from '@tanstack/ai' export type { ComputerUseToolConfig } diff --git a/packages/typescript/ai-openai-compatible/src/tools/custom-tool.ts b/packages/typescript/openai-base/src/tools/custom-tool.ts similarity index 89% rename from packages/typescript/ai-openai-compatible/src/tools/custom-tool.ts rename to packages/typescript/openai-base/src/tools/custom-tool.ts index 818eed4fa..dc6ce2732 100644 --- a/packages/typescript/ai-openai-compatible/src/tools/custom-tool.ts +++ b/packages/typescript/openai-base/src/tools/custom-tool.ts @@ -1,4 +1,4 @@ -import type { CustomToolConfig } from '../types/tools' +import type { CustomTool as CustomToolConfig } from 'openai/resources/responses/responses' import type { Tool } from '@tanstack/ai' export type { CustomToolConfig } diff --git a/packages/typescript/ai-openai-compatible/src/tools/file-search-tool.ts b/packages/typescript/openai-base/src/tools/file-search-tool.ts similarity index 93% rename from packages/typescript/ai-openai-compatible/src/tools/file-search-tool.ts rename to packages/typescript/openai-base/src/tools/file-search-tool.ts index 5749a8329..804b99dac 100644 --- a/packages/typescript/ai-openai-compatible/src/tools/file-search-tool.ts +++ b/packages/typescript/openai-base/src/tools/file-search-tool.ts @@ -1,4 +1,4 @@ -import type { FileSearchToolConfig } from '../types/tools' +import type { FileSearchTool as FileSearchToolConfig } from 'openai/resources/responses/responses' import type { Tool } from '@tanstack/ai' export type { FileSearchToolConfig } diff --git a/packages/typescript/ai-openai-compatible/src/tools/function-tool.ts b/packages/typescript/openai-base/src/tools/function-tool.ts similarity index 92% rename from packages/typescript/ai-openai-compatible/src/tools/function-tool.ts rename to packages/typescript/openai-base/src/tools/function-tool.ts index 28a9e7e88..1f58d229f 100644 --- a/packages/typescript/ai-openai-compatible/src/tools/function-tool.ts +++ b/packages/typescript/openai-base/src/tools/function-tool.ts @@ -1,5 +1,5 @@ import { makeStructuredOutputCompatible } from '../utils/schema-converter' -import type { FunctionToolConfig } from '../types/tools' +import type { FunctionTool as FunctionToolConfig } from 'openai/resources/responses/responses' import type { JSONSchema, Tool } from '@tanstack/ai' export type { FunctionToolConfig } diff --git a/packages/typescript/ai-openai-compatible/src/tools/image-generation-tool.ts b/packages/typescript/openai-base/src/tools/image-generation-tool.ts similarity index 92% rename from packages/typescript/ai-openai-compatible/src/tools/image-generation-tool.ts rename to packages/typescript/openai-base/src/tools/image-generation-tool.ts index bb372fa10..b5150e25d 100644 --- a/packages/typescript/ai-openai-compatible/src/tools/image-generation-tool.ts +++ 
b/packages/typescript/openai-base/src/tools/image-generation-tool.ts @@ -1,6 +1,8 @@ -import type { ImageGenerationToolConfig } from '../types/tools' +import type { Tool as SDKTool } from 'openai/resources/responses/responses' import type { Tool } from '@tanstack/ai' +type ImageGenerationToolConfig = SDKTool.ImageGeneration + export type { ImageGenerationToolConfig } /** @deprecated Renamed to `ImageGenerationToolConfig`. Will be removed in a future release. */ diff --git a/packages/typescript/ai-openai-compatible/src/tools/index.ts b/packages/typescript/openai-base/src/tools/index.ts similarity index 100% rename from packages/typescript/ai-openai-compatible/src/tools/index.ts rename to packages/typescript/openai-base/src/tools/index.ts diff --git a/packages/typescript/ai-openai-compatible/src/tools/local-shell-tool.ts b/packages/typescript/openai-base/src/tools/local-shell-tool.ts similarity index 86% rename from packages/typescript/ai-openai-compatible/src/tools/local-shell-tool.ts rename to packages/typescript/openai-base/src/tools/local-shell-tool.ts index 4c1aa3f57..359c9611f 100644 --- a/packages/typescript/ai-openai-compatible/src/tools/local-shell-tool.ts +++ b/packages/typescript/openai-base/src/tools/local-shell-tool.ts @@ -1,6 +1,8 @@ -import type { LocalShellToolConfig } from '../types/tools' +import type { Tool as SDKTool } from 'openai/resources/responses/responses' import type { Tool } from '@tanstack/ai' +type LocalShellToolConfig = SDKTool.LocalShell + export type { LocalShellToolConfig } /** @deprecated Renamed to `LocalShellToolConfig`. Will be removed in a future release. */ diff --git a/packages/typescript/ai-openai-compatible/src/tools/mcp-tool.ts b/packages/typescript/openai-base/src/tools/mcp-tool.ts similarity index 91% rename from packages/typescript/ai-openai-compatible/src/tools/mcp-tool.ts rename to packages/typescript/openai-base/src/tools/mcp-tool.ts index 5d1be45e1..26d9a59f7 100644 --- a/packages/typescript/ai-openai-compatible/src/tools/mcp-tool.ts +++ b/packages/typescript/openai-base/src/tools/mcp-tool.ts @@ -1,6 +1,8 @@ -import type { MCPToolConfig } from '../types/tools' +import type { Tool as SDKTool } from 'openai/resources/responses/responses' import type { Tool } from '@tanstack/ai' +type MCPToolConfig = SDKTool.Mcp + export type { MCPToolConfig } /** @deprecated Renamed to `MCPToolConfig`. Will be removed in a future release. 
*/ diff --git a/packages/typescript/ai-openai-compatible/src/tools/shell-tool.ts b/packages/typescript/openai-base/src/tools/shell-tool.ts similarity index 87% rename from packages/typescript/ai-openai-compatible/src/tools/shell-tool.ts rename to packages/typescript/openai-base/src/tools/shell-tool.ts index dfa0d8143..6c797d8b7 100644 --- a/packages/typescript/ai-openai-compatible/src/tools/shell-tool.ts +++ b/packages/typescript/openai-base/src/tools/shell-tool.ts @@ -1,4 +1,4 @@ -import type { ShellToolConfig } from '../types/tools' +import type { FunctionShellTool as ShellToolConfig } from 'openai/resources/responses/responses' import type { Tool } from '@tanstack/ai' export type { ShellToolConfig } diff --git a/packages/typescript/ai-openai-compatible/src/tools/tool-choice.ts b/packages/typescript/openai-base/src/tools/tool-choice.ts similarity index 100% rename from packages/typescript/ai-openai-compatible/src/tools/tool-choice.ts rename to packages/typescript/openai-base/src/tools/tool-choice.ts diff --git a/packages/typescript/ai-openai-compatible/src/tools/tool-converter.ts b/packages/typescript/openai-base/src/tools/tool-converter.ts similarity index 100% rename from packages/typescript/ai-openai-compatible/src/tools/tool-converter.ts rename to packages/typescript/openai-base/src/tools/tool-converter.ts diff --git a/packages/typescript/ai-openai-compatible/src/tools/web-search-preview-tool.ts b/packages/typescript/openai-base/src/tools/web-search-preview-tool.ts similarity index 91% rename from packages/typescript/ai-openai-compatible/src/tools/web-search-preview-tool.ts rename to packages/typescript/openai-base/src/tools/web-search-preview-tool.ts index 6310afeb8..18c71525f 100644 --- a/packages/typescript/ai-openai-compatible/src/tools/web-search-preview-tool.ts +++ b/packages/typescript/openai-base/src/tools/web-search-preview-tool.ts @@ -1,4 +1,4 @@ -import type { WebSearchPreviewToolConfig } from '../types/tools' +import type { WebSearchPreviewTool as WebSearchPreviewToolConfig } from 'openai/resources/responses/responses' import type { Tool } from '@tanstack/ai' export type { WebSearchPreviewToolConfig } diff --git a/packages/typescript/ai-openai-compatible/src/tools/web-search-tool.ts b/packages/typescript/openai-base/src/tools/web-search-tool.ts similarity index 92% rename from packages/typescript/ai-openai-compatible/src/tools/web-search-tool.ts rename to packages/typescript/openai-base/src/tools/web-search-tool.ts index 43bfa8a6b..eb84f31d1 100644 --- a/packages/typescript/ai-openai-compatible/src/tools/web-search-tool.ts +++ b/packages/typescript/openai-base/src/tools/web-search-tool.ts @@ -1,4 +1,4 @@ -import type { WebSearchToolConfig } from '../types/tools' +import type { WebSearchTool as WebSearchToolConfig } from 'openai/resources/responses/responses' import type { Tool } from '@tanstack/ai' export type { WebSearchToolConfig } diff --git a/packages/typescript/ai-openai-compatible/src/utils/request-options.ts b/packages/typescript/openai-base/src/utils/request-options.ts similarity index 100% rename from packages/typescript/ai-openai-compatible/src/utils/request-options.ts rename to packages/typescript/openai-base/src/utils/request-options.ts diff --git a/packages/typescript/ai-openai-compatible/src/utils/schema-converter.ts b/packages/typescript/openai-base/src/utils/schema-converter.ts similarity index 100% rename from packages/typescript/ai-openai-compatible/src/utils/schema-converter.ts rename to packages/typescript/openai-base/src/utils/schema-converter.ts diff 
--git a/packages/typescript/ai-openai-compatible/tests/chat-completions-text.test.ts b/packages/typescript/openai-base/tests/chat-completions-text.test.ts similarity index 95% rename from packages/typescript/ai-openai-compatible/tests/chat-completions-text.test.ts rename to packages/typescript/openai-base/tests/chat-completions-text.test.ts index bb8750c2c..00f77bf5a 100644 --- a/packages/typescript/ai-openai-compatible/tests/chat-completions-text.test.ts +++ b/packages/typescript/openai-base/tests/chat-completions-text.test.ts @@ -1,5 +1,6 @@ import { describe, it, expect, vi, afterEach, beforeEach } from 'vitest' -import { OpenAICompatibleChatCompletionsTextAdapter } from '../src/adapters/chat-completions-text' +import { OpenAIBaseChatCompletionsTextAdapter } from '../src/adapters/chat-completions-text' +import type OpenAI from 'openai' import type { StreamChunk, Tool } from '@tanstack/ai' import { resolveDebugOption } from '@tanstack/ai/adapter-internals' @@ -8,28 +9,30 @@ const testLogger = resolveDebugOption(false) // Declare mockCreate at module level let mockCreate: ReturnType +/** Build a stub OpenAI client whose `chat.completions.create` defers to the + * module-level `mockCreate`. Reassigning `mockCreate` inside a test still + * takes effect because the stub looks it up at call time. */ +function makeStubClient(): OpenAI { + return { + chat: { + completions: { + create: (params: unknown, options: unknown) => + mockCreate(params, options), + }, + }, + } as unknown as OpenAI +} + /** - * Concrete test subclass — the base is abstract, so tests need a class that - * implements `callChatCompletion*`. The hooks delegate to `mockCreate` so - * each test can configure the SDK response without spinning up a real client. - * The constructor accepts (config, model, name) so test call sites read - * naturally; config is ignored since the base no longer constructs a client. + * Concrete test subclass. The base now calls the OpenAI SDK directly, so the + * subclass just supplies a stub client whose `chat.completions.create` routes + * into `mockCreate` for per-test setup. Constructor signature mirrors the + * pre-refactor `(config, model, name)` shape so existing call sites read + * naturally; `config` is ignored. 
*/ -class TestChatCompletionsAdapter extends OpenAICompatibleChatCompletionsTextAdapter { - constructor(_config: unknown, model: string, name?: string) { - super(model, name) - } - protected async callChatCompletion( - params: any, - requestOptions: any, - ): Promise { - return mockCreate(params, requestOptions) - } - protected async callChatCompletionStream( - params: any, - requestOptions: any, - ): Promise { - return mockCreate(params, requestOptions) +class TestChatCompletionsAdapter extends OpenAIBaseChatCompletionsTextAdapter { + constructor(_config: unknown, model: string, name = 'openai-compatible') { + super(model, name, makeStubClient()) } } @@ -73,7 +76,7 @@ const weatherTool: Tool = { description: 'Return the forecast for a location', } -describe('OpenAICompatibleChatCompletionsTextAdapter', () => { +describe('OpenAIBaseChatCompletionsTextAdapter', () => { beforeEach(() => { vi.clearAllMocks() }) @@ -818,15 +821,9 @@ describe('OpenAICompatibleChatCompletionsTextAdapter', () => { describe('subclassing', () => { it('allows subclassing with custom name', () => { - class MyProviderAdapter extends OpenAICompatibleChatCompletionsTextAdapter { + class MyProviderAdapter extends OpenAIBaseChatCompletionsTextAdapter { constructor(_apiKey: string, model: string) { - super(model, 'my-provider') - } - protected async callChatCompletion(): Promise { - throw new Error('not called in this test') - } - protected async callChatCompletionStream(): Promise { - throw new Error('not called in this test') + super(model, 'my-provider', makeStubClient()) } } diff --git a/packages/typescript/ai-openai-compatible/tests/mcp-tool.test.ts b/packages/typescript/openai-base/tests/mcp-tool.test.ts similarity index 100% rename from packages/typescript/ai-openai-compatible/tests/mcp-tool.test.ts rename to packages/typescript/openai-base/tests/mcp-tool.test.ts diff --git a/packages/typescript/ai-openai-compatible/tests/responses-text.test.ts b/packages/typescript/openai-base/tests/responses-text.test.ts similarity index 97% rename from packages/typescript/ai-openai-compatible/tests/responses-text.test.ts rename to packages/typescript/openai-base/tests/responses-text.test.ts index fdfa3990a..6a024dedf 100644 --- a/packages/typescript/ai-openai-compatible/tests/responses-text.test.ts +++ b/packages/typescript/openai-base/tests/responses-text.test.ts @@ -1,5 +1,6 @@ import { describe, it, expect, vi, afterEach, beforeEach } from 'vitest' -import { OpenAICompatibleResponsesTextAdapter } from '../src/adapters/responses-text' +import { OpenAIBaseResponsesTextAdapter } from '../src/adapters/responses-text' +import type OpenAI from 'openai' import type { StreamChunk, Tool } from '@tanstack/ai' import { resolveDebugOption } from '@tanstack/ai/adapter-internals' @@ -8,23 +9,26 @@ const testLogger = resolveDebugOption(false) // Declare mockCreate at module level let mockResponsesCreate: ReturnType +/** Build a stub OpenAI client whose `responses.create` defers to the + * module-level `mockResponsesCreate`. Reassigning the mock inside a test + * still takes effect because the stub looks it up at call time. */ +function makeStubClient(): OpenAI { + return { + responses: { + create: (params: unknown, options: unknown) => + mockResponsesCreate(params, options), + }, + } as unknown as OpenAI +} + /** - * Concrete test subclass — the base is abstract, so tests need a class that - * implements `callResponse*`. Hooks delegate to `mockResponsesCreate` so - * each test can configure the SDK response without spinning up a real client. 
+ * Concrete test subclass. The base now calls the OpenAI SDK directly, so the + * subclass just supplies a stub client whose `responses.create` routes into + * `mockResponsesCreate` for per-test setup. */ -class TestResponsesAdapter extends OpenAICompatibleResponsesTextAdapter { - constructor(_config: unknown, model: string, name?: string) { - super(model, name) - } - protected async callResponse(params: any, requestOptions: any): Promise { - return mockResponsesCreate(params, requestOptions) - } - protected async callResponseStream( - params: any, - requestOptions: any, - ): Promise { - return mockResponsesCreate(params, requestOptions) +class TestResponsesAdapter extends OpenAIBaseResponsesTextAdapter { + constructor(_config: unknown, model: string, name = 'openai-compatible-responses') { + super(model, name, makeStubClient()) } } @@ -68,7 +72,7 @@ const weatherTool: Tool = { description: 'Return the forecast for a location', } -describe('OpenAICompatibleResponsesTextAdapter', () => { +describe('OpenAIBaseResponsesTextAdapter', () => { beforeEach(() => { vi.clearAllMocks() }) @@ -1928,15 +1932,9 @@ describe('OpenAICompatibleResponsesTextAdapter', () => { describe('subclassing', () => { it('allows subclassing with custom name', () => { - class MyProviderAdapter extends OpenAICompatibleResponsesTextAdapter { + class MyProviderAdapter extends OpenAIBaseResponsesTextAdapter { constructor(_apiKey: string, model: string) { - super(model, 'my-provider') - } - protected async callResponse(): Promise { - throw new Error('not called in this test') - } - protected async callResponseStream(): Promise { - throw new Error('not called in this test') + super(model, 'my-provider', makeStubClient()) } } diff --git a/packages/typescript/ai-openai-compatible/tests/schema-converter.test.ts b/packages/typescript/openai-base/tests/schema-converter.test.ts similarity index 100% rename from packages/typescript/ai-openai-compatible/tests/schema-converter.test.ts rename to packages/typescript/openai-base/tests/schema-converter.test.ts diff --git a/packages/typescript/ai-openai-compatible/tsconfig.json b/packages/typescript/openai-base/tsconfig.json similarity index 100% rename from packages/typescript/ai-openai-compatible/tsconfig.json rename to packages/typescript/openai-base/tsconfig.json diff --git a/packages/typescript/ai-openai-compatible/vite.config.ts b/packages/typescript/openai-base/vite.config.ts similarity index 100% rename from packages/typescript/ai-openai-compatible/vite.config.ts rename to packages/typescript/openai-base/vite.config.ts diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 3d589e600..f18560fd9 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -1201,12 +1201,12 @@ importers: packages/typescript/ai-grok: dependencies: - '@tanstack/ai-openai-compatible': - specifier: workspace:* - version: link:../ai-openai-compatible '@tanstack/ai-utils': specifier: workspace:* version: link:../ai-utils + '@tanstack/openai-base': + specifier: workspace:* + version: link:../openai-base openai: specifier: ^6.9.1 version: 6.10.0(ws@8.19.0)(zod@4.3.6) @@ -1229,12 +1229,12 @@ importers: packages/typescript/ai-groq: dependencies: - '@tanstack/ai-openai-compatible': - specifier: workspace:* - version: link:../ai-openai-compatible '@tanstack/ai-utils': specifier: workspace:* version: link:../ai-utils + '@tanstack/openai-base': + specifier: workspace:* + version: link:../openai-base openai: specifier: ^6.9.1 version: 6.10.0(ws@8.19.0)(zod@4.3.6) @@ -1315,15 +1315,15 @@ importers: packages/typescript/ai-openai: 
dependencies: - '@tanstack/ai-openai-compatible': - specifier: workspace:* - version: link:../ai-openai-compatible '@tanstack/ai-utils': specifier: workspace:* version: link:../ai-utils + '@tanstack/openai-base': + specifier: workspace:* + version: link:../openai-base openai: specifier: ^6.9.1 - version: 6.10.0(ws@8.19.0)(zod@4.2.1) + version: 6.10.0(ws@8.19.0)(zod@4.3.6) devDependencies: '@tanstack/ai': specifier: workspace:* @@ -1338,33 +1338,14 @@ importers: specifier: ^7.2.7 version: 7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) zod: - specifier: ^4.2.0 - version: 4.2.1 - - packages/typescript/ai-openai-compatible: - dependencies: - '@tanstack/ai-utils': - specifier: workspace:* - version: link:../ai-utils - devDependencies: - '@tanstack/ai': - specifier: workspace:* - version: link:../ai - '@vitest/coverage-v8': - specifier: 4.0.14 - version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) - vite: - specifier: ^7.2.7 - version: 7.3.1(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) + specifier: ^4.3.0 + version: 4.3.6 packages/typescript/ai-openrouter: dependencies: '@openrouter/sdk': specifier: 0.12.14 version: 0.12.14 - '@tanstack/ai-openai-compatible': - specifier: workspace:* - version: link:../ai-openai-compatible '@tanstack/ai-utils': specifier: workspace:* version: link:../ai-utils @@ -1663,6 +1644,28 @@ importers: specifier: ^2.2.10 version: 2.2.12(typescript@5.9.3) + packages/typescript/openai-base: + dependencies: + '@tanstack/ai-utils': + specifier: workspace:* + version: link:../ai-utils + openai: + specifier: ^6.9.1 + version: 6.10.0(ws@8.19.0)(zod@4.3.6) + devDependencies: + '@tanstack/ai': + specifier: workspace:* + version: link:../ai + '@vitest/coverage-v8': + specifier: 4.0.14 + version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + vite: + specifier: ^7.2.7 + version: 7.3.1(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) + zod: + specifier: ^4.3.0 + version: 4.3.6 + packages/typescript/preact-ai-devtools: dependencies: '@tanstack/ai-devtools-core': @@ -21288,11 +21291,6 @@ snapshots: is-docker: 2.2.1 is-wsl: 2.2.0 - openai@6.10.0(ws@8.19.0)(zod@4.2.1): - optionalDependencies: - ws: 8.19.0 - zod: 4.2.1 - openai@6.10.0(ws@8.19.0)(zod@4.3.6): optionalDependencies: ws: 8.19.0 From b5668973857467ea7b0b06f81aea02c79b0a9747 Mon Sep 17 00:00:00 2001 From: "autofix-ci[bot]" <114827586+autofix-ci[bot]@users.noreply.github.com> Date: Wed, 13 May 2026 10:37:07 +0000 Subject: [PATCH 37/49] ci: apply automated fixes --- .../src/adapters/responses-text.ts | 29 +++++++++++-------- .../ai-openrouter/src/adapters/text.ts | 28 ++++++++++-------- packages/typescript/openai-base/README.md | 2 +- .../openai-base/tests/responses-text.test.ts | 6 +++- 4 files changed, 39 insertions(+), 26 deletions(-) diff --git a/packages/typescript/ai-openrouter/src/adapters/responses-text.ts b/packages/typescript/ai-openrouter/src/adapters/responses-text.ts index 1e791747e..3ade50cf5 100644 --- a/packages/typescript/ai-openrouter/src/adapters/responses-text.ts +++ b/packages/typescript/ai-openrouter/src/adapters/responses-text.ts @@ -1261,7 +1261,9 @@ export class 
OpenRouterResponsesTextAdapter< return result } - protected convertContentPartToInput(part: ContentPart): ResponsesInputContent { + protected convertContentPartToInput( + part: ContentPart, + ): ResponsesInputContent { switch (part.type) { case 'text': return { @@ -1410,12 +1412,10 @@ function normalizeStreamEvent(event: StreamEvents): NormalizedStreamEvent { if ('content_index' in raw) out.contentIndex = raw.content_index if ('sequence_number' in raw) out.sequenceNumber = raw.sequence_number if ('summary_index' in raw) out.summaryIndex = raw.summary_index - if ( - 'response' in raw && - raw.response && - typeof raw.response === 'object' - ) { - out.response = camelCaseResponseShape(raw.response as Record) + if ('response' in raw && raw.response && typeof raw.response === 'object') { + out.response = camelCaseResponseShape( + raw.response as Record, + ) } if ('item' in raw && raw.item && typeof raw.item === 'object') { out.item = camelCaseOutputItem(raw.item as Record) @@ -1435,17 +1435,22 @@ function camelCaseResponseShape( src: Record, ): Record { const out: Record = { ...src } - if ('incomplete_details' in src) out.incompleteDetails = src.incomplete_details - if ('input_tokens' in src || 'output_tokens' in src || 'total_tokens' in src) { + if ('incomplete_details' in src) + out.incompleteDetails = src.incomplete_details + if ( + 'input_tokens' in src || + 'output_tokens' in src || + 'total_tokens' in src + ) { // never mutate src; rewrite usage in place if present. } if (src.usage && typeof src.usage === 'object') { const u = src.usage as Record out.usage = { ...u, - ...(('input_tokens' in u) && { inputTokens: u.input_tokens }), - ...(('output_tokens' in u) && { outputTokens: u.output_tokens }), - ...(('total_tokens' in u) && { totalTokens: u.total_tokens }), + ...('input_tokens' in u && { inputTokens: u.input_tokens }), + ...('output_tokens' in u && { outputTokens: u.output_tokens }), + ...('total_tokens' in u && { totalTokens: u.total_tokens }), } } if (Array.isArray(src.output)) { diff --git a/packages/typescript/ai-openrouter/src/adapters/text.ts b/packages/typescript/ai-openrouter/src/adapters/text.ts index 50ba6a5a2..f913a9bfc 100644 --- a/packages/typescript/ai-openrouter/src/adapters/text.ts +++ b/packages/typescript/ai-openrouter/src/adapters/text.ts @@ -719,17 +719,20 @@ export class OpenRouterTextAdapter< // Use `tool_calls` only when a TOOL_CALL_END was actually emitted. // OpenRouter emits 'error' as a finish reason for upstream errors; // collapse to 'content_filter' (the closest AG-UI equivalent). - const finishReason: 'tool_calls' | 'length' | 'content_filter' | 'stop' = - emittedAnyToolCallEnd - ? 'tool_calls' - : pendingFinishReason === 'tool_calls' - ? 'stop' - : pendingFinishReason === 'length' - ? 'length' - : pendingFinishReason === 'content_filter' || - pendingFinishReason === 'error' - ? 'content_filter' - : 'stop' + const finishReason: + | 'tool_calls' + | 'length' + | 'content_filter' + | 'stop' = emittedAnyToolCallEnd + ? 'tool_calls' + : pendingFinishReason === 'tool_calls' + ? 'stop' + : pendingFinishReason === 'length' + ? 'length' + : pendingFinishReason === 'content_filter' || + pendingFinishReason === 'error' + ? 
'content_filter' + : 'stop' yield { type: EventType.RUN_FINISHED, @@ -815,7 +818,8 @@ export class OpenRouterTextAdapter< maxCompletionTokens: options.maxTokens, }), ...(options.topP !== undefined && { topP: options.topP }), - ...(tools && tools.length > 0 && { tools: tools as ChatRequest['tools'] }), + ...(tools && + tools.length > 0 && { tools: tools as ChatRequest['tools'] }), } as Omit } diff --git a/packages/typescript/openai-base/README.md b/packages/typescript/openai-base/README.md index d981c5ec6..897be381a 100644 --- a/packages/typescript/openai-base/README.md +++ b/packages/typescript/openai-base/README.md @@ -46,7 +46,7 @@ originally shipped it. ## What goes here vs. in `@tanstack/ai-openai` -| Belongs in `@tanstack/openai-base` | Belongs in `@tanstack/ai-openai` | +| Belongs in `@tanstack/openai-base` | Belongs in `@tanstack/ai-openai` | | --------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------- | | Logic for the Chat Completions wire format | OpenAI-specific tool types (`web_search_preview`, `code_interpreter`, `local_shell`, `apply_patch`, `computer_use`, `mcp`, …) | | Logic for the Responses wire format | OpenAI model metadata, model lists, capability matrices | diff --git a/packages/typescript/openai-base/tests/responses-text.test.ts b/packages/typescript/openai-base/tests/responses-text.test.ts index 6a024dedf..16613a662 100644 --- a/packages/typescript/openai-base/tests/responses-text.test.ts +++ b/packages/typescript/openai-base/tests/responses-text.test.ts @@ -27,7 +27,11 @@ function makeStubClient(): OpenAI { * `mockResponsesCreate` for per-test setup. */ class TestResponsesAdapter extends OpenAIBaseResponsesTextAdapter { - constructor(_config: unknown, model: string, name = 'openai-compatible-responses') { + constructor( + _config: unknown, + model: string, + name = 'openai-compatible-responses', + ) { super(model, name, makeStubClient()) } } From 20e8397f57d4fa2c510e4eb622ecd3d95c65cd72 Mon Sep 17 00:00:00 2001 From: Tom Beckenham <34339192+tombeckenham@users.noreply.github.com> Date: Wed, 13 May 2026 20:41:45 +1000 Subject: [PATCH 38/49] Corrected package versions --- packages/typescript/ai-grok/package.json | 2 +- packages/typescript/ai-groq/package.json | 3 +-- packages/typescript/ai-openai/package.json | 4 ++-- packages/typescript/openai-base/package.json | 2 +- pnpm-lock.yaml | 10 +++++----- 5 files changed, 10 insertions(+), 11 deletions(-) diff --git a/packages/typescript/ai-grok/package.json b/packages/typescript/ai-grok/package.json index 46f1a7b6f..559b8f740 100644 --- a/packages/typescript/ai-grok/package.json +++ b/packages/typescript/ai-grok/package.json @@ -44,8 +44,8 @@ "adapter" ], "dependencies": { - "@tanstack/openai-base": "workspace:*", "@tanstack/ai-utils": "workspace:*", + "@tanstack/openai-base": "workspace:*", "openai": "^6.9.1" }, "devDependencies": { diff --git a/packages/typescript/ai-groq/package.json b/packages/typescript/ai-groq/package.json index 408e44e17..dee3e860e 100644 --- a/packages/typescript/ai-groq/package.json +++ b/packages/typescript/ai-groq/package.json @@ -43,7 +43,6 @@ "adapter" ], "devDependencies": { - "@tanstack/ai": "workspace:*", "@vitest/coverage-v8": "4.0.14", "vite": "^7.2.7" }, @@ -52,8 +51,8 @@ "zod": "^4.0.0" }, "dependencies": { - "@tanstack/openai-base": "workspace:*", "@tanstack/ai-utils": "workspace:*", + "@tanstack/openai-base": 
"workspace:*", "openai": "^6.9.1" } } diff --git a/packages/typescript/ai-openai/package.json b/packages/typescript/ai-openai/package.json index d81885287..9a3a24895 100644 --- a/packages/typescript/ai-openai/package.json +++ b/packages/typescript/ai-openai/package.json @@ -44,8 +44,8 @@ "adapter" ], "dependencies": { - "@tanstack/openai-base": "workspace:*", "@tanstack/ai-utils": "workspace:*", + "@tanstack/openai-base": "workspace:*", "openai": "^6.9.1" }, "peerDependencies": { @@ -58,6 +58,6 @@ "@tanstack/ai-client": "workspace:*", "@vitest/coverage-v8": "4.0.14", "vite": "^7.2.7", - "zod": "^4.3.0" + "zod": "^4.2.0" } } diff --git a/packages/typescript/openai-base/package.json b/packages/typescript/openai-base/package.json index 8318848d9..e60b54b07 100644 --- a/packages/typescript/openai-base/package.json +++ b/packages/typescript/openai-base/package.json @@ -49,6 +49,6 @@ "@tanstack/ai": "workspace:*", "@vitest/coverage-v8": "4.0.14", "vite": "^7.2.7", - "zod": "^4.3.0" + "zod": "^4.2.0" } } diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index f18560fd9..241c2f9ca 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -1229,6 +1229,9 @@ importers: packages/typescript/ai-groq: dependencies: + '@tanstack/ai': + specifier: workspace:^ + version: link:../ai '@tanstack/ai-utils': specifier: workspace:* version: link:../ai-utils @@ -1242,9 +1245,6 @@ importers: specifier: ^4.0.0 version: 4.3.6 devDependencies: - '@tanstack/ai': - specifier: workspace:* - version: link:../ai '@vitest/coverage-v8': specifier: 4.0.14 version: 4.0.14(vitest@4.0.14(@opentelemetry/api@1.9.1)(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.9))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) @@ -1338,7 +1338,7 @@ importers: specifier: ^7.2.7 version: 7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) zod: - specifier: ^4.3.0 + specifier: ^4.2.0 version: 4.3.6 packages/typescript/ai-openrouter: @@ -1663,7 +1663,7 @@ importers: specifier: ^7.2.7 version: 7.3.1(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) zod: - specifier: ^4.3.0 + specifier: ^4.2.0 version: 4.3.6 packages/typescript/preact-ai-devtools: From 44db9257825960000d05baabf1588e1f07645870 Mon Sep 17 00:00:00 2001 From: Alem Tuzlak Date: Wed, 13 May 2026 19:49:09 +0200 Subject: [PATCH 39/49] =?UTF-8?q?refactor(adapters):=20address=20PR=20revi?= =?UTF-8?q?ew=20=E2=80=94=20jsdoc,=20casts,=20zod,=20finishReason?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Apply review feedback from PR #545: - Restore JSDoc removed during the openai-base media/summarize refactor (26 blocks across ai-openai, ai-grok, ai-anthropic, ai-gemini, ai-openrouter adapters). Only restore where the documented symbol still exists post-refactor; skip JSDoc tied to removed classes / provider- options interfaces. - Drop `as` casts on stream chunks in ai-openrouter (responses-text.ts output_item.{added,done} handlers, response.completed handler) by typing `NormalizedStreamEvent.item` as the SDK's `OutputItems` discriminated union and `.response` as `Partial`. Discriminated- union narrowing now works without bypass. - Drop request-builder casts in ai-openrouter/{text,responses-text}.ts: `as InputsItem`, `as ChatMessages`, `as ChatContentItems`, `as ResponsesRequest['tools' | 'text' | 'input']`, `as Omit`, `as Record` on modelOptions spread. 
- Drop SDK-return casts `as AsyncIterable` / `as AsyncIterable` — `EventStream` already is `AsyncIterable`. - Drop `tool as Tool` in the webSearchTool guard — `Tool` is assignable to `Tool` directly. - Remove `'function_call'` from RunFinishedEvent.finishReason union. Normalize OpenAI's legacy v1 function_call termination to `tool_calls` inside chat-completions-text — the SDK-vocabulary value no longer leaks into the public AG-UI type. - Drop redundant `satisfies StreamChunk` from yield/array-element sites across adapters and ai-client tests. The contextual type from `AsyncIterable` / `Array` already validates every emission; the suffix added no extra safety. - Annotate the `ev.*` builders in ai/tests/test-utils.ts with explicit return types (RunStartedEvent, TextMessageStartEvent, …) instead of `satisfies StreamChunk`. Each builder now returns the precise event variant rather than the wide union. - Drop zod from ai-openrouter peerDependencies — no source imports zod; it's only used in tests, where it stays as a devDep. (OpenRouter SDK already declares zod as a regular dep, so transitive consumers aren't affected.) - Clean up mid-PR rename leftovers: stale "openai-compatible adapters" jsdoc in ai-openai/utils/client.ts, and `'openai-compatible'` / `'openai-compatible-responses'` default-name strings in the openai-base test subclasses (now `openai-base` / `openai-base-responses`). --- .../ai-anthropic/src/adapters/summarize.ts | 17 +- .../ai-anthropic/src/adapters/text.ts | 52 +++--- .../ai-client/tests/chat-client-abort.test.ts | 24 +-- .../ai-client/tests/chat-client.test.ts | 8 +- .../tests/connection-adapters.test.ts | 12 +- .../ai-client/tests/generation-client.test.ts | 44 ++--- .../tests/video-generation-client.test.ts | 62 +++--- .../ai-gemini/src/adapters/summarize.ts | 3 + .../typescript/ai-gemini/src/adapters/text.ts | 46 ++--- .../typescript/ai-grok/src/adapters/image.ts | 53 +++++- .../ai-grok/src/adapters/summarize.ts | 29 ++- .../typescript/ai-ollama/src/adapters/text.ts | 34 ++-- .../ai-openai/src/adapters/image.ts | 48 +++++ .../ai-openai/src/adapters/summarize.ts | 28 ++- .../ai-openai/src/adapters/transcription.ts | 62 +++++- .../typescript/ai-openai/src/adapters/tts.ts | 54 +++++- .../ai-openai/src/adapters/video.ts | 65 ++++++- .../typescript/ai-openai/src/utils/client.ts | 4 +- .../typescript/ai-openrouter/package.json | 3 +- .../src/adapters/responses-text.ts | 133 ++++++------- .../ai-openrouter/src/adapters/summarize.ts | 26 ++- .../ai-openrouter/src/adapters/text.ts | 82 ++++---- .../activities/stream-generation-result.ts | 8 +- packages/typescript/ai/src/types.ts | 8 +- packages/typescript/ai/tests/test-utils.ts | 176 +++++++++--------- .../src/adapters/chat-completions-text.ts | 67 ++++--- .../src/adapters/responses-text.ts | 62 +++--- .../tests/chat-completions-text.test.ts | 4 +- .../openai-base/tests/responses-text.test.ts | 8 +- 29 files changed, 780 insertions(+), 442 deletions(-) diff --git a/packages/typescript/ai-anthropic/src/adapters/summarize.ts b/packages/typescript/ai-anthropic/src/adapters/summarize.ts index b9febd09f..917176f44 100644 --- a/packages/typescript/ai-anthropic/src/adapters/summarize.ts +++ b/packages/typescript/ai-anthropic/src/adapters/summarize.ts @@ -5,12 +5,22 @@ import type { InferTextProviderOptions } from '@tanstack/ai/adapters' import type { ANTHROPIC_MODELS } from '../model-meta' import type { AnthropicClientConfig } from '../utils' +/** + * Configuration for Anthropic summarize adapter + */ export interface 
AnthropicSummarizeConfig extends AnthropicClientConfig {} +/** Model type for Anthropic summarization */ export type AnthropicSummarizeModel = (typeof ANTHROPIC_MODELS)[number] /** * Creates an Anthropic summarize adapter with explicit API key. + * Type resolution happens here at the call site. + * + * @param model - The model name (e.g., 'claude-sonnet-4-5', 'claude-3-5-haiku-latest') + * @param apiKey - Your Anthropic API key + * @param config - Optional additional configuration + * @returns Configured Anthropic summarize adapter instance with resolved types * * @example * ```typescript @@ -35,7 +45,12 @@ export function createAnthropicSummarize< } /** - * Creates an Anthropic summarize adapter with API key from `ANTHROPIC_API_KEY`. + * Creates an Anthropic summarize adapter with automatic API key detection. + * Type resolution happens here at the call site. + * + * @param model - The model name (e.g., 'claude-sonnet-4-5', 'claude-3-5-haiku-latest') + * @param config - Optional configuration (excluding apiKey which is auto-detected) + * @returns Configured Anthropic summarize adapter instance with resolved types * * @example * ```typescript diff --git a/packages/typescript/ai-anthropic/src/adapters/text.ts b/packages/typescript/ai-anthropic/src/adapters/text.ts index 92d19b8f0..7cea1d023 100644 --- a/packages/typescript/ai-anthropic/src/adapters/text.ts +++ b/packages/typescript/ai-anthropic/src/adapters/text.ts @@ -183,7 +183,7 @@ export class AnthropicTextAdapter< message: err.message || 'Unknown error occurred', code: err.code || String(err.status), }, - } satisfies StreamChunk + } } } @@ -658,7 +658,7 @@ export class AnthropicTextAdapter< threadId, model, timestamp: Date.now(), - } satisfies StreamChunk + } } if (event.type === 'content_block_start') { @@ -684,14 +684,14 @@ export class AnthropicTextAdapter< messageId: reasoningMessageId, model, timestamp: Date.now(), - } satisfies StreamChunk + } yield { type: EventType.REASONING_MESSAGE_START, messageId: reasoningMessageId, role: 'reasoning' as const, model, timestamp: Date.now(), - } satisfies StreamChunk + } // Legacy STEP events (kept during transition) yield { @@ -701,7 +701,7 @@ export class AnthropicTextAdapter< model, timestamp: Date.now(), stepType: 'thinking', - } satisfies StreamChunk + } } } else if (event.type === 'content_block_delta') { if (event.delta.type === 'text_delta') { @@ -713,13 +713,13 @@ export class AnthropicTextAdapter< messageId: reasoningMessageId, model, timestamp: Date.now(), - } satisfies StreamChunk + } yield { type: EventType.REASONING_END, messageId: reasoningMessageId, model, timestamp: Date.now(), - } satisfies StreamChunk + } } // Emit TEXT_MESSAGE_START on first text content @@ -731,7 +731,7 @@ export class AnthropicTextAdapter< model, timestamp: Date.now(), role: 'assistant', - } satisfies StreamChunk + } } const delta = event.delta.text @@ -743,7 +743,7 @@ export class AnthropicTextAdapter< timestamp: Date.now(), delta, content: accumulatedContent, - } satisfies StreamChunk + } } else if (event.delta.type === 'thinking_delta') { const delta = event.delta.thinking accumulatedThinking += delta @@ -755,7 +755,7 @@ export class AnthropicTextAdapter< delta, model, timestamp: Date.now(), - } satisfies StreamChunk + } // Legacy STEP event yield { @@ -766,7 +766,7 @@ export class AnthropicTextAdapter< timestamp: Date.now(), delta, content: accumulatedThinking, - } satisfies StreamChunk + } } else if ( (event.delta as { type: string }).type === 'signature_delta' ) { @@ -786,7 +786,7 @@ export class 
AnthropicTextAdapter< model, timestamp: Date.now(), index: currentToolIndex, - } satisfies StreamChunk + } } existing.input += event.delta.partial_json @@ -798,7 +798,7 @@ export class AnthropicTextAdapter< timestamp: Date.now(), delta: event.delta.partial_json, args: existing.input, - } satisfies StreamChunk + } } } } else if (event.type === 'content_block_stop') { @@ -814,7 +814,7 @@ export class AnthropicTextAdapter< delta: '', content: accumulatedThinking, signature: accumulatedSignature, - } satisfies StreamChunk + } } } else if (currentBlockType === 'tool_use') { const existing = toolCallsMap.get(currentToolIndex) @@ -830,7 +830,7 @@ export class AnthropicTextAdapter< model, timestamp: Date.now(), index: currentToolIndex, - } satisfies StreamChunk + } } // Emit TOOL_CALL_END @@ -850,7 +850,7 @@ export class AnthropicTextAdapter< model, timestamp: Date.now(), input: parsedInput, - } satisfies StreamChunk + } // Reset so a new TEXT_MESSAGE_START is emitted if text follows tool calls hasEmittedTextMessageStart = false @@ -863,7 +863,7 @@ export class AnthropicTextAdapter< messageId, model, timestamp: Date.now(), - } satisfies StreamChunk + } } } currentBlockType = null @@ -876,13 +876,13 @@ export class AnthropicTextAdapter< messageId: reasoningMessageId, model, timestamp: Date.now(), - } satisfies StreamChunk + } yield { type: EventType.REASONING_END, messageId: reasoningMessageId, model, timestamp: Date.now(), - } satisfies StreamChunk + } } // Only emit RUN_FINISHED from message_stop if message_delta didn't already emit one. @@ -896,7 +896,7 @@ export class AnthropicTextAdapter< model, timestamp: Date.now(), finishReason: 'stop', - } satisfies StreamChunk + } } } else if (event.type === 'message_delta') { if (event.delta.stop_reason) { @@ -910,13 +910,13 @@ export class AnthropicTextAdapter< messageId: reasoningMessageId, model, timestamp: Date.now(), - } satisfies StreamChunk + } yield { type: EventType.REASONING_END, messageId: reasoningMessageId, model, timestamp: Date.now(), - } satisfies StreamChunk + } } switch (event.delta.stop_reason) { @@ -935,7 +935,7 @@ export class AnthropicTextAdapter< (event.usage.input_tokens || 0) + (event.usage.output_tokens || 0), }, - } satisfies StreamChunk + } break } case 'max_tokens': { @@ -951,7 +951,7 @@ export class AnthropicTextAdapter< 'The response was cut off because the maximum token limit was reached.', code: 'max_tokens', }, - } satisfies StreamChunk + } break } default: { @@ -969,7 +969,7 @@ export class AnthropicTextAdapter< (event.usage.input_tokens || 0) + (event.usage.output_tokens || 0), }, - } satisfies StreamChunk + } } } } @@ -992,7 +992,7 @@ export class AnthropicTextAdapter< message: err.message || 'Unknown error occurred', code: err.code || String(err.status), }, - } satisfies StreamChunk + } } } } diff --git a/packages/typescript/ai-client/tests/chat-client-abort.test.ts b/packages/typescript/ai-client/tests/chat-client-abort.test.ts index 882d6d471..7cf7bab18 100644 --- a/packages/typescript/ai-client/tests/chat-client-abort.test.ts +++ b/packages/typescript/ai-client/tests/chat-client-abort.test.ts @@ -24,7 +24,7 @@ describe('ChatClient - Abort Signal Handling', () => { timestamp: Date.now(), delta: 'Hello', content: 'Hello', - } satisfies StreamChunk + } yield { type: EventType.TEXT_MESSAGE_CONTENT, messageId: '1', @@ -32,7 +32,7 @@ describe('ChatClient - Abort Signal Handling', () => { timestamp: Date.now(), delta: ' World', content: 'Hello World', - } satisfies StreamChunk + } yield { type: EventType.RUN_FINISHED, 
runId: 'run-1', @@ -40,7 +40,7 @@ describe('ChatClient - Abort Signal Handling', () => { model: 'test', timestamp: Date.now(), finishReason: 'stop', - } satisfies StreamChunk + } }, } }) @@ -87,7 +87,7 @@ describe('ChatClient - Abort Signal Handling', () => { timestamp: Date.now(), delta: 'Hello', content: 'Hello', - } satisfies StreamChunk + } // Simulate long-running stream await new Promise((resolve) => setTimeout(resolve, 100)) yield { @@ -97,7 +97,7 @@ describe('ChatClient - Abort Signal Handling', () => { timestamp: Date.now(), delta: ' World', content: 'Hello World', - } satisfies StreamChunk + } } catch (err) { // Abort errors are expected if (err instanceof Error && err.name === 'AbortError') { @@ -142,7 +142,7 @@ describe('ChatClient - Abort Signal Handling', () => { timestamp: Date.now(), delta: 'Hello', content: 'Hello', - } satisfies StreamChunk + } yieldedChunks++ if (abortSignal?.aborted) { @@ -156,7 +156,7 @@ describe('ChatClient - Abort Signal Handling', () => { timestamp: Date.now(), delta: ' World', content: 'Hello World', - } satisfies StreamChunk + } yieldedChunks++ }, } @@ -199,7 +199,7 @@ describe('ChatClient - Abort Signal Handling', () => { timestamp: Date.now(), delta: 'Hello', content: 'Hello', - } satisfies StreamChunk + } if (abortSignal?.aborted) { return @@ -239,7 +239,7 @@ describe('ChatClient - Abort Signal Handling', () => { timestamp: Date.now(), delta: 'Hello', content: 'Hello', - } satisfies StreamChunk + } await new Promise((resolve) => setTimeout(resolve, 50)) }, } @@ -281,7 +281,7 @@ describe('ChatClient - Abort Signal Handling', () => { model: 'test', timestamp: Date.now(), finishReason: 'stop', - } satisfies StreamChunk + } }, } @@ -325,7 +325,7 @@ describe('ChatClient - Abort Signal Handling', () => { model: 'test', timestamp: Date.now(), finishReason: 'stop', - } satisfies StreamChunk + } }, } @@ -374,7 +374,7 @@ describe('ChatClient - Abort Signal Handling', () => { model: 'test', timestamp: Date.now(), finishReason: 'stop', - } satisfies StreamChunk + } }, } diff --git a/packages/typescript/ai-client/tests/chat-client.test.ts b/packages/typescript/ai-client/tests/chat-client.test.ts index ff933bd01..cc2b2ed75 100644 --- a/packages/typescript/ai-client/tests/chat-client.test.ts +++ b/packages/typescript/ai-client/tests/chat-client.test.ts @@ -474,7 +474,7 @@ describe('ChatClient', () => { threadId: 'thread-1', model: 'test', timestamp: Date.now(), - } satisfies StreamChunk + } } await new Promise((resolve) => { const onAbort = () => resolve() @@ -518,7 +518,7 @@ describe('ChatClient', () => { threadId: 'thread-1', model: 'test', timestamp: Date.now(), - } satisfies StreamChunk + } } await new Promise((resolve) => { const onAbort = () => resolve() @@ -820,7 +820,7 @@ describe('ChatClient', () => { threadId: 'thread-1', model: 'test', timestamp: Date.now(), - } satisfies StreamChunk + } await new Promise((resolve) => setTimeout(resolve, 10)) } throw new Error('subscription failed') @@ -2059,7 +2059,7 @@ describe('ChatClient', () => { model: 'test', timestamp: Date.now(), finishReason: 'tool_calls' as const, - } satisfies StreamChunk + } } else if (streamCount === 3) { // Third stream (after second approval): final text response const chunks = createTextChunks('All done!') diff --git a/packages/typescript/ai-client/tests/connection-adapters.test.ts b/packages/typescript/ai-client/tests/connection-adapters.test.ts index 263f3600d..f1511aa5f 100644 --- a/packages/typescript/ai-client/tests/connection-adapters.test.ts +++ 
b/packages/typescript/ai-client/tests/connection-adapters.test.ts @@ -793,7 +793,7 @@ describe('connection-adapters', () => { timestamp: Date.now(), delta: 'Hello', content: 'Hello', - } satisfies StreamChunk + } }) const adapter = stream(streamFactory) @@ -818,7 +818,7 @@ describe('connection-adapters', () => { model: 'test', timestamp: Date.now(), finishReason: 'stop', - } satisfies StreamChunk + } }) const adapter = stream(streamFactory) @@ -879,7 +879,7 @@ describe('connection-adapters', () => { timestamp: Date.now(), delta: 'Hi', content: 'Hi', - } satisfies StreamChunk + } }) const adapter = normalizeConnectionAdapter(base) @@ -938,7 +938,7 @@ describe('connection-adapters', () => { error: { message: 'already failed', }, - } satisfies StreamChunk + } throw new Error('connect exploded') }) @@ -978,7 +978,7 @@ describe('connection-adapters', () => { timestamp: Date.now(), delta: 'Hello', content: 'Hello', - } satisfies StreamChunk + } }) const adapter = rpcStream(rpcCall) @@ -1007,7 +1007,7 @@ describe('connection-adapters', () => { model: 'test', timestamp: Date.now(), finishReason: 'stop', - } satisfies StreamChunk + } }) const adapter = rpcStream(rpcCall) diff --git a/packages/typescript/ai-client/tests/generation-client.test.ts b/packages/typescript/ai-client/tests/generation-client.test.ts index 4ce69353c..895e9f067 100644 --- a/packages/typescript/ai-client/tests/generation-client.test.ts +++ b/packages/typescript/ai-client/tests/generation-client.test.ts @@ -135,20 +135,20 @@ describe('GenerationClient', () => { runId: 'run-1', threadId: 'thread-1', timestamp: Date.now(), - } satisfies StreamChunk, + }, { type: EventType.CUSTOM, name: 'generation:result', value: mockResult, timestamp: Date.now(), - } satisfies StreamChunk, + }, { type: EventType.RUN_FINISHED, runId: 'run-1', threadId: 'thread-1', finishReason: 'stop', timestamp: Date.now(), - } satisfies StreamChunk, + }, ]) const client = new GenerationClient({ @@ -172,14 +172,14 @@ describe('GenerationClient', () => { runId: 'run-1', threadId: 'thread-1', timestamp: Date.now(), - } satisfies StreamChunk, + }, { type: EventType.RUN_ERROR, message: 'Generation failed', runId: 'run-1', error: { message: 'Generation failed' }, timestamp: Date.now(), - } satisfies StreamChunk, + }, ]) const client = new GenerationClient({ @@ -203,26 +203,26 @@ describe('GenerationClient', () => { runId: 'run-1', threadId: 'thread-1', timestamp: Date.now(), - } satisfies StreamChunk, + }, { type: EventType.CUSTOM, name: 'generation:progress', value: { progress: 50, message: 'Halfway' }, timestamp: Date.now(), - } satisfies StreamChunk, + }, { type: EventType.CUSTOM, name: 'generation:result', value: { id: '1' }, timestamp: Date.now(), - } satisfies StreamChunk, + }, { type: EventType.RUN_FINISHED, runId: 'run-1', threadId: 'thread-1', finishReason: 'stop', timestamp: Date.now(), - } satisfies StreamChunk, + }, ]) const client = new GenerationClient({ @@ -279,14 +279,14 @@ describe('GenerationClient', () => { name: 'generation:result', value: { id: '1' }, timestamp: Date.now(), - } satisfies StreamChunk + } yield { type: EventType.RUN_FINISHED as const, runId: 'run-1', threadId: 'thread-1', finishReason: 'stop' as const, timestamp: Date.now(), - } satisfies StreamChunk + } }) const connection: ConnectConnectionAdapter = { @@ -358,7 +358,7 @@ describe('GenerationClient', () => { threadId: 'thread-1', finishReason: 'stop' as const, timestamp: Date.now(), - } satisfies StreamChunk + } }) const connection: ConnectConnectionAdapter = { connect: connectSpy 
} @@ -390,7 +390,7 @@ describe('GenerationClient', () => { runId: 'run-1', threadId: 'thread-1', timestamp: Date.now(), - } satisfies StreamChunk + } // Wait until abort is triggered await new Promise((resolve) => { signal?.addEventListener('abort', () => resolve()) @@ -402,7 +402,7 @@ describe('GenerationClient', () => { name: 'generation:result', value: { id: '1' }, timestamp: Date.now(), - } satisfies StreamChunk + } }, } @@ -489,14 +489,14 @@ describe('GenerationClient', () => { runId: 'run-1', threadId: 'thread-1', timestamp: Date.now(), - } satisfies StreamChunk, + }, { type: EventType.RUN_FINISHED, runId: 'run-1', threadId: 'thread-1', finishReason: 'stop', timestamp: Date.now(), - } satisfies StreamChunk, + }, ]) const client = new GenerationClient({ @@ -520,20 +520,20 @@ describe('GenerationClient', () => { runId: 'run-1', threadId: 'thread-1', timestamp: Date.now(), - } satisfies StreamChunk, + }, { type: EventType.CUSTOM, name: 'unknown:event', value: { foo: 'bar' }, timestamp: Date.now(), - } satisfies StreamChunk, + }, { type: EventType.RUN_FINISHED, runId: 'run-1', threadId: 'thread-1', finishReason: 'stop', timestamp: Date.now(), - } satisfies StreamChunk, + }, ]) const client = new GenerationClient({ @@ -631,20 +631,20 @@ describe('GenerationClient', () => { runId: 'run-1', threadId: 'thread-1', timestamp: Date.now(), - } satisfies StreamChunk, + }, { type: EventType.CUSTOM, name: 'generation:result', value: { id: '1', images: [] }, timestamp: Date.now(), - } satisfies StreamChunk, + }, { type: EventType.RUN_FINISHED, runId: 'run-1', threadId: 'thread-1', finishReason: 'stop', timestamp: Date.now(), - } satisfies StreamChunk, + }, ]) const client = new GenerationClient< diff --git a/packages/typescript/ai-client/tests/video-generation-client.test.ts b/packages/typescript/ai-client/tests/video-generation-client.test.ts index a0aa267f0..449e6ec3d 100644 --- a/packages/typescript/ai-client/tests/video-generation-client.test.ts +++ b/packages/typescript/ai-client/tests/video-generation-client.test.ts @@ -147,13 +147,13 @@ describe('VideoGenerationClient', () => { runId: 'run-1', threadId: 'thread-1', timestamp: Date.now(), - } satisfies StreamChunk, + }, { type: EventType.CUSTOM, name: 'video:job:created', value: { jobId: 'job-123' }, timestamp: Date.now(), - } satisfies StreamChunk, + }, { type: EventType.CUSTOM, name: 'video:status', @@ -163,7 +163,7 @@ describe('VideoGenerationClient', () => { progress: 50, }, timestamp: Date.now(), - } satisfies StreamChunk, + }, { type: EventType.CUSTOM, name: 'video:status', @@ -173,7 +173,7 @@ describe('VideoGenerationClient', () => { progress: 100, }, timestamp: Date.now(), - } satisfies StreamChunk, + }, { type: EventType.CUSTOM, name: 'generation:result', @@ -183,14 +183,14 @@ describe('VideoGenerationClient', () => { url: 'https://example.com/video.mp4', }, timestamp: Date.now(), - } satisfies StreamChunk, + }, { type: EventType.RUN_FINISHED, runId: 'run-1', threadId: 'thread-1', finishReason: 'stop', timestamp: Date.now(), - } satisfies StreamChunk, + }, ]) const client = new VideoGenerationClient({ @@ -223,7 +223,7 @@ describe('VideoGenerationClient', () => { runId: 'run-1', threadId: 'thread-1', timestamp: Date.now(), - } satisfies StreamChunk, + }, { type: EventType.CUSTOM, name: 'video:status', @@ -233,7 +233,7 @@ describe('VideoGenerationClient', () => { progress: 25, }, timestamp: Date.now(), - } satisfies StreamChunk, + }, { type: EventType.CUSTOM, name: 'generation:result', @@ -243,14 +243,14 @@ 
describe('VideoGenerationClient', () => { url: 'https://example.com/video.mp4', }, timestamp: Date.now(), - } satisfies StreamChunk, + }, { type: EventType.RUN_FINISHED, runId: 'run-1', threadId: 'thread-1', finishReason: 'stop', timestamp: Date.now(), - } satisfies StreamChunk, + }, ]) const client = new VideoGenerationClient({ @@ -282,14 +282,14 @@ describe('VideoGenerationClient', () => { runId: 'run-1', threadId: 'thread-1', timestamp: Date.now(), - } satisfies StreamChunk, + }, { type: EventType.RUN_ERROR, message: 'Video generation failed', runId: 'run-1', error: { message: 'Video generation failed' }, timestamp: Date.now(), - } satisfies StreamChunk, + }, ]) const client = new VideoGenerationClient({ @@ -313,7 +313,7 @@ describe('VideoGenerationClient', () => { runId: 'run-1', threadId: 'thread-1', timestamp: Date.now(), - } satisfies StreamChunk, + }, { type: EventType.CUSTOM, name: 'video:status', @@ -323,14 +323,14 @@ describe('VideoGenerationClient', () => { progress: 50, }, timestamp: Date.now(), - } satisfies StreamChunk, + }, { type: EventType.RUN_FINISHED, runId: 'run-1', threadId: 'thread-1', finishReason: 'stop', timestamp: Date.now(), - } satisfies StreamChunk, + }, ]) const client = new VideoGenerationClient({ @@ -352,20 +352,20 @@ describe('VideoGenerationClient', () => { runId: 'run-1', threadId: 'thread-1', timestamp: Date.now(), - } satisfies StreamChunk, + }, { type: EventType.CUSTOM, name: 'generation:progress', value: { progress: 75, message: 'Almost done' }, timestamp: Date.now(), - } satisfies StreamChunk, + }, { type: EventType.RUN_FINISHED, runId: 'run-1', threadId: 'thread-1', finishReason: 'stop', timestamp: Date.now(), - } satisfies StreamChunk, + }, ]) const client = new VideoGenerationClient({ @@ -387,7 +387,7 @@ describe('VideoGenerationClient', () => { runId: 'run-1', threadId: 'thread-1', timestamp: Date.now(), - } satisfies StreamChunk, + }, { type: EventType.CUSTOM, name: 'generation:result', @@ -397,14 +397,14 @@ describe('VideoGenerationClient', () => { url: 'https://example.com/video.mp4', }, timestamp: Date.now(), - } satisfies StreamChunk, + }, { type: EventType.RUN_FINISHED, runId: 'run-1', threadId: 'thread-1', finishReason: 'stop', timestamp: Date.now(), - } satisfies StreamChunk, + }, ]) const client = new VideoGenerationClient({ @@ -425,7 +425,7 @@ describe('VideoGenerationClient', () => { threadId: 'thread-1', finishReason: 'stop' as const, timestamp: Date.now(), - } satisfies StreamChunk + } }) const connection: ConnectConnectionAdapter = { connect: connectSpy } @@ -484,13 +484,13 @@ describe('VideoGenerationClient', () => { runId: 'run-1', threadId: 'thread-1', timestamp: Date.now(), - } satisfies StreamChunk, + }, { type: EventType.CUSTOM, name: 'video:job:created', value: { jobId: 'job-123' }, timestamp: Date.now(), - } satisfies StreamChunk, + }, { type: EventType.CUSTOM, name: 'video:status', @@ -500,7 +500,7 @@ describe('VideoGenerationClient', () => { progress: 50, }, timestamp: Date.now(), - } satisfies StreamChunk, + }, { type: EventType.CUSTOM, name: 'generation:result', @@ -510,14 +510,14 @@ describe('VideoGenerationClient', () => { url: 'https://example.com/video.mp4', }, timestamp: Date.now(), - } satisfies StreamChunk, + }, { type: EventType.RUN_FINISHED, runId: 'run-1', threadId: 'thread-1', finishReason: 'stop', timestamp: Date.now(), - } satisfies StreamChunk, + }, ]) const client = new VideoGenerationClient({ @@ -549,7 +549,7 @@ describe('VideoGenerationClient', () => { threadId: 'thread-1', finishReason: 'stop' as 
const, timestamp: Date.now(), - } satisfies StreamChunk + } }) const connection: ConnectConnectionAdapter = { connect: connectSpy } @@ -582,13 +582,13 @@ describe('VideoGenerationClient', () => { runId: 'run-1', threadId: 'thread-1', timestamp: Date.now(), - } satisfies StreamChunk + } yield { type: EventType.CUSTOM as const, name: 'video:job:created', value: { jobId: 'job-123' }, timestamp: Date.now(), - } satisfies StreamChunk + } // Wait until abort is triggered await new Promise((resolve) => { signal?.addEventListener('abort', () => resolve()) @@ -604,7 +604,7 @@ describe('VideoGenerationClient', () => { url: 'https://example.com/video.mp4', }, timestamp: Date.now(), - } satisfies StreamChunk + } }, } diff --git a/packages/typescript/ai-gemini/src/adapters/summarize.ts b/packages/typescript/ai-gemini/src/adapters/summarize.ts index 2fc39a63f..74c653a93 100644 --- a/packages/typescript/ai-gemini/src/adapters/summarize.ts +++ b/packages/typescript/ai-gemini/src/adapters/summarize.ts @@ -5,6 +5,9 @@ import type { InferTextProviderOptions } from '@tanstack/ai/adapters' import type { GEMINI_MODELS } from '../model-meta' import type { GeminiClientConfig } from '../utils' +/** + * Configuration for Gemini summarize adapter + */ export interface GeminiSummarizeConfig extends GeminiClientConfig {} export type GeminiSummarizeModel = (typeof GEMINI_MODELS)[number] diff --git a/packages/typescript/ai-gemini/src/adapters/text.ts b/packages/typescript/ai-gemini/src/adapters/text.ts index e52a74859..66d6716c0 100644 --- a/packages/typescript/ai-gemini/src/adapters/text.ts +++ b/packages/typescript/ai-gemini/src/adapters/text.ts @@ -150,7 +150,7 @@ export class GeminiTextAdapter< ? error.message : 'An unknown error occurred during the chat stream.', }, - } satisfies StreamChunk + } } } @@ -271,7 +271,7 @@ export class GeminiTextAdapter< threadId, model, timestamp: Date.now(), - } satisfies StreamChunk + } } if (chunk.candidates?.[0]?.content?.parts) { @@ -292,14 +292,14 @@ export class GeminiTextAdapter< messageId: reasoningMessageId, model, timestamp: Date.now(), - } satisfies StreamChunk + } yield { type: EventType.REASONING_MESSAGE_START, messageId: reasoningMessageId, role: 'reasoning' as const, model, timestamp: Date.now(), - } satisfies StreamChunk + } // Legacy STEP events (kept during transition) yield { @@ -309,7 +309,7 @@ export class GeminiTextAdapter< model, timestamp: Date.now(), stepType: 'thinking', - } satisfies StreamChunk + } } accumulatedThinking += part.text @@ -321,7 +321,7 @@ export class GeminiTextAdapter< delta: part.text, model, timestamp: Date.now(), - } satisfies StreamChunk + } // Legacy STEP event yield { @@ -332,7 +332,7 @@ export class GeminiTextAdapter< timestamp: Date.now(), delta: part.text, content: accumulatedThinking, - } satisfies StreamChunk + } } else if (part.text.trim()) { // Close reasoning before text starts if (reasoningMessageId && !hasClosedReasoning) { @@ -342,13 +342,13 @@ export class GeminiTextAdapter< messageId: reasoningMessageId, model, timestamp: Date.now(), - } satisfies StreamChunk + } yield { type: EventType.REASONING_END, messageId: reasoningMessageId, model, timestamp: Date.now(), - } satisfies StreamChunk + } } // Skip whitespace-only text parts (e.g. 
"\n" during auto-continuation) @@ -361,7 +361,7 @@ export class GeminiTextAdapter< model, timestamp: Date.now(), role: 'assistant', - } satisfies StreamChunk + } } accumulatedContent += part.text @@ -372,7 +372,7 @@ export class GeminiTextAdapter< timestamp: Date.now(), delta: part.text, content: accumulatedContent, - } satisfies StreamChunk + } } } @@ -437,7 +437,7 @@ export class GeminiTextAdapter< thoughtSignature: toolCallData.thoughtSignature, } satisfies GeminiToolCallMetadata, }), - } satisfies StreamChunk + } } // Emit TOOL_CALL_ARGS @@ -448,7 +448,7 @@ export class GeminiTextAdapter< timestamp: Date.now(), delta: toolCallData.args, args: toolCallData.args, - } satisfies StreamChunk + } } } } else if (chunk.data && chunk.data.trim()) { @@ -462,7 +462,7 @@ export class GeminiTextAdapter< model, timestamp: Date.now(), role: 'assistant', - } satisfies StreamChunk + } } accumulatedContent += chunk.data @@ -473,7 +473,7 @@ export class GeminiTextAdapter< timestamp: Date.now(), delta: chunk.data, content: accumulatedContent, - } satisfies StreamChunk + } } if (chunk.candidates?.[0]?.finishReason) { @@ -510,7 +510,7 @@ export class GeminiTextAdapter< model, timestamp: Date.now(), index: nextToolIndex - 1, - } satisfies StreamChunk + } // Emit TOOL_CALL_END with parsed input let parsedInput: unknown = {} @@ -533,7 +533,7 @@ export class GeminiTextAdapter< model, timestamp: Date.now(), input: parsedInput, - } satisfies StreamChunk + } } } } @@ -557,7 +557,7 @@ export class GeminiTextAdapter< model, timestamp: Date.now(), input: parsedInput, - } satisfies StreamChunk + } } // Reset so a new TEXT_MESSAGE_START is emitted if text follows tool calls @@ -579,7 +579,7 @@ export class GeminiTextAdapter< 'The response was cut off because the maximum token limit was reached.', code: 'max_tokens', }, - } satisfies StreamChunk + } } // Close reasoning events if still open @@ -590,13 +590,13 @@ export class GeminiTextAdapter< messageId: reasoningMessageId, model, timestamp: Date.now(), - } satisfies StreamChunk + } yield { type: EventType.REASONING_END, messageId: reasoningMessageId, model, timestamp: Date.now(), - } satisfies StreamChunk + } } // Emit TEXT_MESSAGE_END if we had text content @@ -606,7 +606,7 @@ export class GeminiTextAdapter< messageId, model, timestamp: Date.now(), - } satisfies StreamChunk + } } yield { @@ -623,7 +623,7 @@ export class GeminiTextAdapter< totalTokens: chunk.usageMetadata.totalTokenCount ?? 0, } : undefined, - } satisfies StreamChunk + } } } } diff --git a/packages/typescript/ai-grok/src/adapters/image.ts b/packages/typescript/ai-grok/src/adapters/image.ts index 13e17e8ca..625a2a9a9 100644 --- a/packages/typescript/ai-grok/src/adapters/image.ts +++ b/packages/typescript/ai-grok/src/adapters/image.ts @@ -28,7 +28,15 @@ import type { GrokClientConfig } from '../utils' export interface GrokImageConfig extends GrokClientConfig {} /** - * Grok Image Generation Adapter. Supports grok-2-image-1212. + * Grok Image Generation Adapter + * + * Tree-shakeable adapter for Grok image generation functionality. + * Supports grok-2-image-1212 model. + * + * Features: + * - Model-specific type-safe provider options + * - Size validation per model + * - Number of images validation */ export class GrokImageAdapter< TModel extends GrokImageModel, @@ -110,6 +118,25 @@ export class GrokImageAdapter< } } +/** + * Creates a Grok image adapter with explicit API key. + * Type resolution happens here at the call site. 
+ *
+ * @param model - The model name (e.g., 'grok-2-image-1212')
+ * @param apiKey - Your xAI API key
+ * @param config - Optional additional configuration
+ * @returns Configured Grok image adapter instance with resolved types
+ *
+ * @example
+ * ```typescript
+ * const adapter = createGrokImage('grok-2-image-1212', "xai-...");
+ *
+ * const result = await generateImage({
+ *   adapter,
+ *   prompt: 'A cute baby sea otter'
+ * });
+ * ```
+ */
 export function createGrokImage<TModel extends GrokImageModel>(
   model: TModel,
   apiKey: string,
@@ -118,6 +145,30 @@ export function createGrokImage<TModel extends GrokImageModel>(
   return new GrokImageAdapter({ apiKey, ...config }, model)
 }
 
+/**
+ * Creates a Grok image adapter with automatic API key detection from environment variables.
+ * Type resolution happens here at the call site.
+ *
+ * Looks for `XAI_API_KEY` in:
+ * - `process.env` (Node.js)
+ * - `window.env` (Browser with injected env)
+ *
+ * @param model - The model name (e.g., 'grok-2-image-1212')
+ * @param config - Optional configuration (excluding apiKey which is auto-detected)
+ * @returns Configured Grok image adapter instance with resolved types
+ * @throws Error if XAI_API_KEY is not found in environment
+ *
+ * @example
+ * ```typescript
+ * // Automatically uses XAI_API_KEY from environment
+ * const adapter = grokImage('grok-2-image-1212');
+ *
+ * const result = await generateImage({
+ *   adapter,
+ *   prompt: 'A beautiful sunset over mountains'
+ * });
+ * ```
+ */
 export function grokImage<TModel extends GrokImageModel>(
   model: TModel,
   config?: Omit<GrokImageConfig, 'apiKey'>,
diff --git a/packages/typescript/ai-grok/src/adapters/summarize.ts b/packages/typescript/ai-grok/src/adapters/summarize.ts
index 0177d2983..01040acd9 100644
--- a/packages/typescript/ai-grok/src/adapters/summarize.ts
+++ b/packages/typescript/ai-grok/src/adapters/summarize.ts
@@ -5,12 +5,22 @@ import type { InferTextProviderOptions } from '@tanstack/ai/adapters'
 import type { GROK_CHAT_MODELS } from '../model-meta'
 import type { GrokClientConfig } from '../utils'
 
+/**
+ * Configuration for Grok summarize adapter
+ */
 export interface GrokSummarizeConfig extends GrokClientConfig {}
 
+/** Model type for Grok summarization */
 export type GrokSummarizeModel = (typeof GROK_CHAT_MODELS)[number]
 
 /**
  * Creates a Grok summarize adapter with explicit API key.
+ * Type resolution happens here at the call site.
+ *
+ * @param model - The model name (e.g., 'grok-3', 'grok-4')
+ * @param apiKey - Your xAI API key
+ * @param config - Optional additional configuration
+ * @returns Configured Grok summarize adapter instance with resolved types
  *
  * @example
  * ```typescript
@@ -33,12 +43,27 @@ export function createGrokSummarize<TModel extends GrokSummarizeModel>(
 }
 
 /**
- * Creates a Grok summarize adapter with API key from `XAI_API_KEY`.
+ * Creates a Grok summarize adapter with automatic API key detection from environment variables.
+ * Type resolution happens here at the call site.
+ *
+ * Looks for `XAI_API_KEY` in:
+ * - `process.env` (Node.js)
+ * - `window.env` (Browser with injected env)
+ *
+ * @param model - The model name (e.g., 'grok-3', 'grok-4')
+ * @param config - Optional configuration (excluding apiKey which is auto-detected)
+ * @returns Configured Grok summarize adapter instance with resolved types
+ * @throws Error if XAI_API_KEY is not found in environment
  *
  * @example
  * ```typescript
+ * // Automatically uses XAI_API_KEY from environment
  * const adapter = grokSummarize('grok-3');
- * await summarize({ adapter, text: "Long article text..." });
+ *
+ * await summarize({
+ *   adapter,
+ *   text: "Long article text..."
+ * }); * ``` */ export function grokSummarize( diff --git a/packages/typescript/ai-ollama/src/adapters/text.ts b/packages/typescript/ai-ollama/src/adapters/text.ts index 907418330..b06c4036e 100644 --- a/packages/typescript/ai-ollama/src/adapters/text.ts +++ b/packages/typescript/ai-ollama/src/adapters/text.ts @@ -248,7 +248,7 @@ export class OllamaTextAdapter extends BaseTextAdapter< threadId, model: chunk.model, timestamp: Date.now(), - } satisfies StreamChunk + } } const handleToolCall = (toolCall: ToolCall): Array => { @@ -271,7 +271,7 @@ export class OllamaTextAdapter extends BaseTextAdapter< model: chunk.model, timestamp: Date.now(), index: actualToolCall.function.index, - } satisfies StreamChunk) + }) } // Serialize arguments to a string for the TOOL_CALL_ARGS event @@ -295,7 +295,7 @@ export class OllamaTextAdapter extends BaseTextAdapter< timestamp: Date.now(), delta: argsStr, args: argsStr, - } satisfies StreamChunk) + }) // Emit TOOL_CALL_END events.push({ @@ -306,7 +306,7 @@ export class OllamaTextAdapter extends BaseTextAdapter< model: chunk.model, timestamp: Date.now(), input: parsedInput, - } satisfies StreamChunk) + }) return events } @@ -329,13 +329,13 @@ export class OllamaTextAdapter extends BaseTextAdapter< messageId: reasoningMessageId, model: chunk.model, timestamp: Date.now(), - } satisfies StreamChunk + } yield { type: EventType.REASONING_END, messageId: reasoningMessageId, model: chunk.model, timestamp: Date.now(), - } satisfies StreamChunk + } } // Emit TEXT_MESSAGE_END if we had text content @@ -345,7 +345,7 @@ export class OllamaTextAdapter extends BaseTextAdapter< messageId, model: chunk.model, timestamp: Date.now(), - } satisfies StreamChunk + } } yield { @@ -361,7 +361,7 @@ export class OllamaTextAdapter extends BaseTextAdapter< totalTokens: (chunk.prompt_eval_count || 0) + (chunk.eval_count || 0), }, - } satisfies StreamChunk + } continue } @@ -374,13 +374,13 @@ export class OllamaTextAdapter extends BaseTextAdapter< messageId: reasoningMessageId, model: chunk.model, timestamp: Date.now(), - } satisfies StreamChunk + } yield { type: EventType.REASONING_END, messageId: reasoningMessageId, model: chunk.model, timestamp: Date.now(), - } satisfies StreamChunk + } } // Emit TEXT_MESSAGE_START on first text content @@ -392,7 +392,7 @@ export class OllamaTextAdapter extends BaseTextAdapter< model: chunk.model, timestamp: Date.now(), role: 'assistant', - } satisfies StreamChunk + } } accumulatedContent += chunk.message.content @@ -403,7 +403,7 @@ export class OllamaTextAdapter extends BaseTextAdapter< timestamp: Date.now(), delta: chunk.message.content, content: accumulatedContent, - } satisfies StreamChunk + } } if (chunk.message.tool_calls && chunk.message.tool_calls.length > 0) { @@ -428,14 +428,14 @@ export class OllamaTextAdapter extends BaseTextAdapter< messageId: reasoningMessageId, model: chunk.model, timestamp: Date.now(), - } satisfies StreamChunk + } yield { type: EventType.REASONING_MESSAGE_START, messageId: reasoningMessageId, role: 'reasoning' as const, model: chunk.model, timestamp: Date.now(), - } satisfies StreamChunk + } // Legacy STEP events (kept during transition) yield { @@ -445,7 +445,7 @@ export class OllamaTextAdapter extends BaseTextAdapter< model: chunk.model, timestamp: Date.now(), stepType: 'thinking', - } satisfies StreamChunk + } } accumulatedReasoning += chunk.message.thinking @@ -457,7 +457,7 @@ export class OllamaTextAdapter extends BaseTextAdapter< delta: chunk.message.thinking, model: chunk.model, timestamp: Date.now(), - } 
satisfies StreamChunk + } // Legacy STEP event yield { @@ -468,7 +468,7 @@ export class OllamaTextAdapter extends BaseTextAdapter< timestamp: Date.now(), delta: chunk.message.thinking, content: accumulatedReasoning, - } satisfies StreamChunk + } } } } diff --git a/packages/typescript/ai-openai/src/adapters/image.ts b/packages/typescript/ai-openai/src/adapters/image.ts index 2a5159e50..41da352c5 100644 --- a/packages/typescript/ai-openai/src/adapters/image.ts +++ b/packages/typescript/ai-openai/src/adapters/image.ts @@ -32,6 +32,11 @@ export interface OpenAIImageConfig extends OpenAIClientConfig {} * * Tree-shakeable adapter for OpenAI image generation functionality. * Supports gpt-image-1, gpt-image-1-mini, dall-e-3, and dall-e-2 models. + * + * Features: + * - Model-specific type-safe provider options + * - Size validation per model + * - Number of images validation */ export class OpenAIImageAdapter< TModel extends OpenAIImageModel, @@ -115,6 +120,25 @@ export class OpenAIImageAdapter< } } +/** + * Creates an OpenAI image adapter with explicit API key. + * Type resolution happens here at the call site. + * + * @param model - The model name (e.g., 'dall-e-3', 'gpt-image-1') + * @param apiKey - Your OpenAI API key + * @param config - Optional additional configuration + * @returns Configured OpenAI image adapter instance with resolved types + * + * @example + * ```typescript + * const adapter = createOpenaiImage('dall-e-3', "sk-..."); + * + * const result = await generateImage({ + * adapter, + * prompt: 'A cute baby sea otter' + * }); + * ``` + */ export function createOpenaiImage( model: TModel, apiKey: string, @@ -123,6 +147,30 @@ export function createOpenaiImage( return new OpenAIImageAdapter({ apiKey, ...config }, model) } +/** + * Creates an OpenAI image adapter with automatic API key detection from environment variables. + * Type resolution happens here at the call site. + * + * Looks for `OPENAI_API_KEY` in: + * - `process.env` (Node.js) + * - `window.env` (Browser with injected env) + * + * @param model - The model name (e.g., 'dall-e-3', 'gpt-image-1') + * @param config - Optional configuration (excluding apiKey which is auto-detected) + * @returns Configured OpenAI image adapter instance with resolved types + * @throws Error if OPENAI_API_KEY is not found in environment + * + * @example + * ```typescript + * // Automatically uses OPENAI_API_KEY from environment + * const adapter = openaiImage('dall-e-3'); + * + * const result = await generateImage({ + * adapter, + * prompt: 'A beautiful sunset over mountains' + * }); + * ``` + */ export function openaiImage( model: TModel, config?: Omit, diff --git a/packages/typescript/ai-openai/src/adapters/summarize.ts b/packages/typescript/ai-openai/src/adapters/summarize.ts index 64dd71a8c..e95597d3d 100644 --- a/packages/typescript/ai-openai/src/adapters/summarize.ts +++ b/packages/typescript/ai-openai/src/adapters/summarize.ts @@ -5,10 +5,19 @@ import type { InferTextProviderOptions } from '@tanstack/ai/adapters' import type { OpenAIChatModel } from '../model-meta' import type { OpenAIClientConfig } from '../utils/client' +/** + * Configuration for OpenAI summarize adapter + */ export interface OpenAISummarizeConfig extends OpenAIClientConfig {} /** * Creates an OpenAI summarize adapter with explicit API key. + * Type resolution happens here at the call site. 
+ * + * @param model - The model name (e.g., 'gpt-4o-mini', 'gpt-4o') + * @param apiKey - Your OpenAI API key + * @param config - Optional additional configuration + * @returns Configured OpenAI summarize adapter instance with resolved types * * @example * ```typescript @@ -31,12 +40,27 @@ export function createOpenaiSummarize( } /** - * Creates an OpenAI summarize adapter with API key from `OPENAI_API_KEY`. + * Creates an OpenAI summarize adapter with automatic API key detection from environment variables. + * Type resolution happens here at the call site. + * + * Looks for `OPENAI_API_KEY` in: + * - `process.env` (Node.js) + * - `window.env` (Browser with injected env) + * + * @param model - The model name (e.g., 'gpt-4o-mini', 'gpt-4o') + * @param config - Optional configuration (excluding apiKey which is auto-detected) + * @returns Configured OpenAI summarize adapter instance with resolved types + * @throws Error if OPENAI_API_KEY is not found in environment * * @example * ```typescript + * // Automatically uses OPENAI_API_KEY from environment * const adapter = openaiSummarize('gpt-4o-mini'); - * await summarize({ adapter, text: "Long article text..." }); + * + * await summarize({ + * adapter, + * text: "Long article text..." + * }); * ``` */ export function openaiSummarize( diff --git a/packages/typescript/ai-openai/src/adapters/transcription.ts b/packages/typescript/ai-openai/src/adapters/transcription.ts index 556aa0f8a..736c32a37 100644 --- a/packages/typescript/ai-openai/src/adapters/transcription.ts +++ b/packages/typescript/ai-openai/src/adapters/transcription.ts @@ -19,9 +19,19 @@ import type { OpenAIClientConfig } from '../utils/client' export interface OpenAITranscriptionConfig extends OpenAIClientConfig {} /** - * OpenAI Transcription (Speech-to-Text) Adapter. - * Supports whisper-1 and gpt-4o-transcribe* models. Verbose JSON output - * (timestamps + segments) only available on whisper-1. + * OpenAI Transcription (Speech-to-Text) Adapter + * + * Tree-shakeable adapter for OpenAI audio transcription functionality. + * Supports whisper-1, gpt-4o-transcribe, gpt-4o-mini-transcribe, and gpt-4o-transcribe-diarize models. + * + * Features: + * - Multiple transcription models with different capabilities + * - Language detection or specification + * - Multiple output formats: json, text, srt, verbose_json, vtt + * - Word and segment-level timestamps (with verbose_json — whisper-1 only; + * gpt-4o-* transcribe models accept only json/text and reject verbose_json + * with HTTP 400) + * - Speaker diarization (with gpt-4o-transcribe-diarize) */ export class OpenAITranscriptionAdapter< TModel extends OpenAITranscriptionModel, @@ -165,6 +175,26 @@ export class OpenAITranscriptionAdapter< } } +/** + * Creates an OpenAI transcription adapter with explicit API key. + * Type resolution happens here at the call site. 
+ * + * @param model - The model name (e.g., 'whisper-1') + * @param apiKey - Your OpenAI API key + * @param config - Optional additional configuration + * @returns Configured OpenAI transcription adapter instance with resolved types + * + * @example + * ```typescript + * const adapter = createOpenaiTranscription('whisper-1', "sk-..."); + * + * const result = await generateTranscription({ + * adapter, + * audio: audioFile, + * language: 'en' + * }); + * ``` + */ export function createOpenaiTranscription< TModel extends OpenAITranscriptionModel, >( @@ -175,6 +205,32 @@ export function createOpenaiTranscription< return new OpenAITranscriptionAdapter({ apiKey, ...config }, model) } +/** + * Creates an OpenAI transcription adapter with automatic API key detection from environment variables. + * Type resolution happens here at the call site. + * + * Looks for `OPENAI_API_KEY` in: + * - `process.env` (Node.js) + * - `window.env` (Browser with injected env) + * + * @param model - The model name (e.g., 'whisper-1') + * @param config - Optional configuration (excluding apiKey which is auto-detected) + * @returns Configured OpenAI transcription adapter instance with resolved types + * @throws Error if OPENAI_API_KEY is not found in environment + * + * @example + * ```typescript + * // Automatically uses OPENAI_API_KEY from environment + * const adapter = openaiTranscription('whisper-1'); + * + * const result = await generateTranscription({ + * adapter, + * audio: audioFile + * }); + * + * console.log(result.text) + * ``` + */ export function openaiTranscription( model: TModel, config?: Omit, diff --git a/packages/typescript/ai-openai/src/adapters/tts.ts b/packages/typescript/ai-openai/src/adapters/tts.ts index 382d0096c..34fd0574e 100644 --- a/packages/typescript/ai-openai/src/adapters/tts.ts +++ b/packages/typescript/ai-openai/src/adapters/tts.ts @@ -22,9 +22,13 @@ export interface OpenAITTSConfig extends OpenAIClientConfig {} /** * OpenAI Text-to-Speech Adapter * + * Tree-shakeable adapter for OpenAI TTS functionality. * Supports tts-1, tts-1-hd, and gpt-4o-audio-preview models. - * Voices: alloy, ash, ballad, coral, echo, fable, onyx, nova, sage, shimmer, verse. - * Formats: mp3, opus, aac, flac, wav, pcm. Speed 0.25 to 4.0. + * + * Features: + * - Multiple voice options: alloy, ash, ballad, coral, echo, fable, onyx, nova, sage, shimmer, verse + * - Multiple output formats: mp3, opus, aac, flac, wav, pcm + * - Speed control (0.25 to 4.0) */ export class OpenAITTSAdapter< TModel extends OpenAITTSModel, @@ -107,6 +111,26 @@ export class OpenAITTSAdapter< } } +/** + * Creates an OpenAI speech adapter with explicit API key. + * Type resolution happens here at the call site. + * + * @param model - The model name (e.g., 'tts-1', 'tts-1-hd') + * @param apiKey - Your OpenAI API key + * @param config - Optional additional configuration + * @returns Configured OpenAI speech adapter instance with resolved types + * + * @example + * ```typescript + * const adapter = createOpenaiSpeech('tts-1-hd', "sk-..."); + * + * const result = await generateSpeech({ + * adapter, + * text: 'Hello, world!', + * voice: 'nova' + * }); + * ``` + */ export function createOpenaiSpeech( model: TModel, apiKey: string, @@ -115,6 +139,32 @@ export function createOpenaiSpeech( return new OpenAITTSAdapter({ apiKey, ...config }, model) } +/** + * Creates an OpenAI speech adapter with automatic API key detection from environment variables. + * Type resolution happens here at the call site. 
+ *
+ * Looks for `OPENAI_API_KEY` in:
+ * - `process.env` (Node.js)
+ * - `window.env` (Browser with injected env)
+ *
+ * @param model - The model name (e.g., 'tts-1', 'tts-1-hd')
+ * @param config - Optional configuration (excluding apiKey which is auto-detected)
+ * @returns Configured OpenAI speech adapter instance with resolved types
+ * @throws Error if OPENAI_API_KEY is not found in environment
+ *
+ * @example
+ * ```typescript
+ * // Automatically uses OPENAI_API_KEY from environment
+ * const adapter = openaiSpeech('tts-1');
+ *
+ * const result = await generateSpeech({
+ *   adapter,
+ *   text: 'Welcome to TanStack AI!',
+ *   voice: 'alloy',
+ *   format: 'mp3'
+ * });
+ * ```
+ */
 export function openaiSpeech<TModel extends OpenAITTSModel>(
   model: TModel,
   config?: Omit<OpenAITTSConfig, 'apiKey'>,
diff --git a/packages/typescript/ai-openai/src/adapters/video.ts b/packages/typescript/ai-openai/src/adapters/video.ts
index 133f204f9..805d8e6da 100644
--- a/packages/typescript/ai-openai/src/adapters/video.ts
+++ b/packages/typescript/ai-openai/src/adapters/video.ts
@@ -49,9 +49,18 @@ function warnIfLargeMediaBuffer(byteLength: number, source: string): void {
 export interface OpenAIVideoConfig extends OpenAIClientConfig {}
 
 /**
- * OpenAI Video Generation Adapter (Sora-2). Job/polling architecture.
+ * OpenAI Video Generation Adapter
+ *
+ * Tree-shakeable adapter for OpenAI video generation functionality using Sora-2.
+ * Uses a jobs/polling architecture for async video generation.
  *
  * @experimental Video generation is an experimental feature and may change.
+ *
+ * Features:
+ * - Async job-based video generation
+ * - Status polling for job progress
+ * - URL retrieval for completed videos
+ * - Model-specific type-safe provider options
  */
 export class OpenAIVideoAdapter<
   TModel extends OpenAIVideoModel,
@@ -303,6 +312,27 @@
   }
 }
 
+/**
+ * Creates an OpenAI video adapter with an explicit API key.
+ * Type resolution happens here at the call site.
+ *
+ * @experimental Video generation is an experimental feature and may change.
+ *
+ * @param model - The model name (e.g., 'sora-2')
+ * @param apiKey - Your OpenAI API key
+ * @param config - Optional additional configuration
+ * @returns Configured OpenAI video adapter instance with resolved types
+ *
+ * @example
+ * ```typescript
+ * const adapter = createOpenaiVideo('sora-2', 'your-api-key');
+ *
+ * const { jobId } = await generateVideo({
+ *   adapter,
+ *   prompt: 'A beautiful sunset over the ocean'
+ * });
+ * ```
+ */
 export function createOpenaiVideo<TModel extends OpenAIVideoModel>(
   model: TModel,
   apiKey: string,
@@ -311,6 +341,39 @@
   return new OpenAIVideoAdapter({ apiKey, ...config }, model)
 }
 
+/**
+ * Creates an OpenAI video adapter with automatic API key detection from environment variables.
+ * Type resolution happens here at the call site.
+ *
+ * Looks for `OPENAI_API_KEY` in:
+ * - `process.env` (Node.js)
+ * - `window.env` (Browser with injected env)
+ *
+ * @experimental Video generation is an experimental feature and may change.
+ * + * @param model - The model name (e.g., 'sora-2') + * @param config - Optional configuration (excluding apiKey which is auto-detected) + * @returns Configured OpenAI video adapter instance with resolved types + * @throws Error if OPENAI_API_KEY is not found in environment + * + * @example + * ```typescript + * // Automatically uses OPENAI_API_KEY from environment + * const adapter = openaiVideo('sora-2'); + * + * // Create a video generation job + * const { jobId } = await generateVideo({ + * adapter, + * prompt: 'A cat playing piano' + * }); + * + * // Poll for status + * const status = await getVideoJobStatus({ + * adapter, + * jobId + * }); + * ``` + */ export function openaiVideo( model: TModel, config?: Omit, diff --git a/packages/typescript/ai-openai/src/utils/client.ts b/packages/typescript/ai-openai/src/utils/client.ts index b8ef4a06c..a64b02b93 100644 --- a/packages/typescript/ai-openai/src/utils/client.ts +++ b/packages/typescript/ai-openai/src/utils/client.ts @@ -3,8 +3,8 @@ import type { ClientOptions } from 'openai' /** * OpenAI client configuration. Pass through to `new OpenAI(...)`. `apiKey` - * is required so the openai-compatible adapters don't need to handle a - * missing-key case at construction time. + * is required so the OpenAI adapters don't need to handle a missing-key + * case at construction time. */ export interface OpenAIClientConfig extends Omit { apiKey: string diff --git a/packages/typescript/ai-openrouter/package.json b/packages/typescript/ai-openrouter/package.json index 892a80cbd..a8c82d4ec 100644 --- a/packages/typescript/ai-openrouter/package.json +++ b/packages/typescript/ai-openrouter/package.json @@ -53,7 +53,6 @@ "zod": "^4.2.0" }, "peerDependencies": { - "@tanstack/ai": "workspace:^", - "zod": "^4.0.0" + "@tanstack/ai": "workspace:^" } } diff --git a/packages/typescript/ai-openrouter/src/adapters/responses-text.ts b/packages/typescript/ai-openrouter/src/adapters/responses-text.ts index 3ade50cf5..3e118b620 100644 --- a/packages/typescript/ai-openrouter/src/adapters/responses-text.ts +++ b/packages/typescript/ai-openrouter/src/adapters/responses-text.ts @@ -13,6 +13,7 @@ import type { ResponsesFunctionTool } from '../internal/responses-tool-converter import type { InputsUnion, OpenResponsesResult, + OutputItems, ResponsesRequest, StreamEvents, } from '@openrouter/sdk/models' @@ -25,7 +26,6 @@ import type { ModelMessage, StreamChunk, TextOptions, - Tool, } from '@tanstack/ai' import type { ExternalResponsesProviderOptions } from '../text/responses-provider-options' import type { @@ -133,13 +133,13 @@ export class OpenRouterResponsesTextAdapter< { provider: this.name, model: this.model }, ) const reqOptions = extractRequestOptions(options.request) - const response = (await this.orClient.beta.responses.send( + const response = await this.orClient.beta.responses.send( { responsesRequest: { ...responsesRequest, stream: true } }, { signal: reqOptions.signal ?? 
undefined, ...(reqOptions.headers && { headers: reqOptions.headers }), }, - )) as AsyncIterable + ) yield* this.processStreamChunks( response, @@ -164,7 +164,7 @@ export class OpenRouterResponsesTextAdapter< threadId: aguiState.threadId, model: options.model, timestamp: Date.now(), - } satisfies StreamChunk + } } yield { @@ -174,7 +174,7 @@ export class OpenRouterResponsesTextAdapter< message: errorPayload.message, code: errorPayload.code, error: errorPayload, - } satisfies StreamChunk + } options.logger.errors(`${this.name}.chatStream fatal`, { error: errorPayload, @@ -216,7 +216,7 @@ export class OpenRouterResponsesTextAdapter< schema: jsonSchema, strict: true, }, - } as ResponsesRequest['text'], + }, }, }, { @@ -397,7 +397,7 @@ export class OpenRouterResponsesTextAdapter< threadId: aguiState.threadId, model: model || options.model, timestamp: Date.now(), - } satisfies StreamChunk + } } const handleContentPart = (contentPart: { @@ -414,7 +414,7 @@ export class OpenRouterResponsesTextAdapter< timestamp: Date.now(), delta: contentPart.text || '', content: accumulatedContent, - } satisfies StreamChunk + } } if (contentPart.type === 'reasoning_text') { @@ -432,7 +432,7 @@ export class OpenRouterResponsesTextAdapter< timestamp: Date.now(), delta: contentPart.text || '', content: accumulatedReasoning, - } satisfies StreamChunk + } } // Either a real refusal or an unknown content_part type. Surface // the part type in the error so unknown parts are debuggable @@ -449,7 +449,7 @@ export class OpenRouterResponsesTextAdapter< message, code, error: { message, code }, - } satisfies StreamChunk + } } // Capture model metadata from any of these events. @@ -485,7 +485,7 @@ export class OpenRouterResponsesTextAdapter< messageId: aguiState.messageId, model, timestamp: Date.now(), - } satisfies StreamChunk + } hasEmittedTextMessageStart = false } const r = (chunk.response ?? 
{}) as { @@ -512,7 +512,7 @@ export class OpenRouterResponsesTextAdapter< message: errorMessage, ...(errorCode !== undefined && { code: errorCode }), }, - } satisfies StreamChunk + } runFinishedEmitted = true return } @@ -534,7 +534,7 @@ export class OpenRouterResponsesTextAdapter< model: model || options.model, timestamp: Date.now(), role: 'assistant', - } satisfies StreamChunk + } } accumulatedContent += textDelta @@ -546,7 +546,7 @@ export class OpenRouterResponsesTextAdapter< timestamp: Date.now(), delta: textDelta, content: accumulatedContent, - } satisfies StreamChunk + } } } @@ -569,7 +569,7 @@ export class OpenRouterResponsesTextAdapter< model: model || options.model, timestamp: Date.now(), stepType: 'thinking', - } satisfies StreamChunk + } } accumulatedReasoning += reasoningDelta @@ -583,7 +583,7 @@ export class OpenRouterResponsesTextAdapter< timestamp: Date.now(), delta: reasoningDelta, content: accumulatedReasoning, - } satisfies StreamChunk + } } } @@ -606,7 +606,7 @@ export class OpenRouterResponsesTextAdapter< model: model || options.model, timestamp: Date.now(), stepType: 'thinking', - } satisfies StreamChunk + } } accumulatedReasoning += summaryDelta @@ -620,7 +620,7 @@ export class OpenRouterResponsesTextAdapter< timestamp: Date.now(), delta: summaryDelta, content: accumulatedReasoning, - } satisfies StreamChunk + } } } @@ -642,7 +642,7 @@ export class OpenRouterResponsesTextAdapter< model: model || options.model, timestamp: Date.now(), role: 'assistant', - } satisfies StreamChunk + } } if (contentPart.type === 'reasoning_text' && !hasEmittedStepStarted) { hasEmittedStepStarted = true @@ -654,7 +654,7 @@ export class OpenRouterResponsesTextAdapter< model: model || options.model, timestamp: Date.now(), stepType: 'thinking', - } satisfies StreamChunk + } } if (contentPart.type === 'output_text') { hasStreamedContentDeltas = true @@ -701,7 +701,7 @@ export class OpenRouterResponsesTextAdapter< model: model || options.model, timestamp: Date.now(), role: 'assistant', - } satisfies StreamChunk + } } else if ( contentPart.type === 'reasoning_text' && !hasEmittedStepStarted @@ -715,7 +715,7 @@ export class OpenRouterResponsesTextAdapter< model: model || options.model, timestamp: Date.now(), stepType: 'thinking', - } satisfies StreamChunk + } } const doneChunk = handleContentPart(contentPart) @@ -728,20 +728,16 @@ export class OpenRouterResponsesTextAdapter< // handle output_item.added to capture function call metadata (name) if (chunk.type === 'response.output_item.added') { - const item = chunk.item as { - type: string - id?: string - name?: string - } - if (item.type === 'function_call' && item.id) { + const item = chunk.item + if (item?.type === 'function_call' && item.id) { const existing = toolCallMetadata.get(item.id) if (!existing) { toolCallMetadata.set(item.id, { index: chunk.outputIndex ?? 0, - name: item.name || '', + name: item.name, started: false, }) - } else if (!existing.name && item.name) { + } else if (!existing.name) { existing.name = item.name } const metadata = toolCallMetadata.get(item.id)! @@ -754,7 +750,7 @@ export class OpenRouterResponsesTextAdapter< model: model || options.model, timestamp: Date.now(), index: chunk.outputIndex ?? 0, - } satisfies StreamChunk + } metadata.started = true } } @@ -784,7 +780,7 @@ export class OpenRouterResponsesTextAdapter< model: model || options.model, timestamp: Date.now(), delta: typeof chunk.delta === 'string' ? 
chunk.delta : '', - } satisfies StreamChunk + } } if (chunk.type === 'response.function_call_arguments.done') { @@ -840,27 +836,22 @@ export class OpenRouterResponsesTextAdapter< model: model || options.model, timestamp: Date.now(), input: parsedInput, - } satisfies StreamChunk + } } // `output_item.done` is the last point at which a function_call's // name is guaranteed to be on the wire. if (chunk.type === 'response.output_item.done') { - const item = chunk.item as { - type: string - id?: string - name?: string - arguments?: string - } - if (item.type === 'function_call' && item.id) { + const item = chunk.item + if (item?.type === 'function_call' && item.id) { const metadata = toolCallMetadata.get(item.id) ?? { index: chunk.outputIndex ?? 0, - name: item.name || '', + name: item.name, started: false, } if (!toolCallMetadata.has(item.id)) { toolCallMetadata.set(item.id, metadata) - } else if (!metadata.name && item.name) { + } else if (!metadata.name) { metadata.name = item.name } if (!metadata.started && metadata.name) { @@ -872,7 +863,7 @@ export class OpenRouterResponsesTextAdapter< model: model || options.model, timestamp: Date.now(), index: metadata.index, - } satisfies StreamChunk + } metadata.started = true } const rawArgs = @@ -912,7 +903,7 @@ export class OpenRouterResponsesTextAdapter< model: model || options.model, timestamp: Date.now(), input: parsedInput, - } satisfies StreamChunk + } metadata.ended = true metadata.pendingArguments = undefined } @@ -920,27 +911,13 @@ export class OpenRouterResponsesTextAdapter< } if (chunk.type === 'response.completed') { - const responseObj = (chunk.response ?? {}) as { - output?: ReadonlyArray - usage?: { - inputTokens?: number - outputTokens?: number - totalTokens?: number - } | null - incompleteDetails?: { reason?: string } | null - } + const responseObj = chunk.response ?? {} const outputItems = Array.isArray(responseObj.output) ? responseObj.output : [] // Final backstop for function_call lifecycle. - for (const rawItem of outputItems) { - const item = rawItem as { - type?: string - id?: string - name?: string - arguments?: string - } + for (const item of outputItems) { if (item.type !== 'function_call' || !item.id) continue const metadata = toolCallMetadata.get(item.id) ?? { index: 0, @@ -961,7 +938,7 @@ export class OpenRouterResponsesTextAdapter< model: model || options.model, timestamp: Date.now(), index: metadata.index, - } satisfies StreamChunk + } metadata.started = true } const rawArgs = @@ -1001,7 +978,7 @@ export class OpenRouterResponsesTextAdapter< model: model || options.model, timestamp: Date.now(), input: parsedInput, - } satisfies StreamChunk + } metadata.ended = true metadata.pendingArguments = undefined } @@ -1013,7 +990,7 @@ export class OpenRouterResponsesTextAdapter< messageId: aguiState.messageId, model: model || options.model, timestamp: Date.now(), - } satisfies StreamChunk + } hasEmittedTextMessageStart = false } @@ -1045,7 +1022,7 @@ export class OpenRouterResponsesTextAdapter< totalTokens: responseObj.usage?.totalTokens || 0, }, finishReason, - } satisfies StreamChunk + } runFinishedEmitted = true } @@ -1061,7 +1038,7 @@ export class OpenRouterResponsesTextAdapter< message: chunk.message ?? 
 '',
             ...(code !== undefined && { code }),
           },
-        } satisfies StreamChunk
+        }
         runFinishedEmitted = true
         return
       }
@@ -1076,7 +1053,7 @@
         messageId: aguiState.messageId,
         model: model || options.model,
         timestamp: Date.now(),
-      } satisfies StreamChunk
+      }
     }
     yield {
       type: EventType.RUN_FINISHED,
@@ -1086,7 +1063,7 @@
       timestamp: Date.now(),
       usage: undefined,
       finishReason: toolCallMetadata.size > 0 ? 'tool_calls' : 'stop',
-    } satisfies StreamChunk
+    }
   }
 } catch (error: unknown) {
   const errorPayload = toRunErrorPayload(
@@ -1104,7 +1081,7 @@
       message: errorPayload.message,
       code: errorPayload.code,
       error: errorPayload,
-    } satisfies StreamChunk
+    }
   }
 }
 
@@ -1117,7 +1094,7 @@
     // Fail loud on webSearchTool() — v1 only routes function tools.
     if (options.tools) {
       for (const tool of options.tools) {
-        if (isWebSearchTool(tool as Tool)) {
+        if (isWebSearchTool(tool)) {
           throw new Error(
             `OpenRouterResponsesTextAdapter does not yet support webSearchTool(). ` +
               `Use the chat-completions adapter (openRouterText) for web search ` +
@@ -1175,10 +1152,10 @@
         options.systemPrompts.length > 0 && {
           instructions: options.systemPrompts.join('\n'),
         }),
-      input: input as ResponsesRequest['input'],
+      input,
       ...(tools && tools.length > 0 && {
-        tools: tools as ResponsesRequest['tools'],
+        tools,
       }),
     }
 
@@ -1204,7 +1181,7 @@
           typeof message.content === 'string'
             ? message.content
            : this.extractTextContent(message.content),
-        } as InputsItem)
+        })
         continue
       }
 
@@ -1221,7 +1198,7 @@
           id: toolCall.id,
           name: toolCall.function.name,
           arguments: argumentsString,
-        } as InputsItem)
+        })
       }
     }
 
@@ -1232,7 +1209,7 @@
         type: 'message',
        role: 'assistant',
         content: contentStr,
-      } as InputsItem)
+      })
     }
   }
   continue
@@ -1255,7 +1232,7 @@
       type: 'message',
       role: 'user',
       content: inputContent,
-    } as InputsItem)
+    })
   }
 
   return result
@@ -1373,8 +1350,10 @@
 interface NormalizedStreamEvent {
   code?: unknown
   param?: string | null
   sequenceNumber?: number
-  response?: unknown
-  item?: unknown
+  /** camelCased copy of the `response` payload from `response.{completed,failed,incomplete}` events. */
+  response?: Partial<OpenResponsesResult>
+  /** SDK discriminated union — narrow with `item.type === 'function_call'`. */
+  item?: OutputItems
   part?: unknown
 }
diff --git a/packages/typescript/ai-openrouter/src/adapters/summarize.ts b/packages/typescript/ai-openrouter/src/adapters/summarize.ts
index 21004335d..11d69d136 100644
--- a/packages/typescript/ai-openrouter/src/adapters/summarize.ts
+++ b/packages/typescript/ai-openrouter/src/adapters/summarize.ts
@@ -20,6 +20,12 @@ export interface OpenRouterSummarizeConfig extends OpenRouterConfig {
 
 /**
  * Creates an OpenRouter summarize adapter with explicit API key.
+ * Type resolution happens here at the call site.
+ * + * @param model - The model name (e.g., 'openai/gpt-4o-mini', 'anthropic/claude-3-5-sonnet') + * @param apiKey - Your OpenRouter API key + * @param config - Optional additional configuration + * @returns Configured OpenRouter summarize adapter instance with resolved types * * @example * ```typescript @@ -42,13 +48,27 @@ export function createOpenRouterSummarize( } /** - * Creates an OpenRouter summarize adapter with API key from - * `OPENROUTER_API_KEY` in `process.env` (Node) or `window.env` (browser). + * Creates an OpenRouter summarize adapter with automatic API key detection from environment variables. + * Type resolution happens here at the call site. + * + * Looks for `OPENROUTER_API_KEY` in: + * - `process.env` (Node.js) + * - `window.env` (Browser with injected env) + * + * @param model - The model name (e.g., 'openai/gpt-4o-mini', 'anthropic/claude-3-5-sonnet') + * @param config - Optional configuration (excluding apiKey which is auto-detected) + * @returns Configured OpenRouter summarize adapter instance with resolved types + * @throws Error if OPENROUTER_API_KEY is not found in environment * * @example * ```typescript + * // Automatically uses OPENROUTER_API_KEY from environment * const adapter = openRouterSummarize('openai/gpt-4o-mini'); - * await summarize({ adapter, text: 'Long article text...' }); + * + * await summarize({ + * adapter, + * text: "Long article text..." + * }); * ``` */ export function openRouterSummarize( diff --git a/packages/typescript/ai-openrouter/src/adapters/text.ts b/packages/typescript/ai-openrouter/src/adapters/text.ts index f913a9bfc..a32a894da 100644 --- a/packages/typescript/ai-openrouter/src/adapters/text.ts +++ b/packages/typescript/ai-openrouter/src/adapters/text.ts @@ -124,7 +124,7 @@ export class OpenRouterTextAdapter< { provider: this.name, model: this.model }, ) const reqOptions = extractRequestOptions(options.request) - const stream = (await this.orClient.chat.send( + const stream = await this.orClient.chat.send( { chatRequest: { ...chatRequest, @@ -139,7 +139,7 @@ export class OpenRouterTextAdapter< signal: reqOptions.signal ?? undefined, ...(reqOptions.headers && { headers: reqOptions.headers }), }, - )) as AsyncIterable + ) yield* this.processStreamChunks(stream, options, aguiState) } catch (error: unknown) { @@ -159,7 +159,7 @@ export class OpenRouterTextAdapter< threadId: aguiState.threadId, model: options.model, timestamp: Date.now(), - } satisfies StreamChunk + } } // Emit AG-UI RUN_ERROR @@ -170,7 +170,7 @@ export class OpenRouterTextAdapter< message: errorPayload.message, code: errorPayload.code, error: errorPayload, - } satisfies StreamChunk + } options.logger.errors(`${this.name}.chatStream fatal`, { error: errorPayload, @@ -392,7 +392,7 @@ export class OpenRouterTextAdapter< threadId: aguiState.threadId, model: chunk.model || options.model, timestamp: Date.now(), - } satisfies StreamChunk + } } // Reasoning content (OpenRouter emits this as `delta.reasoningDetails`). @@ -408,14 +408,14 @@ export class OpenRouterTextAdapter< messageId: reasoningMessageId, model: chunk.model || options.model, timestamp: Date.now(), - } satisfies StreamChunk + } yield { type: EventType.REASONING_MESSAGE_START, messageId: reasoningMessageId, role: 'reasoning' as const, model: chunk.model || options.model, timestamp: Date.now(), - } satisfies StreamChunk + } // Legacy STEP_STARTED (single emission, paired with the // STEP_FINISHED below when reasoning closes). 
yield { @@ -425,7 +425,7 @@ export class OpenRouterTextAdapter< model: chunk.model || options.model, timestamp: Date.now(), stepType: 'thinking', - } satisfies StreamChunk + } } accumulatedReasoning += reasoningText yield { @@ -434,7 +434,7 @@ export class OpenRouterTextAdapter< delta: reasoningText, model: chunk.model || options.model, timestamp: Date.now(), - } satisfies StreamChunk + } } const choice = chunk.choices[0] @@ -456,13 +456,13 @@ export class OpenRouterTextAdapter< messageId: reasoningMessageId, model: chunk.model || options.model, timestamp: Date.now(), - } satisfies StreamChunk + } yield { type: EventType.REASONING_END, messageId: reasoningMessageId, model: chunk.model || options.model, timestamp: Date.now(), - } satisfies StreamChunk + } if (stepId) { yield { type: EventType.STEP_FINISHED, @@ -471,7 +471,7 @@ export class OpenRouterTextAdapter< model: chunk.model || options.model, timestamp: Date.now(), content: accumulatedReasoning, - } satisfies StreamChunk + } } } @@ -484,7 +484,7 @@ export class OpenRouterTextAdapter< model: chunk.model || options.model, timestamp: Date.now(), role: 'assistant', - } satisfies StreamChunk + } } accumulatedContent += deltaContent @@ -497,7 +497,7 @@ export class OpenRouterTextAdapter< timestamp: Date.now(), delta: deltaContent, content: accumulatedContent, - } satisfies StreamChunk + } } // Handle tool calls - they come in as deltas (camelCase toolCalls) @@ -539,7 +539,7 @@ export class OpenRouterTextAdapter< model: chunk.model || options.model, timestamp: Date.now(), index, - } satisfies StreamChunk + } } // Emit TOOL_CALL_ARGS for argument deltas @@ -550,7 +550,7 @@ export class OpenRouterTextAdapter< model: chunk.model || options.model, timestamp: Date.now(), delta: toolCallDelta.function.arguments, - } satisfies StreamChunk + } } } } @@ -609,7 +609,7 @@ export class OpenRouterTextAdapter< model: chunk.model || options.model, timestamp: Date.now(), input: parsedInput, - } satisfies StreamChunk + } emittedAnyToolCallEnd = true } // Clear tool-call state after emission so a subsequent @@ -625,7 +625,7 @@ export class OpenRouterTextAdapter< messageId: aguiState.messageId, model: chunk.model || options.model, timestamp: Date.now(), - } satisfies StreamChunk + } hasEmittedTextMessageStart = false } @@ -670,7 +670,7 @@ export class OpenRouterTextAdapter< model: lastModel || options.model, timestamp: Date.now(), input: parsedInput, - } satisfies StreamChunk + } emittedAnyToolCallEnd = true } toolCallsInProgress.clear() @@ -683,7 +683,7 @@ export class OpenRouterTextAdapter< messageId: aguiState.messageId, model: lastModel || options.model, timestamp: Date.now(), - } satisfies StreamChunk + } } // Close any reasoning lifecycle that text never closed (no text @@ -695,13 +695,13 @@ export class OpenRouterTextAdapter< messageId: reasoningMessageId, model: lastModel || options.model, timestamp: Date.now(), - } satisfies StreamChunk + } yield { type: EventType.REASONING_END, messageId: reasoningMessageId, model: lastModel || options.model, timestamp: Date.now(), - } satisfies StreamChunk + } if (stepId) { yield { type: EventType.STEP_FINISHED, @@ -710,7 +710,7 @@ export class OpenRouterTextAdapter< model: lastModel || options.model, timestamp: Date.now(), content: accumulatedReasoning, - } satisfies StreamChunk + } } } @@ -748,7 +748,7 @@ export class OpenRouterTextAdapter< } : undefined, finishReason, - } satisfies StreamChunk + } } } catch (error: unknown) { // Narrow before logging: raw SDK errors can carry request metadata @@ -770,7 +770,7 @@ 
export class OpenRouterTextAdapter< message: errorPayload.message, code: errorPayload.code, error: errorPayload, - } satisfies StreamChunk + } } } @@ -794,7 +794,7 @@ export class OpenRouterTextAdapter< messages.push({ role: 'system', content: options.systemPrompts.join('\n'), - } as ChatMessages) + }) } for (const m of options.messages) { messages.push(this.convertMessage(m)) @@ -807,8 +807,8 @@ export class OpenRouterTextAdapter< // Spread modelOptions first so explicit top-level options (set below) win // when defined but `undefined` doesn't clobber values the caller set in // modelOptions. - return { - ...(modelOptions as Record), + const request: Omit = { + ...modelOptions, model: options.model + variantSuffix, messages, ...(options.temperature !== undefined && { @@ -818,9 +818,9 @@ export class OpenRouterTextAdapter< maxCompletionTokens: options.maxTokens, }), ...(options.topP !== undefined && { topP: options.topP }), - ...(tools && - tools.length > 0 && { tools: tools as ChatRequest['tools'] }), - } as Omit + ...(tools && tools.length > 0 && { tools }), + } + return request } /** @@ -841,7 +841,7 @@ export class OpenRouterTextAdapter< ? message.content : this.extractTextContent(message.content), toolCallId: message.toolCallId || '', - } as ChatMessages + } } if (message.role === 'assistant') { @@ -872,7 +872,7 @@ export class OpenRouterTextAdapter< role: 'assistant', content: hasToolCalls && !textContent ? null : textContent, toolCalls, - } as ChatMessages + } } // user — fail loud on empty and unsupported content. Silently sending an @@ -891,7 +891,7 @@ export class OpenRouterTextAdapter< return { role: 'user', content: text, - } as ChatMessages + } } const parts: Array = [] @@ -916,14 +916,14 @@ export class OpenRouterTextAdapter< return { role: 'user', content: parts, - } as ChatMessages + } } /** OpenRouter content-part converter (camelCase imageUrl/inputAudio/videoUrl). */ protected convertContentPart(part: ContentPart): ChatContentItems | null { switch (part.type) { case 'text': - return { type: 'text', text: part.content } as ChatContentItems + return { type: 'text', text: part.content } case 'image': { const meta = part.metadata as OpenRouterImageMetadata | undefined const value = part.source.value @@ -939,7 +939,7 @@ export class OpenRouterTextAdapter< return { type: 'image_url', imageUrl: { url, detail: meta?.detail || 'auto' }, - } as ChatContentItems + } } case 'audio': // OpenRouter's chat-completions `input_audio` shape carries @@ -953,17 +953,17 @@ export class OpenRouterTextAdapter< return { type: 'text', text: `[Audio: ${part.source.value}]`, - } as ChatContentItems + } } return { type: 'input_audio', inputAudio: { data: part.source.value, format: 'mp3' }, - } as ChatContentItems + } case 'video': return { type: 'video_url', videoUrl: { url: part.source.value }, - } as ChatContentItems + } case 'document': // The chat-completions SDK has no document_url type. 
For URL // sources, surface a text reference so the model at least sees @@ -984,7 +984,7 @@ export class OpenRouterTextAdapter< return { type: 'text', text: `[Document: ${part.source.value}]`, - } as ChatContentItems + } default: return null } diff --git a/packages/typescript/ai/src/activities/stream-generation-result.ts b/packages/typescript/ai/src/activities/stream-generation-result.ts index deeacda8f..9b83a85bd 100644 --- a/packages/typescript/ai/src/activities/stream-generation-result.ts +++ b/packages/typescript/ai/src/activities/stream-generation-result.ts @@ -34,7 +34,7 @@ export async function* streamGenerationResult( runId, threadId, timestamp: Date.now(), - } satisfies StreamChunk + } try { const result = await generator() @@ -44,7 +44,7 @@ export async function* streamGenerationResult( name: 'generation:result', value: result as unknown, timestamp: Date.now(), - } satisfies StreamChunk + } yield { type: EventType.RUN_FINISHED, @@ -52,7 +52,7 @@ export async function* streamGenerationResult( threadId, finishReason: 'stop', timestamp: Date.now(), - } satisfies StreamChunk + } } catch (error: unknown) { const payload = toRunErrorPayload(error, 'Generation failed') yield { @@ -62,6 +62,6 @@ export async function* streamGenerationResult( // Deprecated nested form for backward compatibility error: payload, timestamp: Date.now(), - } satisfies StreamChunk + } } } diff --git a/packages/typescript/ai/src/types.ts b/packages/typescript/ai/src/types.ts index f66522b3a..c240e2578 100644 --- a/packages/typescript/ai/src/types.ts +++ b/packages/typescript/ai/src/types.ts @@ -830,13 +830,7 @@ export interface RunFinishedEvent extends AGUIRunFinishedEvent { /** Model identifier for multi-model support */ model?: string /** Why the generation stopped */ - finishReason?: - | 'stop' - | 'length' - | 'content_filter' - | 'tool_calls' - | 'function_call' - | null + finishReason?: 'stop' | 'length' | 'content_filter' | 'tool_calls' | null /** Token usage statistics */ usage?: { promptTokens: number diff --git a/packages/typescript/ai/tests/test-utils.ts b/packages/typescript/ai/tests/test-utils.ts index 365d4f80c..978c38387 100644 --- a/packages/typescript/ai/tests/test-utils.ts +++ b/packages/typescript/ai/tests/test-utils.ts @@ -1,6 +1,20 @@ import { EventType } from '../src/types' import type { AnyTextAdapter } from '../src/activities/chat/adapter' -import type { StreamChunk, TextMessageContentEvent, Tool } from '../src/types' +import type { + RunErrorEvent, + RunFinishedEvent, + RunStartedEvent, + StepFinishedEvent, + StepStartedEvent, + StreamChunk, + TextMessageContentEvent, + TextMessageEndEvent, + TextMessageStartEvent, + Tool, + ToolCallArgsEvent, + ToolCallEndEvent, + ToolCallStartEvent, +} from '../src/types' // ============================================================================ // Chunk factory @@ -22,62 +36,62 @@ export function chunk( /** Shorthand chunk factories for common AG-UI events. 
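 * Example (editor's note): `ev.textContent('hi')` yields a
 * TEXT_MESSAGE_CONTENT event with the default `messageId: 'msg-1'` —
 * handy for driving an adapter-under-test's accumulator in assertions.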
*/ export const ev = { - runStarted: (runId = 'run-1', threadId = 'thread-1') => - ({ - type: EventType.RUN_STARTED, - runId, - threadId, - timestamp: Date.now(), - }) satisfies StreamChunk, - textStart: (messageId = 'msg-1') => - ({ - type: EventType.TEXT_MESSAGE_START, - messageId, - role: 'assistant', - timestamp: Date.now(), - }) satisfies StreamChunk, - textContent: (delta: string, messageId = 'msg-1') => - ({ - type: EventType.TEXT_MESSAGE_CONTENT, - messageId, - delta, - timestamp: Date.now(), - }) satisfies StreamChunk, - textEnd: (messageId = 'msg-1') => - ({ - type: EventType.TEXT_MESSAGE_END, - messageId, - timestamp: Date.now(), - }) satisfies StreamChunk, - toolStart: (toolCallId: string, toolCallName: string, index?: number) => - ({ - type: EventType.TOOL_CALL_START, - toolCallId, - toolCallName, - toolName: toolCallName, - timestamp: Date.now(), - ...(index !== undefined ? { index } : {}), - }) satisfies StreamChunk, - toolArgs: (toolCallId: string, delta: string) => - ({ - type: EventType.TOOL_CALL_ARGS, - toolCallId, - delta, - timestamp: Date.now(), - }) satisfies StreamChunk, + runStarted: (runId = 'run-1', threadId = 'thread-1'): RunStartedEvent => ({ + type: EventType.RUN_STARTED, + runId, + threadId, + timestamp: Date.now(), + }), + textStart: (messageId = 'msg-1'): TextMessageStartEvent => ({ + type: EventType.TEXT_MESSAGE_START, + messageId, + role: 'assistant', + timestamp: Date.now(), + }), + textContent: ( + delta: string, + messageId = 'msg-1', + ): TextMessageContentEvent => ({ + type: EventType.TEXT_MESSAGE_CONTENT, + messageId, + delta, + timestamp: Date.now(), + }), + textEnd: (messageId = 'msg-1'): TextMessageEndEvent => ({ + type: EventType.TEXT_MESSAGE_END, + messageId, + timestamp: Date.now(), + }), + toolStart: ( + toolCallId: string, + toolCallName: string, + index?: number, + ): ToolCallStartEvent => ({ + type: EventType.TOOL_CALL_START, + toolCallId, + toolCallName, + toolName: toolCallName, + timestamp: Date.now(), + ...(index !== undefined ? { index } : {}), + }), + toolArgs: (toolCallId: string, delta: string): ToolCallArgsEvent => ({ + type: EventType.TOOL_CALL_ARGS, + toolCallId, + delta, + timestamp: Date.now(), + }), toolEnd: ( toolCallId: string, toolCallName: string, opts?: { input?: unknown; result?: string }, - ) => - ({ - type: EventType.TOOL_CALL_END, - toolCallId, - toolCallName, - toolName: toolCallName, - timestamp: Date.now(), - ...opts, - }) satisfies StreamChunk, + ): ToolCallEndEvent => ({ + type: EventType.TOOL_CALL_END, + toolCallId, + toolCallName, + toolName: toolCallName, + timestamp: Date.now(), + ...opts, + }), runFinished: ( finishReason: | 'stop' @@ -92,36 +106,32 @@ export const ev = { totalTokens: number }, threadId = 'thread-1', - ) => - ({ - type: EventType.RUN_FINISHED, - runId, - threadId, - finishReason, - timestamp: Date.now(), - ...(usage ? { usage } : {}), - }) satisfies StreamChunk, - runError: (message: string) => - ({ - type: EventType.RUN_ERROR, - message, - timestamp: Date.now(), - error: { message }, - }) satisfies StreamChunk, - stepStarted: (stepName = 'step-1') => - ({ - type: EventType.STEP_STARTED, - stepName, - timestamp: Date.now(), - }) satisfies StreamChunk, - stepFinished: (delta: string, stepName = 'step-1') => - ({ - type: EventType.STEP_FINISHED, - stepName, - stepId: stepName, - delta, - timestamp: Date.now(), - }) satisfies StreamChunk, + ): RunFinishedEvent => ({ + type: EventType.RUN_FINISHED, + runId, + threadId, + finishReason, + timestamp: Date.now(), + ...(usage ? 
{ usage } : {}), + }), + runError: (message: string): RunErrorEvent => ({ + type: EventType.RUN_ERROR, + message, + timestamp: Date.now(), + error: { message }, + }), + stepStarted: (stepName = 'step-1'): StepStartedEvent => ({ + type: EventType.STEP_STARTED, + stepName, + timestamp: Date.now(), + }), + stepFinished: (delta: string, stepName = 'step-1'): StepFinishedEvent => ({ + type: EventType.STEP_FINISHED, + stepName, + stepId: stepName, + delta, + timestamp: Date.now(), + }), } // ============================================================================ diff --git a/packages/typescript/openai-base/src/adapters/chat-completions-text.ts b/packages/typescript/openai-base/src/adapters/chat-completions-text.ts index 83b73b191..e6d892d53 100644 --- a/packages/typescript/openai-base/src/adapters/chat-completions-text.ts +++ b/packages/typescript/openai-base/src/adapters/chat-completions-text.ts @@ -21,6 +21,7 @@ import type { DefaultMessageMetadataByModality, Modality, ModelMessage, + RunFinishedEvent, StreamChunk, TextOptions, } from '@tanstack/ai' @@ -105,7 +106,7 @@ export abstract class OpenAIBaseChatCompletionsTextAdapter< threadId: aguiState.threadId, model: options.model, timestamp: Date.now(), - } satisfies StreamChunk + } } // Emit AG-UI RUN_ERROR @@ -116,7 +117,7 @@ export abstract class OpenAIBaseChatCompletionsTextAdapter< message: errorPayload.message, code: errorPayload.code, error: errorPayload, - } satisfies StreamChunk + } options.logger.errors(`${this.name}.chatStream fatal`, { error: errorPayload, @@ -338,7 +339,7 @@ export abstract class OpenAIBaseChatCompletionsTextAdapter< threadId: aguiState.threadId, model: chunk.model || options.model, timestamp: Date.now(), - } satisfies StreamChunk + } } // Reasoning content (extractReasoning() hook). Run before reading @@ -355,14 +356,14 @@ export abstract class OpenAIBaseChatCompletionsTextAdapter< messageId: reasoningMessageId, model: chunk.model || options.model, timestamp: Date.now(), - } satisfies StreamChunk + } yield { type: EventType.REASONING_MESSAGE_START, messageId: reasoningMessageId, role: 'reasoning' as const, model: chunk.model || options.model, timestamp: Date.now(), - } satisfies StreamChunk + } // Legacy STEP_STARTED (single emission, paired with the // STEP_FINISHED below when reasoning closes). 
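// Editor's note on ordering (derived from the surrounding yields): with
// extractReasoning() wired up, a single reasoning burst emits
// REASONING_START → REASONING_MESSAGE_START → STEP_STARTED (legacy),
// then one reasoning-delta event per extracted chunk, and closes with
// REASONING_MESSAGE_END → REASONING_END → STEP_FINISHED (legacy) once
// text content arrives or the stream ends.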
yield { @@ -372,7 +373,7 @@ export abstract class OpenAIBaseChatCompletionsTextAdapter< model: chunk.model || options.model, timestamp: Date.now(), stepType: 'thinking', - } satisfies StreamChunk + } } accumulatedReasoning += reasoning.text yield { @@ -381,7 +382,7 @@ export abstract class OpenAIBaseChatCompletionsTextAdapter< delta: reasoning.text, model: chunk.model || options.model, timestamp: Date.now(), - } satisfies StreamChunk + } } const choice = chunk.choices[0] @@ -403,13 +404,13 @@ export abstract class OpenAIBaseChatCompletionsTextAdapter< messageId: reasoningMessageId, model: chunk.model || options.model, timestamp: Date.now(), - } satisfies StreamChunk + } yield { type: EventType.REASONING_END, messageId: reasoningMessageId, model: chunk.model || options.model, timestamp: Date.now(), - } satisfies StreamChunk + } if (stepId) { yield { type: EventType.STEP_FINISHED, @@ -418,7 +419,7 @@ export abstract class OpenAIBaseChatCompletionsTextAdapter< model: chunk.model || options.model, timestamp: Date.now(), content: accumulatedReasoning, - } satisfies StreamChunk + } } } @@ -431,7 +432,7 @@ export abstract class OpenAIBaseChatCompletionsTextAdapter< model: chunk.model || options.model, timestamp: Date.now(), role: 'assistant', - } satisfies StreamChunk + } } accumulatedContent += deltaContent @@ -444,7 +445,7 @@ export abstract class OpenAIBaseChatCompletionsTextAdapter< timestamp: Date.now(), delta: deltaContent, content: accumulatedContent, - } satisfies StreamChunk + } } // Handle tool calls - they come in as deltas @@ -486,7 +487,7 @@ export abstract class OpenAIBaseChatCompletionsTextAdapter< model: chunk.model || options.model, timestamp: Date.now(), index, - } satisfies StreamChunk + } } // Emit TOOL_CALL_ARGS for argument deltas @@ -497,7 +498,7 @@ export abstract class OpenAIBaseChatCompletionsTextAdapter< model: chunk.model || options.model, timestamp: Date.now(), delta: toolCallDelta.function.arguments, - } satisfies StreamChunk + } } } } @@ -559,7 +560,7 @@ export abstract class OpenAIBaseChatCompletionsTextAdapter< model: chunk.model || options.model, timestamp: Date.now(), input: parsedInput, - } satisfies StreamChunk + } emittedAnyToolCallEnd = true } // Clear tool-call state after emission so a subsequent @@ -575,7 +576,7 @@ export abstract class OpenAIBaseChatCompletionsTextAdapter< messageId: aguiState.messageId, model: chunk.model || options.model, timestamp: Date.now(), - } satisfies StreamChunk + } hasEmittedTextMessageStart = false } @@ -632,7 +633,7 @@ export abstract class OpenAIBaseChatCompletionsTextAdapter< model: lastModel || options.model, timestamp: Date.now(), input: parsedInput, - } satisfies StreamChunk + } pendingToolCount += 1 emittedAnyToolCallEnd = true } @@ -646,7 +647,7 @@ export abstract class OpenAIBaseChatCompletionsTextAdapter< messageId: aguiState.messageId, model: lastModel || options.model, timestamp: Date.now(), - } satisfies StreamChunk + } } // Close any reasoning lifecycle that text never closed (no text @@ -658,13 +659,13 @@ export abstract class OpenAIBaseChatCompletionsTextAdapter< messageId: reasoningMessageId, model: lastModel || options.model, timestamp: Date.now(), - } satisfies StreamChunk + } yield { type: EventType.REASONING_END, messageId: reasoningMessageId, model: lastModel || options.model, timestamp: Date.now(), - } satisfies StreamChunk + } if (stepId) { yield { type: EventType.STEP_FINISHED, @@ -673,23 +674,27 @@ export abstract class OpenAIBaseChatCompletionsTextAdapter< model: lastModel || options.model, 
timestamp: Date.now(), content: accumulatedReasoning, - } satisfies StreamChunk + } } } - // Map upstream finish_reason to AG-UI's narrower vocabulary while - // preserving the upstream value when it falls outside the AG-UI set. + // Map upstream finish_reason to AG-UI's narrower vocabulary. // Collapsing length / content_filter to 'stop' would hide why the // run terminated — surface it instead. Use `tool_calls` only when // a TOOL_CALL_END was actually emitted: an upstream that signalled // `tool_calls` but never produced a started/ended pair must NOT // surface `tool_calls` here, since downstream consumers wait for - // tool results that would never arrive. - const finishReason = emittedAnyToolCallEnd - ? 'tool_calls' - : pendingFinishReason === 'tool_calls' - ? 'stop' - : (pendingFinishReason ?? 'stop') + // tool results that would never arrive. OpenAI's legacy + // `function_call` value (from the v1 function-calling API) is + // normalized to `tool_calls` — semantically the same termination. + const finishReason: NonNullable = + emittedAnyToolCallEnd + ? 'tool_calls' + : pendingFinishReason === 'tool_calls' + ? 'stop' + : pendingFinishReason === 'function_call' + ? 'tool_calls' + : (pendingFinishReason ?? 'stop') yield { type: EventType.RUN_FINISHED, @@ -705,7 +710,7 @@ export abstract class OpenAIBaseChatCompletionsTextAdapter< } : undefined, finishReason, - } satisfies StreamChunk + } } } catch (error: unknown) { // Narrow before logging: raw SDK errors can carry request metadata @@ -727,7 +732,7 @@ export abstract class OpenAIBaseChatCompletionsTextAdapter< message: errorPayload.message, code: errorPayload.code, error: errorPayload, - } satisfies StreamChunk + } } } diff --git a/packages/typescript/openai-base/src/adapters/responses-text.ts b/packages/typescript/openai-base/src/adapters/responses-text.ts index 609cf8087..4fefd0679 100644 --- a/packages/typescript/openai-base/src/adapters/responses-text.ts +++ b/packages/typescript/openai-base/src/adapters/responses-text.ts @@ -128,7 +128,7 @@ export abstract class OpenAIBaseResponsesTextAdapter< threadId: aguiState.threadId, model: options.model, timestamp: Date.now(), - } satisfies StreamChunk + } } // Emit AG-UI RUN_ERROR @@ -139,7 +139,7 @@ export abstract class OpenAIBaseResponsesTextAdapter< message: errorPayload.message, code: errorPayload.code, error: errorPayload, - } satisfies StreamChunk + } options.logger.errors(`${this.name}.chatStream fatal`, { error: errorPayload, @@ -404,7 +404,7 @@ export abstract class OpenAIBaseResponsesTextAdapter< threadId: aguiState.threadId, model: model || options.model, timestamp: Date.now(), - } satisfies StreamChunk + } } const handleContentPart = (contentPart: { @@ -421,7 +421,7 @@ export abstract class OpenAIBaseResponsesTextAdapter< timestamp: Date.now(), delta: contentPart.text || '', content: accumulatedContent, - } satisfies StreamChunk + } } if (contentPart.type === 'reasoning_text') { @@ -441,7 +441,7 @@ export abstract class OpenAIBaseResponsesTextAdapter< timestamp: Date.now(), delta: contentPart.text || '', content: accumulatedReasoning, - } satisfies StreamChunk + } } // Either a real refusal or an unknown content_part type. 
Surface // the part type in the error so unknown parts are debuggable @@ -458,7 +458,7 @@ export abstract class OpenAIBaseResponsesTextAdapter< message, code, error: { message, code }, - } satisfies StreamChunk + } } // Capture model metadata from any of these events (created starts @@ -498,7 +498,7 @@ export abstract class OpenAIBaseResponsesTextAdapter< messageId: aguiState.messageId, model: chunk.response.model, timestamp: Date.now(), - } satisfies StreamChunk + } hasEmittedTextMessageStart = false } // Coalesce error + incomplete_details into a single RUN_ERROR @@ -529,7 +529,7 @@ export abstract class OpenAIBaseResponsesTextAdapter< message: errorMessage, ...(errorCode !== undefined && { code: errorCode }), }, - } satisfies StreamChunk + } // RUN_ERROR is the terminal event for this run; stop processing // any further chunks the iterator might still deliver. runFinishedEmitted = true @@ -556,7 +556,7 @@ export abstract class OpenAIBaseResponsesTextAdapter< model: model || options.model, timestamp: Date.now(), role: 'assistant', - } satisfies StreamChunk + } } accumulatedContent += textDelta @@ -568,7 +568,7 @@ export abstract class OpenAIBaseResponsesTextAdapter< timestamp: Date.now(), delta: textDelta, content: accumulatedContent, - } satisfies StreamChunk + } } } @@ -594,7 +594,7 @@ export abstract class OpenAIBaseResponsesTextAdapter< model: model || options.model, timestamp: Date.now(), stepType: 'thinking', - } satisfies StreamChunk + } } accumulatedReasoning += reasoningDelta @@ -608,7 +608,7 @@ export abstract class OpenAIBaseResponsesTextAdapter< timestamp: Date.now(), delta: reasoningDelta, content: accumulatedReasoning, - } satisfies StreamChunk + } } } @@ -633,7 +633,7 @@ export abstract class OpenAIBaseResponsesTextAdapter< model: model || options.model, timestamp: Date.now(), stepType: 'thinking', - } satisfies StreamChunk + } } accumulatedReasoning += summaryDelta @@ -647,7 +647,7 @@ export abstract class OpenAIBaseResponsesTextAdapter< timestamp: Date.now(), delta: summaryDelta, content: accumulatedReasoning, - } satisfies StreamChunk + } } } @@ -666,7 +666,7 @@ export abstract class OpenAIBaseResponsesTextAdapter< model: model || options.model, timestamp: Date.now(), role: 'assistant', - } satisfies StreamChunk + } } // Emit STEP_STARTED if this is reasoning content if (contentPart.type === 'reasoning_text' && !hasEmittedStepStarted) { @@ -679,7 +679,7 @@ export abstract class OpenAIBaseResponsesTextAdapter< model: model || options.model, timestamp: Date.now(), stepType: 'thinking', - } satisfies StreamChunk + } } // Mark whichever stream we just emitted into so a subsequent // `content_part.done` doesn't duplicate the same text. 
Without @@ -736,7 +736,7 @@ export abstract class OpenAIBaseResponsesTextAdapter< model: model || options.model, timestamp: Date.now(), role: 'assistant', - } satisfies StreamChunk + } } else if ( contentPart.type === 'reasoning_text' && !hasEmittedStepStarted @@ -750,7 +750,7 @@ export abstract class OpenAIBaseResponsesTextAdapter< model: model || options.model, timestamp: Date.now(), stepType: 'thinking', - } satisfies StreamChunk + } } // Only emit if we haven't been streaming deltas (e.g., for non-streaming responses) @@ -794,7 +794,7 @@ export abstract class OpenAIBaseResponsesTextAdapter< model: model || options.model, timestamp: Date.now(), index: chunk.output_index, - } satisfies StreamChunk + } metadata.started = true } } @@ -834,7 +834,7 @@ export abstract class OpenAIBaseResponsesTextAdapter< model: model || options.model, timestamp: Date.now(), delta: chunk.delta, - } satisfies StreamChunk + } } if (chunk.type === 'response.function_call_arguments.done') { @@ -904,7 +904,7 @@ export abstract class OpenAIBaseResponsesTextAdapter< model: model || options.model, timestamp: Date.now(), input: parsedInput, - } satisfies StreamChunk + } } // `output_item.done` is the last point at which a function_call's @@ -935,7 +935,7 @@ export abstract class OpenAIBaseResponsesTextAdapter< model: model || options.model, timestamp: Date.now(), index: metadata.index, - } satisfies StreamChunk + } metadata.started = true } // Emit END if we have args (either from a previously-deferred @@ -977,7 +977,7 @@ export abstract class OpenAIBaseResponsesTextAdapter< model: model || options.model, timestamp: Date.now(), input: parsedInput, - } satisfies StreamChunk + } metadata.ended = true metadata.pendingArguments = undefined } @@ -1013,7 +1013,7 @@ export abstract class OpenAIBaseResponsesTextAdapter< model: model || options.model, timestamp: Date.now(), index: metadata.index, - } satisfies StreamChunk + } metadata.started = true } const rawArgs = @@ -1053,7 +1053,7 @@ export abstract class OpenAIBaseResponsesTextAdapter< model: model || options.model, timestamp: Date.now(), input: parsedInput, - } satisfies StreamChunk + } metadata.ended = true metadata.pendingArguments = undefined } @@ -1066,7 +1066,7 @@ export abstract class OpenAIBaseResponsesTextAdapter< messageId: aguiState.messageId, model: model || options.model, timestamp: Date.now(), - } satisfies StreamChunk + } hasEmittedTextMessageStart = false } @@ -1106,7 +1106,7 @@ export abstract class OpenAIBaseResponsesTextAdapter< totalTokens: chunk.response.usage?.total_tokens || 0, }, finishReason, - } satisfies StreamChunk + } runFinishedEmitted = true } @@ -1121,7 +1121,7 @@ export abstract class OpenAIBaseResponsesTextAdapter< message: chunk.message, code: chunk.code ?? undefined, }, - } satisfies StreamChunk + } // RUN_ERROR is terminal — don't let the synthetic RUN_FINISHED // block fire after a top-level stream error event, and stop // processing further chunks so no in-flight lifecycle events @@ -1144,7 +1144,7 @@ export abstract class OpenAIBaseResponsesTextAdapter< messageId: aguiState.messageId, model: model || options.model, timestamp: Date.now(), - } satisfies StreamChunk + } } yield { type: EventType.RUN_FINISHED, @@ -1154,7 +1154,7 @@ export abstract class OpenAIBaseResponsesTextAdapter< timestamp: Date.now(), usage: undefined, finishReason: toolCallMetadata.size > 0 ? 
'tool_calls' : 'stop', - } satisfies StreamChunk + } } } catch (error: unknown) { // Narrow before logging: raw SDK errors can carry request metadata @@ -1174,7 +1174,7 @@ export abstract class OpenAIBaseResponsesTextAdapter< message: errorPayload.message, code: errorPayload.code, error: errorPayload, - } satisfies StreamChunk + } } } diff --git a/packages/typescript/openai-base/tests/chat-completions-text.test.ts b/packages/typescript/openai-base/tests/chat-completions-text.test.ts index 00f77bf5a..d0aa111a5 100644 --- a/packages/typescript/openai-base/tests/chat-completions-text.test.ts +++ b/packages/typescript/openai-base/tests/chat-completions-text.test.ts @@ -31,7 +31,7 @@ function makeStubClient(): OpenAI { * naturally; `config` is ignored. */ class TestChatCompletionsAdapter extends OpenAIBaseChatCompletionsTextAdapter { - constructor(_config: unknown, model: string, name = 'openai-compatible') { + constructor(_config: unknown, model: string, name = 'openai-base') { super(model, name, makeStubClient()) } } @@ -91,7 +91,7 @@ describe('OpenAIBaseChatCompletionsTextAdapter', () => { expect(adapter).toBeDefined() expect(adapter.kind).toBe('text') - expect(adapter.name).toBe('openai-compatible') + expect(adapter.name).toBe('openai-base') expect(adapter.model).toBe('test-model') }) diff --git a/packages/typescript/openai-base/tests/responses-text.test.ts b/packages/typescript/openai-base/tests/responses-text.test.ts index 16613a662..afe3c2860 100644 --- a/packages/typescript/openai-base/tests/responses-text.test.ts +++ b/packages/typescript/openai-base/tests/responses-text.test.ts @@ -27,11 +27,7 @@ function makeStubClient(): OpenAI { * `mockResponsesCreate` for per-test setup. */ class TestResponsesAdapter extends OpenAIBaseResponsesTextAdapter { - constructor( - _config: unknown, - model: string, - name = 'openai-compatible-responses', - ) { + constructor(_config: unknown, model: string, name = 'openai-base-responses') { super(model, name, makeStubClient()) } } @@ -91,7 +87,7 @@ describe('OpenAIBaseResponsesTextAdapter', () => { expect(adapter).toBeDefined() expect(adapter.kind).toBe('text') - expect(adapter.name).toBe('openai-compatible-responses') + expect(adapter.name).toBe('openai-base-responses') expect(adapter.model).toBe('test-model') }) From f473e441d48eff631001f57e522b818b90293f4c Mon Sep 17 00:00:00 2001 From: Tom Beckenham <34339192+tombeckenham@users.noreply.github.com> Date: Thu, 14 May 2026 08:44:30 +1000 Subject: [PATCH 40/49] refactor(ai-openrouter): drop residual chunk casts in responses-text MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Extend the no-`as`-on-chunks principle (PR #545 review) to five sibling sites missed by 44db9257: - `response.created/in_progress/incomplete/failed` model + error/incomplete capture (lines 462, 491): `NormalizedStreamEvent.response` is already `Partial`, so the duck-type casts were redundant. Read `chunk.response?.{model,error,incompleteDetails}` directly. - `response.content_part.{added,done}` (lines 629, 673): type `NormalizedStreamEvent.part` as the SDK's `ContentPartAddedEventPart` discriminated union (`ResponseOutputText | ReasoningTextContent | OpenAIResponsesRefusalContent | Unknown<'type'>`) and switch `handleContentPart` to narrow on `part.type`. The previous `text?` / `refusal?` duck-type allowed unsafe access on unknown parts. 
- `response.completed` `outputItems.some(item.type === 'function_call')` (line 998): the array element type is already `OutputItems`, line 921 above already narrows without a cast — leftover. Behaviourally identical; verified by openrouter unit tests (80/80) and e2e suite (30/30). Co-Authored-By: Claude Opus 4.7 (1M context) --- .../openrouter-narrow-stream-chunk-types.md | 5 ++ .../src/adapters/responses-text.ts | 82 +++++++++---------- 2 files changed, 46 insertions(+), 41 deletions(-) create mode 100644 .changeset/openrouter-narrow-stream-chunk-types.md diff --git a/.changeset/openrouter-narrow-stream-chunk-types.md b/.changeset/openrouter-narrow-stream-chunk-types.md new file mode 100644 index 000000000..2d2e2fc2b --- /dev/null +++ b/.changeset/openrouter-narrow-stream-chunk-types.md @@ -0,0 +1,5 @@ +--- +'@tanstack/ai-openrouter': patch +--- + +Internal: drop the remaining duck-typed `as { ... }` casts on stream chunks in `OpenRouterResponsesTextAdapter`. Five sites (`response.created/in_progress/incomplete/failed` model + error capture, `response.content_part.added/done` payload, and the `response.completed` function-call detection) now narrow via the SDK's discriminated unions directly. Behaviourally identical; reduces the chance of a SDK type rename silently slipping past us. diff --git a/packages/typescript/ai-openrouter/src/adapters/responses-text.ts b/packages/typescript/ai-openrouter/src/adapters/responses-text.ts index 3e118b620..b8c0d04c1 100644 --- a/packages/typescript/ai-openrouter/src/adapters/responses-text.ts +++ b/packages/typescript/ai-openrouter/src/adapters/responses-text.ts @@ -11,6 +11,7 @@ import { getOpenRouterApiKeyFromEnv } from '../utils' import type { SDKOptions } from '@openrouter/sdk' import type { ResponsesFunctionTool } from '../internal/responses-tool-converter' import type { + ContentPartAddedEventPart, InputsUnion, OpenResponsesResult, OutputItems, @@ -400,25 +401,23 @@ export class OpenRouterResponsesTextAdapter< } } - const handleContentPart = (contentPart: { - type: string - text?: string - refusal?: string - }): StreamChunk => { + const handleContentPart = ( + contentPart: ContentPartAddedEventPart, + ): StreamChunk => { if (contentPart.type === 'output_text') { - accumulatedContent += contentPart.text || '' + accumulatedContent += contentPart.text return { type: EventType.TEXT_MESSAGE_CONTENT, messageId: aguiState.messageId, model: model || options.model, timestamp: Date.now(), - delta: contentPart.text || '', + delta: contentPart.text, content: accumulatedContent, } } if (contentPart.type === 'reasoning_text') { - accumulatedReasoning += contentPart.text || '' + accumulatedReasoning += contentPart.text // Cache the fallback stepId rather than generating a fresh one // on every call. if (!stepId) { @@ -430,18 +429,29 @@ export class OpenRouterResponsesTextAdapter< stepId, model: model || options.model, timestamp: Date.now(), - delta: contentPart.text || '', + delta: contentPart.text, content: accumulatedReasoning, } } - // Either a real refusal or an unknown content_part type. Surface - // the part type in the error so unknown parts are debuggable - // instead of being misreported as "Unknown refusal". - const isRefusal = contentPart.type === 'refusal' - const message = isRefusal - ? contentPart.refusal || 'Refused without explanation' - : `Unsupported response content_part type: ${contentPart.type}` - const code = isRefusal ? 
'refusal' : contentPart.type + + if (contentPart.type === 'refusal') { + const message = + contentPart.refusal || 'Refused without explanation' + return { + type: EventType.RUN_ERROR, + model: model || options.model, + timestamp: Date.now(), + message, + code: 'refusal', + error: { message, code: 'refusal' }, + } + } + + // Forward-compat `Unknown<"type">` arm. Surface the discriminator + // value so unknown parts are debuggable instead of being misreported + // as "Unknown refusal". + const code = contentPart.type + const message = `Unsupported response content_part type: ${code}` return { type: EventType.RUN_ERROR, model: model || options.model, @@ -459,8 +469,7 @@ export class OpenRouterResponsesTextAdapter< chunk.type === 'response.incomplete' || chunk.type === 'response.failed' ) { - const r = chunk.response as { model?: string } | undefined - if (r?.model) model = r.model + if (chunk.response?.model) model = chunk.response.model } // response.created marks the start of a fresh run — safe to reset @@ -488,19 +497,15 @@ export class OpenRouterResponsesTextAdapter< } hasEmittedTextMessageStart = false } - const r = (chunk.response ?? {}) as { - error?: { message?: string; code?: unknown } | null - incompleteDetails?: { reason?: string } | null - } const errorMessage = - r.error?.message || - r.incompleteDetails?.reason || + chunk.response?.error?.message || + chunk.response?.incompleteDetails?.reason || (chunk.type === 'response.failed' ? 'Response failed' : 'Response ended incomplete') const errorCode = - normalizeCode(r.error?.code) ?? - (r.incompleteDetails ? 'incomplete' : undefined) ?? + normalizeCode(chunk.response?.error?.code) ?? + (chunk.response?.incompleteDetails ? 'incomplete' : undefined) ?? undefined yield { type: EventType.RUN_ERROR, @@ -625,12 +630,8 @@ export class OpenRouterResponsesTextAdapter< } // handle content_part added events for text, reasoning and refusals - if (chunk.type === 'response.content_part.added') { - const contentPart = chunk.part as { - type: string - text?: string - refusal?: string - } + if (chunk.type === 'response.content_part.added' && chunk.part) { + const contentPart = chunk.part if ( contentPart.type === 'output_text' && !hasEmittedTextMessageStart @@ -669,12 +670,8 @@ export class OpenRouterResponsesTextAdapter< } } - if (chunk.type === 'response.content_part.done') { - const contentPart = chunk.part as { - type: string - text?: string - refusal?: string - } + if (chunk.type === 'response.content_part.done' && chunk.part) { + const contentPart = chunk.part // Skip emitting chunks for content parts that we've already streamed via deltas if (contentPart.type === 'output_text' && hasStreamedContentDeltas) { @@ -995,7 +992,7 @@ export class OpenRouterResponsesTextAdapter< } const hasFunctionCalls = outputItems.some( - (item) => (item as { type?: string }).type === 'function_call', + (item) => item.type === 'function_call', ) const incompleteReason = responseObj.incompleteDetails?.reason const finishReason: @@ -1354,7 +1351,10 @@ interface NormalizedStreamEvent { response?: Partial /** SDK discriminated union — narrow with `item.type === ''`. */ item?: OutputItems - part?: unknown + /** SDK discriminated union — narrow with `part.type === ''`. + * Shared by `response.content_part.added` and `response.content_part.done` + * (`ContentPartDoneEventPart` is structurally identical). 
 */
+  part?: ContentPartAddedEventPart
 }

 /**

From ad91033c932aaa91a97b35bc548b164523adc38c Mon Sep 17 00:00:00 2001
From: "autofix-ci[bot]" <114827586+autofix-ci[bot]@users.noreply.github.com>
Date: Wed, 13 May 2026 22:46:01 +0000
Subject: [PATCH 41/49] ci: apply automated fixes

---
 .../typescript/ai-openrouter/src/adapters/responses-text.ts | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/packages/typescript/ai-openrouter/src/adapters/responses-text.ts b/packages/typescript/ai-openrouter/src/adapters/responses-text.ts
index b8c0d04c1..b70f6dde0 100644
--- a/packages/typescript/ai-openrouter/src/adapters/responses-text.ts
+++ b/packages/typescript/ai-openrouter/src/adapters/responses-text.ts
@@ -435,8 +435,7 @@
       }

       if (contentPart.type === 'refusal') {
-        const message =
-          contentPart.refusal || 'Refused without explanation'
+        const message = contentPart.refusal || 'Refused without explanation'
         return {
           type: EventType.RUN_ERROR,
           model: model || options.model,

From 6d99fad82d038e240f834267c134fbafd85fe8ed Mon Sep 17 00:00:00 2001
From: Tom Beckenham <34339192+tombeckenham@users.noreply.github.com>
Date: Thu, 14 May 2026 09:08:15 +1000
Subject: [PATCH 42/49] refactor(ai): tighten summarize TProviderOptions to
 Record<string, unknown>
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Unify the generic constraint and default across the summarize surface:

- `SummarizationOptions`: `extends object = Record<string, any>` →
  `extends Record<string, unknown> = Record<string, unknown>`
- `SummarizeAdapter` / `BaseSummarizeAdapter`: constraint tightened from
  `extends object` to `extends Record<string, unknown>` (default was
  already `Record<string, unknown>`)
- `ChatStreamSummarizeAdapter`: `extends object = Record<string, any>` →
  `extends Record<string, unknown> = Record<string, unknown>`
- `activities/summarize/index.ts` instantiation sites: literal
  `<string>` → `<string, Record<string, unknown>>`

Removes the three-way default split (`object` / `Record<string, any>` /
`Record<string, unknown>`) that lived inside the summarize folder, and
forces unparameterised consumers to narrow before indexed access.

Co-Authored-By: Claude Opus 4.7 (1M context)
---
 .../summarize-tighten-provider-options-generic.md  |  5 +++++
 .../ai/src/activities/summarize/adapter.ts         |  4 ++--
 .../activities/summarize/chat-stream-summarize.ts  |  2 +-
 .../ai/src/activities/summarize/index.ts           | 14 +++++++-------
 packages/typescript/ai/src/types.ts                |  2 +-
 5 files changed, 16 insertions(+), 11 deletions(-)
 create mode 100644 .changeset/summarize-tighten-provider-options-generic.md

diff --git a/.changeset/summarize-tighten-provider-options-generic.md b/.changeset/summarize-tighten-provider-options-generic.md
new file mode 100644
index 000000000..55a9975b2
--- /dev/null
+++ b/.changeset/summarize-tighten-provider-options-generic.md
@@ -0,0 +1,5 @@
+---
+'@tanstack/ai': patch
+---
+
+Tighten the `TProviderOptions` generic constraint across the summarize surface from `extends object` to `extends Record<string, unknown>`, and align the default from `Record<string, any>` to `Record<string, unknown>`. Affects `SummarizationOptions`, `SummarizeAdapter`, `BaseSummarizeAdapter`, and `ChatStreamSummarizeAdapter`. Removes the `any`/`unknown`/`object` mixed defaults that previously lived inside the summarize folder and forces unparameterised callers to narrow before indexed access. No public-surface signature change for callers that supply a concrete provider-options shape (every shipping adapter does).
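A minimal sketch of the narrowing this forces, assuming `SummarizationOptions`
is exported from the package root and `modelOptions` is optional
(`readTemperature` is a hypothetical helper, not part of the change):

```ts
import type { SummarizationOptions } from '@tanstack/ai'

// Unparameterised, modelOptions defaults to Record<string, unknown>, so
// indexed access yields `unknown` and must be narrowed before use.
function readTemperature(opts: SummarizationOptions): number | undefined {
  const t = opts.modelOptions?.['temperature']
  return typeof t === 'number' ? t : undefined
}
```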
diff --git a/packages/typescript/ai/src/activities/summarize/adapter.ts b/packages/typescript/ai/src/activities/summarize/adapter.ts index 2f7cc34f6..022261280 100644 --- a/packages/typescript/ai/src/activities/summarize/adapter.ts +++ b/packages/typescript/ai/src/activities/summarize/adapter.ts @@ -27,7 +27,7 @@ export interface SummarizeAdapterConfig { */ export interface SummarizeAdapter< TModel extends string = string, - TProviderOptions extends object = Record, + TProviderOptions extends Record = Record, > { /** Discriminator for adapter kind - used by generate() to determine API shape */ readonly kind: 'summarize' @@ -74,7 +74,7 @@ export type AnySummarizeAdapter = SummarizeAdapter */ export abstract class BaseSummarizeAdapter< TModel extends string = string, - TProviderOptions extends object = Record, + TProviderOptions extends Record = Record, > implements SummarizeAdapter { readonly kind = 'summarize' as const abstract readonly name: string diff --git a/packages/typescript/ai/src/activities/summarize/chat-stream-summarize.ts b/packages/typescript/ai/src/activities/summarize/chat-stream-summarize.ts index 534851824..ec6d3abc3 100644 --- a/packages/typescript/ai/src/activities/summarize/chat-stream-summarize.ts +++ b/packages/typescript/ai/src/activities/summarize/chat-stream-summarize.ts @@ -41,7 +41,7 @@ export type InferTextProviderOptions = TAdapter extends { */ export class ChatStreamSummarizeAdapter< TModel extends string, - TProviderOptions extends object = Record, + TProviderOptions extends Record = Record, > extends BaseSummarizeAdapter { readonly name: string diff --git a/packages/typescript/ai/src/activities/summarize/index.ts b/packages/typescript/ai/src/activities/summarize/index.ts index f454ab6af..5aae43b17 100644 --- a/packages/typescript/ai/src/activities/summarize/index.ts +++ b/packages/typescript/ai/src/activities/summarize/index.ts @@ -46,7 +46,7 @@ export type SummarizeProviderOptions = * @template TStream - Whether to stream the output */ export interface SummarizeActivityOptions< - TAdapter extends SummarizeAdapter, + TAdapter extends SummarizeAdapter>, TStream extends boolean = false, > { /** The summarize adapter to use (must be created with a model) */ @@ -154,7 +154,7 @@ function createId(prefix: string): string { * ``` */ export function summarize< - TAdapter extends SummarizeAdapter, + TAdapter extends SummarizeAdapter>, TStream extends boolean = false, >( options: SummarizeActivityOptions, @@ -164,7 +164,7 @@ export function summarize< if (stream) { return runStreamingSummarize( options as unknown as SummarizeActivityOptions< - SummarizeAdapter, + SummarizeAdapter>, true >, ) as SummarizeActivityResult @@ -172,7 +172,7 @@ export function summarize< return runSummarize( options as unknown as SummarizeActivityOptions< - SummarizeAdapter, + SummarizeAdapter>, false >, ) as SummarizeActivityResult @@ -182,7 +182,7 @@ export function summarize< * Run non-streaming summarization */ async function runSummarize( - options: SummarizeActivityOptions, false>, + options: SummarizeActivityOptions>, false>, ): Promise { const { adapter, text, maxLength, style, focus, modelOptions } = options const model = adapter.model @@ -252,7 +252,7 @@ async function runSummarize( * to non-streaming and yields the result as a single chunk. 
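 * Editor's example of the public call shape this backs (assuming the
 * activity is invoked with a `stream: true` flag in its options):
 *   `for await (const chunk of summarize({ adapter, text, stream: true })) { … }`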
*/ async function* runStreamingSummarize( - options: SummarizeActivityOptions, true>, + options: SummarizeActivityOptions>, true>, ): AsyncIterable { const { adapter, text, maxLength, style, focus, modelOptions } = options const model = adapter.model @@ -300,7 +300,7 @@ async function* runStreamingSummarize( * Create typed options for the summarize() function without executing. */ export function createSummarizeOptions< - TAdapter extends SummarizeAdapter, + TAdapter extends SummarizeAdapter>, TStream extends boolean = false, >( options: SummarizeActivityOptions, diff --git a/packages/typescript/ai/src/types.ts b/packages/typescript/ai/src/types.ts index c240e2578..9802f2e8a 100644 --- a/packages/typescript/ai/src/types.ts +++ b/packages/typescript/ai/src/types.ts @@ -1180,7 +1180,7 @@ export interface TextCompletionChunk { } export interface SummarizationOptions< - TProviderOptions extends object = Record, + TProviderOptions extends Record = Record, > { model: string text: string From c92b351d745bd7ec022d0b0dd905c44e9596c89b Mon Sep 17 00:00:00 2001 From: "autofix-ci[bot]" <114827586+autofix-ci[bot]@users.noreply.github.com> Date: Wed, 13 May 2026 23:30:26 +0000 Subject: [PATCH 43/49] ci: apply automated fixes --- .../typescript/ai/src/activities/summarize/index.ts | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/packages/typescript/ai/src/activities/summarize/index.ts b/packages/typescript/ai/src/activities/summarize/index.ts index 5aae43b17..6644a25f3 100644 --- a/packages/typescript/ai/src/activities/summarize/index.ts +++ b/packages/typescript/ai/src/activities/summarize/index.ts @@ -182,7 +182,10 @@ export function summarize< * Run non-streaming summarization */ async function runSummarize( - options: SummarizeActivityOptions>, false>, + options: SummarizeActivityOptions< + SummarizeAdapter>, + false + >, ): Promise { const { adapter, text, maxLength, style, focus, modelOptions } = options const model = adapter.model @@ -252,7 +255,10 @@ async function runSummarize( * to non-streaming and yields the result as a single chunk. */ async function* runStreamingSummarize( - options: SummarizeActivityOptions>, true>, + options: SummarizeActivityOptions< + SummarizeAdapter>, + true + >, ): AsyncIterable { const { adapter, text, maxLength, style, focus, modelOptions } = options const model = adapter.model From 6b00d538010682bc182b7f4cf4c612282d66eecd Mon Sep 17 00:00:00 2001 From: Tom Beckenham <34339192+tombeckenham@users.noreply.github.com> Date: Thu, 14 May 2026 09:37:42 +1000 Subject: [PATCH 44/49] docs(openai-base): rewrite README; consolidate summarize changeset MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit README: - Drop the broken "Renamed from" note (referenced an outdated state). - Drop the Vercel `@ai-sdk/openai-compatible` industry-term paragraph and the surrounding "Why this package exists" rationale that explained the prior rename — package is back to `openai-base`, that history is moot. - Reframe TL;DR around the actual current contract: "providers that drive the official `openai` SDK against a different `baseURL`" (only ai-openai, ai-grok, ai-groq remain on the base after this PR). - Remove ai-openrouter from subclass lists and the architecture diagram — it was decoupled in this PR and now extends `BaseTextAdapter` directly. 
- Rewrite the hooks section: the old `callChatCompletion(Stream)` / `callResponse(Stream)` abstract methods were removed in 7aff8b18; the base now takes a pre-built `OpenAI` client and calls `client.chat.completions.create` / `client.responses.create` itself. Document `convertMessage`, `mapOptionsToRequest`, `extractReasoning`, `transformStructuredOutput`, `makeStructuredOutputCompatible`, `processStreamChunks`, `extractTextFromResponse` as the real surface. - Update "build a new provider" example to point at ai-grok / ai-groq. Changesets: - Replace the narrow `summarize-tighten-provider-options-generic.md` (which only covered 6d99fad8) with a comprehensive `summarize-unify-on-chat-stream-wrapper.md` that also covers e0dcb778 (provider summarize unification on `ChatStreamSummarizeAdapter`, `modelOptions` plumbing fix in the activity layer, new `InferTextProviderOptions` helper, and removal of the bespoke `*SummarizeProviderOptions` interfaces from 6 provider packages). Adds patch bumps for ai-anthropic / ai-gemini / ai-ollama which were previously uncovered. Co-Authored-By: Claude Opus 4.7 (1M context) --- ...marize-tighten-provider-options-generic.md | 5 - .../summarize-unify-on-chat-stream-wrapper.md | 23 +++ packages/typescript/openai-base/README.md | 154 +++++++++--------- 3 files changed, 98 insertions(+), 84 deletions(-) delete mode 100644 .changeset/summarize-tighten-provider-options-generic.md create mode 100644 .changeset/summarize-unify-on-chat-stream-wrapper.md diff --git a/.changeset/summarize-tighten-provider-options-generic.md b/.changeset/summarize-tighten-provider-options-generic.md deleted file mode 100644 index 55a9975b2..000000000 --- a/.changeset/summarize-tighten-provider-options-generic.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -'@tanstack/ai': patch ---- - -Tighten the `TProviderOptions` generic constraint across the summarize surface from `extends object` to `extends Record`, and align the default from `Record` to `Record`. Affects `SummarizationOptions`, `SummarizeAdapter`, `BaseSummarizeAdapter`, and `ChatStreamSummarizeAdapter`. Removes the `any`/`unknown`/`object` mixed defaults that previously lived inside the summarize folder and forces unparameterised callers to narrow before indexed access. No public-surface signature change for callers that supply a concrete provider-options shape (every shipping adapter does). diff --git a/.changeset/summarize-unify-on-chat-stream-wrapper.md b/.changeset/summarize-unify-on-chat-stream-wrapper.md new file mode 100644 index 000000000..469898939 --- /dev/null +++ b/.changeset/summarize-unify-on-chat-stream-wrapper.md @@ -0,0 +1,23 @@ +--- +'@tanstack/ai': patch +'@tanstack/ai-anthropic': patch +'@tanstack/ai-gemini': patch +'@tanstack/ai-grok': patch +'@tanstack/ai-ollama': patch +'@tanstack/ai-openai': patch +'@tanstack/ai-openrouter': patch +--- + +Unify the summarize subsystem on a shared chat-stream wrapper, plumb `modelOptions` through end-to-end, and tighten the `TProviderOptions` generic. + +**Provider summarize adapters now share one implementation.** Anthropic, Gemini, Ollama, and OpenRouter previously each shipped a bespoke 200–300 LOC summarize adapter that re-implemented streaming, error handling, usage accounting, and chunk assembly on top of their text adapter. They now construct a `ChatStreamSummarizeAdapter` (formerly `ChatStreamWrapperAdapter`, renamed and exported from `@tanstack/ai/activities`) wrapping their own text adapter, matching the existing OpenAI/Grok pattern. 
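For orientation, the shared wiring looks roughly like this
(`createTextAdapter` is a hypothetical provider factory, and the wrapper is
assumed to take the text adapter as its constructor argument):

```ts
import { ChatStreamSummarizeAdapter } from '@tanstack/ai/activities'
import { createTextAdapter } from './text' // hypothetical provider factory

// Summarization rides the provider's existing chat stream — no bespoke
// stream handling, error taxonomy, or usage accounting per provider.
export function summarizeAdapter(model: string) {
  return new ChatStreamSummarizeAdapter(createTextAdapter(model))
}
```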
Removes ~600 LOC of duplicated logic across the six providers and ensures behavioural parity. + +**`SummarizationOptions.modelOptions` now reaches the wire.** Previously the activity layer (`runSummarize` / `runStreamingSummarize`) silently dropped `modelOptions` when building the internal `SummarizationOptions` it forwarded to the adapter. Provider-specific knobs (Anthropic cache control, OpenRouter plugins, Gemini safety settings, Groq tuning params, …) now flow through correctly. + +**Provider summarize types resolve from the wrapped text adapter.** Each provider previously shipped a bespoke `XSummarizeProviderOptions` interface (a partial copy of its text provider options). Those interfaces are removed; summarize provider options are now inferred from the text adapter's `~types` via the new `InferTextProviderOptions` helper exported from `@tanstack/ai/activities`. IntelliSense for `modelOptions` on `summarize({ adapter: openai('gpt-4o'), … })` now matches what `chat({ adapter: openai('gpt-4o'), … })` would show. + +**`SummarizeAdapter` interface methods are now generic in `TProviderOptions`.** `summarize` and `summarizeStream` previously took `SummarizationOptions` (defaulted, so `modelOptions` was effectively `Record` regardless of the adapter's typed shape). They now take `SummarizationOptions`, threading the class's `TProviderOptions` generic through. Source-compatible for callers that didn't specify the generic; type-tighter for implementers and downstream consumers. + +**Generic constraint and default tightened across the summarize surface.** `SummarizationOptions`, `SummarizeAdapter`, `BaseSummarizeAdapter`, and `ChatStreamSummarizeAdapter` move from a mixed `extends object = Record` / `extends object = Record` set of declarations to a single `extends Record = Record` definition. Forces unparameterised consumers to narrow before indexed access on `modelOptions`. No public-surface signature change for callers that supply a concrete provider-options shape (every shipping adapter does). + +Bespoke `*SummarizeProviderOptions` interfaces (e.g. `OpenAISummarizeProviderOptions`, `AnthropicSummarizeProviderOptions`, `GeminiSummarizeProviderOptions`, `OllamaSummarizeProviderOptions`, `OpenRouterSummarizeProviderOptions`) are removed from the provider packages' public exports. Consumers who imported them should switch to inferring the type from the adapter (`InferTextProviderOptions`) or remove the explicit annotation (it'll be inferred from the adapter argument). diff --git a/packages/typescript/openai-base/README.md b/packages/typescript/openai-base/README.md index 897be381a..e9521217c 100644 --- a/packages/typescript/openai-base/README.md +++ b/packages/typescript/openai-base/README.md @@ -1,48 +1,41 @@ # @tanstack/openai-base -Shared adapters for providers that implement OpenAI's wire-format protocols. - -> Renamed from `@tanstack/openai-base` in 0.3.0. The "base" name was misleading. -> See [Why this package exists](#why-this-package-exists). +Shared base adapters for providers that drive the official `openai` SDK +against a different `baseURL`. ## TL;DR -OpenAI authored two wire formats — `/v1/chat/completions` and `/v1/responses` — -that other vendors have implemented to varying degrees. This package contains -the shared logic for talking to **any** server that speaks one of those wire -formats. OpenAI is one such server. OpenRouter, Groq, Grok, vLLM, SGLang, -Together, Ollama's compat layer, Fireworks, and others are too. 
- -The package holds two shared base classes: +Several providers ship endpoints that the official `openai` Node SDK can +talk to verbatim — you just point it at a different `baseURL`. xAI's Grok +endpoint, Groq's `/openai/v1` endpoint, and OpenAI itself all fall in this +camp. This package contains the shared logic for both wire formats those +endpoints expose: -- `OpenAIBaseChatCompletionsTextAdapter` -- `OpenAIBaseResponsesTextAdapter` +- `OpenAIBaseChatCompletionsTextAdapter` — for `/v1/chat/completions` +- `OpenAIBaseResponsesTextAdapter` — for `/v1/responses` -Provider packages (`@tanstack/ai-openai`, `@tanstack/ai-openrouter`, -`@tanstack/ai-groq`, `@tanstack/ai-grok`) subclass these and override a small -set of protected hooks for SDK-shape variance. +Provider packages (`@tanstack/ai-openai`, `@tanstack/ai-groq`, +`@tanstack/ai-grok`) construct an `OpenAI` client with their own `baseURL`, +pass it to the relevant base adapter, and override a small set of +protected hooks for SDK-shape variance and provider-specific quirks. ## Why this package exists -The old name, `@tanstack/openai-base`, implied that OpenAI's evolving API -_was_ the contract — that everyone else inherits from OpenAI. That framing -broke down in two ways: - -1. **OpenAI doesn't define the protocol; the ecosystem does.** Many providers - ship `/v1/chat/completions` as their native API (Groq, Together, vLLM, - SGLang, Fireworks, Ollama's compat layer). When OpenAI ships a new field - that no other provider supports, that field belongs to _OpenAI's product_, - not to the protocol. -2. **The Responses API has the same shape.** OpenRouter's beta Responses - endpoint routes requests with OpenAI's Responses wire format to Claude, - Gemini, and other underlying models. So Responses is also a multi-vendor - protocol, not an OpenAI-only product surface. - -Calling the protocol "OpenAI-compatible" matches the actual industry term — -Vercel publishes `@ai-sdk/openai-compatible`, BentoML and Lightning AI docs -use the same phrase, LiteLLM calls them "OpenAI-compatible endpoints." There -is no neutral standard name; the protocol is named after the vendor who -originally shipped it. +Every text adapter in TanStack AI — regardless of provider — emits +[AG-UI](https://github.com/CopilotKit/ag-ui) events (`RUN_STARTED`, +`TEXT_MESSAGE_*`, `TOOL_CALL_*`, `RUN_FINISHED`, …) as its output stream. +That is the universal unification. + +Input protocols differ. The OpenAI Chat Completions and Responses wire +formats both have multiple implementers in the ecosystem, so it pays to +write the streaming-chunk assembly, partial-JSON tool-arg buffering, +tool-call deduplication, and structured-output coercion once and share +it. That shared code lives here. + +Providers whose native API doesn't match either OpenAI wire format +(Anthropic, Gemini, Ollama's native API, OpenRouter's own SDK) extend +`BaseTextAdapter` from `@tanstack/ai` directly — there's nothing to +share, so they don't pay the indirection cost. ## What goes here vs. in `@tanstack/ai-openai` @@ -50,76 +43,79 @@ originally shipped it. 
| --------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------- | | Logic for the Chat Completions wire format | OpenAI-specific tool types (`web_search_preview`, `code_interpreter`, `local_shell`, `apply_patch`, `computer_use`, `mcp`, …) | | Logic for the Responses wire format | OpenAI model metadata, model lists, capability matrices | -| Streaming chunk assembly, AG-UI lifecycle, partial-JSON tool-arg buffering, tool-call deduplication | OpenAI-only request/response fields that no other vendor supports | -| Schema converters and structured-output coercion that all OpenAI-compatible servers accept | OpenAI's media adapters (image/TTS/video/transcription) that other providers don't implement | +| Streaming chunk assembly, AG-UI lifecycle, partial-JSON tool-arg buffering, tool-call deduplication | OpenAI-only request/response fields that no other consumer of the base sets | +| Schema converters and structured-output coercion that all consumers accept | OpenAI's media adapters (image/TTS/video/transcription) that Grok/Groq don't implement | -**Rule of thumb**: if you'd add a field here, it should be supported by at -least two OpenAI-compatible providers. Otherwise it belongs in the -provider's own package, plumbed in via a subclass override or a hook. +**Rule of thumb**: if a field would be useful to at least two of the +consuming packages (`ai-openai`, `ai-grok`, `ai-groq`), it belongs here. +Otherwise it belongs in the provider's own package, plumbed in via a +subclass override or a hook. ## How providers extend the bases -Subclasses customize SDK-shape variance via a small set of protected hook -methods: +The base constructor takes a pre-built `OpenAI` client. Subclasses +construct the SDK with their own `baseURL` (and any other client +options) and pass it to `super`: + +```ts +class GrokTextAdapter extends OpenAIBaseChatCompletionsTextAdapter<…> { + constructor(config: GrokConfig, model: TModel) { + super(model, 'grok', new OpenAI(withGrokDefaults(config))) + } +} +``` + +Per-provider quirks are handled via protected hooks: -- `callChatCompletion`, `callChatCompletionStream` — substitute a different - SDK or HTTP client (OpenRouter uses `@openrouter/sdk` here; OpenAI and - Groq use the OpenAI SDK with a `baseURL` override). -- `convertMessage`, `mapOptionsToRequest` — bridge request-shape differences - (camelCase vs snake_case, additional provider fields). +- `convertMessage`, `mapOptionsToRequest` — bridge request-shape + differences (extra fields, omitted fields, alternative encodings). - `extractReasoning` — surface a provider's reasoning channel into the - shared `REASONING_*` lifecycle. + shared `REASONING_*` AG-UI lifecycle. - `transformStructuredOutput`, `makeStructuredOutputCompatible` — - adjust structured-output handling for provider quirks. + adjust structured-output handling for provider quirks (e.g. Groq's + schema-shape requirements). - `processStreamChunks` — wrap the shared chunk processor for last-mile - fixups (e.g. Groq's `x_groq.usage` → `chunk.usage`). + fixups (e.g. Groq's `x_groq.usage` → `chunk.usage` promotion). +- `extractTextFromResponse` — pull the assistant text out of the + provider's non-streaming response shape. Each provider typically overrides 2–6 hooks and inherits everything else. 
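For instance, a subclass whose upstream streams reasoning under a custom
delta field might override `extractReasoning` roughly like this (a sketch:
the `reasoning` field name is hypothetical, generics and constructor are
elided, and the `{ text }` return shape follows the base's usage):

```ts
import { OpenAIBaseChatCompletionsTextAdapter } from '@tanstack/openai-base'
import type { ChatCompletionChunk } from '@tanstack/openai-base'

class ExampleTextAdapter extends OpenAIBaseChatCompletionsTextAdapter {
  // Surface the provider's reasoning channel; the base drives the
  // REASONING_* lifecycle and legacy STEP_* pairing from here.
  protected extractReasoning(chunk: ChatCompletionChunk) {
    const delta = chunk.choices[0]?.delta as { reasoning?: string } | undefined
    return delta?.reasoning ? { text: delta.reasoning } : undefined
  }
}
```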
-## Architecture context - -Every text adapter in TanStack AI — regardless of provider — emits -[AG-UI](https://github.com/CopilotKit/ag-ui) events (`RUN_STARTED`, -`TEXT_MESSAGE_*`, `TOOL_CALL_*`, `RUN_FINISHED`, …) as its output stream. -That is the _universal_ unification. - -Input protocols are different. The OpenAI-compatible family (this package) -has many implementers and warrants shared classes. Anthropic, Google Gemini, -and Ollama have single-provider input protocols, so their adapters extend -`BaseTextAdapter` from `@tanstack/ai` directly — no compatible family exists -because no compatible family exists. +## Architecture ``` @tanstack/ai └── BaseTextAdapter (abstract — emits AG-UI events) │ ├── @tanstack/openai-base::OpenAIBaseChatCompletionsTextAdapter - │ ├── ai-openrouter │ ├── ai-groq │ └── ai-grok │ ├── @tanstack/openai-base::OpenAIBaseResponsesTextAdapter - │ ├── ai-openai (primary text adapter — Responses is OpenAI's preferred API) - │ └── ai-openrouter (beta — routes to any underlying model) + │ └── ai-openai (Responses is OpenAI's preferred API) │ ├── ai-anthropic::AnthropicTextAdapter extends BaseTextAdapter directly ├── ai-gemini::GeminiTextAdapter extends BaseTextAdapter directly - └── ai-ollama::OllamaTextAdapter extends BaseTextAdapter directly + ├── ai-ollama::OllamaTextAdapter extends BaseTextAdapter directly + └── ai-openrouter (text + responses) extends BaseTextAdapter directly + (uses @openrouter/sdk natively) ``` -Note: `ai-openai` ships only the Responses-based adapter. For pure Chat -Completions use cases without OpenAI-specific behaviour, use `ai-grok` -(xAI's API is a direct OpenAI Chat Completions clone) or build a new -provider package extending `OpenAIBaseChatCompletionsTextAdapter`. +Note: `ai-openai` ships only the Responses-based text adapter. For pure +Chat Completions use cases without OpenAI-specific behaviour, use +`ai-grok` or `ai-groq`, or build a new provider package extending +`OpenAIBaseChatCompletionsTextAdapter`. ## Direct use -Most users don't import from this package directly; they install a provider -package and the adapter from there does the work. - -If you're building an adapter for a new OpenAI-compatible provider (vLLM, -Together, Fireworks, a self-hosted gateway, …), import the abstract -adapters from this package and subclass them. The existing providers are -worked examples — `@tanstack/ai-grok` is the simplest (xAI's API is a -direct OpenAI clone), `@tanstack/ai-openrouter` is the most heavily -overridden (different SDK, camelCase fields, multi-provider routing). +Most users don't import from this package directly; they install a +provider package and the adapter from there does the work. + +If you're building an adapter for a new endpoint that the official +`openai` SDK can talk to verbatim (vLLM, Together, Fireworks, a +self-hosted gateway, …), import the abstract adapters from this package +and subclass them. The existing providers are worked examples — +`@tanstack/ai-grok` is the simplest (xAI's API is a near-direct OpenAI +Chat Completions clone), `@tanstack/ai-groq` shows the +`processStreamChunks` and `makeStructuredOutputCompatible` override +pattern. 
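As a hedged sketch of that override pattern (the hook's exact signature and
return type are assumed; `x_groq` is the real Groq field, the rest is
illustrative):

```ts
import { OpenAIBaseChatCompletionsTextAdapter } from '@tanstack/openai-base'
import type { ChatCompletionChunk } from '@tanstack/openai-base'
import type { TextOptions } from '@tanstack/ai'

class GroqStyleAdapter extends OpenAIBaseChatCompletionsTextAdapter {
  // Promote the vendor-prefixed usage slot onto the standard field
  // before the shared stream reader consumes the chunk.
  protected override async *processStreamChunks(
    chunks: AsyncIterable<ChatCompletionChunk>,
    options: TextOptions,
  ) {
    const promoted = (async function* () {
      for await (const chunk of chunks) {
        const xGroq = (
          chunk as { x_groq?: { usage?: ChatCompletionChunk['usage'] } }
        ).x_groq
        yield !chunk.usage && xGroq?.usage
          ? { ...chunk, usage: xGroq.usage }
          : chunk
      }
    })()
    yield* super.processStreamChunks(promoted, options)
  }
}
```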
From ff282fe8020fb744587a41421de590c7096d2076 Mon Sep 17 00:00:00 2001
From: Tom Beckenham <34339192+tombeckenham@users.noreply.github.com>
Date: Thu, 14 May 2026 09:54:49 +1000
Subject: [PATCH 45/49] fix(ai): revert summarize TProviderOptions constraint
 to extends object
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

6d99fad8 tightened the constraint from `extends object` to
`extends Record<string, unknown>` alongside aligning the default. The
default change was correct; the constraint change broke vite build / DTS
emit for ai-openai, ai-anthropic, ai-gemini, ai-grok, ai-ollama. Their
summarize factories instantiate `ChatStreamSummarizeAdapter` with each
adapter's inferred provider-options type, and the inferred per-model
option shapes (`OpenAIBaseOptions & OpenAIReasoningOptions & ...` etc.)
are typed interfaces with named optional fields and no string index
signature — TS won't assign them to `Record<string, unknown>`.

Revert just the constraint to `extends object`, keep the default at
`Record<string, unknown>`. Restores the pattern `BaseSummarizeAdapter`
already had on main, now applied uniformly across all four declarations.

The 7 activity-layer `<string, Record<string, unknown>>` instantiations
in summarize/index.ts revert to `<string, object>`, and the two
`summarizeOptions: SummarizationOptions = {...}` literals are explicitly
annotated `SummarizationOptions<object>` so the
`modelOptions: object | undefined` destructured from the activity-layer
options assigns correctly.

Changeset paragraph 5 amended to describe what actually shipped
(default-aligned, constraint preserved).

Co-Authored-By: Claude Opus 4.7 (1M context)
---
 .../summarize-unify-on-chat-stream-wrapper.md |  2 +-
 .../ai/src/activities/summarize/adapter.ts    |  4 ++--
 .../summarize/chat-stream-summarize.ts        |  2 +-
 .../ai/src/activities/summarize/index.ts      | 18 +++++++++---------
 packages/typescript/ai/src/types.ts           |  2 +-
 5 files changed, 14 insertions(+), 14 deletions(-)

diff --git a/.changeset/summarize-unify-on-chat-stream-wrapper.md b/.changeset/summarize-unify-on-chat-stream-wrapper.md
index 469898939..1e1607888 100644
--- a/.changeset/summarize-unify-on-chat-stream-wrapper.md
+++ b/.changeset/summarize-unify-on-chat-stream-wrapper.md
@@ -18,6 +18,6 @@ Unify the summarize subsystem on a shared chat-stream wrapper, plumb `modelOpti
 
 **`SummarizeAdapter` interface methods are now generic in `TProviderOptions`.** `summarize` and `summarizeStream` previously took `SummarizationOptions` (defaulted, so `modelOptions` was effectively `Record<string, unknown>` regardless of the adapter's typed shape). They now take `SummarizationOptions<TProviderOptions>`, threading the class's `TProviderOptions` generic through. Source-compatible for callers that didn't specify the generic; type-tighter for implementers and downstream consumers.
 
-**Generic constraint and default tightened across the summarize surface.** `SummarizationOptions`, `SummarizeAdapter`, `BaseSummarizeAdapter`, and `ChatStreamSummarizeAdapter` move from a mixed set of `extends object = Record<string, unknown>`-style declarations to a single `extends Record<string, unknown> = Record<string, unknown>` definition. Forces unparameterised consumers to narrow before indexed access on `modelOptions`. No public-surface signature change for callers that supply a concrete provider-options shape (every shipping adapter does).
+**Default aligned across the summarize surface.** `SummarizationOptions`, `SummarizeAdapter`, `BaseSummarizeAdapter`, and `ChatStreamSummarizeAdapter` previously had a mixed set of `Record<string, unknown>` and `object` defaults for `TProviderOptions`. They now uniformly default to `Record<string, unknown>` so unparameterised consumers narrow before indexed access on `modelOptions`. The `extends object` constraint is unchanged — per-model typed interfaces (e.g. `OpenAIBaseOptions & OpenAIReasoningOptions & ...`) inferred via `InferTextProviderOptions` continue to satisfy it without needing a string index signature. No public-surface signature change for callers that supply a concrete provider-options shape (every shipping adapter does).
 
 Bespoke `*SummarizeProviderOptions` interfaces (e.g. `OpenAISummarizeProviderOptions`, `AnthropicSummarizeProviderOptions`, `GeminiSummarizeProviderOptions`, `OllamaSummarizeProviderOptions`, `OpenRouterSummarizeProviderOptions`) are removed from the provider packages' public exports. Consumers who imported them should switch to inferring the type from the adapter (`InferTextProviderOptions`) or remove the explicit annotation (it'll be inferred from the adapter argument).

diff --git a/packages/typescript/ai/src/activities/summarize/adapter.ts b/packages/typescript/ai/src/activities/summarize/adapter.ts
index 022261280..2f7cc34f6 100644
--- a/packages/typescript/ai/src/activities/summarize/adapter.ts
+++ b/packages/typescript/ai/src/activities/summarize/adapter.ts
@@ -27,7 +27,7 @@ export interface SummarizeAdapterConfig {
  */
 export interface SummarizeAdapter<
   TModel extends string = string,
-  TProviderOptions extends Record<string, unknown> = Record<string, unknown>,
+  TProviderOptions extends object = Record<string, unknown>,
 > {
   /** Discriminator for adapter kind - used by generate() to determine API shape */
   readonly kind: 'summarize'
@@ -74,7 +74,7 @@ export type AnySummarizeAdapter = SummarizeAdapter
  */
 export abstract class BaseSummarizeAdapter<
   TModel extends string = string,
-  TProviderOptions extends Record<string, unknown> = Record<string, unknown>,
+  TProviderOptions extends object = Record<string, unknown>,
 > implements SummarizeAdapter<TModel, TProviderOptions> {
   readonly kind = 'summarize' as const
   abstract readonly name: string

diff --git a/packages/typescript/ai/src/activities/summarize/chat-stream-summarize.ts b/packages/typescript/ai/src/activities/summarize/chat-stream-summarize.ts
index ec6d3abc3..2e7abb0c1 100644
--- a/packages/typescript/ai/src/activities/summarize/chat-stream-summarize.ts
+++ b/packages/typescript/ai/src/activities/summarize/chat-stream-summarize.ts
@@ -41,7 +41,7 @@ export type InferTextProviderOptions<TAdapter> = TAdapter extends {
  */
 export class ChatStreamSummarizeAdapter<
   TModel extends string,
-  TProviderOptions extends Record<string, unknown> = Record<string, unknown>,
+  TProviderOptions extends object = Record<string, unknown>,
 > extends BaseSummarizeAdapter<TModel, TProviderOptions> {
   readonly name: string

diff --git a/packages/typescript/ai/src/activities/summarize/index.ts b/packages/typescript/ai/src/activities/summarize/index.ts
index 6644a25f3..57acc5023 100644
--- a/packages/typescript/ai/src/activities/summarize/index.ts
+++ b/packages/typescript/ai/src/activities/summarize/index.ts
@@ -46,7 +46,7 @@ export type SummarizeProviderOptions =
  * @template TStream - Whether to stream the output
  */
 export interface SummarizeActivityOptions<
-  TAdapter extends SummarizeAdapter<string, Record<string, unknown>>,
+  TAdapter extends SummarizeAdapter<string, object>,
   TStream extends boolean = false,
 > {
   /** The summarize adapter to use (must be created with a model) */
@@ -154,7 +154,7 @@ function createId(prefix: string): string {
  * ```
  */
 export function summarize<
-  TAdapter extends SummarizeAdapter<string, Record<string, unknown>>,
+  TAdapter extends SummarizeAdapter<string, object>,
   TStream extends boolean = false,
 >(
   options: SummarizeActivityOptions<TAdapter, TStream>,
 ): SummarizeActivityResult<TAdapter, TStream> {
@@ -164,7 +164,7 @@
   if (stream) {
     return runStreamingSummarize(
       options as unknown as SummarizeActivityOptions<
-        SummarizeAdapter<string, Record<string, unknown>>,
+        SummarizeAdapter<string, object>,
         true
       >,
     ) as SummarizeActivityResult<TAdapter, TStream>
@@ -172,7 +172,7 @@
   return runSummarize(
     options as unknown as SummarizeActivityOptions<
-      SummarizeAdapter<string, Record<string, unknown>>,
+      SummarizeAdapter<string, object>,
       false
     >,
   ) as SummarizeActivityResult<TAdapter, TStream>
@@ -183,7 +183,7 @@
  */
 async function runSummarize(
   options: SummarizeActivityOptions<
-    SummarizeAdapter<string, Record<string, unknown>>,
+    SummarizeAdapter<string, object>,
     false
   >,
 ): Promise<SummarizationResult> {
@@ -208,7 +208,7 @@ async function runSummarize(
     inputLength,
   })
 
-  const summarizeOptions: SummarizationOptions = {
+  const summarizeOptions: SummarizationOptions<object> = {
     model,
     text,
     maxLength,
@@ -256,7 +256,7 @@
  */
 async function* runStreamingSummarize(
   options: SummarizeActivityOptions<
-    SummarizeAdapter<string, Record<string, unknown>>,
+    SummarizeAdapter<string, object>,
     true
   >,
 ): AsyncIterable<StreamChunk> {
@@ -270,7 +270,7 @@
     stream: true,
   })
 
-  const summarizeOptions: SummarizationOptions = {
+  const summarizeOptions: SummarizationOptions<object> = {
     model,
     text,
     maxLength,
@@ -306,7 +306,7 @@
  * Create typed options for the summarize() function without executing.
  */
 export function createSummarizeOptions<
-  TAdapter extends SummarizeAdapter<string, Record<string, unknown>>,
+  TAdapter extends SummarizeAdapter<string, object>,
   TStream extends boolean = false,
 >(
   options: SummarizeActivityOptions<TAdapter, TStream>,
diff --git a/packages/typescript/ai/src/types.ts b/packages/typescript/ai/src/types.ts
index 9802f2e8a..e51870b7c 100644
--- a/packages/typescript/ai/src/types.ts
+++ b/packages/typescript/ai/src/types.ts
@@ -1180,7 +1180,7 @@ export interface TextCompletionChunk {
 }
 
 export interface SummarizationOptions<
-  TProviderOptions extends Record<string, unknown> = Record<string, unknown>,
+  TProviderOptions extends object = Record<string, unknown>,
 > {
   model: string
   text: string

From 9f9b746e948730e25a5258891340abadbd9e9e23 Mon Sep 17 00:00:00 2001
From: "autofix-ci[bot]" <114827586+autofix-ci[bot]@users.noreply.github.com>
Date: Wed, 13 May 2026 23:55:58 +0000
Subject: [PATCH 46/49] ci: apply automated fixes

---
 .../typescript/ai/src/activities/summarize/index.ts | 10 ++--------
 1 file changed, 2 insertions(+), 8 deletions(-)

diff --git a/packages/typescript/ai/src/activities/summarize/index.ts b/packages/typescript/ai/src/activities/summarize/index.ts
index 57acc5023..27799d819 100644
--- a/packages/typescript/ai/src/activities/summarize/index.ts
+++ b/packages/typescript/ai/src/activities/summarize/index.ts
@@ -182,10 +182,7 @@ export function summarize<
  * Run non-streaming summarization
  */
 async function runSummarize(
-  options: SummarizeActivityOptions<
-    SummarizeAdapter<string, object>,
-    false
-  >,
+  options: SummarizeActivityOptions<SummarizeAdapter<string, object>, false>,
 ): Promise<SummarizationResult> {
   const { adapter, text, maxLength, style, focus, modelOptions } = options
   const model = adapter.model
@@ -255,10 +252,7 @@ async function runSummarize(
  * to non-streaming and yields the result as a single chunk.
  */
 async function* runStreamingSummarize(
-  options: SummarizeActivityOptions<
-    SummarizeAdapter<string, object>,
-    true
-  >,
+  options: SummarizeActivityOptions<SummarizeAdapter<string, object>, true>,
 ): AsyncIterable<StreamChunk> {
   const { adapter, text, maxLength, style, focus, modelOptions } = options
   const model = adapter.model

From 35618ca67cae8b542ec61e5e7f0d4ca4e5aa0568 Mon Sep 17 00:00:00 2001
From: Tom Beckenham <34339192+tombeckenham@users.noreply.github.com>
Date: Thu, 14 May 2026 10:05:19 +1000
Subject: [PATCH 47/49] refactor(ai): drop the SummarizationOptions annotation
 noise
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Let TS infer summarizeOptions from the literal in runSummarize /
runStreamingSummarize. The contextual check happens at the
adapter.summarize(...) / adapter.summarizeStream(...) call site against
the adapter's own typed signature, which is sufficient — the explicit
local annotation was just visual noise. Drops the unused
SummarizationOptions import too.

Co-Authored-By: Claude Opus 4.7 (1M context)
---
 .../typescript/ai/src/activities/summarize/index.ts | 10 +++-------
 1 file changed, 3 insertions(+), 7 deletions(-)

diff --git a/packages/typescript/ai/src/activities/summarize/index.ts b/packages/typescript/ai/src/activities/summarize/index.ts
index 27799d819..449318fc0 100644
--- a/packages/typescript/ai/src/activities/summarize/index.ts
+++ b/packages/typescript/ai/src/activities/summarize/index.ts
@@ -11,11 +11,7 @@ import { resolveDebugOption } from '../../logger/resolve'
 import type { InternalLogger } from '../../logger/internal-logger'
 import type { DebugOption } from '../../logger/types'
 import type { SummarizeAdapter } from './adapter'
-import type {
-  StreamChunk,
-  SummarizationOptions,
-  SummarizationResult,
-} from '../../types'
+import type { StreamChunk, SummarizationResult } from '../../types'
 
 // ===========================
 // Activity Kind
@@ -205,7 +201,7 @@ async function runSummarize(
     inputLength,
   })
 
-  const summarizeOptions: SummarizationOptions<object> = {
+  const summarizeOptions = {
     model,
     text,
     maxLength,
@@ -264,7 +260,7 @@ async function* runStreamingSummarize(
     stream: true,
   })
 
-  const summarizeOptions: SummarizationOptions<object> = {
+  const summarizeOptions = {
     model,
     text,
     maxLength,

From 60a130272abf0008f8f8b88757d93932ce451d2a Mon Sep 17 00:00:00 2001
From: Tom Beckenham <34339192+tombeckenham@users.noreply.github.com>
Date: Thu, 14 May 2026 11:22:02 +1000
Subject: [PATCH 48/49] fix(ai, ai-client): replace removed StreamChunk casts
 with typed event data
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Follow-up to the cast removals: where the old `as unknown as StreamChunk`
casts were hiding real data-shape issues, fix the data instead of
re-introducing the bypass.

Source:

- ai-client/src/connection-adapters.ts: synth RUN_FINISHED chunk now
  includes `threadId` (the cast had been hiding the missing required
  field). Use `EventType.RUN_FINISHED` / `EventType.RUN_ERROR` literals.

Test helpers (`chunk()` / `makeChunk()` / `sc()`):

- Replace string-typed `(type: string, fields) => StreamChunk` (which
  needed `as unknown as StreamChunk` to lie) with a generic
  `<T extends EventType>(type: T, fields?) => Extract<StreamChunk, { type: T }>`.
  One typed cast remains inside each helper at the boundary; no
  `as unknown` casts.
- `sc()` retyped as a typed identity (`<T extends StreamChunk>(c: T) => T`)
  so inline literal narrowing flows from the `type` discriminant.
Inline literals + missing fields fixed at call sites: - All `chunk('X', ...)` → `chunk(EventType.X, ...)` across stream-processor.test.ts (42), strip-to-spec-middleware.test.ts (4), chat.test.ts (1). - All `type: 'X'` inside test object literals → `type: EventType.X` across stream-to-response, custom-events-integration, extend-adapter, stream-processor (the four MESSAGES_SNAPSHOT inline literals). - extend-adapter mock RUN_FINISHED gained `threadId`. - custom-events-integration TOOL_CALL_START gained `toolCallName` (the cast had been hiding the missing required field). - stream-processor MESSAGES_SNAPSHOT bodies (the two whose casts were removed) converted from TanStack `UIMessage` shape (parts/createdAt) to AG-UI `Message` shape (id/role/content) — the processor casts internally, but the upstream MessagesSnapshotEvent.messages field requires AG-UI Message. types.ts is untouched. Co-Authored-By: Claude Opus 4.7 (1M context) --- .../ai-client/src/connection-adapters.ts | 10 +- .../ai-client/tests/chat-client.test.ts | 83 ++++++------ .../tests/connection-adapters.test.ts | 1 + .../ai-client/tests/generation-client.test.ts | 8 +- packages/typescript/ai/tests/chat.test.ts | 3 +- .../tests/custom-events-integration.test.ts | 25 ++-- .../ai/tests/extend-adapter.test.ts | 10 +- .../ai/tests/stream-processor.test.ts | 125 +++++++++--------- .../ai/tests/stream-to-response.test.ts | 5 +- .../ai/tests/strip-to-spec-middleware.test.ts | 21 ++- packages/typescript/ai/tests/test-utils.ts | 19 +-- 11 files changed, 170 insertions(+), 140 deletions(-) diff --git a/packages/typescript/ai-client/src/connection-adapters.ts b/packages/typescript/ai-client/src/connection-adapters.ts index 91d63a146..a9a48066a 100644 --- a/packages/typescript/ai-client/src/connection-adapters.ts +++ b/packages/typescript/ai-client/src/connection-adapters.ts @@ -1,3 +1,4 @@ +import { EventType } from '@tanstack/ai' import type { ModelMessage, StreamChunk, UIMessage } from '@tanstack/ai' /** @@ -188,17 +189,18 @@ export function normalizeConnectionAdapter( // synthesize RUN_FINISHED so request-scoped consumers can complete. if (!abortSignal?.aborted && !hasTerminalEvent) { push({ - type: 'RUN_FINISHED', + type: EventType.RUN_FINISHED, runId: `run-${Date.now()}`, + threadId: `thread-${Date.now()}`, model: 'connect-wrapper', timestamp: Date.now(), finishReason: 'stop', - } as unknown as StreamChunk) + }) } } catch (err) { if (!abortSignal?.aborted && !hasTerminalEvent) { push({ - type: 'RUN_ERROR', + type: EventType.RUN_ERROR, timestamp: Date.now(), message: err instanceof Error ? err.message : 'Unknown error in connect()', @@ -208,7 +210,7 @@ export function normalizeConnectionAdapter( ? 
err.message : 'Unknown error in connect()', }, - } as unknown as StreamChunk) + }) } throw err } diff --git a/packages/typescript/ai-client/tests/chat-client.test.ts b/packages/typescript/ai-client/tests/chat-client.test.ts index cc2b2ed75..42c7fb9bc 100644 --- a/packages/typescript/ai-client/tests/chat-client.test.ts +++ b/packages/typescript/ai-client/tests/chat-client.test.ts @@ -2,16 +2,16 @@ import { describe, expect, it, vi } from 'vitest' import { EventType } from '@tanstack/ai' import { ChatClient } from '../src/chat-client' import { + createApprovalToolCallChunks, + createCustomEventChunks, createMockConnectionAdapter, createTextChunks, createThinkingChunks, createToolCallChunks, - createApprovalToolCallChunks, - createCustomEventChunks, } from './test-utils' import type { - ConnectionAdapter, ConnectConnectionAdapter, + ConnectionAdapter, } from '../src/connection-adapters' import type { StreamChunk } from '@tanstack/ai' import type { UIMessage } from '../src/types' @@ -157,7 +157,7 @@ describe('ChatClient', () => { model: 'test', timestamp: Date.now(), finishReason: 'stop', - } as unknown as StreamChunk, + } , ]) const client = new ChatClient({ connection: adapter }) @@ -263,7 +263,7 @@ describe('ChatClient', () => { timestamp: Date.now(), delta: 'H', content: 'H', - } as unknown as StreamChunk, + } , ]) const client = new ChatClient({ connection: adapter }) @@ -299,6 +299,7 @@ describe('ChatClient', () => { it('should expose connectionStatus error for subscription loop failures', async () => { const connection = { + // eslint-disable-next-line require-yield subscribe: async function* () { throw new Error('subscription failed') }, @@ -322,7 +323,7 @@ describe('ChatClient', () => { timestamp: Date.now(), delta: 'H', content: 'H', - } as unknown as StreamChunk, + } , ]) const client = new ChatClient({ connection: adapter }) @@ -357,7 +358,7 @@ describe('ChatClient', () => { threadId: 'thread-1', model: 'test', timestamp: Date.now(), - } as unknown as StreamChunk, + } , { type: EventType.TEXT_MESSAGE_CONTENT, messageId: 'msg-1', @@ -365,7 +366,7 @@ describe('ChatClient', () => { timestamp: Date.now(), delta: 'Hi', content: 'Hi', - } as unknown as StreamChunk, + } , { type: EventType.RUN_FINISHED, runId: 'run-1', @@ -373,7 +374,7 @@ describe('ChatClient', () => { model: 'test', timestamp: Date.now(), finishReason: 'stop', - } as unknown as StreamChunk, + } , ] const adapter = createSubscribeAdapter(chunks) const generatingChanges: Array = [] @@ -398,7 +399,7 @@ describe('ChatClient', () => { threadId: 'thread-1', model: 'test', timestamp: Date.now(), - } as unknown as StreamChunk, + } , { type: EventType.RUN_ERROR, message: 'something went wrong', @@ -406,7 +407,7 @@ describe('ChatClient', () => { model: 'test', timestamp: Date.now(), error: { message: 'something went wrong' }, - } as unknown as StreamChunk, + } , ] const adapter = createSubscribeAdapter(chunks) const generatingChanges: Array = [] @@ -431,7 +432,7 @@ describe('ChatClient', () => { threadId: 'thread-1', model: 'test', timestamp: Date.now(), - } as unknown as StreamChunk, + } , { type: EventType.TEXT_MESSAGE_CONTENT, messageId: 'msg-1', @@ -439,7 +440,7 @@ describe('ChatClient', () => { timestamp: Date.now(), delta: 'Hi', content: 'Hi', - } as unknown as StreamChunk, + } , { type: EventType.RUN_FINISHED, runId: 'run-1', @@ -447,7 +448,7 @@ describe('ChatClient', () => { model: 'test', timestamp: Date.now(), finishReason: 'stop', - } as unknown as StreamChunk, + } , ] const adapter = createSubscribeAdapter(chunks) 
const client = new ChatClient({ connection: adapter }) @@ -557,14 +558,14 @@ describe('ChatClient', () => { threadId: 'thread-1', model: 'test', timestamp: Date.now(), - } as unknown as StreamChunk, + } , { type: EventType.RUN_STARTED, runId: 'run-1', threadId: 'thread-1', model: 'test', timestamp: Date.now(), - } as unknown as StreamChunk, + } , { type: EventType.TEXT_MESSAGE_CONTENT, messageId: 'msg-1', @@ -572,7 +573,7 @@ describe('ChatClient', () => { timestamp: Date.now(), delta: 'Hi', content: 'Hi', - } as unknown as StreamChunk, + } , { type: EventType.RUN_FINISHED, runId: 'run-1', @@ -580,7 +581,7 @@ describe('ChatClient', () => { model: 'test', timestamp: Date.now(), finishReason: 'stop', - } as unknown as StreamChunk, + } , { type: EventType.RUN_FINISHED, runId: 'run-1', @@ -588,7 +589,7 @@ describe('ChatClient', () => { model: 'test', timestamp: Date.now(), finishReason: 'stop', - } as unknown as StreamChunk, + } , ] const adapter = createSubscribeAdapter(chunks) const generatingChanges: Array = [] @@ -612,7 +613,7 @@ describe('ChatClient', () => { threadId: 'thread-1', model: 'test', timestamp: Date.now(), - } as unknown as StreamChunk, + } , { type: EventType.TEXT_MESSAGE_CONTENT, messageId: 'msg-1', @@ -620,7 +621,7 @@ describe('ChatClient', () => { timestamp: Date.now(), delta: 'A', content: 'A', - } as unknown as StreamChunk, + } , { type: EventType.RUN_FINISHED, runId: 'run-1', @@ -628,7 +629,7 @@ describe('ChatClient', () => { model: 'test', timestamp: Date.now(), finishReason: 'stop', - } as unknown as StreamChunk, + } , ] const adapter = createSubscribeAdapter(chunks) const generatingChanges: Array = [] @@ -688,14 +689,14 @@ describe('ChatClient', () => { threadId: 'thread-1', model: 'test', timestamp: Date.now(), - } as unknown as StreamChunk, + } , { type: EventType.RUN_STARTED, runId: 'run-2', threadId: 'thread-1', model: 'test', timestamp: Date.now(), - } as unknown as StreamChunk, + } , ) wake.fn?.() await new Promise((resolve) => setTimeout(resolve, 20)) @@ -710,7 +711,7 @@ describe('ChatClient', () => { model: 'test', timestamp: Date.now(), finishReason: 'stop', - } as unknown as StreamChunk) + } ) wake.fn?.() await new Promise((resolve) => setTimeout(resolve, 20)) @@ -724,7 +725,7 @@ describe('ChatClient', () => { model: 'test', timestamp: Date.now(), finishReason: 'stop', - } as unknown as StreamChunk) + } ) wake.fn?.() await new Promise((resolve) => setTimeout(resolve, 20)) @@ -777,14 +778,14 @@ describe('ChatClient', () => { threadId: 'thread-1', model: 'test', timestamp: Date.now(), - } as unknown as StreamChunk, + } , { type: EventType.RUN_STARTED, runId: 'run-2', threadId: 'thread-1', model: 'test', timestamp: Date.now(), - } as unknown as StreamChunk, + } , ) wake.fn?.() await new Promise((resolve) => setTimeout(resolve, 20)) @@ -798,7 +799,7 @@ describe('ChatClient', () => { model: 'test', timestamp: Date.now(), error: { message: 'session crashed' }, - } as unknown as StreamChunk) + } ) wake.fn?.() await new Promise((resolve) => setTimeout(resolve, 20)) @@ -1375,7 +1376,7 @@ describe('ChatClient', () => { timestamp: Date.now(), delta: 'H', content: 'H', - } as unknown as StreamChunk, + } , ], chunkDelay: 50, }) @@ -1401,6 +1402,7 @@ describe('ChatClient', () => { it('should surface subscription loop failures without hanging', async () => { const connection = { + // eslint-disable-next-line require-yield subscribe: async function* () { throw new Error('subscription exploded') }, @@ -2016,22 +2018,23 @@ describe('ChatClient', () => { toolName: 
'dangerous_tool_2', model: 'test', timestamp: Date.now(), + toolCallName: 'dangerous_tool_call_2', index: 0, - } as unknown as StreamChunk, + } , { type: EventType.TOOL_CALL_ARGS, toolCallId: 'tc-2', model: 'test', timestamp: Date.now(), delta: '{}', - } as unknown as StreamChunk, + } , { type: EventType.TOOL_CALL_END, toolCallId: 'tc-2', toolName: 'dangerous_tool_2', model: 'test', timestamp: Date.now(), - } as unknown as StreamChunk, + } , { type: EventType.CUSTOM, model: 'test', @@ -2043,7 +2046,7 @@ describe('ChatClient', () => { input: {}, approval: { id: 'approval-2', needsApproval: true }, }, - } as unknown as StreamChunk, + } , ] for (const chunk of preChunks) yield chunk @@ -2160,7 +2163,7 @@ describe('ChatClient', () => { threadId: 'thread-1', model: 'test', timestamp: Date.now(), - } as unknown as StreamChunk, + } , { type: EventType.TEXT_MESSAGE_START, messageId: 'msg-a', @@ -2187,7 +2190,7 @@ describe('ChatClient', () => { threadId: 'thread-1', model: 'test', timestamp: Date.now(), - } as unknown as StreamChunk, + } , { type: EventType.TEXT_MESSAGE_START, messageId: 'msg-b', @@ -2214,7 +2217,7 @@ describe('ChatClient', () => { model: 'test', timestamp: Date.now(), finishReason: 'stop', - } as unknown as StreamChunk) + } ) wake.fn?.() await new Promise((resolve) => setTimeout(resolve, 20)) @@ -2254,7 +2257,7 @@ describe('ChatClient', () => { model: 'test', timestamp: Date.now(), finishReason: 'stop', - } as unknown as StreamChunk) + } ) wake.fn?.() await new Promise((resolve) => setTimeout(resolve, 20)) @@ -2319,7 +2322,7 @@ describe('ChatClient', () => { threadId: 'thread-1', model: 'test', timestamp: Date.now(), - } as unknown as StreamChunk, + } , { type: EventType.TEXT_MESSAGE_CONTENT, messageId: 'asst-1', @@ -2334,7 +2337,7 @@ describe('ChatClient', () => { model: 'test', timestamp: Date.now(), finishReason: 'stop', - } as unknown as StreamChunk, + } , ) wake.fn?.() await new Promise((resolve) => setTimeout(resolve, 20)) diff --git a/packages/typescript/ai-client/tests/connection-adapters.test.ts b/packages/typescript/ai-client/tests/connection-adapters.test.ts index f1511aa5f..14d8b0bdb 100644 --- a/packages/typescript/ai-client/tests/connection-adapters.test.ts +++ b/packages/typescript/ai-client/tests/connection-adapters.test.ts @@ -903,6 +903,7 @@ describe('connection-adapters', () => { }) it('should synthesize RUN_ERROR when wrapped connect stream throws', async () => { + // eslint-disable-next-line require-yield const base = stream(async function* () { throw new Error('connect exploded') }) diff --git a/packages/typescript/ai-client/tests/generation-client.test.ts b/packages/typescript/ai-client/tests/generation-client.test.ts index 895e9f067..1dbae462a 100644 --- a/packages/typescript/ai-client/tests/generation-client.test.ts +++ b/packages/typescript/ai-client/tests/generation-client.test.ts @@ -1,4 +1,4 @@ -import { describe, it, expect, vi } from 'vitest' +import { describe, expect, it, vi } from 'vitest' import { EventType } from '@tanstack/ai' import { GenerationClient } from '../src/generation-client' import type { StreamChunk } from '@tanstack/ai' @@ -244,20 +244,20 @@ describe('GenerationClient', () => { runId: 'run-1', threadId: 'thread-1', timestamp: Date.now(), - } as unknown as StreamChunk, + } , { type: EventType.CUSTOM, name: 'generation:result', value: { id: '1' }, timestamp: Date.now(), - } as unknown as StreamChunk, + } , { type: EventType.RUN_FINISHED, runId: 'run-1', threadId: 'thread-1', finishReason: 'stop', timestamp: Date.now(), - } as unknown 
as StreamChunk,
+      } ,
     ]
 
     const connection = createMockConnection(chunks)
diff --git a/packages/typescript/ai/tests/chat.test.ts b/packages/typescript/ai/tests/chat.test.ts
index f69234c54..bd338666b 100644
--- a/packages/typescript/ai/tests/chat.test.ts
+++ b/packages/typescript/ai/tests/chat.test.ts
@@ -1,5 +1,6 @@
 import { describe, expect, it, vi } from 'vitest'
 import { chat, createChatOptions } from '../src/activities/chat/index'
+import { EventType } from '../src/types'
 import type { StreamChunk, Tool } from '../src/types'
 import {
   chunk,
@@ -1385,7 +1386,7 @@ describe('chat()', () => {
       iterations: [
         [
           ev.runStarted(),
-          chunk('TOOL_CALL_START', {
+          chunk(EventType.TOOL_CALL_START, {
             toolCallId: 'call_1',
             toolName: 'getInventory',
           }),
diff --git a/packages/typescript/ai/tests/custom-events-integration.test.ts b/packages/typescript/ai/tests/custom-events-integration.test.ts
index 36ecdae8f..e4b599c89 100644
--- a/packages/typescript/ai/tests/custom-events-integration.test.ts
+++ b/packages/typescript/ai/tests/custom-events-integration.test.ts
@@ -1,11 +1,13 @@
 import { describe, expect, it, vi } from 'vitest'
+import { z } from 'zod'
 import { toolDefinition } from '../src/activities/chat/tools/tool-definition'
 import { StreamProcessor } from '../src/activities/chat/stream/processor'
+import { EventType } from '../src/types'
 import type { StreamChunk } from '../src/types'
-import { z } from 'zod'
 
-/** Cast a plain object to StreamChunk for test convenience. */
-const sc = (obj: Record<string, unknown>) => obj as unknown as StreamChunk
+/** Identity helper for inline StreamChunk literals — gives TS a target
+ * type so the literal narrows to the right variant by `type`. */
+const sc = <T extends StreamChunk>(chunk: T): T => chunk
 
 describe('Custom Events Integration', () => {
   it('should emit custom events from tool execution context', async () => {
@@ -67,8 +69,9 @@ describe('Custom Events Integration', () => {
     // Simulate tool call sequence
     processor.processChunk(
       sc({
-        type: 'TOOL_CALL_START',
+        type: EventType.TOOL_CALL_START,
         toolCallId: 'tc-1',
+        toolCallName: 'testTool',
         toolName: 'testTool',
         timestamp: Date.now(),
         index: 0,
@@ -77,7 +80,7 @@
 
     processor.processChunk(
       sc({
-        type: 'TOOL_CALL_ARGS',
+        type: EventType.TOOL_CALL_ARGS,
         toolCallId: 'tc-1',
         timestamp: Date.now(),
         delta: '{"message": "Hello World"}',
@@ -86,7 +89,7 @@
 
     processor.processChunk(
       sc({
-        type: 'TOOL_CALL_END',
+        type: EventType.TOOL_CALL_END,
         toolCallId: 'tc-1',
         toolName: 'testTool',
         timestamp: Date.now(),
@@ -103,7 +106,7 @@
     // This simulates the real behavior where emitCustomEvent creates CUSTOM stream chunks
     processor.processChunk(
       sc({
-        type: 'CUSTOM',
+        type: EventType.CUSTOM,
         name: eventName,
         value: { ...data, toolCallId: 'tc-1' },
         timestamp: Date.now(),
@@ -171,7 +174,7 @@
     // Emit custom event without toolCallId
     processor.processChunk(
       sc({
-        type: 'CUSTOM',
+        type: EventType.CUSTOM,
         name: 'system:status',
         value: { status: 'ready', version: '1.0.0' },
         timestamp: Date.now(),
@@ -210,7 +213,7 @@
     // System event: tool-input-available
     processor.processChunk(
       sc({
-        type: 'CUSTOM',
+        type: EventType.CUSTOM,
         name: 'tool-input-available',
         value: {
           toolCallId: 'tc-1',
@@ -224,7 +227,7 @@
     // System event: approval-requested
     processor.processChunk(
       sc({
-        type: 'CUSTOM',
+        type: EventType.CUSTOM,
         name: 'approval-requested',
         value: {
           toolCallId: 'tc-2',
@@ -239,7 +242,7 @@
     // Custom event (should be forwarded)
     processor.processChunk(
       sc({
-        type: 'CUSTOM',
+        type: EventType.CUSTOM,
         name: 'user:custom-event',
         value: { message: 'This should be forwarded' },
         timestamp: Date.now(),
diff --git a/packages/typescript/ai/tests/extend-adapter.test.ts b/packages/typescript/ai/tests/extend-adapter.test.ts
index e919c8b5a..1f2c667f8 100644
--- a/packages/typescript/ai/tests/extend-adapter.test.ts
+++ b/packages/typescript/ai/tests/extend-adapter.test.ts
@@ -11,6 +11,7 @@ import { describe, expect, expectTypeOf, it } from 'vitest'
 import { createModel, extendAdapter } from '../src/extend-adapter'
 import { BaseTextAdapter } from '../src/activities/chat/adapter'
 import { chat } from '../src/activities/chat'
+import { EventType } from '../src/types'
 import type { StreamChunk, TextOptions } from '../src/types'
 import type {
   StructuredOutputOptions,
@@ -91,20 +92,21 @@ class MockTextAdapter extends BaseTextAdapter<
     _options: TextOptions>,
   ): AsyncIterable<StreamChunk> {
     yield {
-      type: 'TEXT_MESSAGE_CONTENT',
+      type: EventType.TEXT_MESSAGE_CONTENT,
       messageId: 'mock-id',
       timestamp: Date.now(),
       delta: 'Hello',
       content: 'Hello',
       model: this.model,
-    } as unknown as StreamChunk
+    }
     yield {
-      type: 'RUN_FINISHED',
+      type: EventType.RUN_FINISHED,
       runId: 'mock-id',
+      threadId: 'mock-thread',
       timestamp: Date.now(),
       finishReason: 'stop',
       model: this.model,
-    } as unknown as StreamChunk
+    }
   }
   /* eslint-enable @typescript-eslint/require-await */
diff --git a/packages/typescript/ai/tests/stream-processor.test.ts b/packages/typescript/ai/tests/stream-processor.test.ts
index bce85813a..a171351a3 100644
--- a/packages/typescript/ai/tests/stream-processor.test.ts
+++ b/packages/typescript/ai/tests/stream-processor.test.ts
@@ -1,8 +1,10 @@
-import { type Mock, describe, expect, it, vi } from 'vitest'
+import { describe, expect, it, vi } from 'vitest'
 import {
   StreamProcessor,
   createReplayStream,
 } from '../src/activities/chat/stream/processor'
+import { EventType } from '../src/types'
+import type { Mock } from 'vitest'
 import type { StreamProcessorEvents } from '../src/activities/chat/stream/processor'
 import type { ChunkStrategy } from '../src/activities/chat/stream/types'
 import type {
@@ -16,12 +18,16 @@ import type {
 // ============================================================================
 // Helpers
 // ============================================================================
 
-/** Create a typed StreamChunk with minimal boilerplate. */
-function chunk(
-  type: string,
-  fields: Record<string, unknown> = {},
-): StreamChunk {
-  return { type, timestamp: Date.now(), ...fields } as unknown as StreamChunk
+/** Create a typed StreamChunk by event type. Narrows the return to the
+ * matching variant via `Extract`. */
+function chunk<T extends EventType>(
+  type: T,
+  fields?: Record<string, unknown>,
+): Extract<StreamChunk, { type: T }> {
+  return { type, timestamp: Date.now(), ...fields } as Extract<
+    StreamChunk,
+    { type: T }
+  >
+}
 
 /** Create an async iterable from a list of chunks. */
 async function* streamOf(
@@ -36,27 +42,27 @@
 /** Shorthand for common event sequences.
*/ const ev = { runStarted: (runId = 'run-1', threadId = 'thread-1') => - chunk('RUN_STARTED', { runId, threadId }), + chunk(EventType.RUN_STARTED, { runId, threadId }), textStart: (messageId = 'msg-1') => - chunk('TEXT_MESSAGE_START', { messageId, role: 'assistant' as const }), + chunk(EventType.TEXT_MESSAGE_START, { messageId, role: 'assistant' as const }), textContent: (delta: string, messageId = 'msg-1') => - chunk('TEXT_MESSAGE_CONTENT', { messageId, delta }), - textEnd: (messageId = 'msg-1') => chunk('TEXT_MESSAGE_END', { messageId }), + chunk(EventType.TEXT_MESSAGE_CONTENT, { messageId, delta }), + textEnd: (messageId = 'msg-1') => chunk(EventType.TEXT_MESSAGE_END, { messageId }), toolStart: (toolCallId: string, toolCallName: string, index?: number) => - chunk('TOOL_CALL_START', { + chunk(EventType.TOOL_CALL_START, { toolCallId, toolCallName, toolName: toolCallName, ...(index !== undefined ? { index } : {}), }), toolArgs: (toolCallId: string, delta: string) => - chunk('TOOL_CALL_ARGS', { toolCallId, delta }), + chunk(EventType.TOOL_CALL_ARGS, { toolCallId, delta }), toolEnd: ( toolCallId: string, toolCallName: string, opts?: { input?: unknown; result?: string }, ) => - chunk('TOOL_CALL_END', { + chunk(EventType.TOOL_CALL_END, { toolCallId, toolCallName, toolName: toolCallName, @@ -71,14 +77,14 @@ const ev = { | null = 'stop', runId = 'run-1', threadId = 'thread-1', - ) => chunk('RUN_FINISHED', { runId, threadId, finishReason }), + ) => chunk(EventType.RUN_FINISHED, { runId, threadId, finishReason }), runError: (message: string, runId = 'run-1') => - chunk('RUN_ERROR', { message, runId, error: { message } }), + chunk(EventType.RUN_ERROR, { message, runId, error: { message } }), stepStarted: (stepId = 'step-1', stepType = 'thinking') => - chunk('STEP_STARTED', { stepId, stepType }), + chunk(EventType.STEP_STARTED, { stepId, stepType }), stepFinished: (delta: string, stepId = 'step-1') => - chunk('STEP_FINISHED', { stepId, delta }), - custom: (name: string, value?: unknown) => chunk('CUSTOM', { name, value }), + chunk(EventType.STEP_FINISHED, { stepId, delta }), + custom: (name: string, value?: unknown) => chunk(EventType.CUSTOM, { name, value }), } /** Events object with vi.fn() mocks for assertions. 
*/ @@ -1651,19 +1657,19 @@ describe('StreamProcessor', () => { // These should not create any messages processor.processChunk( - chunk('RUN_STARTED', { runId: 'run-1', threadId: 'thread-1' }), + chunk(EventType.RUN_STARTED, { runId: 'run-1', threadId: 'thread-1' }), ) - processor.processChunk(chunk('TEXT_MESSAGE_END', { messageId: 'msg-1' })) + processor.processChunk(chunk(EventType.TEXT_MESSAGE_END, { messageId: 'msg-1' })) processor.processChunk( - chunk('STEP_STARTED', { stepName: 'step-1', stepId: 'step-1' }), + chunk(EventType.STEP_STARTED, { stepName: 'step-1', stepId: 'step-1' }), ) processor.processChunk( - chunk('STATE_SNAPSHOT', { + chunk(EventType.STATE_SNAPSHOT, { snapshot: { key: 'val' }, state: { key: 'val' }, }), ) - processor.processChunk(chunk('STATE_DELTA', { delta: [{ key: 'val' }] })) + processor.processChunk(chunk(EventType.STATE_DELTA, { delta: [{ key: 'val' }] })) // No messages created (none of these are content-bearing) expect(processor.getMessages()).toHaveLength(0) @@ -2525,7 +2531,7 @@ describe('StreamProcessor', () => { ] processor.processChunk({ - type: 'MESSAGES_SNAPSHOT', + type: EventType.MESSAGES_SNAPSHOT, messages: snapshotMessages, timestamp: Date.now(), } as StreamChunk) @@ -2548,17 +2554,18 @@ describe('StreamProcessor', () => { // Snapshot replaces all messages processor.processChunk({ - type: 'MESSAGES_SNAPSHOT', + type: EventType.MESSAGES_SNAPSHOT, + // AG-UI Message shape; processor reinterprets as UIMessage at runtime + // (see processor.ts handleMessagesSnapshotEvent) messages: [ { id: 'snap-1', role: 'assistant', - parts: [{ type: 'text', content: 'Snapshot content' }], - createdAt: new Date(), + content: 'Snapshot content', }, ], timestamp: Date.now(), - } as unknown as StreamChunk) + }) const messages = processor.getMessages() expect(messages).toHaveLength(1) @@ -2692,17 +2699,17 @@ describe('StreamProcessor', () => { // MESSAGES_SNAPSHOT replaces everything (e.g., on reconnection) processor.processChunk({ - type: 'MESSAGES_SNAPSHOT', + type: EventType.MESSAGES_SNAPSHOT, + // AG-UI Message shape; processor reinterprets as UIMessage at runtime messages: [ { id: 'snap-user', role: 'user', - parts: [{ type: 'text', content: 'Hello' }], - createdAt: new Date(), + content: 'Hello', }, ], timestamp: Date.now(), - } as unknown as StreamChunk) + }) // Verify old messages are replaced const messagesAfterSnapshot = processor.getMessages() @@ -3022,7 +3029,7 @@ describe('StreamProcessor', () => { ] processor.processChunk({ - type: 'MESSAGES_SNAPSHOT', + type: EventType.MESSAGES_SNAPSHOT, messages: snapshotMessages, timestamp: Date.now(), } as StreamChunk) @@ -3075,29 +3082,29 @@ describe('StreamProcessor', () => { processor.processChunk(ev.runStarted()) processor.processChunk(ev.textStart()) - processor.processChunk(chunk('REASONING_START', { messageId: 'r-1' })) + processor.processChunk(chunk(EventType.REASONING_START, { messageId: 'r-1' })) processor.processChunk( - chunk('REASONING_MESSAGE_START', { + chunk(EventType.REASONING_MESSAGE_START, { messageId: 'r-1', role: 'reasoning', }), ) processor.processChunk( - chunk('REASONING_MESSAGE_CONTENT', { + chunk(EventType.REASONING_MESSAGE_CONTENT, { messageId: 'r-1', delta: 'Let me think', }), ) processor.processChunk( - chunk('REASONING_MESSAGE_CONTENT', { + chunk(EventType.REASONING_MESSAGE_CONTENT, { messageId: 'r-1', delta: ' about this...', }), ) processor.processChunk( - chunk('REASONING_MESSAGE_END', { messageId: 'r-1' }), + chunk(EventType.REASONING_MESSAGE_END, { messageId: 'r-1' }), ) - 
processor.processChunk(chunk('REASONING_END', { messageId: 'r-1' })) + processor.processChunk(chunk(EventType.REASONING_END, { messageId: 'r-1' })) // Should fire onThinkingUpdate with accumulated content expect(events.onThinkingUpdate).toHaveBeenCalledTimes(2) @@ -3121,23 +3128,23 @@ describe('StreamProcessor', () => { processor.processChunk(ev.runStarted()) processor.processChunk(ev.textStart()) - processor.processChunk(chunk('REASONING_START', { messageId: 'r-1' })) + processor.processChunk(chunk(EventType.REASONING_START, { messageId: 'r-1' })) processor.processChunk( - chunk('REASONING_MESSAGE_START', { + chunk(EventType.REASONING_MESSAGE_START, { messageId: 'r-1', role: 'reasoning', }), ) processor.processChunk( - chunk('REASONING_MESSAGE_CONTENT', { + chunk(EventType.REASONING_MESSAGE_CONTENT, { messageId: 'r-1', delta: 'Thinking...', }), ) processor.processChunk( - chunk('REASONING_MESSAGE_END', { messageId: 'r-1' }), + chunk(EventType.REASONING_MESSAGE_END, { messageId: 'r-1' }), ) - processor.processChunk(chunk('REASONING_END', { messageId: 'r-1' })) + processor.processChunk(chunk(EventType.REASONING_END, { messageId: 'r-1' })) processor.processChunk(ev.textContent('Answer')) processor.processChunk(ev.textEnd()) processor.processChunk(ev.runFinished()) @@ -3161,22 +3168,22 @@ describe('StreamProcessor', () => { const processor = new StreamProcessor() processor.processChunk(ev.runStarted()) - processor.processChunk(chunk('REASONING_START', { messageId: 'r-1' })) + processor.processChunk(chunk(EventType.REASONING_START, { messageId: 'r-1' })) processor.processChunk( - chunk('REASONING_MESSAGE_START', { + chunk(EventType.REASONING_MESSAGE_START, { messageId: 'r-1', role: 'reasoning', }), ) processor.processChunk(ev.stepStarted('step-1')) processor.processChunk( - chunk('REASONING_MESSAGE_CONTENT', { + chunk(EventType.REASONING_MESSAGE_CONTENT, { messageId: 'r-1', delta: 'Thinking...', }), ) processor.processChunk( - chunk('STEP_FINISHED', { + chunk(EventType.STEP_FINISHED, { stepName: 'step-1', stepId: 'step-1', content: 'Thinking...', @@ -3200,23 +3207,23 @@ describe('StreamProcessor', () => { // REASONING events before TEXT_MESSAGE_START — should not crash processor.processChunk(ev.runStarted()) - processor.processChunk(chunk('REASONING_START', { messageId: 'r-1' })) + processor.processChunk(chunk(EventType.REASONING_START, { messageId: 'r-1' })) processor.processChunk( - chunk('REASONING_MESSAGE_START', { + chunk(EventType.REASONING_MESSAGE_START, { messageId: 'r-1', role: 'reasoning', }), ) processor.processChunk( - chunk('REASONING_MESSAGE_CONTENT', { + chunk(EventType.REASONING_MESSAGE_CONTENT, { messageId: 'r-1', delta: 'thinking', }), ) processor.processChunk( - chunk('REASONING_MESSAGE_END', { messageId: 'r-1' }), + chunk(EventType.REASONING_MESSAGE_END, { messageId: 'r-1' }), ) - processor.processChunk(chunk('REASONING_END', { messageId: 'r-1' })) + processor.processChunk(chunk(EventType.REASONING_END, { messageId: 'r-1' })) // Should not throw, onThinkingUpdate should still fire since // ensureAssistantMessage creates one @@ -3230,17 +3237,17 @@ describe('StreamProcessor', () => { processor.processChunk(ev.textStart()) // These are no-ops but should not throw - processor.processChunk(chunk('REASONING_START', { messageId: 'r-1' })) + processor.processChunk(chunk(EventType.REASONING_START, { messageId: 'r-1' })) processor.processChunk( - chunk('REASONING_MESSAGE_START', { + chunk(EventType.REASONING_MESSAGE_START, { messageId: 'r-1', role: 'reasoning', }), ) 
processor.processChunk(
-      chunk('REASONING_MESSAGE_END', { messageId: 'r-1' }),
+      chunk(EventType.REASONING_MESSAGE_END, { messageId: 'r-1' }),
     )
-    processor.processChunk(chunk('REASONING_END', { messageId: 'r-1' }))
+    processor.processChunk(chunk(EventType.REASONING_END, { messageId: 'r-1' }))
 
     // No crash = success
     expect(processor.getMessages()).toBeDefined()
@@ -3264,7 +3271,7 @@
       }),
     )
     processor.processChunk(
-      chunk('TOOL_CALL_RESULT', {
+      chunk(EventType.TOOL_CALL_RESULT, {
         messageId: 'tool-result-1',
         toolCallId: 'tc-1',
         content: '{"temp": 72}',
diff --git a/packages/typescript/ai/tests/stream-to-response.test.ts b/packages/typescript/ai/tests/stream-to-response.test.ts
index f57425fb0..e3248fa9c 100644
--- a/packages/typescript/ai/tests/stream-to-response.test.ts
+++ b/packages/typescript/ai/tests/stream-to-response.test.ts
@@ -3,6 +3,7 @@ import {
   toServerSentEventsStream,
   toServerSentEventsResponse,
 } from '../src/stream-to-response'
+import { EventType } from '../src/types'
 import type { StreamChunk } from '../src/types'
 
 // Helper to create mock async iterable
@@ -228,13 +229,13 @@ describe('toServerSentEventsStream', () => {
   it('should handle stream errors and send error chunk', async () => {
     async function* errorStream(): AsyncGenerator<StreamChunk> {
       yield {
-        type: 'TEXT_MESSAGE_CONTENT',
+        type: EventType.TEXT_MESSAGE_CONTENT,
         messageId: 'msg-1',
         model: 'test',
         timestamp: Date.now(),
         delta: 'Test',
         content: 'Test',
-      } as unknown as StreamChunk
+      }
       throw new Error('Stream error')
     }
diff --git a/packages/typescript/ai/tests/strip-to-spec-middleware.test.ts b/packages/typescript/ai/tests/strip-to-spec-middleware.test.ts
index beb4aa1fa..e27a7e7a7 100644
--- a/packages/typescript/ai/tests/strip-to-spec-middleware.test.ts
+++ b/packages/typescript/ai/tests/strip-to-spec-middleware.test.ts
@@ -1,14 +1,21 @@
-import { describe, it, expect } from 'vitest'
+import { describe, expect, it } from 'vitest'
 import { stripToSpec } from '../src/strip-to-spec-middleware'
+import { EventType } from '../src/types'
 import type { StreamChunk } from '../src/types'
 
-function makeChunk(type: string, fields: Record<string, unknown>): StreamChunk {
-  return { type, timestamp: Date.now(), ...fields } as unknown as StreamChunk
+function makeChunk<T extends EventType>(
+  type: T,
+  fields: Record<string, unknown>,
+): Extract<StreamChunk, { type: T }> {
+  return { type, timestamp: Date.now(), ...fields } as Extract<
+    StreamChunk,
+    { type: T }
+  >
+}
 
 describe('stripToSpec', () => {
   it('strips deprecated nested error from RUN_ERROR, keeps flat message/code', () => {
-    const chunk = makeChunk('RUN_ERROR', {
+    const chunk = makeChunk(EventType.RUN_ERROR, {
       message: 'Something went wrong',
       code: 'INTERNAL_ERROR',
       error: { message: 'Something went wrong' },
@@ -22,7 +29,7 @@
   })
 
   it('passes through all other events unchanged', () => {
-    const chunk = makeChunk('TOOL_CALL_START', {
+    const chunk = makeChunk(EventType.TOOL_CALL_START, {
       toolCallId: 'tc-1',
       toolCallName: 'getTodos',
       toolName: 'getTodos',
@@ -35,7 +42,7 @@
   })
 
   it('keeps model, content, finishReason, usage, result, etc.', () => {
-    const chunk = makeChunk('RUN_FINISHED', {
+    const chunk = makeChunk(EventType.RUN_FINISHED, {
       runId: 'run-1',
       threadId: 'thread-1',
       model: 'gpt-4o',
@@ -49,7 +56,7 @@
   })
 
   it('keeps toolName, stepId, and other deprecated aliases (passthrough)', () => {
-    const chunk = makeChunk('TOOL_CALL_END', {
+    const chunk = makeChunk(EventType.TOOL_CALL_END, {
       toolCallId: 'tc-1',
       toolCallName: 'getTodos',
       toolName: 'getTodos',
diff --git a/packages/typescript/ai/tests/test-utils.ts b/packages/typescript/ai/tests/test-utils.ts
index 978c38387..a2fbf4af0 100644
--- a/packages/typescript/ai/tests/test-utils.ts
+++ b/packages/typescript/ai/tests/test-utils.ts
@@ -20,14 +20,17 @@ import type {
 // ============================================================================
 // Chunk factory
 // ============================================================================
 
-/** Escape hatch for tests that deliberately construct off-spec chunks (e.g.
- * to exercise deprecated-field handling or malformed input). Prefer the
- * strictly-typed `ev.*` builders below for normal cases. */
-export function chunk(
-  type: string,
-  fields: Record<string, unknown> = {},
-): StreamChunk {
-  return { type, timestamp: Date.now(), ...fields } as unknown as StreamChunk
+/** Builds a typed StreamChunk by event type. Narrows the return to the
+ * matching variant via `Extract`; `fields` stays loosely typed, with one
+ * cast at the boundary. Pass `EventType.X` for `type`. */
+export function chunk<T extends EventType>(
+  type: T,
+  fields?: Record<string, unknown>,
+): Extract<StreamChunk, { type: T }> {
+  return { type, timestamp: Date.now(), ...fields } as Extract<
+    StreamChunk,
+    { type: T }
+  >
+}
 
 // ============================================================================

From d205b2387fa6ef3f6d19a194d2384200f5357c97 Mon Sep 17 00:00:00 2001
From: "autofix-ci[bot]" <114827586+autofix-ci[bot]@users.noreply.github.com>
Date: Thu, 14 May 2026 01:23:05 +0000
Subject: [PATCH 49/49] ci: apply automated fixes

---
 .../ai-client/tests/chat-client.test.ts       | 74 +++++++++----------
 .../ai-client/tests/generation-client.test.ts |  6 +-
 .../ai/tests/stream-processor.test.ts         | 55 ++++++++++----
 3 files changed, 81 insertions(+), 54 deletions(-)

diff --git a/packages/typescript/ai-client/tests/chat-client.test.ts b/packages/typescript/ai-client/tests/chat-client.test.ts
index 42c7fb9bc..2633c2677 100644
--- a/packages/typescript/ai-client/tests/chat-client.test.ts
+++ b/packages/typescript/ai-client/tests/chat-client.test.ts
@@ -157,7 +157,7 @@ describe('ChatClient', () => {
         model: 'test',
         timestamp: Date.now(),
         finishReason: 'stop',
-      } ,
+      },
     ])
 
     const client = new ChatClient({ connection: adapter })
@@ -263,7 +263,7 @@ describe('ChatClient', () => {
         timestamp: Date.now(),
         delta: 'H',
         content: 'H',
-      } ,
+      },
     ])
 
     const client = new ChatClient({ connection: adapter })
@@ -323,7 +323,7 @@ describe('ChatClient', () => {
         timestamp: Date.now(),
         delta: 'H',
         content: 'H',
-      } ,
+      },
     ])
 
     const client = new ChatClient({ connection: adapter })
@@ -358,7 +358,7 @@
        threadId: 'thread-1',
        model:
'test', timestamp: Date.now(), - } , + }, { type: EventType.TEXT_MESSAGE_CONTENT, messageId: 'msg-1', @@ -440,7 +440,7 @@ describe('ChatClient', () => { timestamp: Date.now(), delta: 'Hi', content: 'Hi', - } , + }, { type: EventType.RUN_FINISHED, runId: 'run-1', @@ -448,7 +448,7 @@ describe('ChatClient', () => { model: 'test', timestamp: Date.now(), finishReason: 'stop', - } , + }, ] const adapter = createSubscribeAdapter(chunks) const client = new ChatClient({ connection: adapter }) @@ -558,14 +558,14 @@ describe('ChatClient', () => { threadId: 'thread-1', model: 'test', timestamp: Date.now(), - } , + }, { type: EventType.RUN_STARTED, runId: 'run-1', threadId: 'thread-1', model: 'test', timestamp: Date.now(), - } , + }, { type: EventType.TEXT_MESSAGE_CONTENT, messageId: 'msg-1', @@ -573,7 +573,7 @@ describe('ChatClient', () => { timestamp: Date.now(), delta: 'Hi', content: 'Hi', - } , + }, { type: EventType.RUN_FINISHED, runId: 'run-1', @@ -581,7 +581,7 @@ describe('ChatClient', () => { model: 'test', timestamp: Date.now(), finishReason: 'stop', - } , + }, { type: EventType.RUN_FINISHED, runId: 'run-1', @@ -589,7 +589,7 @@ describe('ChatClient', () => { model: 'test', timestamp: Date.now(), finishReason: 'stop', - } , + }, ] const adapter = createSubscribeAdapter(chunks) const generatingChanges: Array = [] @@ -613,7 +613,7 @@ describe('ChatClient', () => { threadId: 'thread-1', model: 'test', timestamp: Date.now(), - } , + }, { type: EventType.TEXT_MESSAGE_CONTENT, messageId: 'msg-1', @@ -621,7 +621,7 @@ describe('ChatClient', () => { timestamp: Date.now(), delta: 'A', content: 'A', - } , + }, { type: EventType.RUN_FINISHED, runId: 'run-1', @@ -629,7 +629,7 @@ describe('ChatClient', () => { model: 'test', timestamp: Date.now(), finishReason: 'stop', - } , + }, ] const adapter = createSubscribeAdapter(chunks) const generatingChanges: Array = [] @@ -689,14 +689,14 @@ describe('ChatClient', () => { threadId: 'thread-1', model: 'test', timestamp: Date.now(), - } , + }, { type: EventType.RUN_STARTED, runId: 'run-2', threadId: 'thread-1', model: 'test', timestamp: Date.now(), - } , + }, ) wake.fn?.() await new Promise((resolve) => setTimeout(resolve, 20)) @@ -711,7 +711,7 @@ describe('ChatClient', () => { model: 'test', timestamp: Date.now(), finishReason: 'stop', - } ) + }) wake.fn?.() await new Promise((resolve) => setTimeout(resolve, 20)) @@ -725,7 +725,7 @@ describe('ChatClient', () => { model: 'test', timestamp: Date.now(), finishReason: 'stop', - } ) + }) wake.fn?.() await new Promise((resolve) => setTimeout(resolve, 20)) @@ -778,14 +778,14 @@ describe('ChatClient', () => { threadId: 'thread-1', model: 'test', timestamp: Date.now(), - } , + }, { type: EventType.RUN_STARTED, runId: 'run-2', threadId: 'thread-1', model: 'test', timestamp: Date.now(), - } , + }, ) wake.fn?.() await new Promise((resolve) => setTimeout(resolve, 20)) @@ -799,7 +799,7 @@ describe('ChatClient', () => { model: 'test', timestamp: Date.now(), error: { message: 'session crashed' }, - } ) + }) wake.fn?.() await new Promise((resolve) => setTimeout(resolve, 20)) @@ -1376,7 +1376,7 @@ describe('ChatClient', () => { timestamp: Date.now(), delta: 'H', content: 'H', - } , + }, ], chunkDelay: 50, }) @@ -2020,21 +2020,21 @@ describe('ChatClient', () => { timestamp: Date.now(), toolCallName: 'dangerous_tool_call_2', index: 0, - } , + }, { type: EventType.TOOL_CALL_ARGS, toolCallId: 'tc-2', model: 'test', timestamp: Date.now(), delta: '{}', - } , + }, { type: EventType.TOOL_CALL_END, toolCallId: 'tc-2', toolName: 
'dangerous_tool_2', model: 'test', timestamp: Date.now(), - } , + }, { type: EventType.CUSTOM, model: 'test', @@ -2046,7 +2046,7 @@ describe('ChatClient', () => { input: {}, approval: { id: 'approval-2', needsApproval: true }, }, - } , + }, ] for (const chunk of preChunks) yield chunk @@ -2163,7 +2163,7 @@ describe('ChatClient', () => { threadId: 'thread-1', model: 'test', timestamp: Date.now(), - } , + }, { type: EventType.TEXT_MESSAGE_START, messageId: 'msg-a', @@ -2190,7 +2190,7 @@ describe('ChatClient', () => { threadId: 'thread-1', model: 'test', timestamp: Date.now(), - } , + }, { type: EventType.TEXT_MESSAGE_START, messageId: 'msg-b', @@ -2217,7 +2217,7 @@ describe('ChatClient', () => { model: 'test', timestamp: Date.now(), finishReason: 'stop', - } ) + }) wake.fn?.() await new Promise((resolve) => setTimeout(resolve, 20)) @@ -2257,7 +2257,7 @@ describe('ChatClient', () => { model: 'test', timestamp: Date.now(), finishReason: 'stop', - } ) + }) wake.fn?.() await new Promise((resolve) => setTimeout(resolve, 20)) @@ -2322,7 +2322,7 @@ describe('ChatClient', () => { threadId: 'thread-1', model: 'test', timestamp: Date.now(), - } , + }, { type: EventType.TEXT_MESSAGE_CONTENT, messageId: 'asst-1', @@ -2337,7 +2337,7 @@ describe('ChatClient', () => { model: 'test', timestamp: Date.now(), finishReason: 'stop', - } , + }, ) wake.fn?.() await new Promise((resolve) => setTimeout(resolve, 20)) diff --git a/packages/typescript/ai-client/tests/generation-client.test.ts b/packages/typescript/ai-client/tests/generation-client.test.ts index 1dbae462a..216bfded9 100644 --- a/packages/typescript/ai-client/tests/generation-client.test.ts +++ b/packages/typescript/ai-client/tests/generation-client.test.ts @@ -244,20 +244,20 @@ describe('GenerationClient', () => { runId: 'run-1', threadId: 'thread-1', timestamp: Date.now(), - } , + }, { type: EventType.CUSTOM, name: 'generation:result', value: { id: '1' }, timestamp: Date.now(), - } , + }, { type: EventType.RUN_FINISHED, runId: 'run-1', threadId: 'thread-1', finishReason: 'stop', timestamp: Date.now(), - } , + }, ] const connection = createMockConnection(chunks) diff --git a/packages/typescript/ai/tests/stream-processor.test.ts b/packages/typescript/ai/tests/stream-processor.test.ts index a171351a3..0b3db9cfa 100644 --- a/packages/typescript/ai/tests/stream-processor.test.ts +++ b/packages/typescript/ai/tests/stream-processor.test.ts @@ -44,10 +44,14 @@ const ev = { runStarted: (runId = 'run-1', threadId = 'thread-1') => chunk(EventType.RUN_STARTED, { runId, threadId }), textStart: (messageId = 'msg-1') => - chunk(EventType.TEXT_MESSAGE_START, { messageId, role: 'assistant' as const }), + chunk(EventType.TEXT_MESSAGE_START, { + messageId, + role: 'assistant' as const, + }), textContent: (delta: string, messageId = 'msg-1') => chunk(EventType.TEXT_MESSAGE_CONTENT, { messageId, delta }), - textEnd: (messageId = 'msg-1') => chunk(EventType.TEXT_MESSAGE_END, { messageId }), + textEnd: (messageId = 'msg-1') => + chunk(EventType.TEXT_MESSAGE_END, { messageId }), toolStart: (toolCallId: string, toolCallName: string, index?: number) => chunk(EventType.TOOL_CALL_START, { toolCallId, @@ -84,7 +88,8 @@ const ev = { chunk(EventType.STEP_STARTED, { stepId, stepType }), stepFinished: (delta: string, stepId = 'step-1') => chunk(EventType.STEP_FINISHED, { stepId, delta }), - custom: (name: string, value?: unknown) => chunk(EventType.CUSTOM, { name, value }), + custom: (name: string, value?: unknown) => + chunk(EventType.CUSTOM, { name, value }), } /** Events object 
with vi.fn() mocks for assertions. */ @@ -1659,7 +1664,9 @@ describe('StreamProcessor', () => { processor.processChunk( chunk(EventType.RUN_STARTED, { runId: 'run-1', threadId: 'thread-1' }), ) - processor.processChunk(chunk(EventType.TEXT_MESSAGE_END, { messageId: 'msg-1' })) + processor.processChunk( + chunk(EventType.TEXT_MESSAGE_END, { messageId: 'msg-1' }), + ) processor.processChunk( chunk(EventType.STEP_STARTED, { stepName: 'step-1', stepId: 'step-1' }), ) @@ -1669,7 +1676,9 @@ describe('StreamProcessor', () => { state: { key: 'val' }, }), ) - processor.processChunk(chunk(EventType.STATE_DELTA, { delta: [{ key: 'val' }] })) + processor.processChunk( + chunk(EventType.STATE_DELTA, { delta: [{ key: 'val' }] }), + ) // No messages created (none of these are content-bearing) expect(processor.getMessages()).toHaveLength(0) @@ -3082,7 +3091,9 @@ describe('StreamProcessor', () => { processor.processChunk(ev.runStarted()) processor.processChunk(ev.textStart()) - processor.processChunk(chunk(EventType.REASONING_START, { messageId: 'r-1' })) + processor.processChunk( + chunk(EventType.REASONING_START, { messageId: 'r-1' }), + ) processor.processChunk( chunk(EventType.REASONING_MESSAGE_START, { messageId: 'r-1', @@ -3104,7 +3115,9 @@ describe('StreamProcessor', () => { processor.processChunk( chunk(EventType.REASONING_MESSAGE_END, { messageId: 'r-1' }), ) - processor.processChunk(chunk(EventType.REASONING_END, { messageId: 'r-1' })) + processor.processChunk( + chunk(EventType.REASONING_END, { messageId: 'r-1' }), + ) // Should fire onThinkingUpdate with accumulated content expect(events.onThinkingUpdate).toHaveBeenCalledTimes(2) @@ -3128,7 +3141,9 @@ describe('StreamProcessor', () => { processor.processChunk(ev.runStarted()) processor.processChunk(ev.textStart()) - processor.processChunk(chunk(EventType.REASONING_START, { messageId: 'r-1' })) + processor.processChunk( + chunk(EventType.REASONING_START, { messageId: 'r-1' }), + ) processor.processChunk( chunk(EventType.REASONING_MESSAGE_START, { messageId: 'r-1', @@ -3144,7 +3159,9 @@ describe('StreamProcessor', () => { processor.processChunk( chunk(EventType.REASONING_MESSAGE_END, { messageId: 'r-1' }), ) - processor.processChunk(chunk(EventType.REASONING_END, { messageId: 'r-1' })) + processor.processChunk( + chunk(EventType.REASONING_END, { messageId: 'r-1' }), + ) processor.processChunk(ev.textContent('Answer')) processor.processChunk(ev.textEnd()) processor.processChunk(ev.runFinished()) @@ -3168,7 +3185,9 @@ describe('StreamProcessor', () => { const processor = new StreamProcessor() processor.processChunk(ev.runStarted()) - processor.processChunk(chunk(EventType.REASONING_START, { messageId: 'r-1' })) + processor.processChunk( + chunk(EventType.REASONING_START, { messageId: 'r-1' }), + ) processor.processChunk( chunk(EventType.REASONING_MESSAGE_START, { messageId: 'r-1', @@ -3207,7 +3226,9 @@ describe('StreamProcessor', () => { // REASONING events before TEXT_MESSAGE_START — should not crash processor.processChunk(ev.runStarted()) - processor.processChunk(chunk(EventType.REASONING_START, { messageId: 'r-1' })) + processor.processChunk( + chunk(EventType.REASONING_START, { messageId: 'r-1' }), + ) processor.processChunk( chunk(EventType.REASONING_MESSAGE_START, { messageId: 'r-1', @@ -3223,7 +3244,9 @@ describe('StreamProcessor', () => { processor.processChunk( chunk(EventType.REASONING_MESSAGE_END, { messageId: 'r-1' }), ) - processor.processChunk(chunk(EventType.REASONING_END, { messageId: 'r-1' })) + processor.processChunk( + 
chunk(EventType.REASONING_END, { messageId: 'r-1' }), + ) // Should not throw, onThinkingUpdate should still fire since // ensureAssistantMessage creates one @@ -3237,7 +3260,9 @@ describe('StreamProcessor', () => { processor.processChunk(ev.textStart()) // These are no-ops but should not throw - processor.processChunk(chunk(EventType.REASONING_START, { messageId: 'r-1' })) + processor.processChunk( + chunk(EventType.REASONING_START, { messageId: 'r-1' }), + ) processor.processChunk( chunk(EventType.REASONING_MESSAGE_START, { messageId: 'r-1', @@ -3247,7 +3272,9 @@ describe('StreamProcessor', () => { processor.processChunk( chunk(EventType.REASONING_MESSAGE_END, { messageId: 'r-1' }), ) - processor.processChunk(chunk(EventType.REASONING_END, { messageId: 'r-1' })) + processor.processChunk( + chunk(EventType.REASONING_END, { messageId: 'r-1' }), + ) // No crash = success expect(processor.getMessages()).toBeDefined()