From 43224f5d053eeded56ddb90e5d1389e6dc1e0542 Mon Sep 17 00:00:00 2001
From: jsonbailey
Date: Fri, 1 May 2026 09:16:04 -0500
Subject: [PATCH] feat: add Vercel AI Runner protocol implementation (AIC-2388)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Adds VercelModelRunner and VercelRunnerFactory implementing the Runner
protocol introduced in JS PR 6. The runner takes an LDAICompletionConfig
at construction and exposes run(prompt: string) returning a RunnerResult,
preserving the v4/v5 token field handling already present in
VercelProvider. VercelModelRunner.run(prompt) prepends the config's
messages before the user prompt.

No agent runner is provided because the Vercel AI SDK is a thin model
layer rather than an agent framework; the existing getAIMetricsFromStream
helper covers the streaming use case and is exported alongside the new
Runner classes.

mapProvider is renamed to mapProviderName on both the helper module and
the VercelProvider class; the old mapProvider name is kept as a
deprecated alias. convertMessagesToVercel is added for parity with the
other provider helpers.

Co-Authored-By: Claude Opus 4.7
---
 .../__tests__/VercelModelRunner.test.ts       | 148 ++++++++++++++++++
 .../__tests__/VercelRunnerFactory.test.ts     |  76 +++++++++
 .../__tests__/vercelHelper.test.ts            |  96 ++++++++++++
 .../server-ai-vercel/src/VercelModelRunner.ts | 114 ++++++++++++++
 .../server-ai-vercel/src/VercelProvider.ts    |  15 +-
 .../src/VercelRunnerFactory.ts                | 139 ++++++++++++++++
 .../server-ai-vercel/src/index.ts             |   9 ++
 .../server-ai-vercel/src/vercelHelper.ts      |  89 +++++++++++
 8 files changed, 681 insertions(+), 5 deletions(-)
 create mode 100644 packages/ai-providers/server-ai-vercel/__tests__/VercelModelRunner.test.ts
 create mode 100644 packages/ai-providers/server-ai-vercel/__tests__/VercelRunnerFactory.test.ts
 create mode 100644 packages/ai-providers/server-ai-vercel/__tests__/vercelHelper.test.ts
 create mode 100644 packages/ai-providers/server-ai-vercel/src/VercelModelRunner.ts
 create mode 100644 packages/ai-providers/server-ai-vercel/src/VercelRunnerFactory.ts
 create mode 100644 packages/ai-providers/server-ai-vercel/src/vercelHelper.ts
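For reviewers, a minimal end-to-end sketch of the surface this patch adds. The package import path and the config literal are assumptions for illustration; in this repo the classes live under packages/ai-providers/server-ai-vercel/src, and real configs come from the LaunchDarkly AI SDK rather than being written inline:

    import { VercelRunnerFactory } from '@launchdarkly/server-sdk-ai-vercel'; // import path assumed

    async function answer(prompt: string): Promise<string> {
      // Mirrors VercelRunnerFactory.create() / createModel() introduced below.
      const factory = await VercelRunnerFactory.create();
      const runner = await factory.createModel({
        key: 'completion', // illustrative config; normally served by LaunchDarkly
        enabled: true,
        provider: { name: 'openai' },
        model: { name: 'gpt-4o', parameters: { temperature: 0.2 } },
      });
      const result = await runner.run(prompt);
      return result.content;
    }
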
diff --git a/packages/ai-providers/server-ai-vercel/__tests__/VercelModelRunner.test.ts b/packages/ai-providers/server-ai-vercel/__tests__/VercelModelRunner.test.ts
new file mode 100644
index 0000000000..9f27de7114
--- /dev/null
+++ b/packages/ai-providers/server-ai-vercel/__tests__/VercelModelRunner.test.ts
@@ -0,0 +1,148 @@
+import { generateObject, generateText, jsonSchema } from 'ai';
+
+import type { LDAICompletionConfig } from '@launchdarkly/server-sdk-ai';
+
+import { VercelModelRunner } from '../src/VercelModelRunner';
+
+jest.mock('ai', () => ({
+  generateText: jest.fn(),
+  generateObject: jest.fn(),
+  jsonSchema: jest.fn((schema) => schema),
+}));
+
+const mockLogger = {
+  warn: jest.fn(),
+  info: jest.fn(),
+  error: jest.fn(),
+  debug: jest.fn(),
+};
+
+const baseConfig: LDAICompletionConfig = {
+  key: 'completion',
+  enabled: true,
+  model: { name: 'mock' },
+};
+
+describe('VercelModelRunner', () => {
+  const fakeModel = { name: 'mock' };
+  let runner: VercelModelRunner;
+
+  beforeEach(() => {
+    runner = new VercelModelRunner(fakeModel as any, baseConfig, {}, mockLogger);
+    jest.clearAllMocks();
+  });
+
+  describe('run (chat completion)', () => {
+    it('returns a successful RunnerResult with content, metrics, and raw response', async () => {
+      const result = {
+        text: 'Hi!',
+        usage: { totalTokens: 12, promptTokens: 7, completionTokens: 5 },
+      };
+      (generateText as jest.Mock).mockResolvedValue(result);
+
+      const out = await runner.run('hello');
+
+      expect(generateText).toHaveBeenCalledWith({
+        model: fakeModel,
+        messages: [{ role: 'user', content: 'hello' }],
+        experimental_telemetry: { isEnabled: true },
+      });
+      expect(out.content).toBe('Hi!');
+      expect(out.metrics).toEqual({
+        success: true,
+        usage: { total: 12, input: 7, output: 5 },
+      });
+      expect(out.raw).toBe(result);
+    });
+
+    it('prepends config messages before the user prompt', async () => {
+      (generateText as jest.Mock).mockResolvedValue({
+        text: 'reply',
+        usage: { totalTokens: 1, promptTokens: 1, completionTokens: 0 },
+      });
+
+      const configWithMessages: LDAICompletionConfig = {
+        ...baseConfig,
+        messages: [{ role: 'system', content: 'You are X' }],
+      };
+      const r = new VercelModelRunner(fakeModel as any, configWithMessages, {}, mockLogger);
+      await r.run('hi');
+
+      expect(generateText).toHaveBeenCalledWith({
+        model: fakeModel,
+        messages: [
+          { role: 'system', content: 'You are X' },
+          { role: 'user', content: 'hi' },
+        ],
+        experimental_telemetry: { isEnabled: true },
+      });
+    });
+
+    it('preserves v5 token field handling via getAIMetricsFromResponse', async () => {
+      (generateText as jest.Mock).mockResolvedValue({
+        text: 'ok',
+        usage: { totalTokens: 100, inputTokens: 40, outputTokens: 60 },
+      });
+
+      const out = await runner.run('hello');
+
+      expect(out.metrics.usage).toEqual({ total: 100, input: 40, output: 60 });
+    });
+
+    it('returns success=false when generateText throws', async () => {
+      const err = new Error('boom');
+      (generateText as jest.Mock).mockRejectedValue(err);
+
+      const out = await runner.run('hello');
+
+      expect(out.content).toBe('');
+      expect(out.metrics.success).toBe(false);
+      expect(mockLogger.warn).toHaveBeenCalledWith('Vercel AI model invocation failed:', err);
+    });
+  });
+
+  describe('run (structured output)', () => {
+    it('exposes parsed structured output via parsed', async () => {
+      const obj = { name: 'Ada', age: 36 };
+      (generateObject as jest.Mock).mockResolvedValue({
+        object: obj,
+        usage: { totalTokens: 30, promptTokens: 10, completionTokens: 20 },
+      });
+
+      const schema = { type: 'object' };
+      const out = await runner.run('tell', schema);
+
+      expect(jsonSchema).toHaveBeenCalledWith(schema);
+      expect(generateObject).toHaveBeenCalledWith({
+        model: fakeModel,
+        messages: [{ role: 'user', content: 'tell' }],
+        schema,
+        experimental_telemetry: { isEnabled: true },
+      });
+      expect(out.parsed).toEqual(obj);
+      expect(out.content).toBe(JSON.stringify(obj));
+      expect(out.metrics.success).toBe(true);
+    });
+
+    it('returns success=false when generateObject throws', async () => {
+      const err = new Error('struct boom');
+      (generateObject as jest.Mock).mockRejectedValue(err);
+
+      const out = await runner.run('tell', { type: 'object' });
+
+      expect(out.content).toBe('');
+      expect(out.parsed).toBeUndefined();
+      expect(out.metrics.success).toBe(false);
+      expect(mockLogger.warn).toHaveBeenCalledWith(
+        'Vercel AI structured model invocation failed:',
+        err,
+      );
+    });
+  });
+
+  describe('getModel', () => {
+    it('returns the underlying Vercel AI model', () => {
+      expect(runner.getModel()).toBe(fakeModel);
+    });
+  });
+});
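One behavior these tests pin down that callers should rely on: run() resolves instead of rejecting when the underlying call fails, so consumers branch on metrics.success. A hedged consumption sketch (import path assumed):

    import type { VercelModelRunner } from '@launchdarkly/server-sdk-ai-vercel'; // path assumed

    async function runOnce(runner: VercelModelRunner): Promise<string | undefined> {
      const out = await runner.run('hello');
      if (!out.metrics.success) {
        // On failure, content is '' and the error was already logged via the injected logger.
        return undefined;
      }
      // out.metrics.usage (when present) is { total, input, output }.
      return out.content;
    }
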
diff --git a/packages/ai-providers/server-ai-vercel/__tests__/VercelRunnerFactory.test.ts b/packages/ai-providers/server-ai-vercel/__tests__/VercelRunnerFactory.test.ts
new file mode 100644
index 0000000000..aa994203fd
--- /dev/null
+++ b/packages/ai-providers/server-ai-vercel/__tests__/VercelRunnerFactory.test.ts
@@ -0,0 +1,76 @@
+import { VercelModelRunner } from '../src/VercelModelRunner';
+import { VercelRunnerFactory } from '../src/VercelRunnerFactory';
+
+describe('VercelRunnerFactory', () => {
+  describe('createModel', () => {
+    it('builds a VercelModelRunner with mapped parameters', async () => {
+      const fakeModel = { name: 'gpt-4o' };
+      jest.doMock('@ai-sdk/openai', () => ({
+        openai: jest.fn().mockReturnValue(fakeModel),
+      }));
+
+      const factory = new VercelRunnerFactory();
+      const runner = await factory.createModel({
+        key: 'completion',
+        enabled: true,
+        provider: { name: 'openai' },
+        model: { name: 'gpt-4o', parameters: { max_tokens: 100, temperature: 0.7 } },
+      });
+
+      expect(runner).toBeInstanceOf(VercelModelRunner);
+      expect(runner.getModel()).toBe(fakeModel);
+    });
+  });
+
+  describe('mapParameters', () => {
+    it('maps known LD parameters to Vercel AI SDK names', () => {
+      const params = VercelRunnerFactory.mapParameters({
+        max_tokens: 100,
+        max_completion_tokens: 200,
+        temperature: 0.7,
+        top_p: 0.9,
+        top_k: 50,
+        presence_penalty: 0.1,
+        frequency_penalty: 0.2,
+        stop: ['x', 'y'],
+        seed: 42,
+      });
+
+      expect(params).toEqual({
+        maxTokens: 100,
+        maxOutputTokens: 200,
+        temperature: 0.7,
+        topP: 0.9,
+        topK: 50,
+        presencePenalty: 0.1,
+        frequencyPenalty: 0.2,
+        stopSequences: ['x', 'y'],
+        seed: 42,
+      });
+    });
+
+    it('returns an empty object when parameters is undefined', () => {
+      expect(VercelRunnerFactory.mapParameters(undefined)).toEqual({});
+    });
+  });
+
+  describe('createVercelModel', () => {
+    it('throws on an unsupported provider', async () => {
+      await expect(
+        VercelRunnerFactory.createVercelModel({
+          key: 'k',
+          enabled: true,
+          provider: { name: 'unsupported' },
+          model: { name: 'm' },
+        }),
+      ).rejects.toThrow('Unsupported Vercel AI provider: unsupported');
+    });
+  });
+
+  describe('create', () => {
+    it('creates a VercelRunnerFactory instance', async () => {
+      const f = await VercelRunnerFactory.create();
+      expect(f).toBeInstanceOf(VercelRunnerFactory);
+    });
+  });
+});
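Worth noting alongside these tests: provider names pass through mapProviderName before the dynamic import, so LaunchDarkly's 'gemini' alias selects @ai-sdk/google. A sketch (import path and config literal illustrative; the matching @ai-sdk/* package must be installed as a peer):

    import { VercelRunnerFactory } from '@launchdarkly/server-sdk-ai-vercel'; // path assumed

    const factory = await VercelRunnerFactory.create();
    // 'gemini' is normalized to 'google', so this resolves @ai-sdk/google at runtime.
    const runner = await factory.createModel({
      key: 'completion',
      enabled: true,
      provider: { name: 'gemini' },
      model: { name: 'gemini-1.5-pro' },
    });
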
diff --git a/packages/ai-providers/server-ai-vercel/__tests__/vercelHelper.test.ts b/packages/ai-providers/server-ai-vercel/__tests__/vercelHelper.test.ts
new file mode 100644
index 0000000000..b61e7a5942
--- /dev/null
+++ b/packages/ai-providers/server-ai-vercel/__tests__/vercelHelper.test.ts
@@ -0,0 +1,96 @@
+import {
+  convertMessagesToVercel,
+  getAIMetricsFromResponse,
+  getAIMetricsFromStream,
+  mapProviderName,
+  mapUsageDataToLDTokenUsage,
+} from '../src/vercelHelper';
+
+describe('convertMessagesToVercel', () => {
+  it('passes role and content through unchanged', () => {
+    expect(
+      convertMessagesToVercel([
+        { role: 'system', content: 'sys' },
+        { role: 'user', content: 'u' },
+        { role: 'assistant', content: 'a' },
+      ]),
+    ).toEqual([
+      { role: 'system', content: 'sys' },
+      { role: 'user', content: 'u' },
+      { role: 'assistant', content: 'a' },
+    ]);
+  });
+});
+
+describe('mapProviderName', () => {
+  it('maps gemini to google (case-insensitive)', () => {
+    expect(mapProviderName('gemini')).toBe('google');
+    expect(mapProviderName('Gemini')).toBe('google');
+  });
+
+  it('returns the provider unchanged when no mapping exists', () => {
+    expect(mapProviderName('openai')).toBe('openai');
+    expect(mapProviderName('anthropic')).toBe('anthropic');
+  });
+});
+
+describe('mapUsageDataToLDTokenUsage', () => {
+  it('prefers v5 field names (inputTokens / outputTokens) over v4', () => {
+    const usage = mapUsageDataToLDTokenUsage({
+      totalTokens: 100,
+      inputTokens: 40,
+      outputTokens: 60,
+      promptTokens: 1,
+      completionTokens: 2,
+    });
+    expect(usage).toEqual({ total: 100, input: 40, output: 60 });
+  });
+
+  it('falls back to v4 field names when v5 is absent', () => {
+    const usage = mapUsageDataToLDTokenUsage({
+      totalTokens: 50,
+      promptTokens: 20,
+      completionTokens: 30,
+    });
+    expect(usage).toEqual({ total: 50, input: 20, output: 30 });
+  });
+});
+
+describe('getAIMetricsFromResponse', () => {
+  it('treats missing finishReason as success', () => {
+    expect(
+      getAIMetricsFromResponse({
+        usage: { totalTokens: 5, promptTokens: 2, completionTokens: 3 },
+      }),
+    ).toEqual({ success: true, usage: { total: 5, input: 2, output: 3 } });
+  });
+
+  it('marks success=false when finishReason is "error"', () => {
+    expect(
+      getAIMetricsFromResponse({
+        finishReason: 'error',
+        usage: { totalTokens: 10, promptTokens: 4, completionTokens: 6 },
+      }).success,
+    ).toBe(false);
+  });
+});
+
+describe('getAIMetricsFromStream', () => {
+  it('extracts usage from a successful stream', async () => {
+    const result = await getAIMetricsFromStream({
+      finishReason: Promise.resolve('stop'),
+      usage: Promise.resolve({ totalTokens: 100, promptTokens: 49, completionTokens: 51 }),
+    });
+    expect(result).toEqual({
+      success: true,
+      usage: { total: 100, input: 49, output: 51 },
+    });
+  });
+
+  it('marks success=false on error finishReason', async () => {
+    const result = await getAIMetricsFromStream({
+      finishReason: Promise.resolve('error'),
+    });
+    expect(result.success).toBe(false);
+  });
+});
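The precedence these suites encode, as a worked example: v5 field names win whenever they are present, and each field falls back to 0 independently when both spellings are absent (import path assumed):

    import { mapUsageDataToLDTokenUsage } from '@launchdarkly/server-sdk-ai-vercel'; // path assumed

    // v5 fields take precedence when both spellings are present;
    // output falls back to 0 because neither outputTokens nor completionTokens is set.
    mapUsageDataToLDTokenUsage({ totalTokens: 100, inputTokens: 40, promptTokens: 1 });
    // -> { total: 100, input: 40, output: 0 }

    // v4 fields are the fallback:
    mapUsageDataToLDTokenUsage({ totalTokens: 50, promptTokens: 20, completionTokens: 30 });
    // -> { total: 50, input: 20, output: 30 }
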
diff --git a/packages/ai-providers/server-ai-vercel/src/VercelModelRunner.ts b/packages/ai-providers/server-ai-vercel/src/VercelModelRunner.ts
new file mode 100644
index 0000000000..7c1bc061ff
--- /dev/null
+++ b/packages/ai-providers/server-ai-vercel/src/VercelModelRunner.ts
@@ -0,0 +1,114 @@
+import { generateObject, generateText, jsonSchema, LanguageModel } from 'ai';
+
+import type {
+  LDAICompletionConfig,
+  LDLogger,
+  LDMessage,
+  Runner,
+  RunnerResult,
+} from '@launchdarkly/server-sdk-ai';
+
+import type { VercelAIModelParameters } from './types';
+import { convertMessagesToVercel, getAIMetricsFromResponse } from './vercelHelper';
+
+/**
+ * Runner implementation for Vercel AI SDK chat models.
+ *
+ * Implements the unified `Runner` protocol via {@link run}. Returned by
+ * {@link VercelRunnerFactory.createModel}.
+ */
+export class VercelModelRunner implements Runner {
+  private _model: LanguageModel;
+  private _config: LDAICompletionConfig;
+  private _parameters: VercelAIModelParameters;
+  private _logger?: LDLogger;
+
+  constructor(
+    model: LanguageModel,
+    config: LDAICompletionConfig,
+    parameters: VercelAIModelParameters,
+    logger?: LDLogger,
+  ) {
+    this._model = model;
+    this._config = config;
+    this._parameters = parameters;
+    this._logger = logger;
+  }
+
+  /**
+   * Run the Vercel AI model with the given prompt.
+   *
+   * @param input The user prompt to send to the model.
+   * @param outputType Optional JSON schema for structured output. When provided,
+   * the parsed object is exposed via {@link RunnerResult.parsed}.
+   */
+  async run(input: string, outputType?: Record<string, unknown>): Promise<RunnerResult> {
+    const messages: LDMessage[] = [
+      ...(this._config.messages ?? []),
+      { role: 'user', content: input },
+    ];
+
+    if (outputType !== undefined) {
+      return this._runStructured(messages, outputType);
+    }
+    return this._runCompletion(messages);
+  }
+
+  /**
+   * Get the underlying Vercel AI model instance.
+   */
+  getModel(): LanguageModel {
+    return this._model;
+  }
+
+  private async _runCompletion(messages: LDMessage[]): Promise<RunnerResult> {
+    try {
+      const result = await generateText({
+        ...this._parameters,
+        model: this._model,
+        messages: convertMessagesToVercel(messages),
+        experimental_telemetry: { isEnabled: true },
+      });
+
+      const metrics = getAIMetricsFromResponse(result);
+      return { content: result.text, metrics, raw: result };
+    } catch (error) {
+      this._logger?.warn('Vercel AI model invocation failed:', error);
+      return {
+        content: '',
+        metrics: { success: false },
+      };
+    }
+  }
+
+  private async _runStructured(
+    messages: LDMessage[],
+    outputType: Record<string, unknown>,
+  ): Promise<RunnerResult> {
+    try {
+      const result = await generateObject({
+        ...this._parameters,
+        model: this._model,
+        messages: convertMessagesToVercel(messages),
+        schema: jsonSchema(outputType),
+        experimental_telemetry: { isEnabled: true },
+      });
+
+      const metrics = getAIMetricsFromResponse(result);
+      const parsed = result.object as Record<string, unknown>;
+
+      return {
+        content: JSON.stringify(parsed),
+        metrics,
+        raw: result,
+        parsed,
+      };
+    } catch (error) {
+      this._logger?.warn('Vercel AI structured model invocation failed:', error);
+      return {
+        content: '',
+        metrics: { success: false },
+      };
+    }
+  }
+}
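A usage sketch for the structured-output path above. The schema and the person shape are hypothetical; any JSON Schema object accepted by jsonSchema() works, and the import path is assumed:

    import type { VercelModelRunner } from '@launchdarkly/server-sdk-ai-vercel'; // path assumed

    async function extractPerson(runner: VercelModelRunner) {
      // Hypothetical schema for illustration.
      const personSchema = {
        type: 'object',
        properties: { name: { type: 'string' }, age: { type: 'number' } },
        required: ['name', 'age'],
      };
      const out = await runner.run('Extract the person from the conversation.', personSchema);
      // parsed carries the object; content carries the same data as a JSON string.
      return out.metrics.success ? (out.parsed as { name: string; age: number }) : undefined;
    }
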
diff --git a/packages/ai-providers/server-ai-vercel/src/VercelProvider.ts b/packages/ai-providers/server-ai-vercel/src/VercelProvider.ts
index 0f98e8ece2..b67a27c087 100644
--- a/packages/ai-providers/server-ai-vercel/src/VercelProvider.ts
+++ b/packages/ai-providers/server-ai-vercel/src/VercelProvider.ts
@@ -141,11 +141,9 @@ export class VercelProvider extends AIProvider {
   }
 
   /**
-   * Map LaunchDarkly provider names to LangChain provider names.
-   * This method enables seamless integration between LaunchDarkly's standardized
-   * provider naming and LangChain's naming conventions.
+   * Map LaunchDarkly provider names to Vercel AI SDK provider identifiers.
    */
-  static mapProvider(ldProviderName: string): string {
+  static mapProviderName(ldProviderName: string): string {
     const lowercasedName = ldProviderName.toLowerCase();
 
     const mapping: Record<string, string> = {
@@ -155,6 +153,13 @@ export class VercelProvider extends AIProvider {
     return mapping[lowercasedName] || lowercasedName;
   }
 
+  /**
+   * @deprecated Use {@link mapProviderName} instead.
+   */
+  static mapProvider(ldProviderName: string): string {
+    return VercelProvider.mapProviderName(ldProviderName);
+  }
+
   /**
    * Map Vercel AI SDK usage data to LaunchDarkly token usage.
    *
@@ -376,7 +381,7 @@ export class VercelProvider extends AIProvider {
    * @returns A Promise that resolves to a configured Vercel AI model
    */
   static async createVercelModel(aiConfig: LDAIConfig): Promise<LanguageModel> {
-    const providerName = VercelProvider.mapProvider(aiConfig.provider?.name || '');
+    const providerName = VercelProvider.mapProviderName(aiConfig.provider?.name || '');
     const modelName = aiConfig.model?.name || '';
 
     // Map provider names to their corresponding Vercel AI SDK imports
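The rename in these hunks is source-compatible for existing callers; both spellings resolve identically until the alias is removed:

    import { VercelProvider } from '@launchdarkly/server-sdk-ai-vercel'; // path assumed

    VercelProvider.mapProviderName('gemini'); // 'google' (new name)
    VercelProvider.mapProvider('gemini');     // 'google' (deprecated alias, same result)
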
diff --git a/packages/ai-providers/server-ai-vercel/src/VercelRunnerFactory.ts b/packages/ai-providers/server-ai-vercel/src/VercelRunnerFactory.ts
new file mode 100644
index 0000000000..7dce9ec9e2
--- /dev/null
+++ b/packages/ai-providers/server-ai-vercel/src/VercelRunnerFactory.ts
@@ -0,0 +1,139 @@
+import { LanguageModel } from 'ai';
+
+import type { LDAICompletionConfig, LDAIConfig, LDLogger } from '@launchdarkly/server-sdk-ai';
+
+import type { VercelAIModelParameters } from './types';
+import { mapProviderName } from './vercelHelper';
+import { VercelModelRunner } from './VercelModelRunner';
+
+/**
+ * Factory for creating Vercel AI runners.
+ *
+ * Vercel ships only a model runner; agent and graph runners are not provided
+ * because the Vercel AI SDK is a thin model layer rather than an agent
+ * framework.
+ */
+export class VercelRunnerFactory {
+  private _logger?: LDLogger;
+
+  constructor(logger?: LDLogger) {
+    this._logger = logger;
+  }
+
+  /**
+   * Static convenience constructor matching the other provider factories.
+   */
+  static async create(logger?: LDLogger): Promise<VercelRunnerFactory> {
+    return new VercelRunnerFactory(logger);
+  }
+
+  /**
+   * Create a model runner from a completion AI configuration.
+   */
+  async createModel(config: LDAICompletionConfig): Promise<VercelModelRunner> {
+    const model = await VercelRunnerFactory.createVercelModel(config);
+    const parameters = VercelRunnerFactory.mapParameters(config.model?.parameters);
+    return new VercelModelRunner(model, config, parameters, this._logger);
+  }
+
+  /**
+   * Create a Vercel AI model from an AI configuration.
+   * This method auto-detects the provider and creates the model instance.
+   */
+  static async createVercelModel(aiConfig: LDAIConfig): Promise<LanguageModel> {
+    const providerName = mapProviderName(aiConfig.provider?.name || '');
+    const modelName = aiConfig.model?.name || '';
+
+    switch (providerName) {
+      case 'openai':
+        try {
+          const { openai } = await import('@ai-sdk/openai');
+          return openai(modelName);
+        } catch (error) {
+          throw new Error(`Failed to load @ai-sdk/openai: ${error}`);
+        }
+      case 'anthropic':
+        try {
+          const { anthropic } = await import('@ai-sdk/anthropic');
+          return anthropic(modelName);
+        } catch (error) {
+          throw new Error(`Failed to load @ai-sdk/anthropic: ${error}`);
+        }
+      case 'google':
+        try {
+          const { google } = await import('@ai-sdk/google');
+          return google(modelName);
+        } catch (error) {
+          throw new Error(`Failed to load @ai-sdk/google: ${error}`);
+        }
+      case 'cohere':
+        try {
+          const { cohere } = await import('@ai-sdk/cohere');
+          return cohere(modelName);
+        } catch (error) {
+          throw new Error(`Failed to load @ai-sdk/cohere: ${error}`);
+        }
+      case 'mistral':
+        try {
+          const { mistral } = await import('@ai-sdk/mistral');
+          return mistral(modelName);
+        } catch (error) {
+          throw new Error(`Failed to load @ai-sdk/mistral: ${error}`);
+        }
+      default:
+        throw new Error(`Unsupported Vercel AI provider: ${providerName}`);
+    }
+  }
+
+  /**
+   * Map LaunchDarkly model parameters to Vercel AI SDK parameters.
+   *
+   * Parameter mappings:
+   * - max_tokens -> maxTokens
+   * - max_completion_tokens -> maxOutputTokens
+   * - temperature -> temperature
+   * - top_p -> topP
+   * - top_k -> topK
+   * - presence_penalty -> presencePenalty
+   * - frequency_penalty -> frequencyPenalty
+   * - stop -> stopSequences
+   * - seed -> seed
+   */
+  static mapParameters(parameters?: { [index: string]: unknown }): VercelAIModelParameters {
+    if (!parameters) {
+      return {};
+    }
+
+    const params: VercelAIModelParameters = {};
+
+    if (parameters.max_tokens !== undefined) {
+      params.maxTokens = parameters.max_tokens as number;
+    }
+    if (parameters.max_completion_tokens !== undefined) {
+      params.maxOutputTokens = parameters.max_completion_tokens as number;
+    }
+    if (parameters.temperature !== undefined) {
+      params.temperature = parameters.temperature as number;
+    }
+    if (parameters.top_p !== undefined) {
+      params.topP = parameters.top_p as number;
+    }
+    if (parameters.top_k !== undefined) {
+      params.topK = parameters.top_k as number;
+    }
+    if (parameters.presence_penalty !== undefined) {
+      params.presencePenalty = parameters.presence_penalty as number;
+    }
+    if (parameters.frequency_penalty !== undefined) {
+      params.frequencyPenalty = parameters.frequency_penalty as number;
+    }
+    if (parameters.stop !== undefined) {
+      params.stopSequences = parameters.stop as string[];
+    }
+    if (parameters.seed !== undefined) {
+      params.seed = parameters.seed as number;
+    }
+
+    return params;
+  }
+}
diff --git a/packages/ai-providers/server-ai-vercel/src/index.ts b/packages/ai-providers/server-ai-vercel/src/index.ts
index 6e7eb55023..92898caf0c 100644
--- a/packages/ai-providers/server-ai-vercel/src/index.ts
+++ b/packages/ai-providers/server-ai-vercel/src/index.ts
@@ -1,4 +1,13 @@
 export { VercelProvider } from './VercelProvider';
+export { VercelModelRunner } from './VercelModelRunner';
+export { VercelRunnerFactory } from './VercelRunnerFactory';
+export {
+  convertMessagesToVercel,
+  getAIMetricsFromResponse,
+  getAIMetricsFromStream,
+  mapProviderName,
+  mapUsageDataToLDTokenUsage,
+} from './vercelHelper';
 export type {
   VercelAIModelParameters,
   VercelAISDKConfig,
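For reference, the snake_case-to-camelCase translation that mapParameters (in VercelRunnerFactory above) performs, as a worked example:

    import { VercelRunnerFactory } from '@launchdarkly/server-sdk-ai-vercel'; // path assumed

    VercelRunnerFactory.mapParameters({ max_tokens: 256, top_p: 0.9, stop: ['END'] });
    // -> { maxTokens: 256, topP: 0.9, stopSequences: ['END'] }
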
diff --git a/packages/ai-providers/server-ai-vercel/src/vercelHelper.ts b/packages/ai-providers/server-ai-vercel/src/vercelHelper.ts
new file mode 100644
index 0000000000..add9b5d9f0
--- /dev/null
+++ b/packages/ai-providers/server-ai-vercel/src/vercelHelper.ts
@@ -0,0 +1,89 @@
+import type { LDAIMetrics, LDMessage, LDTokenUsage } from '@launchdarkly/server-sdk-ai';
+
+import type { ModelUsageTokens, StreamResponse, TextResponse } from './types';
+
+/**
+ * Convert LaunchDarkly messages to the Vercel AI SDK message format.
+ *
+ * The Vercel AI SDK accepts the same `{ role, content }` shape that LDMessage
+ * uses, so this helper currently performs a structural pass-through.
+ */
+export function convertMessagesToVercel(messages: LDMessage[]): LDMessage[] {
+  return messages.map((msg) => ({ role: msg.role, content: msg.content }));
+}
+
+/**
+ * Map LaunchDarkly provider names to Vercel AI SDK provider identifiers.
+ */
+export function mapProviderName(ldProviderName: string): string {
+  const lowercasedName = ldProviderName.toLowerCase();
+  const mapping: Record<string, string> = {
+    gemini: 'google',
+  };
+  return mapping[lowercasedName] || lowercasedName;
+}
+
+/**
+ * Map Vercel AI SDK usage data to LaunchDarkly token usage.
+ * Supports both v4 (promptTokens/completionTokens) and v5
+ * (inputTokens/outputTokens) field names.
+ */
+export function mapUsageDataToLDTokenUsage(usageData: ModelUsageTokens): LDTokenUsage {
+  const { totalTokens, inputTokens, outputTokens, promptTokens, completionTokens } = usageData;
+  return {
+    total: totalTokens ?? 0,
+    input: inputTokens ?? promptTokens ?? 0,
+    output: outputTokens ?? completionTokens ?? 0,
+  };
+}
+
+/**
+ * Get AI metrics from a Vercel AI SDK text response (e.g., generateText).
+ * Supports both v4 and v5 token field names.
+ */
+export function getAIMetricsFromResponse(response: TextResponse): LDAIMetrics {
+  const finishReason = response?.finishReason ?? 'unknown';
+
+  let usage: LDTokenUsage | undefined;
+  if (response?.totalUsage) {
+    usage = mapUsageDataToLDTokenUsage(response.totalUsage);
+  } else if (response?.usage) {
+    usage = mapUsageDataToLDTokenUsage(response.usage);
+  }
+
+  return {
+    success: finishReason !== 'error',
+    usage,
+  };
+}
+
+/**
+ * Get AI metrics from a Vercel AI SDK streaming result.
+ *
+ * Awaits the stream's terminal promises and prefers `totalUsage` over
+ * `usage` for cumulative usage across all steps.
+ */
+export async function getAIMetricsFromStream(stream: StreamResponse): Promise<LDAIMetrics> {
+  const finishReason = (await stream.finishReason?.catch(() => 'error')) ?? 'unknown';
+
+  let usage: LDTokenUsage | undefined;
+
+  if (stream.totalUsage) {
+    const usageData = await stream.totalUsage.catch(() => undefined);
+    if (usageData) {
+      usage = mapUsageDataToLDTokenUsage(usageData);
+    }
+  }
+
+  if (!usage && stream.usage) {
+    const usageData = await stream.usage.catch(() => undefined);
+    if (usageData) {
+      usage = mapUsageDataToLDTokenUsage(usageData);
+    }
+  }
+
+  return {
+    success: finishReason !== 'error',
+    usage,
+  };
+}
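Finally, the streaming case the commit message calls out, sketched with the Vercel AI SDK's streamText. This is a sketch under stated assumptions: field names should be verified against the installed SDK version, since v4 exposes a usage promise while v5 adds totalUsage, and the package import path is assumed:

    import { streamText, type LanguageModel } from 'ai';

    import { getAIMetricsFromStream } from '@launchdarkly/server-sdk-ai-vercel'; // path assumed

    async function streamAndTrack(model: LanguageModel, prompt: string) {
      const stream = streamText({ model, prompt });
      for await (const chunk of stream.textStream) {
        process.stdout.write(chunk);
      }
      // finishReason / usage / totalUsage settle once the stream completes;
      // getAIMetricsFromStream awaits them and prefers totalUsage when present.
      return getAIMetricsFromStream(stream);
    }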