diff --git a/packages/ai-providers/server-ai-vercel/__tests__/VercelModelRunner.test.ts b/packages/ai-providers/server-ai-vercel/__tests__/VercelModelRunner.test.ts
new file mode 100644
index 0000000000..4ca5d0ce93
--- /dev/null
+++ b/packages/ai-providers/server-ai-vercel/__tests__/VercelModelRunner.test.ts
@@ -0,0 +1,117 @@
+import { generateObject, generateText, jsonSchema } from 'ai';
+
+import { VercelModelRunner } from '../src/VercelModelRunner';
+
+jest.mock('ai', () => ({
+  generateText: jest.fn(),
+  generateObject: jest.fn(),
+  jsonSchema: jest.fn((schema) => schema),
+}));
+
+const mockLogger = {
+  warn: jest.fn(),
+  info: jest.fn(),
+  error: jest.fn(),
+  debug: jest.fn(),
+};
+
+describe('VercelModelRunner', () => {
+  const fakeModel = { name: 'mock' };
+  let runner: VercelModelRunner;
+
+  beforeEach(() => {
+    runner = new VercelModelRunner(fakeModel as any, {}, mockLogger);
+    jest.clearAllMocks();
+  });
+
+  describe('run (chat completion)', () => {
+    it('returns a successful RunnerResult with content, metrics, and raw response', async () => {
+      const result = {
+        text: 'Hi!',
+        usage: { totalTokens: 12, promptTokens: 7, completionTokens: 5 },
+      };
+      (generateText as jest.Mock).mockResolvedValue(result);
+
+      const out = await runner.run([{ role: 'user', content: 'hello' }]);
+
+      expect(generateText).toHaveBeenCalledWith({
+        model: fakeModel,
+        messages: [{ role: 'user', content: 'hello' }],
+        experimental_telemetry: { isEnabled: true },
+      });
+      expect(out.content).toBe('Hi!');
+      expect(out.metrics).toEqual({
+        success: true,
+        usage: { total: 12, input: 7, output: 5 },
+      });
+      expect(out.raw).toBe(result);
+    });
+
+    it('preserves v5 token field handling via getAIMetricsFromResponse', async () => {
+      (generateText as jest.Mock).mockResolvedValue({
+        text: 'ok',
+        usage: { totalTokens: 100, inputTokens: 40, outputTokens: 60 },
+      });
+
+      const out = await runner.run([{ role: 'user', content: 'hello' }]);
+
+      expect(out.metrics.usage).toEqual({ total: 100, input: 40, output: 60 });
+    });
+
+    it('returns success=false when generateText throws', async () => {
+      const err = new Error('boom');
+      (generateText as jest.Mock).mockRejectedValue(err);
+
+      const out = await runner.run([{ role: 'user', content: 'hello' }]);
+
+      expect(out.content).toBe('');
+      expect(out.metrics.success).toBe(false);
+      expect(mockLogger.warn).toHaveBeenCalledWith('Vercel AI model invocation failed:', err);
+    });
+  });
+
+  describe('run (structured output)', () => {
+    it('exposes parsed structured output via parsed', async () => {
+      const obj = { name: 'Ada', age: 36 };
+      (generateObject as jest.Mock).mockResolvedValue({
+        object: obj,
+        usage: { totalTokens: 30, promptTokens: 10, completionTokens: 20 },
+      });
+
+      const schema = { type: 'object' };
+      const out = await runner.run([{ role: 'user', content: 'tell' }], schema);
+
+      expect(jsonSchema).toHaveBeenCalledWith(schema);
+      expect(generateObject).toHaveBeenCalledWith({
+        model: fakeModel,
+        messages: [{ role: 'user', content: 'tell' }],
+        schema,
+        experimental_telemetry: { isEnabled: true },
+      });
+      expect(out.parsed).toEqual(obj);
+      expect(out.content).toBe(JSON.stringify(obj));
+      expect(out.metrics.success).toBe(true);
+    });
+
+    it('returns success=false when generateObject throws', async () => {
+      const err = new Error('struct boom');
+      (generateObject as jest.Mock).mockRejectedValue(err);
+
+      const out = await runner.run([{ role: 'user', content: 'tell' }], { type: 'object' });
+
+      expect(out.content).toBe('');
+      expect(out.parsed).toBeUndefined();
+      expect(out.metrics.success).toBe(false);
+      expect(mockLogger.warn).toHaveBeenCalledWith(
+        'Vercel AI structured model invocation failed:',
+        err,
+      );
+    });
+  });
+
+  describe('getModel', () => {
+    it('returns the underlying Vercel AI model', () => {
+      expect(runner.getModel()).toBe(fakeModel);
+    });
+  });
+});
diff --git a/packages/ai-providers/server-ai-vercel/__tests__/VercelRunnerFactory.test.ts b/packages/ai-providers/server-ai-vercel/__tests__/VercelRunnerFactory.test.ts
new file mode 100644
index 0000000000..aa994203fd
--- /dev/null
+++ b/packages/ai-providers/server-ai-vercel/__tests__/VercelRunnerFactory.test.ts
@@ -0,0 +1,76 @@
+import { VercelModelRunner } from '../src/VercelModelRunner';
+import { VercelRunnerFactory } from '../src/VercelRunnerFactory';
+
+describe('VercelRunnerFactory', () => {
+  describe('createModel', () => {
+    it('builds a VercelModelRunner with mapped parameters', async () => {
+      const fakeModel = { name: 'gpt-4o' };
+      jest.doMock('@ai-sdk/openai', () => ({
+        openai: jest.fn().mockReturnValue(fakeModel),
+      }));
+
+      const factory = new VercelRunnerFactory();
+      const runner = await factory.createModel({
+        key: 'completion',
+        enabled: true,
+        provider: { name: 'openai' },
+        model: { name: 'gpt-4o', parameters: { max_tokens: 100, temperature: 0.7 } },
+      });
+
+      expect(runner).toBeInstanceOf(VercelModelRunner);
+      expect(runner.getModel()).toBe(fakeModel);
+    });
+  });
+
+  describe('mapParameters', () => {
+    it('maps known LD parameters to Vercel AI SDK names', () => {
+      const params = VercelRunnerFactory.mapParameters({
+        max_tokens: 100,
+        max_completion_tokens: 200,
+        temperature: 0.7,
+        top_p: 0.9,
+        top_k: 50,
+        presence_penalty: 0.1,
+        frequency_penalty: 0.2,
+        stop: ['x', 'y'],
+        seed: 42,
+      });
+
+      expect(params).toEqual({
+        maxTokens: 100,
+        maxOutputTokens: 200,
+        temperature: 0.7,
+        topP: 0.9,
+        topK: 50,
+        presencePenalty: 0.1,
+        frequencyPenalty: 0.2,
+        stopSequences: ['x', 'y'],
+        seed: 42,
+      });
+    });
+
+    it('returns an empty object when parameters is undefined', () => {
+      expect(VercelRunnerFactory.mapParameters(undefined)).toEqual({});
+    });
+  });
+
+  describe('createVercelModel', () => {
+    it('throws on an unsupported provider', async () => {
+      await expect(
+        VercelRunnerFactory.createVercelModel({
+          key: 'k',
+          enabled: true,
+          provider: { name: 'unsupported' },
+          model: { name: 'm' },
+        }),
+      ).rejects.toThrow('Unsupported Vercel AI provider: unsupported');
+    });
+  });
+
+  describe('create', () => {
+    it('creates a VercelRunnerFactory instance', async () => {
+      const f = await VercelRunnerFactory.create();
+      expect(f).toBeInstanceOf(VercelRunnerFactory);
+    });
+  });
+});
diff --git a/packages/ai-providers/server-ai-vercel/__tests__/vercelHelper.test.ts b/packages/ai-providers/server-ai-vercel/__tests__/vercelHelper.test.ts
new file mode 100644
index 0000000000..b61e7a5942
--- /dev/null
+++ b/packages/ai-providers/server-ai-vercel/__tests__/vercelHelper.test.ts
@@ -0,0 +1,96 @@
+import {
+  convertMessagesToVercel,
+  getAIMetricsFromResponse,
+  getAIMetricsFromStream,
+  mapProviderName,
+  mapUsageDataToLDTokenUsage,
+} from '../src/vercelHelper';
+
+describe('convertMessagesToVercel', () => {
+  it('passes role and content through unchanged', () => {
+    expect(
+      convertMessagesToVercel([
+        { role: 'system', content: 'sys' },
+        { role: 'user', content: 'u' },
+        { role: 'assistant', content: 'a' },
+      ]),
+    ).toEqual([
+      { role: 'system', content: 'sys' },
+      { role: 'user', content: 'u' },
+      { role: 'assistant', content: 'a' },
+    ]);
+  });
+});
+
+describe('mapProviderName', () => {
+  it('maps gemini to google (case-insensitive)', () => {
+    expect(mapProviderName('gemini')).toBe('google');
+    expect(mapProviderName('Gemini')).toBe('google');
+  });
+
+  it('returns the provider unchanged when no mapping exists', () => {
+    expect(mapProviderName('openai')).toBe('openai');
+    expect(mapProviderName('anthropic')).toBe('anthropic');
+  });
+});
+
+describe('mapUsageDataToLDTokenUsage', () => {
+  it('prefers v5 field names (inputTokens / outputTokens) over v4', () => {
+    const usage = mapUsageDataToLDTokenUsage({
+      totalTokens: 100,
+      inputTokens: 40,
+      outputTokens: 60,
+      promptTokens: 1,
+      completionTokens: 2,
+    });
+    expect(usage).toEqual({ total: 100, input: 40, output: 60 });
+  });
+
+  it('falls back to v4 field names when v5 is absent', () => {
+    const usage = mapUsageDataToLDTokenUsage({
+      totalTokens: 50,
+      promptTokens: 20,
+      completionTokens: 30,
+    });
+    expect(usage).toEqual({ total: 50, input: 20, output: 30 });
+  });
+});
+
+describe('getAIMetricsFromResponse', () => {
+  it('treats missing finishReason as success', () => {
+    expect(
+      getAIMetricsFromResponse({
+        usage: { totalTokens: 5, promptTokens: 2, completionTokens: 3 },
+      }),
+    ).toEqual({ success: true, usage: { total: 5, input: 2, output: 3 } });
+  });
+
+  it('marks success=false when finishReason is "error"', () => {
+    expect(
+      getAIMetricsFromResponse({
+        finishReason: 'error',
+        usage: { totalTokens: 10, promptTokens: 4, completionTokens: 6 },
+      }).success,
+    ).toBe(false);
+  });
+});
+
+describe('getAIMetricsFromStream', () => {
+  it('extracts usage from a successful stream', async () => {
+    const result = await getAIMetricsFromStream({
+      finishReason: Promise.resolve('stop'),
+      usage: Promise.resolve({ totalTokens: 100, promptTokens: 49, completionTokens: 51 }),
+    });
+    expect(result).toEqual({
+      success: true,
+      usage: { total: 100, input: 49, output: 51 },
+    });
+  });
+
+  it('marks success=false on error finishReason', async () => {
+    const result = await getAIMetricsFromStream({
+      finishReason: Promise.resolve('error'),
+    });
+    expect(result.success).toBe(false);
+  });
+});
diff --git a/packages/ai-providers/server-ai-vercel/src/VercelModelRunner.ts b/packages/ai-providers/server-ai-vercel/src/VercelModelRunner.ts
new file mode 100644
index 0000000000..15e607b109
--- /dev/null
+++ b/packages/ai-providers/server-ai-vercel/src/VercelModelRunner.ts
@@ -0,0 +1,96 @@
+import { generateObject, generateText, jsonSchema, LanguageModel } from 'ai';
+
+import type { LDLogger, LDMessage, Runner, RunnerResult } from '@launchdarkly/server-sdk-ai';
+
+import type { VercelAIModelParameters } from './types';
+import { convertMessagesToVercel, getAIMetricsFromResponse } from './vercelHelper';
+
+/**
+ * Runner implementation for Vercel AI SDK chat models.
+ *
+ * Implements the unified `Runner` protocol via {@link run}. Returned by
+ * {@link VercelRunnerFactory.createModel}.
+ */
+export class VercelModelRunner implements Runner {
+  private _model: LanguageModel;
+  private _parameters: VercelAIModelParameters;
+  private _logger?: LDLogger;
+
+  constructor(model: LanguageModel, parameters: VercelAIModelParameters, logger?: LDLogger) {
+    this._model = model;
+    this._parameters = parameters;
+    this._logger = logger;
+  }
+
+  /**
+   * Run the Vercel AI model with the given messages.
+   *
+   * @param input Array of LDMessage objects
+   * @param outputType Optional JSON schema for structured output. When provided,
+   *   the parsed object is exposed via {@link RunnerResult.parsed}.
+   */
+  async run(input: LDMessage[], outputType?: Record<string, unknown>): Promise<RunnerResult> {
+    if (outputType !== undefined) {
+      return this._runStructured(input, outputType);
+    }
+    return this._runCompletion(input);
+  }
+
+  /**
+   * Get the underlying Vercel AI model instance.
+   */
+  getModel(): LanguageModel {
+    return this._model;
+  }
+
+  private async _runCompletion(messages: LDMessage[]): Promise<RunnerResult> {
+    try {
+      const result = await generateText({
+        ...this._parameters,
+        model: this._model,
+        messages: convertMessagesToVercel(messages),
+        experimental_telemetry: { isEnabled: true },
+      });
+
+      const metrics = getAIMetricsFromResponse(result);
+      return { content: result.text, metrics, raw: result };
+    } catch (error) {
+      this._logger?.warn('Vercel AI model invocation failed:', error);
+      return {
+        content: '',
+        metrics: { success: false },
+      };
+    }
+  }
+
+  private async _runStructured(
+    messages: LDMessage[],
+    outputType: Record<string, unknown>,
+  ): Promise<RunnerResult> {
+    try {
+      const result = await generateObject({
+        ...this._parameters,
+        model: this._model,
+        messages: convertMessagesToVercel(messages),
+        schema: jsonSchema(outputType),
+        experimental_telemetry: { isEnabled: true },
+      });
+
+      const metrics = getAIMetricsFromResponse(result);
+      const parsed = result.object as Record<string, unknown>;
+
+      return {
+        content: JSON.stringify(parsed),
+        metrics,
+        raw: result,
+        parsed,
+      };
+    } catch (error) {
+      this._logger?.warn('Vercel AI structured model invocation failed:', error);
+      return {
+        content: '',
+        metrics: { success: false },
+      };
+    }
+  }
+}
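
Reviewer note: a minimal usage sketch of the runner above, not part of this diff. The import specifier and model id are illustrative, and it assumes `@ai-sdk/openai` is installed with an API key in the environment; in practice the runner comes out of `VercelRunnerFactory.createModel` below.

```ts
import { openai } from '@ai-sdk/openai';
// Illustrative specifier; use this package's actual entry point.
import { VercelModelRunner } from '@launchdarkly/server-sdk-ai-vercel';

async function demo() {
  const runner = new VercelModelRunner(openai('gpt-4o'), { temperature: 0.2 });

  // Plain completion: the generated text lands on `content`.
  const chat = await runner.run([{ role: 'user', content: 'Say hi' }]);
  console.log(chat.content, chat.metrics.usage);

  // Structured output: pass a JSON schema; the object lands on `parsed`
  // and `content` carries its JSON serialization.
  const person = await runner.run([{ role: 'user', content: 'Introduce Ada' }], {
    type: 'object',
    properties: { name: { type: 'string' }, age: { type: 'number' } },
  });
  console.log(person.parsed);
}
```
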
diff --git a/packages/ai-providers/server-ai-vercel/src/VercelProvider.ts b/packages/ai-providers/server-ai-vercel/src/VercelProvider.ts
index 0f98e8ece2..b67a27c087 100644
--- a/packages/ai-providers/server-ai-vercel/src/VercelProvider.ts
+++ b/packages/ai-providers/server-ai-vercel/src/VercelProvider.ts
@@ -141,11 +141,9 @@ export class VercelProvider extends AIProvider {
   }
 
   /**
-   * Map LaunchDarkly provider names to LangChain provider names.
-   * This method enables seamless integration between LaunchDarkly's standardized
-   * provider naming and LangChain's naming conventions.
+   * Map LaunchDarkly provider names to Vercel AI SDK provider identifiers.
    */
-  static mapProvider(ldProviderName: string): string {
+  static mapProviderName(ldProviderName: string): string {
     const lowercasedName = ldProviderName.toLowerCase();
 
     const mapping: Record<string, string> = {
@@ -155,6 +153,13 @@ export class VercelProvider extends AIProvider {
     return mapping[lowercasedName] || lowercasedName;
   }
 
+  /**
+   * @deprecated Use {@link mapProviderName} instead.
+   */
+  static mapProvider(ldProviderName: string): string {
+    return VercelProvider.mapProviderName(ldProviderName);
+  }
+
   /**
    * Map Vercel AI SDK usage data to LaunchDarkly token usage.
    *
@@ -376,7 +381,7 @@ export class VercelProvider extends AIProvider {
    * @returns A Promise that resolves to a configured Vercel AI model
    */
   static async createVercelModel(aiConfig: LDAIConfig): Promise<LanguageModel> {
-    const providerName = VercelProvider.mapProvider(aiConfig.provider?.name || '');
+    const providerName = VercelProvider.mapProviderName(aiConfig.provider?.name || '');
     const modelName = aiConfig.model?.name || '';
 
     // Map provider names to their corresponding Vercel AI SDK imports
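
The rename is backwards-compatible: `mapProvider` survives as a deprecated alias, so existing callers keep working while new code moves over.

```ts
// Before (still works, but deprecated):
VercelProvider.mapProvider('gemini'); // -> 'google'

// After:
VercelProvider.mapProviderName('gemini'); // -> 'google'
```
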
diff --git a/packages/ai-providers/server-ai-vercel/src/VercelRunnerFactory.ts b/packages/ai-providers/server-ai-vercel/src/VercelRunnerFactory.ts
new file mode 100644
index 0000000000..6299e65597
--- /dev/null
+++ b/packages/ai-providers/server-ai-vercel/src/VercelRunnerFactory.ts
@@ -0,0 +1,139 @@
+import { LanguageModel } from 'ai';
+
+import type { LDAICompletionConfig, LDAIConfig, LDLogger } from '@launchdarkly/server-sdk-ai';
+
+import type { VercelAIModelParameters } from './types';
+import { mapProviderName } from './vercelHelper';
+import { VercelModelRunner } from './VercelModelRunner';
+
+/**
+ * Factory for creating Vercel AI runners.
+ *
+ * Vercel ships only a model runner; agent and graph runners are not provided
+ * because the Vercel AI SDK is a thin model layer rather than an agent
+ * framework.
+ */
+export class VercelRunnerFactory {
+  private _logger?: LDLogger;
+
+  constructor(logger?: LDLogger) {
+    this._logger = logger;
+  }
+
+  /**
+   * Static convenience constructor matching the other provider factories.
+   */
+  static async create(logger?: LDLogger): Promise<VercelRunnerFactory> {
+    return new VercelRunnerFactory(logger);
+  }
+
+  /**
+   * Create a model runner from a completion AI configuration.
+   */
+  async createModel(config: LDAICompletionConfig): Promise<VercelModelRunner> {
+    const model = await VercelRunnerFactory.createVercelModel(config);
+    const parameters = VercelRunnerFactory.mapParameters(config.model?.parameters);
+    return new VercelModelRunner(model, parameters, this._logger);
+  }
+
+  /**
+   * Create a Vercel AI model from an AI configuration.
+   * This method auto-detects the provider and creates the model instance.
+   */
+  static async createVercelModel(aiConfig: LDAIConfig): Promise<LanguageModel> {
+    const providerName = mapProviderName(aiConfig.provider?.name || '');
+    const modelName = aiConfig.model?.name || '';
+
+    switch (providerName) {
+      case 'openai':
+        try {
+          const { openai } = await import('@ai-sdk/openai');
+          return openai(modelName);
+        } catch (error) {
+          throw new Error(`Failed to load @ai-sdk/openai: ${error}`);
+        }
+      case 'anthropic':
+        try {
+          const { anthropic } = await import('@ai-sdk/anthropic');
+          return anthropic(modelName);
+        } catch (error) {
+          throw new Error(`Failed to load @ai-sdk/anthropic: ${error}`);
+        }
+      case 'google':
+        try {
+          const { google } = await import('@ai-sdk/google');
+          return google(modelName);
+        } catch (error) {
+          throw new Error(`Failed to load @ai-sdk/google: ${error}`);
+        }
+      case 'cohere':
+        try {
+          const { cohere } = await import('@ai-sdk/cohere');
+          return cohere(modelName);
+        } catch (error) {
+          throw new Error(`Failed to load @ai-sdk/cohere: ${error}`);
+        }
+      case 'mistral':
+        try {
+          const { mistral } = await import('@ai-sdk/mistral');
+          return mistral(modelName);
+        } catch (error) {
+          throw new Error(`Failed to load @ai-sdk/mistral: ${error}`);
+        }
+      default:
+        throw new Error(`Unsupported Vercel AI provider: ${providerName}`);
+    }
+  }
+
+  /**
+   * Map LaunchDarkly model parameters to Vercel AI SDK parameters.
+   *
+   * Parameter mappings:
+   * - max_tokens → maxTokens
+   * - max_completion_tokens → maxOutputTokens
+   * - temperature → temperature
+   * - top_p → topP
+   * - top_k → topK
+   * - presence_penalty → presencePenalty
+   * - frequency_penalty → frequencyPenalty
+   * - stop → stopSequences
+   * - seed → seed
+   */
+  static mapParameters(parameters?: { [index: string]: unknown }): VercelAIModelParameters {
+    if (!parameters) {
+      return {};
+    }
+
+    const params: VercelAIModelParameters = {};
+
+    if (parameters.max_tokens !== undefined) {
+      params.maxTokens = parameters.max_tokens as number;
+    }
+    if (parameters.max_completion_tokens !== undefined) {
+      params.maxOutputTokens = parameters.max_completion_tokens as number;
+    }
+    if (parameters.temperature !== undefined) {
+      params.temperature = parameters.temperature as number;
+    }
+    if (parameters.top_p !== undefined) {
+      params.topP = parameters.top_p as number;
+    }
+    if (parameters.top_k !== undefined) {
+      params.topK = parameters.top_k as number;
+    }
+    if (parameters.presence_penalty !== undefined) {
+      params.presencePenalty = parameters.presence_penalty as number;
+    }
+    if (parameters.frequency_penalty !== undefined) {
+      params.frequencyPenalty = parameters.frequency_penalty as number;
+    }
+    if (parameters.stop !== undefined) {
+      params.stopSequences = parameters.stop as string[];
+    }
+    if (parameters.seed !== undefined) {
+      params.seed = parameters.seed as number;
+    }
+
+    return params;
+  }
+}
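
End to end, the factory turns a LaunchDarkly completion config into a ready runner. A sketch, assuming the same config shape the tests above use and that the matching `@ai-sdk/*` package is installed; the import specifier is illustrative.

```ts
import { VercelRunnerFactory } from '@launchdarkly/server-sdk-ai-vercel'; // illustrative specifier

async function fromConfig() {
  const factory = await VercelRunnerFactory.create();

  // LaunchDarkly's snake_case parameters are mapped to the SDK's
  // camelCase names (max_tokens -> maxTokens, stop -> stopSequences, ...).
  const runner = await factory.createModel({
    key: 'completion',
    enabled: true,
    provider: { name: 'openai' },
    model: { name: 'gpt-4o', parameters: { max_tokens: 256, temperature: 0.7 } },
  });

  const result = await runner.run([{ role: 'user', content: 'hello' }]);
  return result.metrics.usage;
}
```
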
diff --git a/packages/ai-providers/server-ai-vercel/src/index.ts b/packages/ai-providers/server-ai-vercel/src/index.ts
index 6e7eb55023..92898caf0c 100644
--- a/packages/ai-providers/server-ai-vercel/src/index.ts
+++ b/packages/ai-providers/server-ai-vercel/src/index.ts
@@ -1,4 +1,13 @@
 export { VercelProvider } from './VercelProvider';
+export { VercelModelRunner } from './VercelModelRunner';
+export { VercelRunnerFactory } from './VercelRunnerFactory';
+export {
+  convertMessagesToVercel,
+  getAIMetricsFromResponse,
+  getAIMetricsFromStream,
+  mapProviderName,
+  mapUsageDataToLDTokenUsage,
+} from './vercelHelper';
 export type {
   VercelAIModelParameters,
   VercelAISDKConfig,
diff --git a/packages/ai-providers/server-ai-vercel/src/vercelHelper.ts b/packages/ai-providers/server-ai-vercel/src/vercelHelper.ts
new file mode 100644
index 0000000000..bc5b971782
--- /dev/null
+++ b/packages/ai-providers/server-ai-vercel/src/vercelHelper.ts
@@ -0,0 +1,91 @@
+import type { LDAIMetrics, LDMessage, LDTokenUsage } from '@launchdarkly/server-sdk-ai';
+
+import type { ModelUsageTokens, StreamResponse, TextResponse } from './types';
+
+/**
+ * Convert LaunchDarkly messages to the Vercel AI SDK message format.
+ *
+ * The Vercel AI SDK accepts the same `{ role, content }` shape that LDMessage
+ * uses, so this helper currently performs a structural pass-through. Having
+ * an explicit helper keeps the call sites consistent across providers and
+ * gives us a single place to adapt if Vercel's message shape diverges.
+ */
+export function convertMessagesToVercel(messages: LDMessage[]): LDMessage[] {
+  return messages.map((msg) => ({ role: msg.role, content: msg.content }));
+}
+
+/**
+ * Map LaunchDarkly provider names to Vercel AI SDK provider identifiers.
+ */
+export function mapProviderName(ldProviderName: string): string {
+  const lowercasedName = ldProviderName.toLowerCase();
+  const mapping: Record<string, string> = {
+    gemini: 'google',
+  };
+  return mapping[lowercasedName] || lowercasedName;
+}
+
+/**
+ * Map Vercel AI SDK usage data to LaunchDarkly token usage.
+ * Supports both v4 (promptTokens/completionTokens) and v5
+ * (inputTokens/outputTokens) field names.
+ */
+export function mapUsageDataToLDTokenUsage(usageData: ModelUsageTokens): LDTokenUsage {
+  const { totalTokens, inputTokens, outputTokens, promptTokens, completionTokens } = usageData;
+  return {
+    total: totalTokens ?? 0,
+    input: inputTokens ?? promptTokens ?? 0,
+    output: outputTokens ?? completionTokens ?? 0,
+  };
+}
+
+/**
+ * Get AI metrics from a Vercel AI SDK text response (e.g., generateText).
+ * Supports both v4 and v5 token field names.
+ */
+export function getAIMetricsFromResponse(response: TextResponse): LDAIMetrics {
+  const finishReason = response?.finishReason ?? 'unknown';
+
+  let usage: LDTokenUsage | undefined;
+  if (response?.totalUsage) {
+    usage = mapUsageDataToLDTokenUsage(response.totalUsage);
+  } else if (response?.usage) {
+    usage = mapUsageDataToLDTokenUsage(response.usage);
+  }
+
+  return {
+    success: finishReason !== 'error',
+    usage,
+  };
+}
+
+/**
+ * Get AI metrics from a Vercel AI SDK streaming result.
+ *
+ * Awaits the stream's terminal promises and prefers `totalUsage` over
+ * `usage` for cumulative usage across all steps.
+ */
+export async function getAIMetricsFromStream(stream: StreamResponse): Promise<LDAIMetrics> {
+  const finishReason = (await stream.finishReason?.catch(() => 'error')) ?? 'unknown';
+
+  let usage: LDTokenUsage | undefined;
+
+  if (stream.totalUsage) {
+    const usageData = await stream.totalUsage.catch(() => undefined);
+    if (usageData) {
+      usage = mapUsageDataToLDTokenUsage(usageData);
+    }
+  }
+
+  if (!usage && stream.usage) {
+    const usageData = await stream.usage.catch(() => undefined);
+    if (usageData) {
+      usage = mapUsageDataToLDTokenUsage(usageData);
+    }
+  }
+
+  return {
+    success: finishReason !== 'error',
+    usage,
+  };
+}
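
For streaming call sites, `getAIMetricsFromStream` is meant to be awaited after the stream has been consumed. A sketch using the AI SDK's `streamText`, assuming its result is structurally compatible with `StreamResponse` (it exposes `finishReason`, `usage`, and `totalUsage` as promises); the import specifier is illustrative.

```ts
import { openai } from '@ai-sdk/openai';
import { streamText } from 'ai';
import { getAIMetricsFromStream } from '@launchdarkly/server-sdk-ai-vercel'; // illustrative specifier

async function streamWithMetrics() {
  const stream = streamText({
    model: openai('gpt-4o'),
    messages: [{ role: 'user', content: 'hello' }],
  });

  // Drain the stream first; the terminal promises resolve once it finishes.
  for await (const chunk of stream.textStream) {
    process.stdout.write(chunk);
  }

  // success reflects finishReason; usage prefers totalUsage over usage.
  return getAIMetricsFromStream(stream);
}
```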