@@ -0,0 +1,117 @@
import { generateObject, generateText, jsonSchema } from 'ai';

import { VercelModelRunner } from '../src/VercelModelRunner';

jest.mock('ai', () => ({
  generateText: jest.fn(),
  generateObject: jest.fn(),
  jsonSchema: jest.fn((schema) => schema),
}));

const mockLogger = {
  warn: jest.fn(),
  info: jest.fn(),
  error: jest.fn(),
  debug: jest.fn(),
};

describe('VercelModelRunner', () => {
  const fakeModel = { name: 'mock' };
  let runner: VercelModelRunner;

  beforeEach(() => {
    runner = new VercelModelRunner(fakeModel as any, {}, mockLogger);
    jest.clearAllMocks();
  });

  describe('run (chat completion)', () => {
    it('returns a successful RunnerResult with content, metrics, and raw response', async () => {
      const result = {
        text: 'Hi!',
        usage: { totalTokens: 12, promptTokens: 7, completionTokens: 5 },
      };
      (generateText as jest.Mock).mockResolvedValue(result);

      const out = await runner.run([{ role: 'user', content: 'hello' }]);

      expect(generateText).toHaveBeenCalledWith({
        model: fakeModel,
        messages: [{ role: 'user', content: 'hello' }],
        experimental_telemetry: { isEnabled: true },
      });
      expect(out.content).toBe('Hi!');
      expect(out.metrics).toEqual({
        success: true,
        usage: { total: 12, input: 7, output: 5 },
      });
      expect(out.raw).toBe(result);
    });

    it('preserves v5 token field handling via getAIMetricsFromResponse', async () => {
      (generateText as jest.Mock).mockResolvedValue({
        text: 'ok',
        usage: { totalTokens: 100, inputTokens: 40, outputTokens: 60 },
      });

      const out = await runner.run([{ role: 'user', content: 'hello' }]);

      expect(out.metrics.usage).toEqual({ total: 100, input: 40, output: 60 });
    });

    it('returns success=false when generateText throws', async () => {
      const err = new Error('boom');
      (generateText as jest.Mock).mockRejectedValue(err);

      const out = await runner.run([{ role: 'user', content: 'hello' }]);

      expect(out.content).toBe('');
      expect(out.metrics.success).toBe(false);
      expect(mockLogger.warn).toHaveBeenCalledWith('Vercel AI model invocation failed:', err);
    });
  });

  describe('run (structured output)', () => {
    it('exposes parsed structured output via parsed', async () => {
      const obj = { name: 'Ada', age: 36 };
      (generateObject as jest.Mock).mockResolvedValue({
        object: obj,
        usage: { totalTokens: 30, promptTokens: 10, completionTokens: 20 },
      });

      const schema = { type: 'object' };
      const out = await runner.run([{ role: 'user', content: 'tell' }], schema);

      expect(jsonSchema).toHaveBeenCalledWith(schema);
      expect(generateObject).toHaveBeenCalledWith({
        model: fakeModel,
        messages: [{ role: 'user', content: 'tell' }],
        schema,
        experimental_telemetry: { isEnabled: true },
      });
      expect(out.parsed).toEqual(obj);
      expect(out.content).toBe(JSON.stringify(obj));
      expect(out.metrics.success).toBe(true);
    });

    it('returns success=false when generateObject throws', async () => {
      const err = new Error('struct boom');
      (generateObject as jest.Mock).mockRejectedValue(err);

      const out = await runner.run([{ role: 'user', content: 'tell' }], { type: 'object' });

      expect(out.content).toBe('');
      expect(out.parsed).toBeUndefined();
      expect(out.metrics.success).toBe(false);
      expect(mockLogger.warn).toHaveBeenCalledWith(
        'Vercel AI structured model invocation failed:',
        err,
      );
    });
  });

  describe('getModel', () => {
    it('returns the underlying Vercel AI model', () => {
      expect(runner.getModel()).toBe(fakeModel);
    });
  });
});
@@ -0,0 +1,76 @@
import { VercelModelRunner } from '../src/VercelModelRunner';
import { VercelRunnerFactory } from '../src/VercelRunnerFactory';

describe('VercelRunnerFactory', () => {
  describe('createModel', () => {
    it('builds a VercelModelRunner with mapped parameters', async () => {
      const fakeModel = { name: 'gpt-4o' };
      jest.doMock('@ai-sdk/openai', () => ({
        openai: jest.fn().mockReturnValue(fakeModel),
      }));

      const factory = new VercelRunnerFactory();
      const runner = await factory.createModel({
        key: 'completion',
        enabled: true,
        provider: { name: 'openai' },
        model: { name: 'gpt-4o', parameters: { max_tokens: 100, temperature: 0.7 } },
      });

      expect(runner).toBeInstanceOf(VercelModelRunner);
      expect(runner.getModel()).toBe(fakeModel);
    });
  });

  describe('mapParameters', () => {
    it('maps known LD parameters to Vercel AI SDK names', () => {
      const params = VercelRunnerFactory.mapParameters({
        max_tokens: 100,
        max_completion_tokens: 200,
        temperature: 0.7,
        top_p: 0.9,
        top_k: 50,
        presence_penalty: 0.1,
        frequency_penalty: 0.2,
        stop: ['x', 'y'],
        seed: 42,
      });

      expect(params).toEqual({
        maxTokens: 100,
        maxOutputTokens: 200,
        temperature: 0.7,
        topP: 0.9,
        topK: 50,
        presencePenalty: 0.1,
        frequencyPenalty: 0.2,
        stopSequences: ['x', 'y'],
        seed: 42,
      });
    });

    it('returns an empty object when parameters is undefined', () => {
      expect(VercelRunnerFactory.mapParameters(undefined)).toEqual({});
    });
  });

  describe('createVercelModel', () => {
    it('throws on an unsupported provider', async () => {
      await expect(
        VercelRunnerFactory.createVercelModel({
          key: 'k',
          enabled: true,
          provider: { name: 'unsupported' },
          model: { name: 'm' },
        }),
      ).rejects.toThrow('Unsupported Vercel AI provider: unsupported');
    });
  });

  describe('create', () => {
    it('creates a VercelRunnerFactory instance', async () => {
      const f = await VercelRunnerFactory.create();
      expect(f).toBeInstanceOf(VercelRunnerFactory);
    });
  });
});
@@ -0,0 +1,96 @@
import {
  convertMessagesToVercel,
  getAIMetricsFromResponse,
  getAIMetricsFromStream,
  mapProviderName,
  mapUsageDataToLDTokenUsage,
} from '../src/vercelHelper';

describe('convertMessagesToVercel', () => {
  it('passes role and content through unchanged', () => {
    expect(
      convertMessagesToVercel([
        { role: 'system', content: 'sys' },
        { role: 'user', content: 'u' },
        { role: 'assistant', content: 'a' },
      ]),
    ).toEqual([
      { role: 'system', content: 'sys' },
      { role: 'user', content: 'u' },
      { role: 'assistant', content: 'a' },
    ]);
  });
});

describe('mapProviderName', () => {
  it('maps gemini to google (case-insensitive)', () => {
    expect(mapProviderName('gemini')).toBe('google');
    expect(mapProviderName('Gemini')).toBe('google');
  });

  it('returns the provider unchanged when no mapping exists', () => {
    expect(mapProviderName('openai')).toBe('openai');
    expect(mapProviderName('anthropic')).toBe('anthropic');
  });
});

describe('mapUsageDataToLDTokenUsage', () => {
  it('prefers v5 field names (inputTokens / outputTokens) over v4', () => {
    const usage = mapUsageDataToLDTokenUsage({
      totalTokens: 100,
      inputTokens: 40,
      outputTokens: 60,
      promptTokens: 1,
      completionTokens: 2,
    });
    expect(usage).toEqual({ total: 100, input: 40, output: 60 });
  });

  it('falls back to v4 field names when v5 is absent', () => {
    const usage = mapUsageDataToLDTokenUsage({
      totalTokens: 50,
      promptTokens: 20,
      completionTokens: 30,
    });
    expect(usage).toEqual({ total: 50, input: 20, output: 30 });
  });
});

describe('getAIMetricsFromResponse', () => {
  it('treats missing finishReason as success', () => {
    expect(
      getAIMetricsFromResponse({
        usage: { totalTokens: 5, promptTokens: 2, completionTokens: 3 },
      }),
    ).toEqual({ success: true, usage: { total: 5, input: 2, output: 3 } });
  });

  it('marks success=false when finishReason is "error"', () => {
    expect(
      getAIMetricsFromResponse({
        finishReason: 'error',
        usage: { totalTokens: 10, promptTokens: 4, completionTokens: 6 },
      }).success,
    ).toBe(false);
  });
});

describe('getAIMetricsFromStream', () => {
  it('extracts usage from a successful stream', async () => {
    const result = await getAIMetricsFromStream({
      finishReason: Promise.resolve('stop'),
      usage: Promise.resolve({ totalTokens: 100, promptTokens: 49, completionTokens: 51 }),
    });
    expect(result).toEqual({
      success: true,
      usage: { total: 100, input: 49, output: 51 },
    });
  });

  it('marks success=false on error finishReason', async () => {
    const result = await getAIMetricsFromStream({
      finishReason: Promise.resolve('error'),
    });
    expect(result.success).toBe(false);
  });
});
packages/ai-providers/server-ai-vercel/src/VercelModelRunner.ts (96 additions, 0 deletions)
@@ -0,0 +1,96 @@
import { generateObject, generateText, jsonSchema, LanguageModel } from 'ai';

import type { LDLogger, LDMessage, Runner, RunnerResult } from '@launchdarkly/server-sdk-ai';

import type { VercelAIModelParameters } from './types';
import { convertMessagesToVercel, getAIMetricsFromResponse } from './vercelHelper';

/**
 * Runner implementation for Vercel AI SDK chat models.
 *
 * Implements the unified `Runner` protocol via {@link run}. Returned by
 * {@link VercelRunnerFactory.createModel}.
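 *
 * @example
 * // A minimal usage sketch; `factory` is assumed to come from
 * // `VercelRunnerFactory.create()` and `aiConfig` stands in for a LaunchDarkly
 * // AI Config with provider and model set — both names are illustrative.
 * const runner = await factory.createModel(aiConfig);
 * const result = await runner.run([{ role: 'user', content: 'Hello' }]);
 * console.log(result.content, result.metrics.success);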
 */
export class VercelModelRunner implements Runner {
  private _model: LanguageModel;
  private _parameters: VercelAIModelParameters;
  private _logger?: LDLogger;

  constructor(model: LanguageModel, parameters: VercelAIModelParameters, logger?: LDLogger) {
    this._model = model;
    this._parameters = parameters;
    this._logger = logger;
  }

  /**
   * Run the Vercel AI model with the given messages.
   *
   * @param input Array of LDMessage objects
   * @param outputType Optional JSON schema for structured output. When provided,
   *   the parsed object is exposed via {@link RunnerResult.parsed}.
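   *
   * @example
   * // Structured-output sketch; the schema shape here is illustrative:
   * const out = await runner.run(
   *   [{ role: 'user', content: 'Describe Ada' }],
   *   { type: 'object', properties: { name: { type: 'string' } } },
   * );
   * // out.parsed holds the parsed object; out.content is its JSON string.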
   */
  async run(input: LDMessage[], outputType?: Record<string, unknown>): Promise<RunnerResult> {
    if (outputType !== undefined) {
      return this._runStructured(input, outputType);
    }
    return this._runCompletion(input);
  }

  /**
   * Get the underlying Vercel AI model instance.
   */
  getModel(): LanguageModel {
    return this._model;
  }

  private async _runCompletion(messages: LDMessage[]): Promise<RunnerResult> {
    try {
      const result = await generateText({
        ...this._parameters,
        model: this._model,
        messages: convertMessagesToVercel(messages),
        experimental_telemetry: { isEnabled: true },
      });

      const metrics = getAIMetricsFromResponse(result);
      return { content: result.text, metrics, raw: result };
    } catch (error) {
      this._logger?.warn('Vercel AI model invocation failed:', error);
      return {
        content: '',
        metrics: { success: false },
      };
    }
  }

  private async _runStructured(
    messages: LDMessage[],
    outputType: Record<string, unknown>,
  ): Promise<RunnerResult> {
    try {
      const result = await generateObject({
        ...this._parameters,
        model: this._model,
        messages: convertMessagesToVercel(messages),
        schema: jsonSchema(outputType),
        experimental_telemetry: { isEnabled: true },
      });

      const metrics = getAIMetricsFromResponse(result);
      const parsed = result.object as Record<string, unknown>;

      return {
        content: JSON.stringify(parsed),
        metrics,
        raw: result,
        parsed,
      };
    } catch (error) {
      this._logger?.warn('Vercel AI structured model invocation failed:', error);
      return {
        content: '',
        metrics: { success: false },
      };
    }
  }
}