test: add LLM service tests
Made-with: Cursor
tests/services/llm.service.test.ts (new file, 224 lines)
@@ -0,0 +1,224 @@
import { describe, it, expect, vi, beforeEach } from 'vitest';

vi.mock('../../src/config/env.js', () => ({
  env: {
    LLM_BASE_URL: 'http://test',
    LLM_MODEL: 'test-model',
    LLM_FALLBACK_MODEL: 'fallback-model',
    LLM_API_KEY: 'key',
    LLM_TIMEOUT_MS: 5000,
    LLM_TEMPERATURE: 0.7,
    LLM_MAX_TOKENS: 2048,
    LLM_MAX_RETRIES: 1,
    LLM_RETRY_DELAY_MS: 10,
  },
}));
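
// Vitest hoists vi.mock calls to the top of the module, so the env stub above
// is already in place when the service module below is imported.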

import { LlmService } from '../../src/services/llm/llm.service.js';

const mockConfig = {
  baseUrl: 'http://llm.test/v1',
  model: 'test-model',
  fallbackModel: 'fallback-model',
  apiKey: 'test-key',
  timeoutMs: 5000,
  temperature: 0.7,
  maxTokens: 2048,
  maxRetries: 1,
  retryDelayMs: 10,
};
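
// Minimal fixture that satisfies the expected question shape; reused by the
// generateQuestions tests below.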

const validQuestionsJson = JSON.stringify({
  questions: [
    {
      questionText: 'What is 2+2?',
      type: 'single_choice',
      options: [{ key: 'a', text: '4' }, { key: 'b', text: '3' }],
      correctAnswer: 'a',
      explanation: 'Basic math',
    },
  ],
});

describe('LlmService', () => {
  let mockFetch: ReturnType<typeof vi.fn>;

  beforeEach(() => {
    mockFetch = vi.fn();
    vi.stubGlobal('fetch', mockFetch);
  });
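
  // Each test scripts the stubbed global fetch directly, so no network or
  // local test server is involved.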

  describe('chat', () => {
    it('returns content from LLM response', async () => {
      mockFetch.mockResolvedValueOnce({
        ok: true,
        json: () => Promise.resolve({
          choices: [{ message: { content: 'Hello!' } }],
        }),
      });

      const service = new LlmService(mockConfig);
      const result = await service.chat([
        { role: 'user', content: 'Hi' },
      ]);

      expect(result).toBe('Hello!');
      expect(mockFetch).toHaveBeenCalledWith(
        'http://llm.test/v1/chat/completions',
        expect.objectContaining({
          method: 'POST',
          headers: expect.objectContaining({
            'Content-Type': 'application/json',
            'Authorization': 'Bearer test-key',
          }),
        })
      );
    });
  });

  describe('chatWithMeta', () => {
    it('returns content and model name', async () => {
      mockFetch.mockResolvedValueOnce({
        ok: true,
        json: () => Promise.resolve({
          choices: [{ message: { content: 'Response' } }],
        }),
      });

      const service = new LlmService(mockConfig);
      const result = await service.chatWithMeta([{ role: 'user', content: 'Q' }]);

      expect(result.content).toBe('Response');
      expect(result.model).toBe('test-model');
    });

    it('retries on failure then succeeds', async () => {
      mockFetch
        .mockRejectedValueOnce(new Error('Network error'))
        .mockResolvedValueOnce({
          ok: true,
          json: () => Promise.resolve({
            choices: [{ message: { content: 'Retry OK' } }],
          }),
        });

      const service = new LlmService({ ...mockConfig, maxRetries: 1 });
      const result = await service.chatWithMeta([{ role: 'user', content: 'Q' }]);

      expect(result.content).toBe('Retry OK');
      expect(mockFetch).toHaveBeenCalledTimes(2);
    });

    it('falls back to fallbackModel when primary fails', async () => {
      // Primary model: 2 attempts (initial + 1 retry), both fail
      mockFetch
        .mockResolvedValueOnce({ ok: false, status: 500, statusText: 'Error', text: () => Promise.resolve('') })
        .mockResolvedValueOnce({ ok: false, status: 500, statusText: 'Error', text: () => Promise.resolve('') })
        .mockResolvedValueOnce({
          ok: true,
          json: () => Promise.resolve({
            choices: [{ message: { content: 'Fallback OK' } }],
          }),
        });

      const service = new LlmService(mockConfig);
      const result = await service.chatWithMeta([{ role: 'user', content: 'Q' }]);

      expect(result.content).toBe('Fallback OK');
      expect(result.model).toBe('fallback-model');
    });

    it('throws when all attempts fail', async () => {
      mockFetch.mockRejectedValue(new Error('Network error'));

      const service = new LlmService({ ...mockConfig, maxRetries: 0 });
      await expect(
        service.chatWithMeta([{ role: 'user', content: 'Q' }])
      ).rejects.toThrow();
    });
  });
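
  // generateQuestions is expected to parse the model output, validate it, and
  // stamp each question with the requested stack/level plus generation metadata.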

  describe('generateQuestions', () => {
    it('returns validated questions with meta', async () => {
      mockFetch.mockResolvedValueOnce({
        ok: true,
        json: () => Promise.resolve({
          choices: [{ message: { content: validQuestionsJson } }],
        }),
      });

      const service = new LlmService(mockConfig);
      const result = await service.generateQuestions({
        stack: 'js',
        level: 'beginner',
        count: 1,
      });

      expect(result.questions).toHaveLength(1);
      expect(result.questions[0].questionText).toBe('What is 2+2?');
      expect(result.questions[0].stack).toBe('js');
      expect(result.questions[0].level).toBe('beginner');
      expect(result.meta.llmModel).toBe('test-model');
      expect(result.meta.promptHash).toBeDefined();
      expect(result.meta.generationTimeMs).toBeGreaterThanOrEqual(0);
    });

    it('extracts JSON from markdown code block', async () => {
      mockFetch.mockResolvedValueOnce({
        ok: true,
        json: () => Promise.resolve({
          choices: [{ message: { content: '```json\n' + validQuestionsJson + '\n```' } }],
        }),
      });

      const service = new LlmService(mockConfig);
      const result = await service.generateQuestions({
        stack: 'ts',
        level: 'intermediate',
        count: 1,
      });

      expect(result.questions).toHaveLength(1);
      expect(result.questions[0].type).toBe('single_choice');
    });

    it('throws when response validation fails', async () => {
      mockFetch.mockResolvedValueOnce({
        ok: true,
        json: () => Promise.resolve({
          choices: [{ message: { content: '{"invalid": "response"}' } }],
        }),
      });

      const service = new LlmService(mockConfig);
      await expect(
        service.generateQuestions({ stack: 'js', level: 'beginner', count: 1 })
      ).rejects.toThrow('LLM response validation failed');
    });

    it('throws when single_choice has no options', async () => {
      const invalidJson = JSON.stringify({
        questions: [
          {
            questionText: 'Q?',
            type: 'single_choice',
            options: [],
            correctAnswer: 'a',
            explanation: 'e',
          },
        ],
      });
      mockFetch.mockResolvedValueOnce({
        ok: true,
        json: () => Promise.resolve({
          choices: [{ message: { content: invalidJson } }],
        }),
      });

      const service = new LlmService(mockConfig);
      await expect(
        service.generateQuestions({ stack: 'js', level: 'beginner', count: 1 })
      ).rejects.toThrow('Question validation failed');
    });
  });
});
@@ -2,4 +2,10 @@ import { beforeAll, vi } from 'vitest';
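// The fallbacks below apply only when the variables are not already set, so
// values provided by the real environment or CI take precedence.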

beforeAll(() => {
  vi.stubEnv('NODE_ENV', 'test');
  if (!process.env.DATABASE_URL) {
    process.env.DATABASE_URL = 'postgresql://test:test@localhost:5432/test';
  }
  if (!process.env.JWT_SECRET) {
    process.env.JWT_SECRET = 'test-secret-min-32-chars-long-for-validation';
  }
});