5 Commits

Author | SHA1 | Message | Date
Anton | 50d6b34f11 | feat: log LLM metadata to question_cache_meta (Made-with: Cursor) | 2026-03-04 14:37:58 +03:00
Anton | 189e9c127f | feat: add LLM retry and fallback (Made-with: Cursor) | 2026-03-04 14:19:04 +03:00
Anton | e7c7bf363e | feat: register profile routes (Made-with: Cursor) | 2026-03-04 14:18:23 +03:00
Anton | 0564dc4b91 | feat: add LLM question generation (Made-with: Cursor) | 2026-03-04 14:18:22 +03:00
Anton | 9da82c839f | feat: add stats to profile response (Made-with: Cursor) | 2026-03-04 14:17:54 +03:00
4 changed files with 293 additions and 8 deletions

View File

@@ -7,6 +7,7 @@ import rateLimitPlugin from './plugins/rateLimit.js';
 import authPlugin from './plugins/auth.js';
 import subscriptionPlugin from './plugins/subscription.js';
 import { authRoutes } from './routes/auth.js';
+import { profileRoutes } from './routes/profile.js';
 import { env } from './config/env.js';
 import { randomUUID } from 'node:crypto';
@@ -75,6 +76,7 @@ export async function buildApp(): Promise<FastifyInstance> {
   await app.register(authPlugin);
   await app.register(subscriptionPlugin);
   await app.register(authRoutes, { prefix: '/auth' });
+  await app.register(profileRoutes, { prefix: '/profile' });
   app.get('/health', async () => ({ status: 'ok', timestamp: new Date().toISOString() }));
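
The { prefix: '/profile' } option mounts every route declared inside profileRoutes under /profile. The route module itself is not part of this diff; a minimal plugin compatible with the registration might look like this (hypothetical sketch; the handler body and route shape are assumptions):

import type { FastifyInstance } from 'fastify';

// Hypothetical sketch; the real routes/profile.ts is not shown in this diff.
export async function profileRoutes(app: FastifyInstance): Promise<void> {
  // Served as GET /profile/:username because of the prefix used at registration.
  app.get('/:username', async (req) => {
    const { username } = req.params as { username: string };
    // The real handler presumably delegates to UserService.getPublicProfile (see below).
    return { username };
  });
}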

View File

@@ -14,9 +14,11 @@ const envSchema = z.object({
   LLM_BASE_URL: z.string().url().default('http://localhost:11434/v1'),
   LLM_MODEL: z.string().default('qwen2.5:14b'),
+  LLM_FALLBACK_MODEL: z.string().optional(),
   LLM_API_KEY: z.string().optional(),
   LLM_TIMEOUT_MS: z.coerce.number().default(15000),
+  LLM_MAX_RETRIES: z.coerce.number().min(0).default(1),
+  LLM_RETRY_DELAY_MS: z.coerce.number().min(0).default(1000),
   LLM_TEMPERATURE: z.coerce.number().min(0).max(2).default(0.7),
   LLM_MAX_TOKENS: z.coerce.number().default(2048),
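
The retry knobs above feed LlmService below. As a reminder of how these zod fields behave, a reduced, standalone sketch (only the retry-related fields are copied):

import { z } from 'zod';

// Reduced copy of the retry-related fields above, for illustration only.
const retryEnv = z.object({
  LLM_MAX_RETRIES: z.coerce.number().min(0).default(1),
  LLM_RETRY_DELAY_MS: z.coerce.number().min(0).default(1000),
});

// process.env values arrive as strings; z.coerce.number() converts them,
// and unset variables fall back to their defaults.
const parsed = retryEnv.parse({ LLM_MAX_RETRIES: '2' });
// parsed.LLM_MAX_RETRIES === 2   -> up to 3 attempts per model in LlmService
// parsed.LLM_RETRY_DELAY_MS === 1000 (default) -> backoff of 1s, then 2s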

View File

@@ -0,0 +1,237 @@
import { z } from 'zod';
import { createHash } from 'node:crypto';
import { env } from '../../config/env.js';
import type { Stack, Level, QuestionType } from '../../db/schema/enums.js';
export interface LlmConfig {
baseUrl: string;
model: string;
fallbackModel?: string;
apiKey?: string;
timeoutMs: number;
temperature: number;
maxTokens: number;
maxRetries: number;
retryDelayMs: number;
}
export interface ChatMessage {
role: 'system' | 'user' | 'assistant';
content: string;
}
export interface ChatCompletionResponse {
choices: Array<{
message?: { content: string };
text?: string;
}>;
}
const QUESTION_TYPES: QuestionType[] = ['single_choice', 'multiple_select', 'true_false', 'short_text'];
const optionSchema = z.object({
key: z.string().min(1),
text: z.string().min(1),
});
const generatedQuestionSchema = z.object({
questionText: z.string().min(1),
type: z.enum(QUESTION_TYPES as [string, ...string[]]),
options: z.array(optionSchema).optional(),
correctAnswer: z.union([z.string(), z.array(z.string())]),
explanation: z.string().min(1),
});
const generateQuestionsResponseSchema = z.object({
questions: z.array(generatedQuestionSchema),
});
export type GeneratedQuestion = z.infer<typeof generatedQuestionSchema> & {
stack: Stack;
level: Level;
};
export interface GenerateQuestionsInput {
stack: Stack;
level: Level;
count: number;
types?: QuestionType[];
}
/** Metadata for persisting to question_cache_meta (used by QuestionService) */
export interface LlmGenerationMeta {
llmModel: string;
promptHash: string;
generationTimeMs: number;
rawResponse: unknown;
}
export interface GenerateQuestionsResult {
questions: GeneratedQuestion[];
meta: LlmGenerationMeta;
}
export class LlmService {
private readonly config: LlmConfig;
constructor(config?: Partial<LlmConfig>) {
this.config = {
baseUrl: config?.baseUrl ?? env.LLM_BASE_URL,
model: config?.model ?? env.LLM_MODEL,
fallbackModel: config?.fallbackModel ?? env.LLM_FALLBACK_MODEL,
apiKey: config?.apiKey ?? env.LLM_API_KEY,
timeoutMs: config?.timeoutMs ?? env.LLM_TIMEOUT_MS,
temperature: config?.temperature ?? env.LLM_TEMPERATURE,
maxTokens: config?.maxTokens ?? env.LLM_MAX_TOKENS,
maxRetries: config?.maxRetries ?? env.LLM_MAX_RETRIES,
retryDelayMs: config?.retryDelayMs ?? env.LLM_RETRY_DELAY_MS,
};
}
async chat(messages: ChatMessage[]): Promise<string> {
const { content } = await this.chatWithMeta(messages);
return content;
}
/** Returns content and model used (for logging to question_cache_meta) */
async chatWithMeta(messages: ChatMessage[]): Promise<{ content: string; model: string }> {
let lastError: Error | null = null;
const modelsToTry = [this.config.model];
if (this.config.fallbackModel) {
modelsToTry.push(this.config.fallbackModel);
}
for (const model of modelsToTry) {
for (let attempt = 0; attempt <= this.config.maxRetries; attempt++) {
try {
const content = await this.executeChat(messages, model);
return { content, model };
} catch (err) {
lastError = err instanceof Error ? err : new Error('LLM request failed');
if (attempt < this.config.maxRetries) {
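          // Exponential backoff: the base delay doubles after each failed attempt
          // (1s, then 2s, ... with the default retryDelayMs of 1000).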
const delayMs = this.config.retryDelayMs * Math.pow(2, attempt);
await sleep(delayMs);
}
}
}
}
throw lastError ?? new Error('LLM request failed');
}
private async executeChat(messages: ChatMessage[], model: string): Promise<string> {
const url = `${this.config.baseUrl.replace(/\/$/, '')}/chat/completions`;
const headers: Record<string, string> = {
'Content-Type': 'application/json',
};
if (this.config.apiKey) {
headers['Authorization'] = `Bearer ${this.config.apiKey}`;
}
const body = {
model,
messages: messages.map((m) => ({ role: m.role, content: m.content })),
temperature: this.config.temperature,
max_tokens: this.config.maxTokens,
};
const controller = new AbortController();
const timeoutId = setTimeout(() => controller.abort(), this.config.timeoutMs);
try {
const res = await fetch(url, {
method: 'POST',
headers,
body: JSON.stringify(body),
signal: controller.signal,
});
clearTimeout(timeoutId);
if (!res.ok) {
const text = await res.text();
throw new Error(`LLM request failed: ${res.status} ${res.statusText} - ${text}`);
}
const data = (await res.json()) as ChatCompletionResponse;
const choice = data.choices?.[0];
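      // Accept both chat-style responses (message.content) and
      // legacy completion-style responses (text).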
const content = choice?.message?.content ?? choice?.text ?? '';
return content.trim();
} catch (err) {
clearTimeout(timeoutId);
if (err instanceof Error) {
throw err;
}
throw new Error('LLM request failed');
}
}
async generateQuestions(input: GenerateQuestionsInput): Promise<GenerateQuestionsResult> {
const { stack, level, count, types = QUESTION_TYPES } = input;
const typeList = types.join(', ');
const systemPrompt = `You are a technical interview question generator. Generate exactly ${count} programming/tech questions.
Return ONLY valid JSON in this exact format (no markdown, no code blocks):
{"questions":[{"questionText":"...","type":"single_choice|multiple_select|true_false|short_text","options":[{"key":"a","text":"..."}],"correctAnswer":"a" or ["a","b"],"explanation":"..."}]}
Rules: type must be one of: ${typeList}. For single_choice/multiple_select: options array required with key (a,b,c,d). For true_false: options [{"key":"true","text":"True"},{"key":"false","text":"False"}]. For short_text: options omitted, correctAnswer is string.`;
const userPrompt = `Generate ${count} questions for stack="${stack}", level="${level}". Use types: ${typeList}.`;
const promptForHash = systemPrompt + '\n---\n' + userPrompt;
const promptHash = createHash('sha256').update(promptForHash).digest('hex');
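    // The prompt hash uniquely identifies this generation request; QuestionService
    // persists it to question_cache_meta along with model and timing (LlmGenerationMeta).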
const start = Date.now();
const { content: raw, model } = await this.chatWithMeta([
{ role: 'system', content: systemPrompt },
{ role: 'user', content: userPrompt },
]);
const generationTimeMs = Date.now() - start;
const jsonStr = extractJson(raw);
const parsed = JSON.parse(jsonStr) as unknown;
const result = generateQuestionsResponseSchema.safeParse(parsed);
if (!result.success) {
throw new Error(`LLM response validation failed: ${result.error.message}`);
}
const questions: GeneratedQuestion[] = result.data.questions.map((q) => ({
...q,
stack,
level,
}));
for (const q of questions) {
if ((q.type === 'single_choice' || q.type === 'multiple_select') && (!q.options || q.options.length === 0)) {
throw new Error(`Question validation failed: ${q.type} requires options`);
}
if (q.type === 'true_false' && (!q.options || q.options.length < 2)) {
throw new Error(`Question validation failed: true_false requires true/false options`);
}
}
return {
questions,
meta: {
llmModel: model,
promptHash,
generationTimeMs,
rawResponse: parsed,
},
};
}
}
function sleep(ms: number): Promise<void> {
return new Promise((resolve) => setTimeout(resolve, ms));
}
function extractJson(text: string): string {
const trimmed = text.trim();
const match = trimmed.match(/\{[\s\S]*\}/);
return match ? match[0]! : trimmed;
}
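
Taken together, a caller such as QuestionService might drive the service like this (a hedged sketch; the persistence helper and the 'node'/'middle' enum values are assumptions, not part of this diff):

// Hypothetical usage; saveQuestionCacheMeta and the stack/level values
// are assumptions for illustration.
async function generateAndLog(): Promise<void> {
  const llm = new LlmService();
  const { questions, meta } = await llm.generateQuestions({
    stack: 'node' as Stack,
    level: 'middle' as Level,
    count: 5,
    types: ['single_choice', 'true_false'],
  });
  console.log(`got ${questions.length} questions from ${meta.llmModel}`);
  // meta.promptHash and meta.generationTimeMs are what question_cache_meta records:
  // await saveQuestionCacheMeta(meta);
}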

View File

@@ -1,13 +1,30 @@
 import { eq } from 'drizzle-orm';
 import type { NodePgDatabase } from 'drizzle-orm/node-postgres';
 import type * as schema from '../../db/schema/index.js';
-import { users } from '../../db/schema/index.js';
+import { users, userStats } from '../../db/schema/index.js';
 import { notFound, conflict, ERROR_CODES } from '../../utils/errors.js';
 import type { User } from '../../db/schema/users.js';
 import type { SelfLevel } from '../../db/schema/index.js';
 type Db = NodePgDatabase<typeof schema>;
+export type UserStatItem = {
+  stack: string;
+  level: string;
+  totalQuestions: number;
+  correctAnswers: number;
+  testsTaken: number;
+  lastTestAt: string | null;
+};
+export type ProfileStats = {
+  byStack: UserStatItem[];
+  totalTestsTaken: number;
+  totalQuestions: number;
+  correctAnswers: number;
+  accuracy: number | null;
+};
 export type ProfileUpdateInput = {
   nickname?: string;
   avatarUrl?: string | null;
@@ -25,6 +42,7 @@ export type PublicProfile = {
   city: string | null;
   selfLevel: string | null;
   isPublic: boolean;
+  stats: ProfileStats;
 };
 export type PrivateProfile = PublicProfile & {
@@ -34,7 +52,30 @@ export type PrivateProfile = PublicProfile & {
   updatedAt: string;
 };
-function toPublicProfile(user: User): PublicProfile {
+async function getStatsForUser(db: Db, userId: string): Promise<ProfileStats> {
+  const rows = await db
+    .select()
+    .from(userStats)
+    .where(eq(userStats.userId, userId));
+  const byStack: UserStatItem[] = rows.map((r) => ({
+    stack: r.stack,
+    level: r.level,
+    totalQuestions: r.totalQuestions,
+    correctAnswers: r.correctAnswers,
+    testsTaken: r.testsTaken,
+    lastTestAt: r.lastTestAt?.toISOString() ?? null,
+  }));
+  const totalTestsTaken = rows.reduce((sum, r) => sum + r.testsTaken, 0);
+  const totalQuestions = rows.reduce((sum, r) => sum + r.totalQuestions, 0);
+  const correctAnswers = rows.reduce((sum, r) => sum + r.correctAnswers, 0);
+  const accuracy = totalQuestions > 0 ? correctAnswers / totalQuestions : null;
+  return { byStack, totalTestsTaken, totalQuestions, correctAnswers, accuracy };
+}
+function toPublicProfile(user: User, stats: ProfileStats): PublicProfile {
   return {
     id: user.id,
     nickname: user.nickname,
@@ -43,12 +84,13 @@ function toPublicProfile(user: User): PublicProfile {
     city: user.city,
     selfLevel: user.selfLevel,
     isPublic: user.isPublic,
+    stats,
   };
 }
-function toPrivateProfile(user: User): PrivateProfile {
+function toPrivateProfile(user: User, stats: ProfileStats): PrivateProfile {
   return {
-    ...toPublicProfile(user),
+    ...toPublicProfile(user, stats),
     email: user.email,
     emailVerifiedAt: user.emailVerifiedAt?.toISOString() ?? null,
     createdAt: user.createdAt.toISOString(),
@@ -74,11 +116,11 @@ export class UserService {
   }
   async getPrivateProfile(userId: string): Promise<PrivateProfile> {
-    const user = await this.getById(userId);
+    const [user, stats] = await Promise.all([this.getById(userId), getStatsForUser(this.db, userId)]);
     if (!user) {
       throw notFound('User not found');
     }
-    return toPrivateProfile(user);
+    return toPrivateProfile(user, stats);
   }
   async getPublicProfile(username: string): Promise<PublicProfile> {
@@ -89,7 +131,8 @@ export class UserService {
     if (!user.isPublic) {
       throw notFound('User not found');
     }
-    return toPublicProfile(user);
+    const stats = await getStatsForUser(this.db, user.id);
+    return toPublicProfile(user, stats);
   }
   async updateProfile(userId: string, input: ProfileUpdateInput): Promise<PrivateProfile> {
@@ -126,6 +169,7 @@ export class UserService {
       throw notFound('User not found');
     }
-    return toPrivateProfile(updated);
+    const stats = await getStatsForUser(this.db, userId);
+    return toPrivateProfile(updated, stats);
   }
 }
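
For reference, the stats object that now rides along on both profile shapes (values illustrative; one user_stats row per stack/level pair is assumed):

// Illustrative instance of the ProfileStats type above; all values are made up.
const exampleStats: ProfileStats = {
  byStack: [
    {
      stack: 'node',
      level: 'middle',
      totalQuestions: 40,
      correctAnswers: 31,
      testsTaken: 4,
      lastTestAt: '2026-03-01T10:00:00.000Z',
    },
  ],
  totalTestsTaken: 4,
  totalQuestions: 40,
  correctAnswers: 31,
  accuracy: 0.775, // correctAnswers / totalQuestions; null when nothing answered yet
};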