feat: log LLM metadata to question_cache_meta

Made-with: Cursor
This commit is contained in:
Anton
2026-03-04 14:37:58 +03:00
parent 189e9c127f
commit 50d6b34f11

View File

@@ -1,4 +1,5 @@
import { z } from 'zod';
import { createHash } from 'node:crypto';
import { env } from '../../config/env.js';
import type { Stack, Level, QuestionType } from '../../db/schema/enums.js';
@@ -57,6 +58,19 @@ export interface GenerateQuestionsInput {
types?: QuestionType[];
}
/** Metadata for persisting to question_cache_meta (used by QuestionService) */
export interface LlmGenerationMeta {
/** Identifier of the model that actually produced the response (may differ from the configured model when fallbacks are tried). */
llmModel: string;
/** SHA-256 hex digest of the combined system + user prompt, for cache keying / dedup. */
promptHash: string;
/** Wall-clock duration of the LLM call in milliseconds. */
generationTimeMs: number;
/** Parsed raw LLM response, kept as `unknown` — validate before use. */
rawResponse: unknown;
}
/** Result of a question-generation run: the questions plus metadata for persistence. */
export interface GenerateQuestionsResult {
/** Questions parsed and validated from the LLM response. */
questions: GeneratedQuestion[];
/** Generation metadata to be logged to question_cache_meta. */
meta: LlmGenerationMeta;
}
export class LlmService {
private readonly config: LlmConfig;
@@ -75,6 +89,12 @@ export class LlmService {
}
/**
 * Convenience wrapper around {@link chatWithMeta} for callers that only
 * need the response text and not the model identifier.
 *
 * @param messages - Conversation messages to send to the LLM.
 * @returns The assistant's response content.
 */
async chat(messages: ChatMessage[]): Promise<string> {
  const result = await this.chatWithMeta(messages);
  return result.content;
}
/** Returns content and model used (for logging to question_cache_meta) */
async chatWithMeta(messages: ChatMessage[]): Promise<{ content: string; model: string }> {
let lastError: Error | null = null;
const modelsToTry = [this.config.model];
@@ -85,7 +105,8 @@ export class LlmService {
for (const model of modelsToTry) {
for (let attempt = 0; attempt <= this.config.maxRetries; attempt++) {
try {
return await this.executeChat(messages, model);
const content = await this.executeChat(messages, model);
return { content, model };
} catch (err) {
lastError = err instanceof Error ? err : new Error('LLM request failed');
if (attempt < this.config.maxRetries) {
@@ -149,7 +170,7 @@ export class LlmService {
}
}
async generateQuestions(input: GenerateQuestionsInput): Promise<GeneratedQuestion[]> {
async generateQuestions(input: GenerateQuestionsInput): Promise<GenerateQuestionsResult> {
const { stack, level, count, types = QUESTION_TYPES } = input;
const typeList = types.join(', ');
@@ -160,10 +181,15 @@ Rules: type must be one of: ${typeList}. For single_choice/multiple_select: opti
const userPrompt = `Generate ${count} questions for stack="${stack}", level="${level}". Use types: ${typeList}.`;
const raw = await this.chat([
const promptForHash = systemPrompt + '\n---\n' + userPrompt;
const promptHash = createHash('sha256').update(promptForHash).digest('hex');
const start = Date.now();
const { content: raw, model } = await this.chatWithMeta([
{ role: 'system', content: systemPrompt },
{ role: 'user', content: userPrompt },
]);
const generationTimeMs = Date.now() - start;
const jsonStr = extractJson(raw);
const parsed = JSON.parse(jsonStr) as unknown;
@@ -188,7 +214,15 @@ Rules: type must be one of: ${typeList}. For single_choice/multiple_select: opti
}
}
return questions;
return {
questions,
meta: {
llmModel: model,
promptHash,
generationTimeMs,
rawResponse: parsed,
},
};
}
}