feat: add LLM question generation
Made-with: Cursor
@@ -1,4 +1,6 @@
import { z } from 'zod';
import { env } from '../../config/env.js';
import type { Stack, Level, QuestionType } from '../../db/schema/enums.js';

export interface LlmConfig {
  baseUrl: string;
@@ -21,6 +23,37 @@ export interface ChatCompletionResponse {
  }>;
}

const QUESTION_TYPES: QuestionType[] = ['single_choice', 'multiple_select', 'true_false', 'short_text'];

const optionSchema = z.object({
  key: z.string().min(1),
  text: z.string().min(1),
});

const generatedQuestionSchema = z.object({
  questionText: z.string().min(1),
  type: z.enum(QUESTION_TYPES as [string, ...string[]]),
  options: z.array(optionSchema).optional(),
  correctAnswer: z.union([z.string(), z.array(z.string())]),
  explanation: z.string().min(1),
});

const generateQuestionsResponseSchema = z.object({
  questions: z.array(generatedQuestionSchema),
});
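
For illustration (not part of the diff), a payload shaped like the following would pass the schema above; the question content is invented:

// Hypothetical sample, assuming the schema constants defined in this file are in scope.
const sample = {
  questions: [
    {
      questionText: 'Which HTTP status code means "Not Found"?',
      type: 'single_choice',
      options: [
        { key: 'a', text: '200' },
        { key: 'b', text: '404' },
        { key: 'c', text: '500' },
      ],
      correctAnswer: 'b',
      explanation: '404 is the standard "Not Found" status code.',
    },
  ],
};

// safeParse succeeds because every field satisfies generatedQuestionSchema.
console.log(generateQuestionsResponseSchema.safeParse(sample).success); // true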

export type GeneratedQuestion = z.infer<typeof generatedQuestionSchema> & {
  stack: Stack;
  level: Level;
};

export interface GenerateQuestionsInput {
  stack: Stack;
  level: Level;
  count: number;
  types?: QuestionType[];
}

export class LlmService {
  private readonly config: LlmConfig;

@@ -84,4 +117,52 @@ export class LlmService {
      throw new Error('LLM request failed');
    }
  }

  async generateQuestions(input: GenerateQuestionsInput): Promise<GeneratedQuestion[]> {
    const { stack, level, count, types = QUESTION_TYPES } = input;

    const typeList = types.join(', ');
    const systemPrompt = `You are a technical interview question generator. Generate exactly ${count} programming/tech questions.
Return ONLY valid JSON in this exact format (no markdown, no code blocks):
{"questions":[{"questionText":"...","type":"single_choice|multiple_select|true_false|short_text","options":[{"key":"a","text":"..."}],"correctAnswer":"a" or ["a","b"],"explanation":"..."}]}
Rules: type must be one of: ${typeList}. For single_choice/multiple_select: options array required with key (a,b,c,d). For true_false: options [{"key":"true","text":"True"},{"key":"false","text":"False"}]. For short_text: options omitted, correctAnswer is string.`;

    const userPrompt = `Generate ${count} questions for stack="${stack}", level="${level}". Use types: ${typeList}.`;

    const raw = await this.chat([
      { role: 'system', content: systemPrompt },
      { role: 'user', content: userPrompt },
    ]);

    // Models sometimes wrap the JSON in prose or code fences; extract the outermost object first.
    const jsonStr = extractJson(raw);
    const parsed = JSON.parse(jsonStr) as unknown;

    const result = generateQuestionsResponseSchema.safeParse(parsed);
    if (!result.success) {
      throw new Error(`LLM response validation failed: ${result.error.message}`);
    }

    const questions: GeneratedQuestion[] = result.data.questions.map((q) => ({
      ...q,
      stack,
      level,
    }));

    // Cross-field checks the zod schema alone cannot express: choice-style questions need options.
    for (const q of questions) {
      if ((q.type === 'single_choice' || q.type === 'multiple_select') && (!q.options || q.options.length === 0)) {
        throw new Error(`Question validation failed: ${q.type} requires options`);
      }
      if (q.type === 'true_false' && (!q.options || q.options.length < 2)) {
        throw new Error(`Question validation failed: true_false requires true/false options`);
      }
    }

    return questions;
  }
}

// Grabs everything from the first '{' to the last '}' so prose or code fences around the JSON are discarded.
function extractJson(text: string): string {
  const trimmed = text.trim();
  const match = trimmed.match(/\{[\s\S]*\}/);
  return match ? match[0]! : trimmed;
}
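
A hypothetical usage sketch of the new method (the import path, constructor shape, and stack/level values below are assumptions, not details confirmed by this commit):

// Hypothetical caller; construction of LlmService happens outside this hunk.
import { LlmService } from './llm.service.js';

const llm = new LlmService(/* LlmConfig values resolved from env elsewhere */);

const questions = await llm.generateQuestions({
  stack: 'node',    // assumed Stack value
  level: 'junior',  // assumed Level value
  count: 5,
  types: ['single_choice', 'true_false'],
});

// Every returned question carries the requested stack/level plus the validated LLM fields.
for (const q of questions) {
  console.log(`[${q.type}] ${q.questionText}`);
}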