"""FastAPI service that generates quizzes from Wikipedia content and grades answers.

Flow: POST /generate_quiz/ fetches Wikipedia context for a search query and asks
a Groq-hosted LLM to produce a quiz; POST /grade_quiz/ grades free-text answers
against the most recently generated quiz using the same context.
"""

import os

from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from langchain.document_loaders import WikipediaLoader
from langchain_groq import ChatGroq

app = FastAPI(title="Quiz Generator API")

# In-memory store for the last quiz + context.
# NOTE(review): module-level state is per-process only — it is lost on restart
# and not shared across multiple workers; fine for a demo, not for production.
STORE = {
    "quiz": None,     # str | None — text of the most recently generated quiz
    "context": None,  # str | None — Wikipedia context the quiz was built from
}

# Groq API key is read from the environment variable `api_key`; it is not
# hard-coded here. Export it before starting the server.
GROQ_API_KEY = os.getenv('api_key')


class QuizRequest(BaseModel):
    """Request body for /generate_quiz/: the topic to build a quiz about."""
    search_query: str


class GradeRequest(BaseModel):
    """Request body for /grade_quiz/: the user's free-text answers."""
    answers: str


def get_llm() -> ChatGroq:
    """Build a ChatGroq client; temperature 0 keeps output deterministic."""
    return ChatGroq(
        model="meta-llama/llama-4-scout-17b-16e-instruct",
        temperature=0,
        max_tokens=1024,
        api_key=GROQ_API_KEY,
    )


def wikipedia_query(search_query: str):
    """Load up to two Wikipedia documents matching *search_query*.

    Raises:
        HTTPException(500): if the Wikipedia lookup fails for any reason.
    """
    try:
        docs = WikipediaLoader(query=search_query, load_max_docs=2).load()
        return docs
    except Exception as e:
        # Chain the original error so tracebacks show the real cause.
        raise HTTPException(status_code=500, detail=f"Wikipedia query failed: {e}") from e


@app.get("/")
async def root():
    """Landing endpoint describing how to use the API."""
    return {
        "message": "Welcome to the Quiz Generator API! \n"
                   "POST /generate_quiz/ to create a new quiz. \n"
                   "POST /grade_quiz/ to grade your answers against the last quiz."
    }


@app.post("/generate_quiz/")
async def generate_quiz(request: QuizRequest):
    """Fetch Wikipedia context for the query, store it, and generate a quiz."""
    # Fetch & store context so /grade_quiz/ can reuse it later.
    context_docs = wikipedia_query(request.search_query)
    context_text = str(context_docs)
    STORE["context"] = context_text

    # Generate quiz.
    llm = get_llm()
    prompt = f"""
You are a quiz generator assistant.
Create a quiz for the given context.
Instructions:
- Do not write answers in the quiz.
- Quiz should be based on the following context:
context: {context_text}
question: generate quiz on {request.search_query}
Your response:
"""
    result = llm.invoke(prompt)
    STORE["quiz"] = result.content
    return {"quiz": result.content}


@app.post("/grade_quiz/")
async def grade_quiz(request: GradeRequest):
    """Grade the submitted answers against the most recently generated quiz."""
    # Ensure we have a quiz to grade before calling the LLM.
    if STORE["quiz"] is None or STORE["context"] is None:
        raise HTTPException(
            status_code=400,
            detail="No quiz available. Call /generate_quiz/ first.",
        )

    llm = get_llm()
    prompt = f"""
Check the quiz answers and give marks and also provide the correct answers.
Use the following context to check the quiz.
Return only the total mark and the correct answers.
quiz: {STORE['quiz']}
answers: {request.answers}
context: {STORE['context']}
"""
    result = llm.invoke(prompt)
    return {"grade": result.content}