Create utils.py
utils.py
ADDED
from langchain.prompts import PromptTemplate
from transformers import pipeline
from langchain_core.documents import Document


def generate_summary(full_text):
    # Summarize only the first 4,000 characters so the input fits the model's context window.
    summarizer = pipeline("summarization")
    result = summarizer(full_text[:4000], max_length=150, min_length=30, do_sample=False)
    return result[0]['summary_text']


def generate_challenge_questions(qa_chain):
    # Ask the chain for questions, then keep the first three non-empty lines of its answer.
    prompt = "Generate 3 logic/comprehension-based questions from the uploaded document."
    result = qa_chain({"question": prompt, "chat_history": []})  # legacy dict-in/dict-out call style
    output = result["answer"]
    questions = output.split("\n")[:3]
    return [q.strip() for q in questions if q.strip()]


def evaluate_responses(qa_chain, questions, answers):
    # Pair each question with the user's answer and let the chain grade them in one pass.
    combined = "\n".join([f"Q: {q}\nA: {a}" for q, a in zip(questions, answers)])
    prompt = f"Evaluate the user's answers below. For each, say if it's correct or not and justify:\n\n{combined}"
    result = qa_chain({"question": prompt, "chat_history": []})
    return result["answer"]


def extract_highlight_snippets(source_docs: list[Document]):
    # Take up to three 300-character previews, padded with empty strings so callers always get three items.
    refs = [doc.page_content.strip()[:300] + "..." for doc in source_docs[:3]]
    return refs + [""] * (3 - len(refs))
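The commit itself carries no usage notes, so here is a minimal sketch of the calling contract these helpers assume. The fake_qa_chain stub below is purely illustrative and not part of this commit: it stands in for a real LangChain conversational retrieval chain, since the helpers only require a callable that accepts a dict with "question" and "chat_history" keys and returns a dict with an "answer" key.

from langchain_core.documents import Document

from utils import (
    evaluate_responses,
    extract_highlight_snippets,
    generate_challenge_questions,
)

def fake_qa_chain(inputs):
    # Stub honoring the contract utils.py relies on: dict in, dict with "answer" out.
    return {"answer": "Q1: ...?\nQ2: ...?\nQ3: ...?"}

questions = generate_challenge_questions(fake_qa_chain)   # -> three question strings
answers = ["answer one", "answer two", "answer three"]    # hypothetical user input
feedback = evaluate_responses(fake_qa_chain, questions, answers)

docs = [Document(page_content="Some passage retrieved from the uploaded file.")]
snippets = extract_highlight_snippets(docs)               # always a list of three strings

# generate_summary(full_text) is omitted from this sketch: pipeline("summarization")
# downloads a default Hugging Face model on first call.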