Talha1786 committed
Commit 500d177 · verified · 1 Parent(s): 41d0564

Create app.py

Files changed (1): app.py (+200, -0)
app.py ADDED
@@ -0,0 +1,200 @@
import os
import numpy as np  # needed for the float32 cast in store_in_faiss below
import torch
import faiss
import wikipediaapi
from fpdf import FPDF
from transformers import AutoTokenizer, AutoModelForCausalLM
from sentence_transformers import SentenceTransformer
import gradio as gr
from googletrans import Translator

# Check if CUDA (GPU) is available and set the device
device = "cuda" if torch.cuda.is_available() else "cpu"

# Initialize Wikipedia API with a User-Agent
wiki_wiki = wikipediaapi.Wikipedia(
    language="en",
    user_agent="AdaptiveLearningApp/1.0 (Contact: [email protected])"
)

# PDF Generation from Wikipedia Content
def generate_pdf_from_wikipedia(subject, topic):
    page = wiki_wiki.page(topic)
    if not page.exists():
        return None, f"Topic '{topic}' not found on Wikipedia."

    pdf = FPDF()
    pdf.add_page()
    pdf.set_font("Arial", size=12)
    pdf.cell(200, 10, txt=f"{subject.upper()} - {topic.upper()}", ln=True, align="C")
    pdf.ln(10)

    # Add Wikipedia content with basic formatting. The built-in Arial font
    # only covers Latin-1, so replace unsupported characters rather than
    # letting FPDF raise on them.
    for line in page.text.split("\n"):
        pdf.multi_cell(0, 10, line.encode("latin-1", "replace").decode("latin-1"))
        pdf.ln(5)

    pdf_path = f"{topic.replace(' ', '_')}.pdf"
    pdf.output(pdf_path)
    return pdf_path, f"PDF for topic '{topic}' has been generated successfully."

# Chunking Text
def chunk_text(text, chunk_size=300):
    sentences = text.split(". ")
    chunks, current_chunk = [], ""
    for sentence in sentences:
        if len(current_chunk) + len(sentence) < chunk_size:
            current_chunk += sentence + ". "
        else:
            if current_chunk:  # avoid emitting an empty first chunk
                chunks.append(current_chunk.strip())
            current_chunk = sentence + ". "
    if current_chunk:
        chunks.append(current_chunk.strip())
    return chunks

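# Note: splitting on ". " is a rough sentence heuristic; abbreviations such as
# "e.g." or "U.S." will be split mid-sentence. A proper sentence tokenizer
# (for instance nltk.sent_tokenize) would be more robust, but is not used here.
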
# Creating Embeddings
def create_embeddings(chunks):
    embeddings = sentence_model.encode(chunks, convert_to_tensor=False)
    return embeddings

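# all-MiniLM-L6-v2 produces 384-dimensional float32 vectors, so the FAISS
# index built below is an exact (flat) L2 index over 384 dimensions.
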
# Storing Embeddings in FAISS
def store_in_faiss(chunks, embeddings):
    # FAISS expects a contiguous float32 matrix.
    embeddings = np.asarray(embeddings, dtype="float32")
    dimension = embeddings.shape[1]
    index = faiss.IndexFlatL2(dimension)
    # Use the GPU only when a GPU build of FAISS is installed; faiss-cpu has
    # no StandardGpuResources, so the original unconditional call would raise
    # AttributeError on CPU-only machines.
    if device == "cuda" and hasattr(faiss, "StandardGpuResources"):
        res = faiss.StandardGpuResources()  # GPU resources
        index = faiss.index_cpu_to_gpu(res, 0, index)
    index.add(embeddings)
    return index

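# The index is built but never searched anywhere in this file. A minimal
# retrieval sketch for completeness; retrieve_similar_chunks() is not part of
# the original app and assumes the `index` returned by store_in_faiss() plus
# the `chunks` list that was encoded:
def retrieve_similar_chunks(index, chunks, query, k=3):
    query_vec = np.asarray(
        sentence_model.encode([query], convert_to_tensor=False), dtype="float32"
    )
    distances, ids = index.search(query_vec, k)  # nearest neighbours by L2 distance
    return [chunks[i] for i in ids[0] if i != -1]  # -1 pads when k > index size
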
# Generate Quiz using BLOOM Model
def generate_quiz(content):
    prompt = f"Generate 10 quiz questions from the following content:\n{content}"
    # Cap the tokenized prompt; BLOOM's tokenizer has no default max length,
    # so truncation=True alone would not actually truncate.
    inputs = bloom_tokenizer(prompt, return_tensors="pt", truncation=True, max_length=1024).to(device)
    # max_new_tokens bounds the generated text itself; the original
    # max_length=512 counted the prompt too and could leave no room to generate.
    outputs = bloom_model.generate(inputs["input_ids"], max_new_tokens=512, num_return_sequences=1)
    quiz = bloom_tokenizer.decode(outputs[0], skip_special_tokens=True)
    return quiz

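# Greedy decoding (the default above) tends to repeat itself on long prompts.
# If the questions come out repetitive, sampling is worth trying; the values
# below are illustrative, not part of the original app:
#   outputs = bloom_model.generate(
#       inputs["input_ids"], max_new_tokens=512,
#       do_sample=True, temperature=0.7, top_p=0.9,
#   )
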
# Translate Content to Urdu
def translate_to_urdu(content):
    # googletrans talks to an unofficial Google endpoint; it can break or
    # rate-limit without notice, and very long texts may need to be chunked.
    translator = Translator()
    translation = translator.translate(content, src="en", dest="ur")
    return translation.text

# Retrieve Content by Topic
def get_content_by_topic(topic):
    page = wiki_wiki.page(topic)
    if not page.exists():
        return f"Topic '{topic}' not found on Wikipedia."
    return page.text

# Evaluate Quiz Results (expects two lists of answer strings)
def evaluate_quiz(user_answers, correct_answers):
    score = 0
    feedback = []
    for user, correct in zip(user_answers, correct_answers):
        if user.strip().lower() == correct.strip().lower():
            score += 1
            feedback.append("Correct")
        else:
            feedback.append(f"Incorrect. Correct answer: {correct}")
    return score, feedback

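# Example:
#   evaluate_quiz(["paris", "4"], ["Paris", "5"])
#   -> (1, ["Correct", "Incorrect. Correct answer: 5"])
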
# Adaptive Learning App (subject is accepted for UI symmetry but unused here)
def adaptive_learning_app(subject, topic):
    content = get_content_by_topic(topic)
    if "not found" in content:
        # Match the arity of the success path so callers can always unpack
        # five values; the original returned only two here, which crashed
        # process_input on unknown topics.
        return None, None, None, None, content

    # Chunk Text and Create Embeddings
    chunks = chunk_text(content)
    embeddings = create_embeddings(chunks)
    faiss_index = store_in_faiss(chunks, embeddings)
    return content, chunks, embeddings, faiss_index, "Processing complete."

# Gradio User Interface
def main_ui():
    def process_input(subject, topic):
        # Stored as module-level globals so later button handlers can reuse them.
        global content, chunks, embeddings, faiss_index
        content, chunks, embeddings, faiss_index, message = adaptive_learning_app(subject, topic)
        return content, message

    def generate_pdf(subject, topic):
        pdf_path, message = generate_pdf_from_wikipedia(subject, topic)
        return pdf_path, message

    def interactive_quiz(content):
        quiz = generate_quiz(content)
        return quiz

    def urdu_translation(content):
        return translate_to_urdu(content)

    def submit_answers(user_answers, correct_answers):
        # Both inputs arrive as plain strings from the UI: the user's answers
        # are comma-separated and the "answer key" is the raw quiz text (one
        # line per question). Split both into lists before comparing; the
        # original zipped the raw strings, which compared single characters.
        user_list = [a.strip() for a in user_answers.split(",") if a.strip()]
        correct_list = [l.strip() for l in correct_answers.split("\n") if l.strip()]
        score, feedback = evaluate_quiz(user_list, correct_list)
        return f"Your Score: {score}/{len(correct_list)}", "\n".join(feedback)

    # Gradio Interface
    interface = gr.Blocks()

    with interface:
        with gr.Row():
            gr.Markdown("### Adaptive Learning App with Wikipedia Integration")

        with gr.Row():
            subject_input = gr.Textbox(label="Enter Subject")
            topic_input = gr.Textbox(label="Enter Topic")
            process_button = gr.Button("Process")
            course_material = gr.TextArea(label="Course Material", lines=15)
            status_output = gr.Textbox(label="Status")
        process_button.click(
            process_input,
            inputs=[subject_input, topic_input],
            outputs=[course_material, status_output],
        )

        with gr.Row():
            pdf_button = gr.Button("Generate PDF")
            pdf_download = gr.File(label="Download PDF")
            pdf_status = gr.Textbox(label="PDF Status")
        pdf_button.click(
            generate_pdf,
            inputs=[subject_input, topic_input],
            outputs=[pdf_download, pdf_status],
        )

        with gr.Row():
            quiz_button = gr.Button("Generate Quiz")
            quiz_view = gr.TextArea(label="Quiz Questions", lines=10)
        quiz_button.click(
            interactive_quiz,
            inputs=course_material,
            outputs=quiz_view,
        )

        with gr.Row():
            urdu_button = gr.Button("Translate to Urdu")
            urdu_translation_view = gr.TextArea(label="Urdu Translation", lines=10)
        urdu_button.click(
            urdu_translation,
            inputs=course_material,
            outputs=urdu_translation_view,
        )

        with gr.Row():
            user_answers = gr.Textbox(label="Your Answers (comma-separated)")
            submit_button = gr.Button("Submit Answers")
            result_output = gr.Textbox(label="Quiz Result")
            feedback_output = gr.TextArea(label="Feedback", lines=10)
        # The raw quiz text serves as the answer key; submit_answers splits it
        # per line before grading.
        submit_button.click(
            submit_answers,
            inputs=[user_answers, quiz_view],
            outputs=[result_output, feedback_output],
        )

    interface.launch()

# Load Models
bloom_model = AutoModelForCausalLM.from_pretrained("bigscience/bloom-560m").to(device)
bloom_tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-560m")
sentence_model = SentenceTransformer("all-MiniLM-L6-v2", device=device)

# Run the App
main_ui()
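
# Dependencies assumed by the imports above (no requirements file in this
# commit): torch, faiss-cpu (or faiss-gpu), wikipedia-api, fpdf, transformers,
# sentence-transformers, gradio, googletrans, numpy.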