Johan713 committed on
Commit ade39be · verified · 1 Parent(s): 2fd7e58

Update pages/exam_prepration.py

Files changed (1):
  1. pages/exam_prepration.py +325 -325
pages/exam_prepration.py CHANGED
Only one line actually changed, the ChatOpenAI import on line 5; every other line of the file is identical. The effective diff:

@@ -5 +5 @@
- from langchain_openai import ChatOpenAI
+ from langchain_community.chat_models import ChatOpenAI
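Both packages expose a ChatOpenAI class with the same chat-model interface, so the swap is effectively behavior-preserving: langchain_openai is the dedicated partner package, while langchain_community.chat_models carries the community-maintained variant. A minimal equivalence sketch (assuming both packages are installed; model and base URL are the ones used in this file):

    from langchain_community.chat_models import ChatOpenAI   # import after this commit
    # from langchain_openai import ChatOpenAI                # import before this commit

    chat = ChatOpenAI(
        model="tiiuae/falcon-180B-chat",
        api_key="<AI71 key>",
        base_url="https://api.ai71.ai/v1/",
    )
    print(chat.invoke("Say hello.").content)  # same .invoke() interface either way

The updated pages/exam_prepration.py in full: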
 
import streamlit as st
import random
import time
from typing import List, Dict
from langchain_community.chat_models import ChatOpenAI
from langchain.schema import HumanMessage, SystemMessage
from langchain_community.document_loaders import PyPDFLoader, TextLoader, UnstructuredWordDocumentLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_huggingface import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS
from langchain.chains import RetrievalQA
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
import os
import tempfile
from dotenv import load_dotenv
import requests
from bs4 import BeautifulSoup

# Load environment variables
load_dotenv()

AI71_BASE_URL = "https://api.ai71.ai/v1/"
# Read secrets from the environment (populated by load_dotenv above) instead of
# hardcoding them in source.
AI71_API_KEY = os.getenv("AI71_API_KEY")
GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")
GOOGLE_CSE_ID = os.getenv("GOOGLE_CSE_ID")
YOUTUBE_API_KEY = os.getenv("YOUTUBE_API_KEY")

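# Illustrative .env layout for the four os.getenv lookups above (variable
# names are assumptions matching those lookups; any valid AI71, Google Custom
# Search, and YouTube Data API credentials work):
#
#   AI71_API_KEY=api71-api-...
#   GOOGLE_API_KEY=AIza...
#   GOOGLE_CSE_ID=...
#   YOUTUBE_API_KEY=AIza...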

# Initialize the Falcon model
chat = ChatOpenAI(
    model="tiiuae/falcon-180B-chat",
    api_key=AI71_API_KEY,
    base_url=AI71_BASE_URL,
    streaming=True,
)

# Initialize embeddings
embeddings = HuggingFaceEmbeddings()

FIELDS = [
    "Mathematics", "Physics", "Chemistry", "Biology", "Computer Science",
    "History", "Geography", "Literature", "Philosophy", "Psychology",
    "Sociology", "Economics", "Business", "Finance", "Accounting",
    "Law", "Political Science", "Environmental Science", "Astronomy", "Geology",
    "Linguistics", "Anthropology", "Art History", "Music Theory", "Film Studies",
    "Medical Science", "Nursing", "Public Health", "Nutrition", "Physical Education",
    "Engineering", "Architecture", "Urban Planning", "Agriculture", "Veterinary Science",
    "Oceanography", "Meteorology", "Statistics", "Data Science", "Artificial Intelligence",
    "Cybersecurity", "Renewable Energy", "Quantum Physics", "Neuroscience", "Genetics",
    "Biotechnology", "Nanotechnology", "Robotics", "Space Exploration", "Cryptography"
]

# List of educational resources
EDUCATIONAL_RESOURCES = [
    "https://www.coursera.org",
    "https://www.khanacademy.org",
    "https://scholar.google.com",
    "https://www.edx.org",
    "https://www.udacity.com",
    "https://www.udemy.com",
    "https://www.futurelearn.com",
    "https://www.lynda.com",
    "https://www.skillshare.com",
    "https://www.codecademy.com",
    "https://www.brilliant.org",
    "https://www.duolingo.com",
    "https://www.ted.com/talks",
    "https://ocw.mit.edu",
    "https://www.open.edu/openlearn",
    "https://www.coursebuffet.com",
    "https://www.academicearth.org",
    "https://www.edutopia.org",
    "https://www.saylor.org",
    "https://www.openculture.com",
    "https://www.gutenberg.org",
    "https://www.archive.org",
    "https://www.wolframalpha.com",
    "https://www.quizlet.com",
    "https://www.mathway.com",
    "https://www.symbolab.com",
    "https://www.lessonplanet.com",
    "https://www.teacherspayteachers.com",
    "https://www.brainpop.com",
    "https://www.ck12.org"
]

def search_web(query: str, num_results: int = 30, max_retries: int = 3) -> List[Dict[str, str]]:
    for attempt in range(max_retries):
        try:
            service = build("customsearch", "v1", developerKey=GOOGLE_API_KEY)
            # The Custom Search JSON API returns at most 10 results per request.
            res = service.cse().list(q=query, cx=GOOGLE_CSE_ID, num=min(num_results, 10)).execute()

            results = []
            if "items" in res:
                for item in res["items"]:
                    result = {
                        "title": item["title"],
                        "link": item["link"],
                        "snippet": item.get("snippet", "")
                    }
                    results.append(result)

            return results
        except Exception as e:
            print(f"An error occurred: {e}. Attempt {attempt + 1} of {max_retries}")
            time.sleep(2 ** attempt)  # exponential backoff between retries

    print("Max retries reached. No results found.")
    return []
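
# Illustrative call (hypothetical query), mirroring how gather_resources()
# below restricts a search to one site:
#
#   hits = search_web("site:https://www.khanacademy.org Physics", num_results=1)
#   if hits:
#       print(hits[0]["title"], "->", hits[0]["link"])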

def scrape_webpage(url: str) -> str:
    # Rotate a browser User-Agent so simple bot filters are less likely to
    # block the request.
    user_agents = [
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.0 Safari/605.1.15',
        'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.101 Safari/537.36'
    ]
    try:
        headers = {'User-Agent': random.choice(user_agents)}
        response = requests.get(url, timeout=10, headers=headers)
        soup = BeautifulSoup(response.content, 'html.parser')
        return soup.get_text()
    except Exception as e:
        print(f"Error scraping {url}: {e}")
        return ""

def process_documents(uploaded_files):
    documents = []
    for uploaded_file in uploaded_files:
        file_extension = os.path.splitext(uploaded_file.name)[1].lower()

        # The LangChain loaders expect a path on disk, so write the uploaded
        # bytes to a temporary file first.
        with tempfile.NamedTemporaryFile(delete=False, suffix=file_extension) as tmp:
            tmp.write(uploaded_file.getvalue())
            tmp_path = tmp.name

        if file_extension == '.pdf':
            loader = PyPDFLoader(tmp_path)
        elif file_extension in ['.txt', '.md']:
            loader = TextLoader(tmp_path)
        elif file_extension in ['.doc', '.docx']:
            loader = UnstructuredWordDocumentLoader(tmp_path)
        else:
            st.warning(f"Unsupported file type: {file_extension}")
            os.remove(tmp_path)
            continue

        documents.extend(loader.load())
        os.remove(tmp_path)

    text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
    texts = text_splitter.split_documents(documents)

    # The FAISS retriever supplies document context; RetrievalQA queries it
    # internally for each question.
    vectorstore = FAISS.from_documents(texts, embeddings)
    retriever = vectorstore.as_retriever(search_kwargs={"k": 5})

    qa_chain = RetrievalQA.from_chain_type(
        llm=chat,
        chain_type="stuff",
        retriever=retriever,
        return_source_documents=True
    )

    return qa_chain

def generate_questions(topic, difficulty, num_questions, include_answers, qa_chain=None):
    system_prompt = f"""You are an expert exam question generator. Generate {num_questions} {difficulty}-level questions about {topic}.
{"Each question should be followed by its correct answer." if include_answers else "Do not include answers."}
Format your response as follows:

Q1. [Question]
{"A1. [Answer]" if include_answers else ""}

Q2. [Question]
{"A2. [Answer]" if include_answers else ""}

... and so on.
"""

    if qa_chain:
        # Ground the questions in the uploaded documents; the chain's retriever
        # fetches the relevant chunks for the query itself.
        result = qa_chain.invoke({"query": system_prompt})
        questions = result['result']
    else:
        messages = [
            SystemMessage(content=system_prompt),
            HumanMessage(content=f"Please generate {num_questions} {difficulty} questions about {topic}.")
        ]
        questions = chat.invoke(messages).content

    return questions
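
# Illustrative call (hypothetical values): five beginner-level questions with
# answers, without uploaded-document grounding:
#
#   questions = generate_questions("Photosynthesis", "Beginner", 5, include_answers=True)
#   st.markdown(questions)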

def gather_resources(field: str) -> List[Dict[str, str]]:
    resources = []
    for resource_url in EDUCATIONAL_RESOURCES:
        search_results = search_web(f"site:{resource_url} {field}", num_results=1)
        if search_results:
            result = search_results[0]
            content = scrape_webpage(result['link'])
            resources.append({
                "title": result['title'],
                "link": result['link'],
                "content": content[:500] + "..." if len(content) > 500 else content
            })

    # YouTube search
    youtube = build('youtube', 'v3', developerKey=YOUTUBE_API_KEY)
    youtube_results = youtube.search().list(q=field, type='video', part='id,snippet', maxResults=5).execute()
    for item in youtube_results.get('items', []):
        video_id = item['id']['videoId']
        resources.append({
            "title": item['snippet']['title'],
            "link": f"https://www.youtube.com/watch?v={video_id}",
            "content": item['snippet']['description'],
            "thumbnail": item['snippet']['thumbnails']['medium']['url']
        })

    return resources
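
# Note: each call issues one site-restricted CSE query per entry in
# EDUCATIONAL_RESOURCES (30 total) plus one YouTube Data API query, all
# sequentially, so a single field can take a while:
#
#   resources = gather_resources("Physics")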

def main():
    st.set_page_config(page_title="Advanced Exam Preparation System", layout="wide")

    st.sidebar.title("Advanced Exam Prep")
    st.sidebar.markdown("""
    Welcome to our advanced exam preparation system!
    Here you can generate practice questions, explore educational resources,
    and interact with an AI tutor to enhance your learning experience.
    """)

    # Main area tabs
    tab1, tab2, tab3 = st.tabs(["Question Generator", "Resource Explorer", "Academic Tutor"])

    with tab1:
        st.header("Question Generator")
        col1, col2 = st.columns(2)
        with col1:
            topic = st.text_input("Enter the exam topic:")
            exam_type = st.selectbox("Select exam type:", ["General", "STEM", "Humanities", "Business", "Custom"])
        with col2:
            difficulty = st.select_slider(
                "Select difficulty level:",
                options=["Super Easy", "Easy", "Beginner", "Intermediate", "Higher Intermediate", "Master", "Advanced"]
            )
        num_questions = st.number_input("Number of questions:", min_value=1, max_value=50, value=5)
        include_answers = st.checkbox("Include answers", value=True)

        if st.button("Generate Questions", key="generate_questions"):
            if topic:
                with st.spinner("Generating questions..."):
                    questions = generate_questions(topic, difficulty, num_questions, include_answers)
                st.success("Questions generated successfully!")
                st.markdown(questions)
            else:
                st.warning("Please enter a topic.")

    with tab2:
        st.header("Resource Explorer")
        selected_field = st.selectbox("Select a field to explore:", FIELDS)
        if st.button("Explore Resources", key="explore_resources"):
            with st.spinner("Gathering resources..."):
                resources = gather_resources(selected_field)
            st.success(f"Found {len(resources)} resources!")

            for i, resource in enumerate(resources):
                col1, col2 = st.columns([1, 3])
                with col1:
                    if "thumbnail" in resource:
                        st.image(resource["thumbnail"], use_column_width=True)
                    else:
                        st.image("https://via.placeholder.com/150", use_column_width=True)
                with col2:
                    st.subheader(f"[{resource['title']}]({resource['link']})")
                    st.write(resource['content'])
                st.markdown("---")

    with tab3:
        st.header("Academic Tutor")
        uploaded_files = st.file_uploader("Upload documents (PDF, TXT, MD, DOC, DOCX)", type=["pdf", "txt", "md", "doc", "docx"], accept_multiple_files=True)

        if uploaded_files:
            qa_chain = process_documents(uploaded_files)
            st.success("Documents processed successfully!")
        else:
            qa_chain = None

        st.subheader("Chat with AI Tutor")
        if 'chat_history' not in st.session_state:
            st.session_state.chat_history = []

        chat_container = st.container()
        with chat_container:
            for i, (role, message) in enumerate(st.session_state.chat_history):
                with st.chat_message(role):
                    st.write(message)

        user_input = st.chat_input("Ask a question or type 'search: your query' to perform a web search:")
        if user_input:
            st.session_state.chat_history.append(("user", user_input))
            with st.chat_message("user"):
                st.write(user_input)

            with st.chat_message("assistant"):
                if user_input.lower().startswith("search:"):
                    search_query = user_input[7:].strip()
                    search_results = search_web(search_query, num_results=3)
                    response = f"Here are some search results for '{search_query}':\n\n"
                    for result in search_results:
                        response += f"- [{result['title']}]({result['link']})\n  {result['snippet']}\n\n"
                elif qa_chain:
                    # Answer from the uploaded documents when they are available.
                    response = qa_chain.invoke({"query": user_input})['result']
                else:
                    response = chat.invoke([HumanMessage(content=user_input)]).content
                st.write(response)
            st.session_state.chat_history.append(("assistant", response))

            # Scroll to bottom of chat
            js = """
            <script>
            function scroll_to_bottom() {
                var chatElement = window.parent.document.querySelector('.stChatFloatingInputContainer');
                chatElement.scrollIntoView({behavior: 'smooth'});
            }
            scroll_to_bottom();
            </script>
            """
            st.components.v1.html(js)

if __name__ == "__main__":
    main()
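
To run the page locally, a plausible setup (package names inferred from the imports above; exact versions may differ):

    pip install streamlit langchain langchain-community langchain-huggingface \
        sentence-transformers faiss-cpu python-dotenv google-api-python-client \
        requests beautifulsoup4 pypdf unstructured
    streamlit run pages/exam_prepration.py

with the four API keys placed in a .env file as sketched near the top of the file.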