Johan713 committed on
Commit 5347681 · verified · 1 Parent(s): 5094dd7

Upload 13 files

app.py ADDED
@@ -0,0 +1,163 @@
+ import streamlit as st
+ import os
+ import importlib
+
+ # Custom CSS for improved styling
+ def local_css(file_name):
+     with open(file_name, "r") as f:
+         st.markdown(f'<style>{f.read()}</style>', unsafe_allow_html=True)
+
+ st.set_page_config(
+     page_title="S.H.E.R.L.O.C.K.",
+     page_icon="🕵️",
+     layout="wide",
+     initial_sidebar_state="expanded"
+ )
+
+ # Define the pages with icons
+ PAGES = {
+     "Home": {"icon": "🏠", "module": None},
+     "Web RAG Powered Chatbot": {"icon": "💬", "module": "chatbot"},
+     "Notes Generation": {"icon": "📝", "module": "notes_generation"},
+     "Exam Preparation": {"icon": "📚", "module": "exam_preparation"},
+     "Mnemonics Generation": {"icon": "🧠", "module": "mnemonics_generation"},
+     "Study Roadmap": {"icon": "🗺️", "module": "study_roadmap"},
+     "Interview Preparation": {"icon": "🎤", "module": "interview_preparation"},
+     "AI Buddy": {"icon": "🤖🧘", "module": "ai_buddy"},
+     "Mind Palace Builder": {"icon": "🏛️", "module": "mind_palace"},
+     "Sherlock Style Observation": {"icon": "🔍", "module": "sherlock_observation"},
+     "Research Paper Finder": {"icon": "🏛️", "module": "research_paper_finder"},
+     "Lecture Finder": {"icon": "🔍", "module": "lecture_finder"},
+     "Resume Generator": {"icon": "📝", "module": "resume_generator"}
+ }
+
+ def load_module(module_name):
+     if module_name is None:
+         return None
+     try:
+         return importlib.import_module(f"pages.{module_name}")
+     except ImportError:
+         st.error(f"Unable to load module: {module_name}. Make sure the file exists in the 'pages' directory.")
+         return None
+
+ def main():
+     st.image("https://upload.wikimedia.org/wikipedia/commons/c/cd/Sherlock_Holmes_Portrait_Paget.jpg", use_column_width=True)
+     st.sidebar.title("S.H.E.R.L.O.C.K. 🕵️")
+     st.sidebar.markdown("*Study Helper & Educational Resource for Learning & Observational Knowledge*")
+
+     # Apply custom CSS
+     st.markdown("""
+ <style>
+ body {
+     font-family: 'Roboto', sans-serif;
+     background-color: #f0f2f6;
+ }
+ .stButton button {
+     background-color: #0e1117;
+     color: white;
+     border-radius: 20px;
+     padding: 10px 20px;
+     font-weight: bold;
+     transition: all 0.3s ease;
+ }
+ .stButton button:hover {
+     background-color: #2e7d32;
+     box-shadow: 0 4px 8px rgba(0,0,0,0.1);
+ }
+ .sidebar .sidebar-content {
+     background-color: #0e1117;
+     color: white;
+ }
+ h1, h2, h3 {
+     color: #1e3a8a;
+ }
+ .stRadio > label {
+     font-weight: bold;
+     color: #333;
+ }
+ .feature-card {
+     background-color: white;
+     border-radius: 10px;
+     padding: 20px;
+     margin: 10px;
+     box-shadow: 0 4px 6px rgba(0,0,0,0.1);
+     transition: all 0.3s ease;
+ }
+ .feature-card:hover {
+     transform: translateY(-5px);
+     box-shadow: 0 6px 8px rgba(0,0,0,0.15);
+ }
+ .feature-icon {
+     font-size: 2em;
+     margin-bottom: 10px;
+ }
+ </style>
+ """, unsafe_allow_html=True)
+
+     selection = st.sidebar.radio(
+         "Navigate",
+         list(PAGES.keys()),
+         format_func=lambda x: f"{PAGES[x]['icon']} {x}"
+     )
+
+     st.sidebar.markdown("---")
+     st.sidebar.info(
+         "This app is part of the S.H.E.R.L.O.C.K. project. "
+         "For more information, visit [our website](https://sherlock.vercel.app/)."
+     )
+     st.sidebar.text("Version 1.0")
+
+     # Main content area
+     if selection == "Home":
+         st.title("Welcome to S.H.E.R.L.O.C.K. 🕵️")
+         st.markdown("""
+ *Systematic Holistic Educational Resource for Learning and Optimizing Cognitive Knowledge*
+
+ S.H.E.R.L.O.C.K. is an advanced AI-powered personalized learning assistant designed to revolutionize your educational journey. By combining cutting-edge artificial intelligence with time-tested learning techniques, S.H.E.R.L.O.C.K. aims to enhance your cognitive abilities, strengthen your memory, and deepen your subject-specific knowledge.
+
+ Our platform offers a comprehensive suite of tools and features that cater to various aspects of learning and personal development. From AI-driven chatbots and customized study plans to innovative memory techniques and mindfulness practices, S.H.E.R.L.O.C.K. is your all-in-one companion for academic success and personal growth.
+
+ Explore our features below and embark on a journey to unlock your full learning potential!
+ """)
+         st.markdown("## Features")
+         cols = st.columns(3)
+         for idx, (feature, details) in enumerate(list(PAGES.items())[1:]):  # Skip "Home"
+             with cols[idx % 3]:
+                 st.markdown(f"""
+ <div class="feature-card">
+     <div class="feature-icon">{details['icon']}</div>
+     <h3>{feature}</h3>
+     <p>{get_feature_description(feature)}</p>
+ </div>
+ """, unsafe_allow_html=True)
+     else:
+         st.title(f"{PAGES[selection]['icon']} {selection}")
+         st.markdown(f"*{get_feature_description(selection)}*")
+         st.markdown("---")
+
+         # Load and run the selected module
+         module = load_module(PAGES[selection]['module'])
+         if module and hasattr(module, 'main'):
+             module.main()
+         else:
+             st.error(f"Unable to load the {selection} feature. Please check the module implementation.")
+
+ def get_feature_description(feature):
+     descriptions = {
+         "Web RAG Powered Chatbot": "Engage with our state-of-the-art AI-powered chatbot that leverages Retrieval-Augmented Generation (RAG) technology. This intelligent assistant provides interactive learning experiences by retrieving and synthesizing information from vast web-based resources, offering you accurate, context-aware responses to your queries.",
+         "Notes Generation": "Transform complex documents and lengthy lectures into concise, easy-to-understand notes. Our AI analyzes the content, extracts key points, and presents them in a clear, structured format, helping you grasp essential concepts quickly and efficiently.",
+         "Exam Preparation": "Ace your exams with our intelligent test preparation system. Generate custom question papers, practice tests, and quizzes tailored to your specific syllabus and learning progress. Receive instant feedback and targeted recommendations to improve your performance.",
+         "Mnemonics Generation": "Boost your memory and retention with personalized mnemonic devices. Our AI creates custom memory aids, including acronyms, rhymes, and vivid imagery, helping you remember complex information effortlessly and enhancing your long-term recall abilities.",
+         "Study Roadmap": "Navigate your learning journey with a personalized, adaptive study plan. Our AI analyzes your goals, strengths, and areas for improvement to create a tailored learning path, optimizing your study time and ensuring efficient progress towards your educational objectives.",
+         "Interview Preparation": "Master the art of interviewing with our advanced simulation system. Experience realistic interview scenarios, receive real-time feedback on your responses, and gain insights into improving your communication skills, body language, and overall performance.",
+         "AI Buddy": "Connect with a compassionate AI companion designed to provide emotional support, motivation, and guidance. Cultivate mental clarity and emotional balance with our curated collection of meditation and mindfulness resources. Engage in thoughtful conversations, receive personalized advice, and enjoy a judgment-free space for self-reflection and personal growth.",
+         "Mind Palace Builder": "Construct powerful mental frameworks to enhance your memory and learning capabilities. Our interactive system guides you through creating and populating your own virtual 'mind palace', allowing you to organize and recall vast amounts of information with ease.",
+         "Sherlock Style Observation": "Sharpen your critical thinking and observational skills using techniques inspired by the legendary detective. Learn to notice details, make logical deductions, and approach problems from unique perspectives, enhancing your analytical abilities across various subjects.",
+         "Research Paper Finder": "Discover relevant academic literature effortlessly with our intelligent research paper finder. Input your topic of interest, and our AI will scour databases to present you with a curated list of papers, saving you valuable research time and ensuring you stay up-to-date with the latest findings in your field.",
+         "Lecture Finder": "Expand your knowledge horizons with our smart lecture discovery tool. Find high-quality video lectures from reputable sources on YouTube, covering any topic you wish to explore. Our AI curates content based on your learning preferences and academic level.",
+         "Resume Generator": "Create a standout resume tailored to your unique skills and experiences. Our AI-powered resume generator analyzes your input and crafts a professional, ATS-friendly document that highlights your strengths and aligns with industry standards, increasing your chances of landing your dream job."
+     }
+     return descriptions.get(feature, "Description not available.")
+
+ if __name__ == "__main__":
+     main()
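
Note on the page-loading contract: app.py resolves each sidebar entry to a module under pages/ via importlib.import_module(f"pages.{module_name}") and then calls that module's main(), so every feature file in this commit needs to expose a module-level main(). A minimal sketch of what load_module expects (illustrative only; this hypothetical pages/example_feature.py is not part of the commit):

import streamlit as st

def main():
    # app.py already renders the page title and description,
    # so a page module only needs to draw its own widgets here.
    st.write("Feature UI goes here.")

if __name__ == "__main__":
    main()
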
pages/ai_buddy.py ADDED
@@ -0,0 +1,661 @@
+ import streamlit as st
+ import random
+ from langchain.chat_models import ChatOpenAI
+ from langchain.schema import HumanMessage, SystemMessage
+ import os
+ import io        # used by the binaural-beats download buffer below; missing from the original imports
+ import base64    # used by get_binary_file_downloader_html; missing from the original imports
+ from dotenv import load_dotenv
+ import pandas as pd
+ from datetime import datetime
+ import plotly.express as px
+ import json
+ import tempfile
+ import time
+ import numpy as np
+ import threading
+ from playsound import playsound
+ import pygame
+ import sounddevice as sd
+ from scipy.io import wavfile
+
+ pygame.mixer.init()
+
+ # Load environment variables
+ load_dotenv()
+
+ AI71_BASE_URL = "https://api.ai71.ai/v1/"
+ AI71_API_KEY = os.getenv('AI71_API_KEY')
+
+ # Initialize the Falcon model
+ chat = ChatOpenAI(
+     model="tiiuae/falcon-180B-chat",
+     api_key=AI71_API_KEY,
+     base_url=AI71_BASE_URL,
+     streaming=True,
+ )
35
+
36
+ # Expanded Therapy techniques
37
+ THERAPY_TECHNIQUES = {
38
+ "CBT": "Use Cognitive Behavioral Therapy techniques to help the user identify and change negative thought patterns.",
39
+ "Mindfulness": "Guide the user through mindfulness exercises to promote present-moment awareness and reduce stress.",
40
+ "Solution-Focused": "Focus on the user's strengths and resources to help them find solutions to their problems.",
41
+ "Emotion-Focused": "Help the user identify, experience, and regulate their emotions more effectively.",
42
+ "Psychodynamic": "Explore the user's past experiences and unconscious patterns to gain insight into current issues.",
43
+ "ACT": "Use Acceptance and Commitment Therapy to help the user accept their thoughts and feelings while committing to positive changes.",
44
+ "DBT": "Apply Dialectical Behavior Therapy techniques to help the user manage intense emotions and improve relationships.",
45
+ "Gestalt": "Use Gestalt therapy techniques to focus on the present moment and increase self-awareness.",
46
+ "Existential": "Explore existential themes such as meaning, freedom, and responsibility to help the user find purpose.",
47
+ "Narrative": "Use storytelling and narrative techniques to help the user reframe their life experiences and create new meaning.",
48
+ }
49
+
50
+ def get_ai_response(user_input, buddy_config, therapy_technique=None):
51
+ system_message = f"You are {buddy_config['name']}, an AI companion with the following personality: {buddy_config['personality']}. "
52
+ system_message += f"Additional details about you: {buddy_config['details']}. "
53
+
54
+ if therapy_technique:
55
+ system_message += f"In this conversation, {THERAPY_TECHNIQUES[therapy_technique]}"
56
+
57
+ messages = [
58
+ SystemMessage(content=system_message),
59
+ HumanMessage(content=user_input)
60
+ ]
61
+ response = chat.invoke(messages).content
62
+ return response
63
+
64
+ def play_sound_loop(sound_file, stop_event):
65
+ while not stop_event.is_set():
66
+ playsound(sound_file)
67
+
68
+ def play_sound_for_duration(sound_file, duration):
69
+ start_time = time.time()
70
+ while time.time() - start_time < duration:
71
+ playsound(sound_file, block=False)
72
+ time.sleep(0.1) # Short sleep to prevent excessive CPU usage
73
+ # Ensure the sound stops after the duration
74
+ pygame.mixer.quit()
75
+
76
+ def get_sound_files(directory):
77
+ return [f for f in os.listdir(directory) if f.endswith('.mp3')]
78
+
79
+ def get_sound_file_path(sound_name, sound_dir):
80
+ # Convert the sound name to a filename
81
+ filename = f"{sound_name.lower().replace(' ', '_')}.mp3"
82
+ return os.path.join(sound_dir, filename)
83
+
84
+ SOUND_OPTIONS = [
85
+ "Gentle Rain", "Ocean Waves", "Forest Ambience", "Soft Wind Chimes",
86
+ "Tibetan Singing Bowls", "Humming Song", "Crackling Fireplace",
87
+ "Birdsong", "White Noise", "Zen River", "Heartbeat", "Deep Space",
88
+ "Whale Songs", "Bamboo Flute", "Thunderstorm", "Cat Purring",
89
+ "Campfire", "Windchimes", "Waterfall", "Beach Waves", "Cicadas",
90
+ "Coffee Shop Ambience", "Grandfather Clock", "Rainstorm on Tent",
91
+ "Tropical Birds", "Subway Train", "Washing Machine", "Fan White Noise",
92
+ "Tibetan Bells", "Wind in Trees", "Meditation Bowl", "Meditation Bowl2", "Birds Singing Rainy Day"
93
+ ]
94
+
95
+ def show_meditation_timer():
96
+ st.subheader("πŸ§˜β€β™€οΈ Enhanced Meditation Timer")
97
+
98
+ sound_dir = os.path.join(os.path.dirname(__file__), "..", "sounds")
99
+
100
+ col1, col2 = st.columns(2)
101
+
102
+ with col1:
103
+ duration = st.slider("Select duration (minutes)", 1, 60, 5)
104
+ background_sound = st.selectbox("Background Sound", SOUND_OPTIONS)
105
+
106
+ with col2:
107
+ interval_options = ["None", "Every 5 minutes", "Every 10 minutes"]
108
+ interval_reminder = st.selectbox("Interval Reminders", interval_options)
109
+ end_sound = st.selectbox("End of Session Sound", SOUND_OPTIONS)
110
+
111
+ if st.button("Start Meditation", key="start_meditation"):
112
+ progress_bar = st.progress(0)
113
+ status_text = st.empty()
114
+
115
+ # Initialize pygame mixer
116
+ pygame.mixer.init()
117
+
118
+ # Load background sound
119
+ background_sound_file = get_sound_file_path(background_sound, sound_dir)
120
+ if not os.path.exists(background_sound_file):
121
+ st.error(f"Background sound file not found: {background_sound_file}")
122
+ return
123
+
124
+ # Load end of session sound
125
+ end_sound_file = get_sound_file_path(end_sound, sound_dir)
126
+ if not os.path.exists(end_sound_file):
127
+ st.error(f"End sound file not found: {end_sound_file}")
128
+ return
129
+
130
+ # Play background sound on loop
131
+ pygame.mixer.music.load(background_sound_file)
132
+ pygame.mixer.music.play(-1) # -1 means loop indefinitely
133
+
134
+ start_time = time.time()
135
+ end_time = start_time + (duration * 60)
136
+
137
+ try:
138
+ while time.time() < end_time:
139
+ elapsed_time = time.time() - start_time
140
+ progress = elapsed_time / (duration * 60)
141
+ progress_bar.progress(progress)
142
+
143
+ remaining_time = end_time - time.time()
144
+ mins, secs = divmod(int(remaining_time), 60)
145
+ status_text.text(f"Time remaining: {mins:02d}:{secs:02d}")
146
+
147
+ if interval_reminder != "None":
148
+ interval = 5 if interval_reminder == "Every 5 minutes" else 10
149
+ if int(elapsed_time) > 0 and int(elapsed_time) % (interval * 60) == 0:
150
+ st.toast(f"{interval} minutes passed", icon="⏰")
151
+
152
+ # Check if 10 seconds remaining
153
+ if remaining_time <= 10 and remaining_time > 9:
154
+ pygame.mixer.music.stop() # Stop background sound
155
+ pygame.mixer.Sound(end_sound_file).play() # Play end sound
156
+
157
+ if remaining_time <= 0:
158
+ break
159
+
160
+ time.sleep(0.1) # Update more frequently for smoother countdown
161
+ finally:
162
+ # Stop all sounds
163
+ pygame.mixer.quit()
164
+
165
+ # Ensure the progress bar is full and time remaining shows 00:00
166
+ progress_bar.progress(1.0)
167
+ status_text.text("Time remaining: 00:00")
168
+
169
+ st.success("Meditation complete!")
170
+ st.balloons()
171
+
172
+ if 'achievements' not in st.session_state:
173
+ st.session_state.achievements = set()
174
+ st.session_state.achievements.add("Zen Master")
175
+ st.success("Achievement Unlocked: Zen Master πŸ§˜β€β™€οΈ")
176
+
177
+ def show_personalized_recommendations():
178
+ st.subheader("🎯 Personalized Recommendations")
179
+
180
+ recommendation_categories = [
181
+ "Mental Health",
182
+ "Physical Health",
183
+ "Personal Development",
184
+ "Relationships",
185
+ "Career",
186
+ "Hobbies",
187
+ ]
188
+
189
+ selected_category = st.selectbox("Choose a category", recommendation_categories)
190
+
191
+ recommendations = {
192
+ "Mental Health": [
193
+ "Practice daily gratitude journaling",
194
+ "Try a guided meditation for stress relief",
195
+ "Explore cognitive behavioral therapy techniques",
196
+ "Start a mood tracking journal",
197
+ "Learn about mindfulness practices",
198
+ ],
199
+ "Physical Health": [
200
+ "Start a 30-day yoga challenge",
201
+ "Try intermittent fasting",
202
+ "Begin a couch to 5K running program",
203
+ "Experiment with new healthy recipes",
204
+ "Create a sleep hygiene routine",
205
+ ],
206
+ "Personal Development": [
207
+ "Start learning a new language",
208
+ "Read personal development books",
209
+ "Take an online course in a subject you're interested in",
210
+ "Practice public speaking",
211
+ "Start a daily writing habit",
212
+ ],
213
+ "Relationships": [
214
+ "Practice active listening techniques",
215
+ "Plan regular date nights or friend meetups",
216
+ "Learn about love languages",
217
+ "Practice expressing gratitude to loved ones",
218
+ "Join a local community or interest group",
219
+ ],
220
+ "Career": [
221
+ "Update your resume and LinkedIn profile",
222
+ "Network with professionals in your industry",
223
+ "Set SMART career goals",
224
+ "Learn a new skill relevant to your field",
225
+ "Start a side project or freelance work",
226
+ ],
227
+ "Hobbies": [
228
+ "Start a garden or learn about plant care",
229
+ "Try a new art form like painting or sculpting",
230
+ "Learn to play a musical instrument",
231
+ "Start a DIY home improvement project",
232
+ "Explore photography or videography",
233
+ ],
234
+ }
235
+
236
+ st.write("Here are some personalized recommendations for you:")
237
+ for recommendation in recommendations[selected_category]:
238
+ st.markdown(f"- {recommendation}")
239
+
240
+ if st.button("Get More Recommendations"):
241
+ st.write("More tailored recommendations:")
242
+ additional_recs = random.sample(recommendations[selected_category], 3)
243
+ for rec in additional_recs:
244
+ st.markdown(f"- {rec}")
245
+
246
+ def generate_binaural_beat(freq1, freq2, duration_seconds, sample_rate=44100):
247
+ t = np.linspace(0, duration_seconds, int(sample_rate * duration_seconds), False)
248
+ left_channel = np.sin(2 * np.pi * freq1 * t)
249
+ right_channel = np.sin(2 * np.pi * freq2 * t)
250
+ stereo_audio = np.vstack((left_channel, right_channel)).T
251
+ return (stereo_audio * 32767).astype(np.int16)
252
+
253
+ def get_binary_file_downloader_html(bin_file, file_label='File'):
254
+ b64 = base64.b64encode(bin_file).decode()
255
+ return f'<a href="data:application/octet-stream;base64,{b64}" download="{file_label}.wav" class="download-link">Download {file_label}</a>'
256
+
257
+ def show_binaural_beats():
258
+ st.subheader("🎡 Binaural Beats Generator")
259
+
260
+ st.markdown("""
261
+ <style>
262
+ .stButton>button {
263
+ background-color: #4CAF50;
264
+ color: white;
265
+ font-weight: bold;
266
+ }
267
+ .download-link {
268
+ background-color: #008CBA;
269
+ color: white;
270
+ padding: 10px 15px;
271
+ text-align: center;
272
+ text-decoration: none;
273
+ display: inline-block;
274
+ font-size: 16px;
275
+ margin: 4px 2px;
276
+ cursor: pointer;
277
+ border-radius: 4px;
278
+ }
279
+ .stop-button {
280
+ background-color: #f44336;
281
+ color: white;
282
+ font-weight: bold;
283
+ }
284
+ </style>
285
+ """, unsafe_allow_html=True)
286
+
287
+ st.write("Binaural beats are created when two slightly different frequencies are played in each ear, potentially influencing brainwave activity.")
288
+
289
+ preset_beats = {
290
+ "Deep Relaxation (Delta)": {"base": 100, "beat": 2},
291
+ "Meditation (Theta)": {"base": 150, "beat": 6},
292
+ "Relaxation (Alpha)": {"base": 200, "beat": 10},
293
+ "Light Focus (Low Beta)": {"base": 250, "beat": 14},
294
+ "High Focus (Mid Beta)": {"base": 300, "beat": 20},
295
+ "Alertness (High Beta)": {"base": 350, "beat": 30},
296
+ "Gamma Consciousness": {"base": 400, "beat": 40},
297
+ "Lucid Dreaming": {"base": 180, "beat": 3},
298
+ "Memory Enhancement": {"base": 270, "beat": 12},
299
+ "Creativity Boost": {"base": 220, "beat": 8},
300
+ "Pain Relief": {"base": 130, "beat": 4},
301
+ "Mood Elevation": {"base": 315, "beat": 18}
302
+ }
303
+
304
+ col1, col2 = st.columns(2)
305
+
306
+ with col1:
307
+ beat_type = st.selectbox("Choose a preset or custom:", ["Custom"] + list(preset_beats.keys()))
308
+
309
+ with col2:
310
+ duration = st.slider("Duration (minutes):", 1, 60, 15)
311
+
312
+ if beat_type == "Custom":
313
+ col3, col4 = st.columns(2)
314
+ with col3:
315
+ base_freq = st.slider("Base Frequency (Hz):", 100, 500, 200)
316
+ with col4:
317
+ beat_freq = st.slider("Desired Beat Frequency (Hz):", 1, 40, 10)
318
+ else:
319
+ base_freq = preset_beats[beat_type]["base"]
320
+ beat_freq = preset_beats[beat_type]["beat"]
321
+ st.info(f"Base Frequency: {base_freq} Hz, Beat Frequency: {beat_freq} Hz")
322
+
323
+ if 'audio_playing' not in st.session_state:
324
+ st.session_state.audio_playing = False
325
+
326
+ if 'start_time' not in st.session_state:
327
+ st.session_state.start_time = None
328
+
329
+ if 'end_time' not in st.session_state:
330
+ st.session_state.end_time = None
331
+
332
+ # Create persistent placeholders for UI elements
333
+ progress_bar = st.empty()
334
+ status_text = st.empty()
335
+ stop_button = st.empty()
336
+
337
+ generate_button = st.button("Generate and Play Binaural Beat")
338
+
339
+ if generate_button:
340
+ try:
341
+ # Stop any currently playing audio
342
+ if st.session_state.audio_playing:
343
+ pygame.mixer.music.stop()
344
+ st.session_state.audio_playing = False
345
+
346
+ audio_data = generate_binaural_beat(base_freq, base_freq + beat_freq, duration * 60)
347
+
348
+ # Save the generated audio to a temporary file
349
+ with tempfile.NamedTemporaryFile(delete=False, suffix='.wav') as temp_file:
350
+ temp_filename = temp_file.name
351
+ wavfile.write(temp_filename, 44100, audio_data)
352
+
353
+ # Initialize pygame mixer
354
+ pygame.mixer.init(frequency=44100, size=-16, channels=2)
355
+
356
+ # Load and play the audio
357
+ pygame.mixer.music.load(temp_filename)
358
+ pygame.mixer.music.play()
359
+ st.session_state.audio_playing = True
360
+
361
+ st.session_state.start_time = time.time()
362
+ st.session_state.end_time = st.session_state.start_time + (duration * 60)
363
+
364
+ except Exception as e:
365
+ st.error(f"An error occurred: {str(e)}")
366
+ st.info("Please try again or contact support if the issue persists.")
367
+
368
+ if st.session_state.audio_playing:
369
+ stop_button_active = stop_button.button("Stop Binaural Beat", key="stop_binaural", type="primary")
370
+ current_time = time.time()
371
+
372
+ if stop_button_active:
373
+ pygame.mixer.music.stop()
374
+ st.session_state.audio_playing = False
375
+ st.session_state.start_time = None
376
+ st.session_state.end_time = None
377
+
378
+ elif current_time < st.session_state.end_time:
379
+ elapsed_time = current_time - st.session_state.start_time
380
+ progress = elapsed_time / (st.session_state.end_time - st.session_state.start_time)
381
+ progress_bar.progress(progress)
382
+
383
+ remaining_time = st.session_state.end_time - current_time
384
+ mins, secs = divmod(int(remaining_time), 60)
385
+ status_text.text(f"Time remaining: {mins:02d}:{secs:02d}")
386
+ else:
387
+ pygame.mixer.music.stop()
388
+ st.session_state.audio_playing = False
389
+ st.session_state.start_time = None
390
+ st.session_state.end_time = None
391
+ progress_bar.empty()
392
+ status_text.text("Binaural beat session complete!")
393
+
394
+ # Offer download of the generated audio
395
+ if not st.session_state.audio_playing and 'audio_data' in locals():
396
+ with io.BytesIO() as buffer:
397
+ wavfile.write(buffer, 44100, audio_data)
398
+ st.markdown(get_binary_file_downloader_html(buffer.getvalue(), f"binaural_beat_{base_freq}_{beat_freq}Hz"), unsafe_allow_html=True)
399
+
400
+ # Ensure the app updates every second
401
+ if st.session_state.audio_playing:
402
+ time.sleep(1)
403
+ st.experimental_rerun()
404
+
405
+ def main():
406
+ st.set_page_config(page_title="S.H.E.R.L.O.C.K. AI Buddy", page_icon="πŸ•΅οΈ", layout="wide")
407
+
408
+ # Custom CSS for improved styling
409
+ st.markdown("""
410
+ <style>
411
+ .stApp {
412
+ background-color: #f0f2f6;
413
+ }
414
+ .stButton>button {
415
+ background-color: #4CAF50;
416
+ color: white;
417
+ font-weight: bold;
418
+ }
419
+ .stTextInput>div>div>input {
420
+ background-color: #ffffff;
421
+ }
422
+ </style>
423
+ """, unsafe_allow_html=True)
424
+
425
+ st.title("πŸ•΅οΈ S.H.E.R.L.O.C.K. AI Buddy")
426
+ st.markdown("Your personalized AI companion for conversation, therapy, and personal growth.")
427
+
428
+ # Initialize session state
429
+ if 'buddy_name' not in st.session_state:
430
+ st.session_state.buddy_name = "Sherlock"
431
+ if 'buddy_personality' not in st.session_state:
432
+ st.session_state.buddy_personality = "Friendly, empathetic, and insightful"
433
+ if 'buddy_details' not in st.session_state:
434
+ st.session_state.buddy_details = "Knowledgeable about various therapy techniques and always ready to listen"
435
+ if 'messages' not in st.session_state:
436
+ st.session_state.messages = []
437
+
438
+ # Sidebar for AI Buddy configuration and additional features
439
+ with st.sidebar:
440
+ st.header("πŸ€– Configure Your AI Buddy")
441
+ st.session_state.buddy_name = st.text_input("Name your AI Buddy", value=st.session_state.buddy_name)
442
+ st.session_state.buddy_personality = st.text_area("Describe your buddy's personality", value=st.session_state.buddy_personality)
443
+ st.session_state.buddy_details = st.text_area("Additional details about your buddy", value=st.session_state.buddy_details)
444
+
445
+ st.header("🧘 Therapy Session")
446
+ therapy_mode = st.checkbox("Enable Therapy Mode")
447
+ if therapy_mode:
448
+ therapy_technique = st.selectbox("Select Therapy Technique", list(THERAPY_TECHNIQUES.keys()))
449
+ else:
450
+ therapy_technique = None
451
+
452
+ st.markdown("---")
453
+
454
+ st.subheader("πŸ“‹ Todo List")
455
+ if 'todos' not in st.session_state:
456
+ st.session_state.todos = []
457
+
458
+ new_todo = st.text_input("Add a new todo:")
459
+ if st.button("Add Todo", key="add_todo"):
460
+ if new_todo:
461
+ st.session_state.todos.append({"task": new_todo, "completed": False})
462
+ st.success("Todo added successfully!")
463
+ else:
464
+ st.warning("Please enter a todo item.")
465
+
466
+ for i, todo in enumerate(st.session_state.todos):
467
+ col1, col2, col3 = st.columns([0.05, 0.8, 0.15])
468
+ with col1:
469
+ todo['completed'] = st.checkbox("", todo['completed'], key=f"todo_{i}")
470
+ with col2:
471
+ st.write(todo['task'], key=f"todo_text_{i}")
472
+ with col3:
473
+ if st.button("πŸ—‘οΈ", key=f"delete_{i}", help="Delete todo"):
474
+ st.session_state.todos.pop(i)
475
+ st.experimental_rerun()
476
+
477
+ st.subheader("⏱️ Pomodoro Timer")
478
+ pomodoro_duration = st.slider("Pomodoro Duration (minutes)", 1, 60, 25)
479
+ if st.button("Start Pomodoro"):
480
+ progress_bar = st.progress(0)
481
+ for i in range(pomodoro_duration * 60):
482
+ time.sleep(1)
483
+ progress_bar.progress((i + 1) / (pomodoro_duration * 60))
484
+ st.success("Pomodoro completed!")
485
+ if 'achievements' not in st.session_state:
486
+ st.session_state.achievements = set()
487
+ st.session_state.achievements.add("Consistent Learner")
488
+
489
+ st.markdown("---")
490
+ st.markdown("Powered by Falcon-180B and Streamlit")
491
+
492
+ st.markdown("---")
493
+ st.header("πŸ“” Daily Journal")
494
+ journal_entry = st.text_area("Write your thoughts for today")
495
+ if st.button("Save Journal Entry"):
496
+ if 'journal_entries' not in st.session_state:
497
+ st.session_state.journal_entries = []
498
+ st.session_state.journal_entries.append({
499
+ 'date': datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
500
+ 'entry': journal_entry
501
+ })
502
+ st.success("Journal entry saved!")
503
+ st.toast("Journal entry saved successfully!", icon="βœ…")
504
+
505
+ if 'journal_entries' in st.session_state and st.session_state.journal_entries:
506
+ st.subheader("Previous Entries")
507
+ for entry in st.session_state.journal_entries[-5:]: # Show last 5 entries
508
+ st.text(entry['date'])
509
+ st.write(entry['entry'])
510
+ st.markdown("---")
511
+
512
+ # Main content area
513
+ tab1, tab2 = st.tabs(["Chat", "Tools"])
514
+
515
+ with tab1:
516
+ # Chat interface
517
+ st.header("πŸ—¨οΈ Chat with Your AI Buddy")
518
+
519
+ # Display chat history
520
+ chat_container = st.container()
521
+ with chat_container:
522
+ for message in st.session_state.messages:
523
+ with st.chat_message(message["role"]):
524
+ st.markdown(message["content"])
525
+
526
+ # User input
527
+ prompt = st.chat_input("What's on your mind?")
528
+
529
+ # Clear chat history button
530
+ if st.button("Clear Chat History"):
531
+ st.session_state.messages = []
532
+ st.experimental_rerun()
533
+
534
+ if prompt:
535
+ st.session_state.messages.append({"role": "user", "content": prompt})
536
+ with st.chat_message("user"):
537
+ st.markdown(prompt)
538
+
539
+ buddy_config = {
540
+ "name": st.session_state.buddy_name,
541
+ "personality": st.session_state.buddy_personality,
542
+ "details": st.session_state.buddy_details
543
+ }
544
+
+ with st.chat_message("assistant"):
+ message_placeholder = st.empty()
+ # get_ai_response() already calls the model and returns the complete reply;
+ # streaming that string back through chat.stream() would trigger a second,
+ # unintended model call with the first answer as the prompt.
+ full_response = get_ai_response(prompt, buddy_config, therapy_technique)
+ message_placeholder.markdown(full_response)
+ st.session_state.messages.append({"role": "assistant", "content": full_response})
553
+
554
+ # Force a rerun to update the chat history immediately
555
+ st.experimental_rerun()
556
+
557
+ with tab2:
558
+ tool_choice = st.selectbox("Select a tool", ["Meditation Timer", "Binaural Beats", "Recommendations"])
559
+ if tool_choice == "Meditation Timer":
560
+ show_meditation_timer()
561
+ elif tool_choice == "Recommendations":
562
+ show_personalized_recommendations()
563
+ elif tool_choice == "Binaural Beats":
564
+ show_binaural_beats()
565
+
566
+ # Mood tracker
567
+ st.sidebar.markdown("---")
568
+ st.sidebar.header("😊 Mood Tracker")
569
+ mood = st.sidebar.slider("How are you feeling today?", 1, 10, 5)
570
+ if st.sidebar.button("Log Mood"):
571
+ st.sidebar.success(f"Mood logged: {mood}/10")
572
+ st.balloons()
573
+
574
+ # Resources and Emergency Contact
575
+ st.sidebar.markdown("---")
576
+ st.sidebar.header("πŸ†˜ Resources")
577
+ st.sidebar.info("If you're in crisis, please reach out for help:")
578
+ st.sidebar.markdown("- [Mental Health Resources](https://www.mentalhealth.gov/get-help/immediate-help)")
579
+ st.sidebar.markdown("- Emergency Contact: 911 or your local emergency number")
580
+
581
+ # Inspiration Quote
582
+ st.sidebar.markdown("---")
583
+ st.sidebar.header("πŸ’‘ Daily Inspiration")
584
+ if st.sidebar.button("Get Inspirational Quote"):
585
+ quotes = [
586
+ "The only way to do great work is to love what you do. - Steve Jobs",
587
+ "Believe you can and you're halfway there. - Theodore Roosevelt",
588
+ "The future belongs to those who believe in the beauty of their dreams. - Eleanor Roosevelt",
589
+ "Strive not to be a success, but rather to be of value. - Albert Einstein",
590
+ "The only limit to our realization of tomorrow will be our doubts of today. - Franklin D. Roosevelt",
591
+ "Do not wait to strike till the iron is hot; but make it hot by striking. - William Butler Yeats",
592
+ "What lies behind us and what lies before us are tiny matters compared to what lies within us. - Ralph Waldo Emerson",
593
+ "Success is not final, failure is not fatal: It is the courage to continue that counts. - Winston Churchill",
594
+ "Life is what happens when you're busy making other plans. - John Lennon",
595
+ "You miss 100% of the shots you don't take. - Wayne Gretzky",
596
+ "The best way to predict the future is to create it. - Peter Drucker",
597
+ "It is not the strongest of the species that survive, nor the most intelligent, but the one most responsive to change. - Charles Darwin",
598
+ "Whether you think you can or you think you can't, you're right. - Henry Ford",
599
+ "The only place where success comes before work is in the dictionary. - Vidal Sassoon",
600
+ "Do what you can, with what you have, where you are. - Theodore Roosevelt",
601
+ "The purpose of our lives is to be happy. - Dalai Lama",
602
+ "Success usually comes to those who are too busy to be looking for it. - Henry David Thoreau",
603
+ "Your time is limited, so don't waste it living someone else's life. - Steve Jobs",
604
+ "Don't be afraid to give up the good to go for the great. - John D. Rockefeller",
605
+ "I find that the harder I work, the more luck I seem to have. - Thomas Jefferson",
606
+ "Success is not the key to happiness. Happiness is the key to success. - Albert Schweitzer",
607
+ "It does not matter how slowly you go, as long as you do not stop. - Confucius",
608
+ "If you set your goals ridiculously high and it's a failure, you will fail above everyone else's success. - James Cameron",
609
+ "Don't watch the clock; do what it does. Keep going. - Sam Levenson",
610
+ "Hardships often prepare ordinary people for an extraordinary destiny. - C.S. Lewis",
611
+ "Don't count the days, make the days count. - Muhammad Ali",
612
+ "The best revenge is massive success. - Frank Sinatra",
613
+ "The only impossible journey is the one you never begin. - Tony Robbins",
614
+ "Act as if what you do makes a difference. It does. - William James",
615
+ "You are never too old to set another goal or to dream a new dream. - C.S. Lewis",
616
+ "If you're going through hell, keep going. - Winston Churchill",
617
+ "Dream big and dare to fail. - Norman Vaughan",
618
+ "In the middle of every difficulty lies opportunity. - Albert Einstein",
619
+ "What we achieve inwardly will change outer reality. - Plutarch",
620
+ "I have not failed. I've just found 10,000 ways that won't work. - Thomas Edison",
621
+ "It always seems impossible until it's done. - Nelson Mandela",
622
+ "The future depends on what you do today. - Mahatma Gandhi",
623
+ "Don't wait. The time will never be just right. - Napoleon Hill",
624
+ "Quality is not an act, it is a habit. - Aristotle",
625
+ "Your life does not get better by chance, it gets better by change. - Jim Rohn",
626
+ "The only thing standing between you and your goal is the story you keep telling yourself as to why you can't achieve it. - Jordan Belfort",
627
+ "Challenges are what make life interesting; overcoming them is what makes life meaningful. - Joshua J. Marine",
628
+ "Opportunities don't happen, you create them. - Chris Grosser",
629
+ "I can't change the direction of the wind, but I can adjust my sails to always reach my destination. - Jimmy Dean",
630
+ "Start where you are. Use what you have. Do what you can. - Arthur Ashe",
631
+ "The secret of getting ahead is getting started. - Mark Twain",
632
+ "You don’t have to be great to start, but you have to start to be great. - Zig Ziglar",
633
+ "Keep your eyes on the stars, and your feet on the ground. - Theodore Roosevelt",
634
+ "The only way to achieve the impossible is to believe it is possible. - Charles Kingsleigh"
635
+ ]
636
+
637
+ random_quote = random.choice(quotes)
638
+ st.sidebar.success(random_quote)
639
+
640
+ # Chat Export
641
+ st.sidebar.markdown("---")
642
+ if st.sidebar.button("Export Chat History"):
643
+ chat_history = "\n".join([f"{msg['role']}: {msg['content']}" for msg in st.session_state.messages])
644
+ st.sidebar.download_button(
645
+ label="Download Chat History",
646
+ data=chat_history,
647
+ file_name="ai_buddy_chat_history.txt",
648
+ mime="text/plain"
649
+ )
650
+
651
+ st.sidebar.success("Chat history ready for download!")
652
+
653
+ # Display achievements
654
+ if 'achievements' in st.session_state and st.session_state.achievements:
655
+ st.sidebar.markdown("---")
656
+ st.sidebar.header("πŸ† Achievements")
657
+ for achievement in st.session_state.achievements:
658
+ st.sidebar.success(f"Unlocked: {achievement}")
659
+
660
+ if __name__ == "__main__":
661
+ main()
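
For reference, generate_binaural_beat(freq1, freq2, duration_seconds) in pages/ai_buddy.py builds one sine wave per ear (left at freq1, right at freq2) at 44.1 kHz and scales the stereo pair to 16-bit integers, which show_binaural_beats then writes with scipy.io.wavfile and plays through pygame. A minimal offline usage sketch of the same construction, outside Streamlit (the output file name and chosen frequencies are illustrative):

import numpy as np
from scipy.io import wavfile

def generate_binaural_beat(freq1, freq2, duration_seconds, sample_rate=44100):
    # Same idea as in ai_buddy.py: one sine per channel, interleaved as stereo int16.
    t = np.linspace(0, duration_seconds, int(sample_rate * duration_seconds), False)
    left = np.sin(2 * np.pi * freq1 * t)
    right = np.sin(2 * np.pi * freq2 * t)
    return (np.vstack((left, right)).T * 32767).astype(np.int16)

# 210 Hz - 200 Hz gives a 10 Hz beat, matching the "Relaxation (Alpha)" preset on the page.
audio = generate_binaural_beat(200, 210, 5)
wavfile.write("binaural_200_10hz.wav", 44100, audio)
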
pages/chatbot.py ADDED
@@ -0,0 +1,242 @@
1
+ import streamlit as st
2
+ import random
3
+ from langchain.chat_models import ChatOpenAI
4
+ from langchain.schema import HumanMessage, SystemMessage
5
+ from langchain.document_loaders import TextLoader
6
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
7
+ from langchain.embeddings import HuggingFaceEmbeddings
8
+ from langchain_community.vectorstores import FAISS
9
+ from langchain.chains import RetrievalQA
10
+ import os
11
+ from dotenv import load_dotenv
12
+ import requests
13
+ from bs4 import BeautifulSoup
14
+ import pandas as pd
15
+ from googleapiclient.discovery import build
16
+ from googleapiclient.errors import HttpError
17
+ import time
18
+ from langchain.schema import Document
19
+ from docx import Document as DocxDocument
20
+ from PyPDF2 import PdfReader
21
+ import io
22
+
23
+ # Load environment variables
24
+ load_dotenv()
25
+
26
+ AI71_BASE_URL = "https://api.ai71.ai/v1/"
27
+ AI71_API_KEY = os.getenv('AI71_API_KEY')
28
+
29
+ # Initialize session state variables
30
+ if "custom_personality" not in st.session_state:
31
+ st.session_state.custom_personality = ""
32
+ if "messages" not in st.session_state:
33
+ st.session_state.messages = []
34
+
35
+ # Initialize the Falcon model
36
+ @st.cache_resource
37
+ def get_llm():
38
+ return ChatOpenAI(
39
+ model="tiiuae/falcon-180B-chat",
40
+ api_key=AI71_API_KEY,
41
+ base_url=AI71_BASE_URL,
42
+ streaming=True,
43
+ )
44
+
45
+ # Initialize embeddings
46
+ @st.cache_resource
47
+ def get_embeddings():
48
+ return HuggingFaceEmbeddings()
49
+
50
+ def process_documents(uploaded_files):
51
+ documents = []
52
+ for uploaded_file in uploaded_files:
53
+ file_extension = os.path.splitext(uploaded_file.name)[1].lower()
54
+ try:
55
+ if file_extension in [".txt", ".md"]:
56
+ content = uploaded_file.getvalue().decode("utf-8")
57
+ documents.append(Document(page_content=content, metadata={"source": uploaded_file.name}))
58
+ elif file_extension == ".docx":
59
+ docx_file = io.BytesIO(uploaded_file.getvalue())
60
+ doc = DocxDocument(docx_file)
61
+ content = "\n".join([para.text for para in doc.paragraphs])
62
+ documents.append(Document(page_content=content, metadata={"source": uploaded_file.name}))
63
+ elif file_extension == ".pdf":
64
+ pdf_file = io.BytesIO(uploaded_file.getvalue())
65
+ pdf_reader = PdfReader(pdf_file)
66
+ content = ""
67
+ for page in pdf_reader.pages:
68
+ content += page.extract_text()
69
+ documents.append(Document(page_content=content, metadata={"source": uploaded_file.name}))
70
+ else:
71
+ st.warning(f"Unsupported file type: {file_extension}")
72
+ except Exception as e:
73
+ st.error(f"Error processing file {uploaded_file.name}: {str(e)}")
74
+
75
+ if not documents:
76
+ return None
77
+
78
+ text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
79
+ texts = text_splitter.split_documents(documents)
80
+
81
+ vectorstore = FAISS.from_documents(texts, get_embeddings())
82
+ retriever = vectorstore.as_retriever(search_kwargs={"k": 3})
83
+
84
+ qa_chain = RetrievalQA.from_chain_type(
85
+ llm=get_llm(),
86
+ chain_type="stuff",
87
+ retriever=retriever,
88
+ return_source_documents=True,
89
+ )
90
+
91
+ return qa_chain
92
+
93
+ def get_chatbot_response(user_input, qa_chain=None, personality="default", web_search=False):
94
+ system_message = get_personality_prompt(personality)
95
+
96
+ web_info = ""
97
+ if web_search:
98
+ web_results = search_web_duckduckgo(user_input)
99
+ web_info = "\n\n".join([f"Title: {result['title']}\nLink: {result['link']}\nSnippet: {result['snippet']}" for result in web_results])
100
+ user_input += f"\n\nWeb search results:\n{web_info}"
101
+
102
+ if qa_chain:
103
+ result = qa_chain({"query": user_input})
104
+ response = result['result']
105
+ source_docs = result.get('source_documents', [])
106
+ else:
107
+ messages = [
108
+ SystemMessage(content=system_message),
109
+ HumanMessage(content=user_input)
110
+ ]
111
+ response = get_llm().invoke(messages).content
112
+ source_docs = []
113
+
114
+ return response, source_docs, web_results if web_search else None
115
+
116
+ def get_personality_prompt(personality):
117
+ personalities = {
118
+ "default": "You are a helpful assistant.",
119
+ "sherlock": "You are Sherlock Holmes, the world's greatest detective. Respond with keen observation and deductive reasoning.",
120
+ "yoda": "Wise and cryptic, you are. Like Yoda from Star Wars, speak you must.",
121
+ "shakespeare": "Thou art the Bard himself. In iambic pentameter, respond with eloquence and poetic flair.",
122
+ "custom": st.session_state.custom_personality
123
+ }
124
+ return personalities.get(personality, personalities["default"])
125
+
+ # Note: despite the "duckduckgo" name, this helper calls the Google Custom Search API.
+ def search_web_duckduckgo(query: str, num_results: int = 3, max_retries: int = 3):
127
+ api_key = os.getenv('api_key')
128
+ cse_id = os.getenv('cse_id')
129
+
130
+ for attempt in range(max_retries):
131
+ try:
132
+ service = build("customsearch", "v1", developerKey=api_key)
133
+ res = service.cse().list(q=query, cx=cse_id, num=num_results).execute()
134
+ results = []
135
+ if "items" in res:
136
+ for item in res["items"]:
137
+ result = {
138
+ "title": item["title"],
139
+ "link": item["link"],
140
+ "snippet": item.get("snippet", "")
141
+ }
142
+ results.append(result)
143
+ return results
144
+ except HttpError as e:
145
+ print(f"HTTP error occurred: {e}. Attempt {attempt + 1} of {max_retries}")
146
+ except Exception as e:
147
+ print(f"An unexpected error occurred: {e}. Attempt {attempt + 1} of {max_retries}")
148
+ time.sleep(2 ** attempt)
149
+ print("Max retries reached. No results found.")
150
+ return []
151
+
152
+ def main():
153
+ st.set_page_config(page_title="S.H.E.R.L.O.C.K. Chatbot", page_icon="πŸ•΅οΈ", layout="wide")
154
+
155
+ st.title("S.H.E.R.L.O.C.K. Chatbot")
156
+
157
+ # Sidebar
158
+ with st.sidebar:
159
+ st.image("data:image/jpeg;base64,/9j/4AAQSkZJRgABAQAAAQABAAD/2wCEAAkGBw8QEBUQEBAVFRUVFRYXFhcVFhUVFRUXFRUWFhUYGBYYHSggGBolGxcVITEhJSkrLi4uFx8zODYtNygtLisBCgoKDg0OGhAQGismHSUtLS0tLS0tLS0tLjArLS0rLS8vLS0tLS0tLS0rLS0tLS0tLS0tLS0tLS0tKy0tLS0tK//AABEIALcBEwMBIgACEQEDEQH/xAAcAAABBQEBAQAAAAAAAAAAAAAAAQIDBAUGBwj/xABAEAABAwIDBQUFBgUDBAMAAAABAAIRAyEEEjEFQVFhcQYTIoGRMkKhscEHFCNSctEzYoKy8JKi4VNjc8IVJJP/xAAZAQADAQEBAAAAAAAAAAAAAAAAAQIDBAX/xAAtEQACAgEDBAEBBwUAAAAAAAAAAQIRAxIhMQRBUWETIjJxsdHh8PEFFCNikf/aAAwDAQACEQMRAD8A8gQhKukxESoQgAhCEIAEqEIAEJWtJsASeQlTswNZ2lJ5/pd+ydCK6FtN2EXYapWa4l1KC5uWBk3uDp1GpHAFYyGqBOwSpE5jCTABJOgFyegSGIpaFBzzDRPHcBzJNgOZVqjgmtvUMn8rSP8Ac7QdBJ/SVcD7AAAAaNFgOfM8zJ5qlGwI8PgqbLu8bv8AYPLV3nA5FXZJ1P8AwOAG4clCwKZqtKhFTbf8Ol+ur/bQUWxAczjBsBPK8SeCl23/AA6f66v9tBQ7EfleYMGLRrz+aUfthLg6RgcDTIN4sQYn8R8gHjfRPrFjnOD/AAGXXAtr7zd3VvoUwkHKCCDFyBA/iP8AaaNOo9EzE17vDhmEujiNdDw5GQt2ZRZo43EVGueyqGvGZ4aTfKcxnI8GQAfdmOIlUGlS42oW1aoa6QXvm387tQbTwKgaUkqNNVlzD0/ERIEB+vJrlbwLGBxFWm5wyOMsuWw10OEGHNBgnprrNGm8ZydxD/i1wHzVrZ20H0ScsEOa5pafZOZpb9fgm7rYEMmD4SbGx0PI8lZD2lozC8nxDXRuo37+CpzwUoJyjhJ9YbP09UxovtYW0ybFvHxEe0BedDY6QdVEGg6GDwP0P7/FSYKtkBdE2IIvcFzdZtv3cLqSq2i4ZmHKd4P7ft6JWVRBBFjZT92YkXHEfXgoXNI18t4PQpzDCLGkStUrUtE03WdLTucLt82/UehTqlFzLm4Ojhdp6H6apat6L09wQkQmFHjaVCFyGQIQhABCVCEAC0djbIfiXZWkNAjM4zYHfA1jVZy2djYeu2S0hoc286mQQCBusT6qoq2TK62NRuFZgnGn7VVpuYsOBAOtuNr2B1TnY+o7V7o4ZiqAoYh9YUiHPquIgTmc6RIud0eg4Qu52Ps7AYamKlUNr1MxaXOP4Ac0AuFNp/iBtvFGvDRaoznl0r36MXZfZuvjmltKi8tMkvaQweriGv8A0/LVGH7H0/x8PiWihVpsBY4ugEn2XSf4jD1t106LbHas1WdzRc5skZiyWANbcgEXvCXbG2D/APG0quIcC+niCxrnQahb3eaAdZ0vyBOkoa7syU5v0eUtwLmn8XwRu1eejfqYHCVYa8NEMGUHXe4/qdw5CByUdaqXvc86ucXHq4yfmnNWaR1WSMUzVGwKVqoRKwKVoUbFK1MaKe2x+HS/XV/toLJaYuCtfbn8Ol+ur/bQWQFk+SmauD2w5uUPkhuhGovP1PqtB1YPzOabGT6rm1JRquaZaY+R6jetY5PJDh4O2rmi+pUD5pu7x8OElh8Rs5uo6iddFTIgkawYtp5cll4XaTXWfY8dx893mtFpWip8EtlqlUEQ5sjiLOH7+flCk7q0tOYctR1G7rpzU+DxDe67upSm1QsdcXDS4j4buUqCm2TNMmeHveRHteV+SYJ2I0qxSrkDLYtmYOkmB1Gg0T/vFOp/Fbld+dgsT/OzTzEHkVHVolu8EHQtMgwAeu8agJJ9maUX2NDqZy21sTr4mzG7hwmRqq2inZkdTPumNY8J8Q4S718oCWjUyWqMD2kcbxxY8f8AI5JXRaQ2lUI6bxuPkpm5TpY8Dp5H9/VSfcQ8TQdn4sNqo8vf6t9AqrUKSfBWlonClaVA0qRpTGTITZQmB4+hCFyGIIQhAChCEIAVuq9CxmJod2DT91rHQTcHMGOETYEOH+gLzwCVpsxzw3K6DoOcDSdyuNCZ1+1K7WYem9kCpXz03PYIIpUzJY3SJc+535YWNTpHc4H4H0KtbaxDn08Jmv8A/Xzk8O8qOj4NCrUCtkc8FSLeEYRcq12sBGz8O25L69SpxMNYGT8UuzcI+q8NG8rfoPo4l7qTxk7lwpsdGWtSgjJUEkh7S4THNog6qcstMS4Q1zS/fo8uYpWhd52own3nMHUWDEsJZmpCDVe0xBE+IOEEbxmGtweHdTc0lrgWkGCCCCCNQQbgqS1fdUxzQpmqNgUzAgZI1SsUbQp6bCdEykZ+3v4dL9dX+2isdae2sSxwYxjg7KXkkaS8MEA74yai11lrJ8jY9ASBKgQqtYXGvp2FxwP04KoHDj/g1TgqTobVnT7M2oCTlNyCC12hlpGmjomVeo0w8wC1hO5xhun5jp5+q4sLSwe1XNs/xDj7w/daqfkjTR02JbUactQEOHEXI3X94cDfkm5zAG4En1gH5BJgdp5mZQRUZ+V3uzw3sPTXmiLTFiSBv0gx6EKlfce3Yu067C2HAydXCN1x4bT6gqUVHNDdHNAji10ucbg6HxEcbWWe1WWVyMpbYtaR1lznekOiEmi4stjI67Dkd+Um39L93Q+pT8TiXvgVLuHvEQ86RmO/zVY1Gu3ZTy9k+XunpbkE6nVtBuOB3dDu+SlLua2PaVK0puGoOqEhjSTBMb4HAbz0QFVhRNKE0ITGeRoQhcpzghCEACsChEF5yzcD3iOMbhzPlKrO0PQq9jzNap/5H/3FADAdzRHxJ8966HC9kMQQHPygb2gy+N40gHzWNgKdMmakkcGmCf6oMLrNm4DuMmMwZe6jmLcRScZLbEkzvsCRvkRvhRk1pfSb4Fjb+v8Aj37MXaOMqMLWvpAtYA0NdbIBoGPbDvUuE81s7MGCfT711Z1MAw5rruDomGua2X24CeS3ttbOwjnsrWqUz7TQSDEeLS+niHNsb1xlfDCmXU2OzNDjB3OvE9SAFfT5vkjqX/DPqun+LJp7c/ebGP7RNYzJg6bmXBNV1nGCD4W3MGNSdJEBaeOxQxBw+LaMhewsqnRssAIcTuA4ncBwWRtDGfeG02Npgd2HXi5zRYngI+JVHbtYNw1HDbw51Q/2j5u9FtJOjGD3TaOlr7Vp16uZr5ZNiBvDWtJte+UKDav3TFuHeOLKosHt8bnjc17d8bjIgW4Rx2EqugsBIvI6xBHmPkFcwFnibSD8kopaVFGkpXLWyGpRLHFpixi2h5hPYFLtHK12ZxABA84GWw1JssqvtI6U5aOPvn09nyvzSbolGjXxDKXtm/5R7Xnub534ArKxe0H1PD7LfyjTlmOrj18gFUQobsoEIQkI
expJAAJJIAAEkkmAAN5JXY9lKOz8K9lXaNM1ZuKYAcGc3MNnc58tFzew8R3ddr5gjNB4FzHNB+PkYKKtYueSTefluRLZG2KKfJ79gcIzaLs/eYPEbPygMo9we8Y7+aXeAi27yGq4r7QPsxZSpvxWzw6GAuqUCS4hupfTcbkC5ymbaaZVyvY/tJUwGIbVa7wEgVG7nNm8jeRqP+V6Jt6pWrPx2z6tV1XNQbi8G7wtOVviNMFoEgwW3kwLyslJmzxpnixw7w3MWmOMcePBMC3KVYHW8gg8wRBWIWkWIWmOerkyy4tFUPpVHNMtJB5Lawm2pAZUtBJzDiQ0GRu9kLDCVaptGNHY0HtMGZHIj4K45hIaWkOAbu1AzOPibqNddNLri8Lin0z4T1BuD5LdwG1KbyA7wnhMT0ctFKxo1TG74pzSrFBjC3xaS3xNALwPFMtm/wAOJTq+Ac1udpFSn+dlwJ/MNWHr8UOSTpmkVa2HYJjXGDUyH3SQcs8yLt6wVo1qhkNxTCSdKjYzEccw8NQdfULGaruFxr2DLZzDqx12ny3HmIKzlF8msWWfuYPs1qZG6SWnzaRZCYX0DeKjeQLSB0JukRcv2h7HjqEIWZxglSJUANfoehV3Hfxan/kf/cVTdoei1amGc6o9+U5e8eZ4+I258EDoTZ7SINwdea6vYWLM1mmwdSc525uendro059SszBYVpjPb5qvR2w/vRTYQKZe1ptJc0uuCTo2STAhXlVQpBha+RNnUbPk0abSIlrBHCQBCzcW2m94c2YLWSIiHZQHeUhLj9q5PCyQ+SNJiLE9Vm0sQ8kC3kCD6FYdJFx1OS5Ovr5wk4xi7pGrSgQNJMW5rB2/m+8vDhEQAL+yAMuvEX810WzsBWqPGUXF76N4F3ASqfbygKL6Fy6KLWZtM5YTMn+oLaeaOpROddPLQ59kc9TB6c9I89y7TbG2cPVouyvEio0Nc5pgSYLgNSMua68+fWLiJ0BmBop3Yy0AW+vPiOSiUIyab7BjyyhGUVwzR7XiiMRlovLgGAOJ4y4gC2mUhYiVziTJMk6pFTdsyWyBCEJDBCVCAA6LvcVsOlXY0s8JDQA4DcBoRvC4OF6v2YoMxeEbA9psGGg+NvhcYcIJzCbhZ5ZaVZ29HFSbTOB2ls12Ge0PMgiQYgW1Hlb1Xp+Bc/ucFjHgitTodyzK5ozMnLmfnEG8tDd9+S08f2Sw1TCFlTMDl9wCZF2hrQLutoF5ue39UYdtBuHbmptNNlR7iSGhxLc1GI7xsm8xyO8xNPcfUfQ9jLxlFtGtUogginUewdGuI+iVjwdQCN4NwscV3akyTckkkknUk71Yw2JF80RHqplifIQzR4ZW6JU1OC6TjHBOCYE8IGaGB2pUp2nM21ibiNIK6jZW2g45mPIdviz435m6PH+SuICewkGQYI3jVVfZlLY77E1g90hjW2vlkNJn2g33egskYCdFzWA20R4aokfmGvmN66PB4sEZmOBHI6fUFNV2NE7JJQmOeJSJlnlqEqRYHGCVCVADXaHousxUsc4OGh063HzC5N2i7ftGAWUakXLIPOGtI+ZSbVqLXJpCMqcovdGPicSDTcGyDF40IsCJ81FsBrPvDDUALQ5sg6eIhonoTPkoSPC7p/7NUeHNiNMwgdQQf3VuNqiIyqWo7TF0MPVcWYgFlRpguBgui0mZBBtqPNPoYLA4cZ34gvG5oIvHJpJPlC5urtbEFw8QLratDiToLEEHpC6HEPqVqXc1WsbiWtFVvdtDJDblj2i2fKMwjpvXJHFljs3t6O+fUYJO4xWr3+hNS2u51QMYCymXtBFgQHHKNNLkc+ay8ZjS+pXe6o0jvC0UXNLmva05egED16qPA4c942o9w8Lg4+RBsqW0fDiKo/7j/wDc6VvGMI5KXg58ksk8Ny4bMjaWHFOq5rfZsW/ocA5nnlcPOVWWptttqc65XD+mczZHV7vRWOzPZTGbQflw9Pwgw6o61NvnvPIfBW1TOW0lbMNdr2Q+zbG46KlQGhR/M8eNw/lZ9THQrusJ2J2fsei3EYhv3iuTDc3hbmAk5W6NA43O6brrOzfaU1srHUWAF+QZDGURI8J13aHidyVOrROpvjY4P7SeyGC2dskDD0hn76mDVdDqh1nxbuggX0XkC+gftpw1SrgWUqTC9zsRThouT4XH05rzCj2No0mj77iSyoRIZSGctG7MYv8ADqU4pyQ47bM45C0dq7LNBxyu7xm54Bb/AKmn2T6jms9FGhYwGFdWqspN1e4DoN58hJ8l7Zs+h3OHIoCCxoFMa2br1JuvPexOyHtecRVaW2hgNic3tOjda3mV3wxmUQNfkFksfyzt8L8f0O7HP4If7S/BfmXNgYlz64qYioSQCGTZocbWAsLTdQ/aL2DoVI2gykM7ROIpguaKo/6hyEHM3eQRIudL6XZnEMFSHMbJu10CQRrdab8VnqOJvci/DSPRYTl8DTZvOH9xxtR4ljOy7Kl8I7K7/o1HDxcqdUxf+V/H2iuZr0X03Fj2lrmmC1wIIPMHRel7bwIoYh9NvszLf0uEgeWnks7a2FbiaeWpdzR4H+83kT7zeR8oXeqkrR50sTicGnBNIIJB1Fj5JwSIFCcE0JwQMcE4JgTgmMepsPiH0zmY4g/PqN6hSoKRfqbWrOM5gOQAj4oVFCY7MZCAhZGAqEITAQhdhjsfSr4djZh1MgERNvZJHHpyXIJ/eO4nyt8lLim0/BcZuKaXdUaeNqimzuxlLjcuAeCBaxzRa3DeswklIhW3Zmkarmk02VGm8C+8OBj5iVNhcW8PFTMe8Ds2bU5pmTxuoNl1vw3s841kGM0DkWtKfs5ryT3YvxsCJMWJ39E07HJad0aON2mW1nN7toBALBMRIktPMSR/SqVA1alUugtJkkmQGiNSdR+8cVQrkFxN+AnW1vnKdSruFs0DfrB4SBqohjjB2jTJmnkjpkw2uCKrmH3LTx3z8V7Z9h9UHZrm/lrP+IB+q8KqmSTxXtP2I1IwVUf94/Fo/ZU1dnPk+ybvavDYipi2OcD3dIEsOWWw5rRM7zmBPLwrR7N4QMcaz2gADw7ogRm9LTzTdudscNhAWF+d4H8NpEj9btGDrfkVwWMr7T2qZfUFDCn2YDgHji1vtVt17M5hCtxolRbeo3O2/bPDktZRIqvY8uJB8A8Lmxm3+1u4Lg8dtB+IfneGzEDKItc+eq7PZ2yMNhRDKIeSCHOqwXvabOAOlMXjw3Fpc7Rc1t3Ypw7u8phzqDz4HEeyde7fweINt8SLLWDSVG0YrkpUKRfaJ48PNSYPZGFov7zKC4XAuQ39Ldx/wQjD4hwhrRr6k8eanLHtPjaQdYcCDffBWU4uWz4O3HpW/LKO0e2FJvhpMc4yM2YFkAG9jfN1AW5g8SHw4GQ6CDxB0WRtHY9LEDxeF8WeNeh/MP8ALKLCUqmGptZUIMSARoRJLfhFuSrBFQWlGXUvJKWtnc7Pc4OB4ELaNTxSN91weC208awfgfVdLsfG96HcQRbhI/4XJ/UYf49Xg7ugyXPT5M3tgPx2O40x8HO
WFiHQx3T5kD6rb7Vn8Vn6P/YrnNp1MtInmPhf6LTpneGJn1Cqcji6zpc48XE+pKaEgSrc84cnBNCUJFDk4JoTgmMcnBNShBSFQhCBmOhCVQYAhCEACVIlCAFCUJE6nqOo+aYEzPDpqN+9WHY18RmO/wCO88SnVcLq4G2sR4tdI9b6WVZtJzvdMb+QS1IbhIkpUczZm4PrPyP7ow+Eq1H93Tpuc7gB89wHM2Wv2d2KMQ8uc/K1sSBGZ3rYC3xXfYLC06TctNoaOA/y61jik/uMpZopV3OFGwW4dzRiG5nOEhod+HwMkQSRbfF960MftmtggcLhH9zTqhj3EAlwJBBDXXIbbdfUSrvarCB1am4uc05CAWxuM6EEbxuVgdmKeJw7cry2o0uOZ3izzlHi0IENERESbKHikp7cGyyQliSa3NfZHZXCYXLVcRiXvaHtqvymkQ6HB1KmZDt3jfmvuatl9V1R8NzPe7hLnH14c7DeYIWH2VoYvDNOFxdJ1ShJdTqUXMc+i/UhofEsdvBEA34ldfs6g6uwhpGHoSQadF+au8ixFauLtP8AKwyPzRZD+lbhDHrexVpUWtf3b2mvWEfgUTanpBrVjanbdMkEjxhag2N3mU41zXhplmHpiKDDe5BvVdc3dA4NCv4SgykwU6LAxo3NEa6nrzRiK9OkAajoJ9kCS5x4NaLuPRZObZ1LEol2iWkAQBFgBuA+QWftXYuFxbvxRmewAS1xDgDJAdG7WJ5podWrazRZ+VpHeu/U4WZ0bJ/mCuUxSoM91jRfqTqeJPxKXBSKGH7HYBpnui79T3n4AwrWOpYDD0nMqUqQY4QWZW+Icx9SuP7QfadQY/uMLNR+hc0Ahsa3Nvn5Lkm4ivjcZTDqpGZtUgm4GXLfLOu6Sq07apOkiG23pW7IsXhB94LMO0kOLixupa3MYBPIRcrrNi7NNBpzOlzomNBEwBx11VepTdhz3eHYyY8dSoSST0Fz6gCVibVxeMa/LUrWIkBgyNPpfyJWOeObqI/Sqh75Z0dPLD07qTufrhFntZtCm0ucbim1oMcS68f6lyu18YypQmm4EX01HgdqNRqtB2DbWYWPmDwMGQZBXOY/ZL8NmkhzS05SNehbu+S6IRUIqC7HPllJty7MykqQJVRyihOCaE4JlIcEoSBKEDHBKE1OCBoVKkQgoyEqEKDAEqEIoYJYTSUkoESMbJhW6dIDcqmHMOCugqomc2xXYl7RlOm4/sdx6Kdm0R3ZDhLoI5Hmog7dqOB0TO5Z/N0kfNKeJT5NMfUOF0TYPHvpnOwwRcjcRMfVdXhu0L6gDB4XcYmf2/zRchU9kgCBbTrvO9TsJFwtk3wc7rmjrq2y8XXLZzSDYvPhE6/TRdXsTBOo08r3BziZJFhusFxew+0r6UMqeJvxC7zAYunVbmpuBHxHVWkGtvYvU08UXB/e0nZKm8xLXgaCo33hwNiNxF5bTVliiRtB09ib7/iXjK2m2kfeeSKn/wCYET1dH6Sp8JhWsJfcuPtPeZeerjoOQgDcFnbT2vQwlPvK9QMG6dXHg0auPILzHtN9otfEE08NNKnPtH23fRvxPRYOKXB1fJe8mejdpu3WEwIy5s9Tcxtz58BzMDqvO8f2qxGPGZ7srTIyA2HInfu4DkuFcSSSSSSZJNySdSTvK1NhVLuZxgjysfp6KscUnuRPK2qWyL+ApAVKvHOB5ZQfmStvYlUtxVFw1FOp8cqwGPLMU4HSoAR1A/4cFbxYeKfes93K13IPdr6tA81pSdpkptK1ydnidpU6cl7pdrA18+CoY/O9or1w2jSE5XVDlDp/KDd56BcXS7Q1aZmk1rXDRxAcRzANgesrPxeNq1nmpWqOqPIgueS4xMwJ0HIWTlO9kZxWl2ufJvbQ7SMHhw7Sf53jL/pZr6+iwziH1M7nuJOU69dw3KtCkpe9+krNsttvkjCVIE5AgCcE0JwQMUJQkCUIGOCVNCcgYqEiEx2ZaEIUGYqQlCQoYAhCEhApqVchQoQI0GPBTws5riFapV+KtSM5Q8E9X2T/AJvUrVDUcMuvD5hSGq0a+itE02iw1WMFtx+FdNN0neBp/nJY9bEudYWHD91Chz8FrGu565sXtpharR3jgx2+SA0czJsPhzVXtF9o9GlNPAt71+hrPBFNvHIzV55mBvuvLv8APiEilystRLO0MfWxFQ1a9R1R53uPwA0aOQsq8ISqShWMJIABJNgBqSul2PssU35arHh5Eh4g0wBctzDQ9fLnibKxncVRUyh0SI013g7itDavaB9ZpYxuRp1vLnDhyCuOlbsiV3SL9emyvUbXh4w9EwXsaXFxkE5QPcEXd1hS18c2lQqM1NQBreGrpd5W+CbsztcyjQbT7klzGgC4DDG8nUTroudqYh1R+ZxuZPIW0A3BU2uUON00ysE4BKGp4asyhoansAuOR+RShieGoAgyJIVktTSxFDIEqeWJsIARKkSoGKnJqVMBUJEIGZqEIUGYFIlQkAiEqEACEoanAIoKEa1PQhMYqVNSpgKhCUIAB9PqEJR9PqEIEASoSoASEsJQE8NTAYApGC48/iE5rU8NQMaGp4anAJYTAQBOhEJYQMSEQnQiExjCEwsU0JIQIrlibCswmliVDK6VPLE2EACRLCRAzOlCEKDMEIQkgFhKAhCoBUqEIGCVCEDFSoQgTBKhCYhR9PqEqEIAUJwCEIAkaE4BCEAPATgEITGOShCExipQhCBioQhAhEQhCYwhJCEIAQhNLUISAbkQhCBn/9k=", use_column_width=True)
160
+
161
+ st.subheader("πŸ“ Document Upload")
162
+ uploaded_files = st.file_uploader("Upload documents", type=["txt", "md", "docx", "pdf"], accept_multiple_files=True)
163
+
164
+ st.subheader("🎭 Chatbot Personality")
165
+ personality = st.selectbox("Choose chatbot personality", ["default", "sherlock", "yoda", "shakespeare", "custom"])
166
+
167
+ if personality == "custom":
168
+ st.session_state.custom_personality = st.text_area("Enter custom personality details:", value=st.session_state.custom_personality)
169
+
170
+ st.subheader("🌐 Web Search")
171
+ web_search = st.checkbox("Enable web search")
172
+
173
+ st.subheader("πŸ’¬ Chat Mode")
174
+ chat_mode = st.radio("Select chat mode", ["General Chat", "Document Chat"])
175
+
176
+ if st.button("Clear Chat History"):
177
+ st.session_state.messages = []
178
+ st.rerun()
179
+
180
+ # Main content
181
+ if uploaded_files:
182
+ qa_chain = process_documents(uploaded_files)
183
+ if qa_chain:
184
+ st.success("Documents processed successfully!")
185
+ else:
186
+ st.warning("No valid documents were uploaded or processed.")
187
+ else:
188
+ qa_chain = None
189
+
190
+ # Chat interface
191
+ for message in st.session_state.messages:
192
+ with st.chat_message(message["role"]):
193
+ st.markdown(message["content"])
194
+
195
+ if prompt := st.chat_input("What is your question?"):
196
+ st.chat_message("user").markdown(prompt)
197
+ st.session_state.messages.append({"role": "user", "content": prompt})
198
+
199
+ if chat_mode == "General Chat" or not qa_chain:
200
+ response, _, web_results = get_chatbot_response(prompt, personality=personality, web_search=web_search)
201
+ else:
202
+ response, source_docs, web_results = get_chatbot_response(prompt, qa_chain, personality, web_search)
203
+
204
+ with st.chat_message("assistant"):
205
+ st.markdown(response)
206
+ if chat_mode == "Document Chat" and qa_chain and source_docs:
207
+ with st.expander("Source Documents"):
208
+ for doc in source_docs:
209
+ st.markdown(f"**Source:** {doc.metadata.get('source', 'Unknown')}")
210
+ st.markdown(doc.page_content[:200] + "...")
211
+
212
+ if web_search and web_results:
213
+ with st.expander("Web Search Results"):
214
+ for result in web_results:
215
+ st.markdown(f"**[{result['title']}]({result['link']})**")
216
+ st.markdown(result['snippet'])
217
+
218
+ st.session_state.messages.append({"role": "assistant", "content": response})
219
+
220
+ # Chat history and download
221
+ with st.sidebar:
222
+ st.subheader("πŸ“œ Chat History")
223
+ history_expander = st.expander("View Chat History")
224
+ with history_expander:
225
+ for message in st.session_state.messages:
226
+ st.text(f"{message['role']}: {message['content'][:50]}...")
227
+
228
+ if st.session_state.messages:
229
+ chat_history_df = pd.DataFrame(st.session_state.messages)
230
+ csv = chat_history_df.to_csv(index=False)
231
+ st.download_button(
232
+ label="πŸ“₯ Download Chat History",
233
+ data=csv,
234
+ file_name="chat_history.csv",
235
+ mime="text/csv",
236
+ )
237
+
238
+ st.sidebar.markdown("---")
239
+ st.sidebar.markdown("Powered by Falcon-180B and Streamlit")
240
+
241
+ if __name__ == "__main__":
242
+ main()
pages/exam_preparation.py ADDED
@@ -0,0 +1,325 @@
1
+ import streamlit as st
2
+ import random
3
+ import time
4
+ from typing import List, Dict
5
+ from langchain_community.chat_models import ChatOpenAI
6
+ from langchain.schema import HumanMessage, SystemMessage
7
+ from langchain_community.document_loaders import PyPDFLoader, TextLoader, UnstructuredWordDocumentLoader
8
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
9
+ from langchain_huggingface import HuggingFaceEmbeddings
10
+ from langchain_community.vectorstores import FAISS
11
+ from langchain.chains import RetrievalQA
12
+ from langchain_community.graphs import NetworkxEntityGraph
13
+ from googleapiclient.discovery import build
14
+ from googleapiclient.errors import HttpError
15
+ import os
+ import tempfile
16
+ from dotenv import load_dotenv
17
+ import requests
18
+ from bs4 import BeautifulSoup
19
+
20
+ # Load environment variables
21
+ load_dotenv()
22
+
23
+ AI71_BASE_URL = "https://api.ai71.ai/v1/"
24
+ AI71_API_KEY = os.getenv('AI71_API_KEY')
25
+ GOOGLE_API_KEY = os.getenv('GOOGLE_API_KEY')
26
+ GOOGLE_CSE_ID = os.getenv('GOOGLE_CSE_ID')
27
+ YOUTUBE_API_KEY = os.getenv('YOUTUBE_API_KEY')
28
+
29
+ # Initialize the Falcon model
30
+ chat = ChatOpenAI(
31
+ model="tiiuae/falcon-180B-chat",
32
+ api_key=AI71_API_KEY,
33
+ base_url=AI71_BASE_URL,
34
+ streaming=True,
35
+ )
36
+
37
+ # Initialize embeddings
38
+ embeddings = HuggingFaceEmbeddings()
39
+
40
+ FIELDS = [
41
+ "Mathematics", "Physics", "Chemistry", "Biology", "Computer Science",
42
+ "History", "Geography", "Literature", "Philosophy", "Psychology",
43
+ "Sociology", "Economics", "Business", "Finance", "Accounting",
44
+ "Law", "Political Science", "Environmental Science", "Astronomy", "Geology",
45
+ "Linguistics", "Anthropology", "Art History", "Music Theory", "Film Studies",
46
+ "Medical Science", "Nursing", "Public Health", "Nutrition", "Physical Education",
47
+ "Engineering", "Architecture", "Urban Planning", "Agriculture", "Veterinary Science",
48
+ "Oceanography", "Meteorology", "Statistics", "Data Science", "Artificial Intelligence",
49
+ "Cybersecurity", "Renewable Energy", "Quantum Physics", "Neuroscience", "Genetics",
50
+ "Biotechnology", "Nanotechnology", "Robotics", "Space Exploration", "Cryptography"
51
+ ]
52
+
53
+ # List of educational resources
54
+ EDUCATIONAL_RESOURCES = [
55
+ "https://www.coursera.org",
56
+ "https://www.khanacademy.org",
57
+ "https://scholar.google.com",
58
+ "https://www.edx.org",
59
+ "https://www.udacity.com",
60
+ "https://www.udemy.com",
61
+ "https://www.futurelearn.com",
62
+ "https://www.lynda.com",
63
+ "https://www.skillshare.com",
64
+ "https://www.codecademy.com",
65
+ "https://www.brilliant.org",
66
+ "https://www.duolingo.com",
67
+ "https://www.ted.com/talks",
68
+ "https://ocw.mit.edu",
69
+ "https://www.open.edu/openlearn",
70
+ "https://www.coursebuffet.com",
71
+ "https://www.academicearth.org",
72
+ "https://www.edutopia.org",
73
+ "https://www.saylor.org",
74
+ "https://www.openculture.com",
75
+ "https://www.gutenberg.org",
76
+ "https://www.archive.org",
77
+ "https://www.wolframalpha.com",
78
+ "https://www.quizlet.com",
79
+ "https://www.mathway.com",
80
+ "https://www.symbolab.com",
81
+ "https://www.lessonplanet.com",
82
+ "https://www.teacherspayteachers.com",
83
+ "https://www.brainpop.com",
84
+ "https://www.ck12.org"
85
+ ]
86
+
87
+ def search_web(query: str, num_results: int = 30, max_retries: int = 3) -> List[Dict[str, str]]:
88
+ user_agents = [
89
+ 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
90
+ 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.0 Safari/605.1.15',
91
+ 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.101 Safari/537.36'
92
+ ]
93
+
94
+ for attempt in range(max_retries):
95
+ try:
96
+ headers = {'User-Agent': random.choice(user_agents)}
97
+ service = build("customsearch", "v1", developerKey=GOOGLE_API_KEY)
98
+ res = service.cse().list(q=query, cx=GOOGLE_CSE_ID, num=num_results).execute()
99
+
100
+ results = []
101
+ if "items" in res:
102
+ for item in res["items"]:
103
+ result = {
104
+ "title": item["title"],
105
+ "link": item["link"],
106
+ "snippet": item.get("snippet", "")
107
+ }
108
+ results.append(result)
109
+
110
+ return results
111
+ except Exception as e:
112
+ print(f"An error occurred: {e}. Attempt {attempt + 1} of {max_retries}")
113
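+ # Exponential backoff between retries (1s, 2s, 4s, ...)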
+ time.sleep(2 ** attempt)
114
+
115
+ print("Max retries reached. No results found.")
116
+ return []
117
+
118
+ def scrape_webpage(url: str) -> str:
119
+ try:
120
+ response = requests.get(url, timeout=10)
121
+ soup = BeautifulSoup(response.content, 'html.parser')
122
+ return soup.get_text()
123
+ except Exception as e:
124
+ print(f"Error scraping {url}: {e}")
125
+ return ""
126
+
127
+ def process_documents(uploaded_files):
128
+ documents = []
129
+ for uploaded_file in uploaded_files:
+ file_extension = os.path.splitext(uploaded_file.name)[1].lower()
+
+ # The loaders expect a file path, so write the uploaded file to a temporary file first
+ with tempfile.NamedTemporaryFile(delete=False, suffix=file_extension) as temp_file:
+ temp_file.write(uploaded_file.getvalue())
+ temp_path = temp_file.name
+
+ if file_extension == '.pdf':
+ loader = PyPDFLoader(temp_path)
+ elif file_extension in ['.txt', '.md']:
+ loader = TextLoader(temp_path)
+ elif file_extension in ['.doc', '.docx']:
+ loader = UnstructuredWordDocumentLoader(temp_path)
+ else:
+ st.warning(f"Unsupported file type: {file_extension}")
+ os.unlink(temp_path)
+ continue
+
+ documents.extend(loader.load())
+ os.unlink(temp_path)
143
+
144
+ text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
145
+ texts = text_splitter.split_documents(documents)
146
+
147
+ vectorstore = FAISS.from_documents(texts, embeddings)
148
+ graph = NetworkxEntityGraph()
149
+ graph.add_documents(texts)
150
+
151
+ retriever = vectorstore.as_retriever(search_kwargs={"k": 5})
152
+
153
+ qa_chain = RetrievalQA.from_chain_type(
154
+ llm=chat,
155
+ chain_type="stuff",
156
+ retriever=retriever,
157
+ return_source_documents=True
158
+ )
159
+
160
+ return qa_chain, graph
161
+
162
+ def generate_questions(topic, difficulty, num_questions, include_answers, qa_chain=None, graph=None):
163
+ system_prompt = f"""You are an expert exam question generator. Generate {num_questions} {difficulty}-level questions about {topic}.
164
+ {"Each question should be followed by its correct answer." if include_answers else "Do not include answers."}
165
+ Format your response as follows:
166
+
167
+ Q1. [Question]
168
+ {"A1. [Answer]" if include_answers else ""}
169
+
170
+ Q2. [Question]
171
+ {"A2. [Answer]" if include_answers else ""}
172
+
173
+ ... and so on.
174
+ """
175
+
176
+ if qa_chain and graph:
177
+ context = graph.get_relevant_documents(topic)
178
+ context_text = "\n".join([doc.page_content for doc in context])
179
+
180
+ result = qa_chain({"query": system_prompt, "context": context_text})
181
+ questions = result['result']
182
+ else:
183
+ messages = [
184
+ SystemMessage(content=system_prompt),
185
+ HumanMessage(content=f"Please generate {num_questions} {difficulty} questions about {topic}.")
186
+ ]
187
+ questions = chat.invoke(messages).content
188
+
189
+ return questions
190
+
191
+ def gather_resources(field: str) -> List[Dict[str, str]]:
192
+ resources = []
193
+ for resource_url in EDUCATIONAL_RESOURCES:
194
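+ # Restrict the query to each curated site and keep only the top result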
+ search_results = search_web(f"site:{resource_url} {field}", num_results=1)
195
+ if search_results:
196
+ result = search_results[0]
197
+ content = scrape_webpage(result['link'])
198
+ resources.append({
199
+ "title": result['title'],
200
+ "link": result['link'],
201
+ "content": content[:500] + "..." if len(content) > 500 else content
202
+ })
203
+
204
+ # YouTube search
205
+ youtube = build('youtube', 'v3', developerKey=YOUTUBE_API_KEY)
206
+ youtube_results = youtube.search().list(q=field, type='video', part='id,snippet', maxResults=5).execute()
207
+ for item in youtube_results.get('items', []):
208
+ video_id = item['id']['videoId']
209
+ resources.append({
210
+ "title": item['snippet']['title'],
211
+ "link": f"https://www.youtube.com/watch?v={video_id}",
212
+ "content": item['snippet']['description'],
213
+ "thumbnail": item['snippet']['thumbnails']['medium']['url']
214
+ })
215
+
216
+ return resources
217
+
218
+ def main():
219
+ st.set_page_config(page_title="Advanced Exam Preparation System", layout="wide")
220
+
221
+ st.sidebar.title("Advanced Exam Prep")
222
+ st.sidebar.markdown("""
223
+ Welcome to our advanced exam preparation system!
224
+ Here you can generate practice questions, explore educational resources,
225
+ and interact with an AI tutor to enhance your learning experience.
226
+ """)
227
+
228
+ # Main area tabs
229
+ tab1, tab2, tab3 = st.tabs(["Question Generator", "Resource Explorer", "Academic Tutor"])
230
+
231
+ with tab1:
232
+ st.header("Question Generator")
233
+ col1, col2 = st.columns(2)
234
+ with col1:
235
+ topic = st.text_input("Enter the exam topic:")
236
+ exam_type = st.selectbox("Select exam type:", ["General", "STEM", "Humanities", "Business", "Custom"])
237
+ with col2:
238
+ difficulty = st.select_slider(
239
+ "Select difficulty level:",
240
+ options=["Super Easy", "Easy", "Beginner", "Intermediate", "Higher Intermediate", "Master", "Advanced"]
241
+ )
242
+ num_questions = st.number_input("Number of questions:", min_value=1, max_value=50, value=5)
243
+ include_answers = st.checkbox("Include answers", value=True)
244
+
245
+ if st.button("Generate Questions", key="generate_questions"):
246
+ if topic:
247
+ with st.spinner("Generating questions..."):
248
+ questions = generate_questions(topic, difficulty, num_questions, include_answers)
249
+ st.success("Questions generated successfully!")
250
+ st.markdown(questions)
251
+ else:
252
+ st.warning("Please enter a topic.")
253
+
254
+ with tab2:
255
+ st.header("Resource Explorer")
256
+ selected_field = st.selectbox("Select a field to explore:", FIELDS)
257
+ if st.button("Explore Resources", key="explore_resources"):
258
+ with st.spinner("Gathering resources..."):
259
+ resources = gather_resources(selected_field)
260
+ st.success(f"Found {len(resources)} resources!")
261
+
262
+ for i, resource in enumerate(resources):
263
+ col1, col2 = st.columns([1, 3])
264
+ with col1:
265
+ if "thumbnail" in resource:
266
+ st.image(resource["thumbnail"], use_column_width=True)
267
+ else:
268
+ st.image("https://via.placeholder.com/150", use_column_width=True)
269
+ with col2:
270
+ st.subheader(f"[{resource['title']}]({resource['link']})")
271
+ st.write(resource['content'])
272
+ st.markdown("---")
273
+
274
+ with tab3:
275
+ st.header("Academic Tutor")
276
+ uploaded_files = st.file_uploader("Upload documents (PDF, TXT, MD, DOC, DOCX)", type=["pdf", "txt", "md", "doc", "docx"], accept_multiple_files=True)
277
+
278
+ if uploaded_files:
279
+ qa_chain, graph = process_documents(uploaded_files)
280
+ st.success("Documents processed successfully!")
281
+ else:
282
+ qa_chain, graph = None, None
283
+
284
+ st.subheader("Chat with AI Tutor")
285
+ if 'chat_history' not in st.session_state:
286
+ st.session_state.chat_history = []
287
+
288
+ chat_container = st.container()
289
+ with chat_container:
290
+ for i, (role, message) in enumerate(st.session_state.chat_history):
291
+ with st.chat_message(role):
292
+ st.write(message)
293
+
294
+ user_input = st.chat_input("Ask a question or type 'search: your query' to perform a web search:")
295
+ if user_input:
296
+ st.session_state.chat_history.append(("user", user_input))
297
+ with st.chat_message("user"):
298
+ st.write(user_input)
299
+
300
+ with st.chat_message("assistant"):
301
+ if user_input.lower().startswith("search:"):
302
+ search_query = user_input[7:].strip()
303
+ search_results = search_web(search_query, num_results=3)
304
+ response = f"Here are some search results for '{search_query}':\n\n"
305
+ for result in search_results:
306
+ response += f"- [{result['title']}]({result['link']})\n {result['snippet']}\n\n"
307
+ else:
308
+ response = chat.invoke([HumanMessage(content=user_input)]).content
309
+ st.write(response)
310
+ st.session_state.chat_history.append(("assistant", response))
311
+
312
+ # Scroll to bottom of chat
313
+ js = f"""
314
+ <script>
315
+ function scroll_to_bottom() {{
316
+ var chatElement = window.parent.document.querySelector('.stChatFloatingInputContainer');
317
+ chatElement.scrollIntoView({{behavior: 'smooth'}});
318
+ }}
319
+ scroll_to_bottom();
320
+ </script>
321
+ """
322
+ st.components.v1.html(js)
323
+
324
+ if __name__ == "__main__":
325
+ main()
pages/interview_preparation.py ADDED
@@ -0,0 +1,560 @@
1
+ import streamlit as st
2
+ import random
3
+ from langchain_community.chat_models import ChatOpenAI
4
+ from langchain.schema import HumanMessage, SystemMessage
5
+ import os
6
+ from dotenv import load_dotenv
7
+ import base64
8
+ import cv2
9
+ import numpy as np
10
+ from PIL import Image
11
+ import io
12
+ import time
13
+ import PyPDF2
14
+ import docx
15
+ import markdown
16
+
17
+ # Load environment variables
18
+ load_dotenv()
19
+
20
+ AI71_BASE_URL = "https://api.ai71.ai/v1/"
21
+ AI71_API_KEY = os.getenv('AI71_API_KEY')
22
+
23
+ # Initialize the Falcon model
24
+ chat = ChatOpenAI(
25
+ model="tiiuae/falcon-180B-chat",
26
+ api_key=AI71_API_KEY,
27
+ base_url=AI71_BASE_URL,
28
+ streaming=True,
29
+ timeout=60,
30
+ )
31
+
32
+ # Expanded list of roles
33
+ roles = [
34
+ "Software Engineer", "Data Scientist", "Product Manager", "UX Designer", "Marketing Manager",
35
+ "Sales Representative", "Human Resources Manager", "Financial Analyst", "Project Manager",
36
+ "Business Analyst", "Content Writer", "Graphic Designer", "Customer Service Representative",
37
+ "Operations Manager", "Research Scientist", "Legal Counsel", "Network Administrator",
38
+ "Quality Assurance Tester", "Supply Chain Manager", "Public Relations Specialist"
39
+ ]
40
+
41
+ def generate_interview_questions(role):
42
+ system_message = f"""You are an experienced interviewer for the role of {role}.
43
+ Generate 5 challenging and relevant interview questions for this position.
44
+ The questions should cover a range of skills and experiences required for the role."""
45
+
46
+ messages = [
47
+ SystemMessage(content=system_message),
48
+ HumanMessage(content="Please provide 5 interview questions for this role.")
49
+ ]
50
+
51
+ response = chat.invoke(messages).content
52
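+ # The model returns one question per line; keep only the non-empty lines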
+ questions = response.split('\n')
53
+ return [q.strip() for q in questions if q.strip()]
54
+
55
+ def get_interview_response(role, question, answer):
56
+ system_message = f"""You are an experienced interviewer for the role of {role}.
57
+ Your task is to evaluate the candidate's response to the following question: '{question}'
58
+
59
+ The candidate's answer was: '{answer}'
60
+
61
+ Please provide:
62
+ 1. A brief evaluation of the answer (2-3 sentences)
63
+ 2. Specific feedback on how to improve (if needed) or praise for a good answer
64
+ 3. A follow-up question based on their response
65
+ 4. A score out of 10 for their answer
66
+
67
+ Format your response as follows:
68
+ Evaluation: [Your evaluation here]
69
+ Feedback: [Your specific feedback or praise here]
70
+ Follow-up: [Your follow-up question here]
71
+ Score: [Score out of 10]
72
+ """
73
+
74
+ messages = [
75
+ SystemMessage(content=system_message),
76
+ HumanMessage(content="Please provide your evaluation, feedback, follow-up question, and score.")
77
+ ]
78
+
79
+ response = chat.invoke(messages).content
80
+ return response
81
+
82
+ def analyze_appearance(image):
83
+ # Convert PIL Image to OpenCV format
84
+ cv_image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
85
+
86
+ # Load pre-trained face detection model
87
+ face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
88
+
89
+ # Convert to grayscale for face detection
90
+ gray = cv2.cvtColor(cv_image, cv2.COLOR_BGR2GRAY)
91
+
92
+ # Detect faces
93
+ faces = face_cascade.detectMultiScale(gray, 1.3, 5)
94
+
95
+ analysis = []
96
+
97
+ if len(faces) == 0:
98
+ analysis.append("No face detected in the image. Please ensure your face is clearly visible.")
99
+ else:
100
+ analysis.append(f"Detected {len(faces)} face(s) in the image.")
101
+
102
+ # Analyze facial positioning
103
+ for (x, y, w, h) in faces:
104
+ face_center = (x + w//2, y + h//2)
105
+ image_center = (cv_image.shape[1]//2, cv_image.shape[0]//2)
106
+
107
+ if abs(face_center[0] - image_center[0]) > cv_image.shape[1]//8:
108
+ analysis.append("Your face is not centered horizontally. Try to position yourself in the middle of the frame.")
109
+
110
+ if abs(face_center[1] - image_center[1]) > cv_image.shape[0]//8:
111
+ analysis.append("Your face is not centered vertically. Adjust your camera or seating position.")
112
+
113
+ if w * h < (cv_image.shape[0] * cv_image.shape[1]) // 16:
114
+ analysis.append("Your face appears too small in the frame. Consider moving closer to the camera.")
115
+ elif w * h > (cv_image.shape[0] * cv_image.shape[1]) // 4:
116
+ analysis.append("Your face appears too large in the frame. Consider moving slightly away from the camera.")
117
+
118
+ # Analyze image brightness
119
+ brightness = np.mean(gray)
120
+ if brightness < 100:
121
+ analysis.append("The image appears too dark. Consider improving your lighting for better visibility.")
122
+ elif brightness > 200:
123
+ analysis.append("The image appears too bright. You might want to reduce harsh lighting or adjust your camera settings.")
124
+
125
+ # Analyze image contrast
126
+ contrast = np.std(gray)
127
+ if contrast < 20:
128
+ analysis.append("The image lacks contrast. This might make it difficult to see details. Consider adjusting your lighting or camera settings.")
129
+
130
+ return "\n".join(analysis)
131
+
132
+ def extract_text_from_file(file):
133
+ file_extension = file.name.split('.')[-1].lower()
134
+
135
+ if file_extension == 'pdf':
136
+ pdf_reader = PyPDF2.PdfReader(file)
137
+ text = ""
138
+ for page in pdf_reader.pages:
139
+ text += page.extract_text()
140
+ elif file_extension == 'docx':
141
+ doc = docx.Document(file)
142
+ text = "\n".join([paragraph.text for paragraph in doc.paragraphs])
143
+ elif file_extension == 'txt':
144
+ text = file.read().decode()
145
+ elif file_extension == 'md':
146
+ md_text = file.read().decode()
147
+ text = markdown.markdown(md_text)
148
+ else:
149
+ raise ValueError(f"Unsupported file format: {file_extension}")
150
+
151
+ return text
152
+
153
+ def analyze_cv(cv_text):
154
+ system_message = """You are an expert CV reviewer with extensive experience in various industries.
155
+ Analyze the given CV and provide:
156
+ 1. An overall assessment of the CV's strengths
157
+ 2. Areas that need improvement
158
+ 3. Specific suggestions for enhancing the CV
159
+ 4. Tips for tailoring the CV to specific job applications
160
+
161
+ Be thorough, constructive, and provide actionable advice."""
162
+
163
+ messages = [
164
+ SystemMessage(content=system_message),
165
+ HumanMessage(content=f"Here's the text of the CV to review:\n\n{cv_text}\n\nPlease provide your analysis and suggestions.")
166
+ ]
167
+
168
+ response = chat.invoke(messages).content
169
+ return response
170
+
171
+ def resize_image(image, max_size=800):
172
+ """Resize image while maintaining aspect ratio"""
173
+ ratio = max_size / max(image.size)
174
+ new_size = tuple([int(x*ratio) for x in image.size])
175
+ return image.resize(new_size, Image.LANCZOS)
176
+
177
+ def get_mock_interview_tips():
178
+ tips = [
179
+ "Research the company and role thoroughly before the interview.",
180
+ "Practice common interview questions with a friend or family member.",
181
+ "Prepare specific examples to illustrate your skills and experiences.",
182
+ "Dress professionally and ensure your background is tidy for video interviews.",
183
+ "Have questions prepared to ask the interviewer about the role and company.",
184
+ "Use the STAR method (Situation, Task, Action, Result) to structure your answers.",
185
+ "Be aware of your body language and maintain good eye contact.",
186
+ "Listen carefully to each question and take a moment to gather your thoughts before answering.",
187
+ "Be honest about your experiences and skills, but focus on your strengths.",
188
+ "Follow up with a thank-you note or email after the interview.",
189
+ ]
190
+ return tips
191
+
192
+ def get_interview_resources():
193
+ resources = [
194
+ {"name": "Glassdoor Interview Questions & Reviews", "url": "https://www.glassdoor.com/Interview/index.htm"},
195
+ {"name": "LinkedIn Interview Preparation", "url": "https://www.linkedin.com/interview-prep/"},
196
+ {"name": "Indeed Career Guide", "url": "https://www.indeed.com/career-advice"},
197
+ {"name": "Coursera - How to Succeed in an Interview", "url": "https://www.coursera.org/learn/interview-preparation"},
198
+ {"name": "Harvard Business Review - Interview Tips", "url": "https://hbr.org/topic/interviewing"},
199
+ ]
200
+ return resources
201
+
202
+ def main():
203
+ st.set_page_config(page_title="S.H.E.R.L.O.C.K. Interview Preparation", page_icon="πŸŽ™οΈ", layout="wide")
204
+
205
+ st.title("πŸŽ™οΈ S.H.E.R.L.O.C.K. Interview Preparation")
206
+ st.markdown("### Streamlined Help for Enhancing Responsive Learning and Optimizing Career Knowledge")
207
+
208
+ # Sidebar for user details and interview settings
209
+ with st.sidebar:
210
+ st.header("Interview Settings")
211
+ name = st.text_input("Your Name")
212
+ role = st.selectbox("Interview Role", roles)
213
+ experience = st.slider("Years of Experience", 0, 20, 5)
214
+
215
+ st.header("Quick Tips")
216
+ if st.button("Get Mock Interview Tips"):
217
+ tips = get_mock_interview_tips()
218
+ for tip in tips:
219
+ st.info(tip)
220
+
221
+ st.header("Useful Resources")
222
+ resources = get_interview_resources()
223
+ for resource in resources:
224
+ st.markdown(f"[{resource['name']}]({resource['url']})")
225
+
226
+ # Appearance Analysis
227
+ st.header("Appearance Analysis")
228
+ uploaded_image = st.file_uploader("Upload your interview outfit image", type=["jpg", "jpeg", "png"])
229
+ if uploaded_image is not None:
230
+ try:
231
+ image = Image.open(uploaded_image)
232
+ image = resize_image(image)
233
+ st.image(image, caption="Your uploaded image", use_column_width=True)
234
+ if st.button("Analyze Appearance"):
235
+ with st.spinner("Analyzing your appearance..."):
236
+ appearance_feedback = analyze_appearance(image)
237
+ st.write(appearance_feedback)
238
+
239
+ st.write("\nGeneral tips for professional appearance in video interviews:")
240
+ tips = [
241
+ "Dress professionally from head to toe, even if only your upper body is visible.",
242
+ "Choose solid colors over busy patterns for a less distracting appearance.",
243
+ "Ensure your background is tidy and professional.",
244
+ "Position your camera at eye level for the most flattering angle.",
245
+ "Use soft, diffused lighting to avoid harsh shadows.",
246
+ "Make eye contact by looking directly into the camera when speaking.",
247
+ ]
248
+ for tip in tips:
249
+ st.write(f"- {tip}")
250
+ except Exception as e:
251
+ st.error(f"An error occurred while processing the image: {str(e)}")
252
+ st.info("Please make sure you've uploaded a valid image file.")
253
+
254
+ # CV Analysis
255
+ st.header("CV Analysis")
256
+ uploaded_cv = st.file_uploader("Upload your CV", type=["pdf", "docx", "txt", "md"])
257
+ if uploaded_cv is not None:
258
+ try:
259
+ cv_text = extract_text_from_file(uploaded_cv)
260
+ if st.button("Analyze CV"):
261
+ with st.spinner("Analyzing your CV..."):
262
+ cv_feedback = analyze_cv(cv_text)
263
+ st.write(cv_feedback)
264
+ except Exception as e:
265
+ st.error(f"An error occurred while processing the CV: {str(e)}")
266
+
267
+ # Initialize session state variables
268
+ if 'interview_started' not in st.session_state:
269
+ st.session_state.interview_started = False
270
+ if 'current_question' not in st.session_state:
271
+ st.session_state.current_question = 0
272
+ if 'questions' not in st.session_state:
273
+ st.session_state.questions = []
274
+ if 'answers' not in st.session_state:
275
+ st.session_state.answers = []
276
+ if 'feedback' not in st.session_state:
277
+ st.session_state.feedback = []
278
+ if 'scores' not in st.session_state:
279
+ st.session_state.scores = []
280
+ if 'chat_history' not in st.session_state:
281
+ st.session_state.chat_history = []
282
+
283
+ # Start Interview button
284
+ if not st.session_state.interview_started:
285
+ if st.button("Start Mock Interview"):
286
+ if name and role:
287
+ st.session_state.interview_started = True
288
+ with st.spinner("Generating interview questions..."):
289
+ st.session_state.questions = generate_interview_questions(role)
290
+ st.rerun()
291
+ else:
292
+ st.warning("Please enter your name and select a role before starting the interview.")
293
+
294
+ # Interview in progress
295
+ if st.session_state.interview_started:
296
+ st.header("Mock Interview")
297
+ if st.session_state.current_question < len(st.session_state.questions):
298
+ st.subheader(f"Question {st.session_state.current_question + 1}")
299
+ st.write(st.session_state.questions[st.session_state.current_question])
300
+
301
+ # Display chat history
302
+ for i, (q, a, f) in enumerate(st.session_state.chat_history):
303
+ with st.expander(f"Question {i+1}"):
304
+ st.write(f"Q: {q}")
305
+ st.write(f"Your Answer: {a}")
306
+ st.write(f"Feedback: {f}")
307
+
308
+ answer = st.text_area("Your Answer", key=f"answer_{st.session_state.current_question}")
309
+
310
+ col1, col2 = st.columns(2)
311
+ with col1:
312
+ if st.button("Submit Answer"):
313
+ if answer:
314
+ with st.spinner("Evaluating your answer..."):
315
+ response = get_interview_response(role, st.session_state.questions[st.session_state.current_question], answer)
316
+ st.session_state.answers.append(answer)
317
+ st.session_state.feedback.append(response)
318
+
319
+ # Extract score from response
320
+ score_lines = [line for line in response.split('\n') if line.startswith('Score:')]
321
+ if score_lines:
322
+ score_str = score_lines[0].split(':')[1].strip()
323
+ try:
324
+ score = int(score_str)
325
+ except ValueError:
326
+ # If the score is a fraction like "6/10", extract the numerator
327
+ score = int(score_str.split('/')[0])
328
+ else:
329
+ # If no score is found, use a default value
330
+ score = 5 # or any other default value you prefer
331
+ st.warning("No score was provided in the response. Using a default score of 5.")
332
+
333
+ st.session_state.scores.append(score)
334
+
335
+ # Update chat history
336
+ st.session_state.chat_history.append((
337
+ st.session_state.questions[st.session_state.current_question],
338
+ answer,
339
+ response
340
+ ))
341
+
342
+ st.session_state.current_question += 1
343
+ if st.session_state.current_question < len(st.session_state.questions):
344
+ st.rerun()
345
+ else:
346
+ st.warning("Please provide an answer before submitting.")
347
+ with col2:
348
+ if st.button("Skip Question"):
349
+ st.session_state.current_question += 1
350
+ if st.session_state.current_question < len(st.session_state.questions):
351
+ st.rerun()
352
+
353
+ else:
354
+ st.success("Interview Completed!")
355
+ total_score = sum(st.session_state.scores)
356
+ average_score = total_score / len(st.session_state.scores) if st.session_state.scores else 0.0
357
+
358
+ st.header("Interview Summary")
359
+ st.subheader(f"Overall Score: {average_score:.2f}/10")
360
+
361
+ for i, (q, a, f) in enumerate(st.session_state.chat_history):
362
+ with st.expander(f"Question {i+1}"):
363
+ st.write(f"Q: {q}")
364
+ st.write(f"Your Answer: {a}")
365
+ st.write(f"Feedback: {f}")
366
+
367
+ # Generate overall feedback
368
+ overall_feedback_prompt = f"""
369
+ You are an experienced career coach. Based on the candidate's performance in the interview for the role of {role},
370
+ with {experience} years of experience, please provide:
371
+ 1. A summary of their strengths (2-3 points)
372
+ 2. Areas for improvement (2-3 points)
373
+ 3. Advice for future interviews (2-3 tips)
374
+ 4. Personalized tips for improving their professional appearance and body language
375
+ 5. Strategies for managing interview anxiety
376
+
377
+ Their overall score was {average_score:.2f}/10.
378
+
379
+ Format your response as follows:
380
+ Strengths:
381
+ - [Strength 1]
382
+ - [Strength 2]
383
+ - [Strength 3]
384
+
385
+ Areas for Improvement:
386
+ - [Area 1]
387
+ - [Area 2]
388
+ - [Area 3]
389
+
390
+ Tips for Future Interviews:
391
+ - [Tip 1]
392
+ - [Tip 2]
393
+ - [Tip 3]
394
+
395
+ Professional Appearance and Body Language:
396
+ - [Tip 1]
397
+ - [Tip 2]
398
+ - [Tip 3]
399
+
400
+ Managing Interview Anxiety:
401
+ - [Strategy 1]
402
+ - [Strategy 2]
403
+ - [Strategy 3]
404
+ """
405
+
406
+ messages = [
407
+ SystemMessage(content=overall_feedback_prompt),
408
+ HumanMessage(content="Please provide the overall feedback for the interview.")
409
+ ]
410
+
411
+ with st.spinner("Generating overall feedback..."):
412
+ overall_feedback = chat.invoke(messages).content
413
+
414
+ st.subheader("Overall Feedback")
415
+ st.write(overall_feedback)
416
+
417
+ if st.button("Start New Interview"):
418
+ st.session_state.interview_started = False
419
+ st.session_state.current_question = 0
420
+ st.session_state.questions = []
421
+ st.session_state.answers = []
422
+ st.session_state.feedback = []
423
+ st.session_state.scores = []
424
+ st.session_state.chat_history = []
425
+ st.rerun()
426
+
427
+ # Footer
428
+ st.markdown("---")
429
+ st.markdown("Powered by Falcon-180B and Streamlit")
430
+
431
+ # Interview Preparation Checklist
432
+ st.sidebar.header("Interview Preparation Checklist")
433
+ checklist_items = [
434
+ "Research the company",
435
+ "Review the job description",
436
+ "Prepare your elevator pitch",
437
+ "Practice common interview questions",
438
+ "Prepare questions for the interviewer",
439
+ "Choose appropriate attire",
440
+ "Test your technology (for virtual interviews)",
441
+ "Gather necessary documents (resume, portfolio, etc.)",
442
+ "Plan your route or set up your interview space",
443
+ "Get a good night's sleep"
444
+ ]
445
+ for item in checklist_items:
446
+ st.sidebar.checkbox(item)
447
+
448
+ # Interview Timer
449
+ if st.session_state.interview_started:
450
+ st.sidebar.header("Interview Timer")
451
+ if 'start_time' not in st.session_state:
452
+ st.session_state.start_time = time.time()
453
+
454
+ elapsed_time = int(time.time() - st.session_state.start_time)
455
+ minutes, seconds = divmod(elapsed_time, 60)
456
+ st.sidebar.write(f"Elapsed Time: {minutes:02d}:{seconds:02d}")
457
+
458
+ # Confidence Boost
459
+ st.sidebar.header("Confidence Boost")
460
+ if st.sidebar.button("Get a Confidence Boost"):
461
+ confidence_boosters = [
462
+ "You've got this! Your preparation will pay off.",
463
+ "Remember, the interviewer wants you to succeed too.",
464
+ "Take deep breaths and stay calm. You're well-prepared.",
465
+ "Your unique experiences make you a valuable candidate.",
466
+ "Every interview is a learning opportunity. Embrace it!",
467
+ "Believe in yourself. Your skills and knowledge are valuable.",
468
+ "Stay positive and confident. Your attitude shines through.",
469
+ "You've overcome challenges before. This is just another opportunity to shine.",
470
+ "Focus on your strengths and what you can bring to the role.",
471
+ "Remember your past successes. You're capable of greatness!"
472
+ ]
473
+ st.sidebar.success(random.choice(confidence_boosters))
474
+
475
+ # Interview Do's and Don'ts
476
+ st.sidebar.header("Interview Do's and Don'ts")
477
+ dos_and_donts = {
478
+ "Do": [
479
+ "Arrive early or log in on time",
480
+ "Maintain good eye contact",
481
+ "Listen actively and ask thoughtful questions",
482
+ "Show enthusiasm for the role and company",
483
+ "Provide specific examples to support your answers"
484
+ ],
485
+ "Don't": [
486
+ "Speak negatively about past employers",
487
+ "Interrupt the interviewer",
488
+ "Use filler words excessively (um, like, you know)",
489
+ "Check your phone or watch frequently",
490
+ "Provide vague or generic answers"
491
+ ]
492
+ }
493
+ dos_tab, donts_tab = st.sidebar.tabs(["Do's", "Don'ts"])
494
+ with dos_tab:
495
+ for do_item in dos_and_donts["Do"]:
496
+ st.write(f"βœ… {do_item}")
497
+ with donts_tab:
498
+ for dont_item in dos_and_donts["Don't"]:
499
+ st.write(f"❌ {dont_item}")
500
+
501
+ # Personal Notes
502
+ st.sidebar.header("Personal Notes")
503
+ personal_notes = st.sidebar.text_area("Jot down your thoughts or reminders here:")
504
+
505
+ # Initialize session state for saved notes if it doesn't exist
506
+ if 'saved_notes' not in st.session_state:
507
+ st.session_state.saved_notes = []
508
+
509
+ # Save Notes button
510
+ if st.sidebar.button("Save Notes"):
511
+ if personal_notes.strip(): # Check if the note is not empty
512
+ st.session_state.saved_notes.append(personal_notes)
513
+ st.sidebar.success("Note saved successfully!")
514
+ # Clear the text area after saving
515
+ personal_notes = ""
516
+ else:
517
+ st.sidebar.warning("Please enter a note before saving.")
518
+
519
+ # Display saved notes as checkboxes
520
+ st.sidebar.subheader("Saved Notes")
521
+ for i, note in enumerate(st.session_state.saved_notes):
522
+ col1, col2 = st.sidebar.columns([3, 1])
523
+ with col1:
524
+ st.checkbox(note, key=f"note_{i}")
525
+ with col2:
526
+ if st.button("Delete", key=f"delete_{i}"):
527
+ del st.session_state.saved_notes[i]
528
+ st.rerun()
529
+
530
+ # Follow-up Email Template
531
+ if st.session_state.interview_started and st.session_state.current_question >= len(st.session_state.questions):
532
+ st.header("Follow-up Email Template")
533
+ interviewer_name = st.text_input("Interviewer's Name")
534
+ company_name = st.text_input("Company Name")
535
+ specific_topic = st.text_input("Specific topic discussed during the interview")
536
+
537
+ if interviewer_name and company_name and specific_topic:
538
+ email_template = f"""
539
+ Subject: Thank you for the interview - {role} position
540
+
541
+ Dear {interviewer_name},
542
+
543
+ I hope this email finds you well. I wanted to express my sincere gratitude for taking the time to interview me for the {role} position at {company_name}. I thoroughly enjoyed our conversation and learning more about the role and the company.
544
+
545
+ Our discussion about {specific_topic} was particularly interesting, and it reinforced my enthusiasm for the position. I am excited about the possibility of bringing my skills and experience to your team and contributing to {company_name}'s success.
546
+
547
+ If you need any additional information or have any further questions, please don't hesitate to contact me. I look forward to hearing about the next steps in the process.
548
+
549
+ Thank you again for your time and consideration.
550
+
551
+ Best regards,
552
+ {name}
553
+ """
554
+ st.text_area("Follow-up Email Template", email_template, height=300)
555
+ if st.button("Copy to Clipboard"):
556
+ st.write("Email template copied to clipboard!")
557
+ # Note: In a web app, you'd use JavaScript to copy to clipboard
558
+
559
+ if __name__ == "__main__":
560
+ main()
pages/lecture_finder.py ADDED
@@ -0,0 +1,123 @@
1
+ import streamlit as st
2
+ import os
+ import googleapiclient.discovery
+ import googleapiclient.errors
3
+ from dotenv import load_dotenv
4
+ from datetime import timedelta
5
+
6
+ # Load environment variables
7
+ load_dotenv()
8
+
9
+ # Set up YouTube API client
10
+ api_service_name = "youtube"
11
+ api_version = "v3"
12
+ DEVELOPER_KEY = os.getenv('DEVELOPER_KEY')
13
+ youtube = googleapiclient.discovery.build(api_service_name, api_version, developerKey=DEVELOPER_KEY)
14
+
15
+ def search_youtube(query, max_results=50):
16
+ try:
17
+ request = youtube.search().list(
18
+ q=query,
19
+ type="video",
20
+ part="id,snippet",
21
+ maxResults=max_results,
22
+ fields="items(id(videoId),snippet(title,description,thumbnails))"
23
+ )
24
+ response = request.execute()
25
+ return response.get('items', [])
26
+ except googleapiclient.errors.HttpError as e:
27
+ st.error(f"An error occurred: {e}")
28
+ return []
29
+
30
+ def get_video_details(video_id):
31
+ try:
32
+ request = youtube.videos().list(
33
+ part="contentDetails,statistics",
34
+ id=video_id,
35
+ fields="items(contentDetails(duration),statistics(viewCount))"
36
+ )
37
+ response = request.execute()
38
+ return response['items'][0] if response['items'] else None
39
+ except googleapiclient.errors.HttpError as e:
40
+ st.error(f"An error occurred while fetching video details: {e}")
41
+ return None
42
+
43
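+ # Convert an ISO 8601 duration from the YouTube API (e.g. "PT1H2M3S") into HH:MM:SS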
+ def format_duration(duration):
44
+ duration = duration.replace('PT', '')
45
+ hours = 0
46
+ minutes = 0
47
+ seconds = 0
48
+ if 'H' in duration:
49
+ hours, duration = duration.split('H')
50
+ hours = int(hours)
51
+ if 'M' in duration:
52
+ minutes, duration = duration.split('M')
53
+ minutes = int(minutes)
54
+ if 'S' in duration:
55
+ seconds = int(duration.replace('S', ''))
56
+ return f"{hours:02d}:{minutes:02d}:{seconds:02d}"
57
+
58
+ def parse_duration(duration_str):
59
+ parts = duration_str.split(':')
60
+ if len(parts) == 3:
61
+ return timedelta(hours=int(parts[0]), minutes=int(parts[1]), seconds=int(parts[2]))
62
+ elif len(parts) == 2:
63
+ return timedelta(minutes=int(parts[0]), seconds=int(parts[1]))
64
+ else:
65
+ return timedelta(seconds=int(parts[0]))
66
+
67
+ def main():
68
+ st.set_page_config(page_title="S.H.E.R.L.O.C.K. Learning Assistant", page_icon="πŸ•΅οΈ", layout="wide")
69
+ st.sidebar.title("S.H.E.R.L.O.C.K.")
70
+ st.sidebar.markdown("""
71
+ **S**ystematic **H**olistic **E**ducational **R**esource for **L**earning and **O**ptimizing **C**ognitive **K**nowledge
72
+
73
+ Enhance your cognitive abilities, memory techniques, and subject-specific knowledge with AI-powered personalized learning.
74
+ """)
75
+
76
+ query = st.sidebar.text_input("What would you like to learn about?", "")
77
+
78
+ min_duration = st.sidebar.selectbox(
79
+ "Minimum video duration",
80
+ ["Any", "5:00", "10:00", "15:00", "30:00", "45:00", "1:00:00"],
81
+ index=0
82
+ )
83
+
84
+ search_button = st.sidebar.button("Search for Learning Resources")
85
+
86
+ st.title("Learning Resources")
87
+
88
+ if search_button and query:
89
+ with st.spinner("Searching for the best learning resources..."):
90
+ results = search_youtube(query)
91
+
92
+ if results:
93
+ filtered_results = []
94
+ for item in results:
95
+ video_id = item['id']['videoId']
96
+ video_details = get_video_details(video_id)
97
+
98
+ if video_details:
99
+ duration = video_details['contentDetails']['duration']
100
+ formatted_duration = format_duration(duration)
101
+ views = int(video_details['statistics']['viewCount'])
102
+
103
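+ # Keep only videos that meet the selected minimum duration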
+ if min_duration == "Any" or parse_duration(formatted_duration) >= parse_duration(min_duration):
104
+ filtered_results.append((item, formatted_duration, views))
105
+
106
+ if filtered_results:
107
+ for item, duration, views in filtered_results:
108
+ col1, col2 = st.columns([1, 3])
109
+ with col1:
110
+ st.image(item['snippet']['thumbnails']['medium']['url'], use_column_width=True)
111
+ with col2:
112
+ st.markdown(f"### [{item['snippet']['title']}](https://www.youtube.com/watch?v={item['id']['videoId']})")
113
+ st.markdown(f"**Duration:** {duration} | **Views:** {views:,}")
114
+ st.markdown(item['snippet']['description'])
115
+
116
+ st.markdown("---")
117
+ else:
118
+ st.warning("No results found matching your duration criteria. Try adjusting the minimum duration or search query.")
119
+ else:
120
+ st.warning("No results found. Please try a different search query.")
121
+
122
+ if __name__ == "__main__":
123
+ main()
pages/mind_palace.py ADDED
@@ -0,0 +1,341 @@
1
+ import streamlit as st
2
+ import random
3
+ from langchain_community.chat_models import ChatOpenAI
4
+ from langchain.schema import HumanMessage, SystemMessage
5
+ from langchain.document_loaders import TextLoader, UnstructuredFileLoader
6
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
7
+ from langchain.embeddings import HuggingFaceEmbeddings
8
+ from langchain.vectorstores import FAISS
9
+ import os
10
+ from dotenv import load_dotenv
11
+ import json
12
+ from tenacity import retry, stop_after_attempt, wait_fixed
13
+ from streamlit_chat import message
14
+ from gtts import gTTS
15
+ import io
16
+ from PyPDF2 import PdfReader
17
+ import docx2txt
18
+ import logging
19
+ import tempfile
20
+
21
+ logging.basicConfig(level=logging.INFO)
22
+ logger = logging.getLogger(__name__)
23
+
24
+ # Load environment variables
25
+ load_dotenv()
26
+
27
+ AI71_BASE_URL = "https://api.ai71.ai/v1/"
28
+ AI71_API_KEY = os.getenv('AI71_API_KEY')
29
+
30
+ # Initialize the models
31
+ chat = ChatOpenAI(
32
+ model="tiiuae/falcon-180B-chat",
33
+ api_key=AI71_API_KEY,
34
+ base_url=AI71_BASE_URL,
35
+ streaming=True,
36
+ )
37
+
38
+ # Use SentenceTransformers for embeddings
39
+ embeddings = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")
40
+
41
+ def process_document(file):
42
+ content = ""
43
+ file_extension = file.name.split('.')[-1].lower()
44
+
45
+ if file_extension == 'txt':
46
+ content = file.getvalue().decode('utf-8')
47
+ elif file_extension == 'pdf':
48
+ try:
49
+ pdf_reader = PdfReader(io.BytesIO(file.getvalue()))
50
+ for page in pdf_reader.pages:
51
+ content += page.extract_text()
52
+ except Exception as e:
53
+ st.error(f"Error processing PDF: {str(e)}")
54
+ return None, None
55
+ elif file_extension == 'docx':
56
+ content = docx2txt.process(io.BytesIO(file.getvalue()))
57
+ else:
58
+ st.error(f"Unsupported file type: {file_extension}")
59
+ return None, None
60
+
61
+ if not content.strip():
62
+ st.warning("The uploaded file appears to be empty or unreadable. Please check the file and try again.")
63
+ return None, None
64
+
65
+ text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
66
+ chunks = text_splitter.split_text(content)
67
+
68
+ if not chunks:
69
+ st.warning("Unable to extract meaningful content from the file. Please try a different file.")
70
+ return None, None
71
+
72
+ vectorstore = FAISS.from_texts(chunks, embeddings)
73
+
74
+ return vectorstore, content
75
+
76
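+ # Retry up to 3 times, 2 seconds apart, e.g. when the model returns malformed JSON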
+ @retry(stop=stop_after_attempt(3), wait=wait_fixed(2))
77
+ def generate_mind_palace(topic, learning_style, user_preferences, content=None):
78
+ system_message = f"""
79
+ You are an expert in creating memorable and personalized mind palaces to aid in learning and retention.
80
+ The user wants to learn about '{topic}' and their preferred learning style is '{learning_style}'.
81
+ Their personal preferences are: {user_preferences}
82
+ Create a vivid and easy-to-remember mind palace description that incorporates the topic, caters to the user's learning style, and aligns with their preferences.
83
+ The mind palace should have 5-7 interconnected rooms or areas, each representing a key aspect of the topic.
84
+ For each room, provide:
85
+ 1. A catchy and memorable name related to the topic
86
+ 2. A vivid description that incorporates the user's preferences and makes use of multiple senses
87
+ 3. 3-5 key elements or objects in the room that represent important information
88
+ 4. How these elements relate to the topic
89
+ 5. A simple and effective memory technique or association specific to the user's learning style
90
+
91
+ Ensure that the mind palace is coherent, with a logical flow between rooms. Use vivid imagery, familiar concepts, and emotional connections to make it more memorable.
92
+
93
+ Format your response as a JSON object with the following structure:
94
+ {{
95
+ "palace_name": "Catchy Name of the Mind Palace",
96
+ "rooms": [
97
+ {{
98
+ "name": "Memorable Room Name",
99
+ "description": "Vivid description of the room",
100
+ "elements": [
101
+ {{
102
+ "name": "Striking Element Name",
103
+ "description": "How this element relates to the topic",
104
+ "memory_technique": "A simple and effective memory technique or association"
105
+ }}
106
+ ]
107
+ }}
108
+ ]
109
+ }}
110
+
111
+ Ensure that your response is a valid JSON object. Do not include any text before or after the JSON object.
112
+ """
113
+
114
+ messages = [
115
+ SystemMessage(content=system_message),
116
+ HumanMessage(content=f"Create a memorable mind palace for the topic: {topic}")
117
+ ]
118
+
119
+ if content:
120
+ messages.append(HumanMessage(content=f"Use this additional context to enhance the mind palace, focusing on the most important and memorable aspects: {content[:2000]}"))
121
+
122
+ try:
123
+ response = chat.invoke(messages)
124
+ json_response = json.loads(response.content)
125
+ return json_response
126
+ except json.JSONDecodeError as e:
127
+ st.error(f"Error decoding JSON response: {str(e)}")
128
+ st.error("Raw response content:")
129
+ st.error(response.content)
130
+ raise
131
+
132
+ def generate_audio_description(mind_palace_data):
133
+ description = f"Welcome to your personalized and memorable mind palace: {mind_palace_data['palace_name']}. Let's take a journey through your palace, using vivid imagery and your preferred learning style to make it unforgettable. "
134
+ for room in mind_palace_data['rooms']:
135
+ description += f"We're entering the {room['name']}. {room['description']} "
136
+ for element in room['elements']:
137
+ description += f"Focus on the {element['name']}. {element['description']} To remember this, use this simple technique: {element['memory_technique']} Take a moment to really visualize and feel this connection. "
138
+ description += "Now, let's move to the next room, carrying these vivid images with us. "
139
+ description += "We've completed our tour of your mind palace. Take a deep breath and recall the journey we've just taken, visualizing each room and its striking elements."
140
+
141
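+ # gTTS synthesizes the narration as MP3 audio, buffered in memory rather than written to disk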
+ tts = gTTS(text=description, lang='en', slow=False)
142
+ fp = io.BytesIO()
143
+ tts.write_to_fp(fp)
144
+ fp.seek(0)
145
+
146
+ return fp
147
+
148
+ def main():
149
+ st.set_page_config(page_title="S.H.E.R.L.O.C.K. Memorable Mind Palace Generator", layout="wide")
150
+
151
+ # Custom CSS for dark theme
152
+ st.markdown("""
153
+ <style>
154
+ .main {
155
+ background-color: #1E1E1E;
156
+ color: #FFFFFF;
157
+ }
158
+ .stButton>button {
159
+ background-color: #4CAF50;
160
+ color: white;
161
+ font-weight: bold;
162
+ }
163
+ .stTextInput>div>div>input, .stTextArea textarea {
164
+ background-color: #2E2E2E;
165
+ color: #FFFFFF;
166
+ }
167
+ .room-expander {
168
+ background-color: #2E2E2E;
169
+ border-radius: 5px;
170
+ padding: 10px;
171
+ margin-bottom: 10px;
172
+ }
173
+ .stSelectbox>div>div>select {
174
+ background-color: #2E2E2E;
175
+ color: #FFFFFF;
176
+ }
177
+ </style>
178
+ """, unsafe_allow_html=True)
179
+
180
+ # Sidebar
181
+ with st.sidebar:
182
+ st.title("S.H.E.R.L.O.C.K.")
183
+ st.subheader("Memorable Mind Palace Generator")
184
+
185
+ st.markdown("---")
186
+ st.markdown("How to use your Memorable Mind Palace:")
187
+ st.markdown("""
188
+ 1. Choose to enter a topic or upload a document.
189
+ 2. Select your learning style and enter your preferences.
190
+ 3. Generate your personalized, easy-to-remember mind palace.
191
+ 4. Listen to the vivid audio description and imagine each room.
192
+ 5. Explore the detailed text description, focusing on the striking elements.
193
+ 6. Use the chat to reinforce your understanding of the mind palace.
194
+ 7. Practice recalling information by mentally walking through your palace, using the memory techniques provided.
195
+ """)
196
+
197
+ st.markdown("---")
198
+ st.markdown("Powered by Falcon-180B and SentenceTransformers")
199
+
200
+ # Main content
201
+ st.title("S.H.E.R.L.O.C.K. Memorable Mind Palace Generator")
202
+
203
+ st.write("""
204
+ Welcome to the Memorable Mind Palace Generator! This tool will help you create a vivid and easy-to-remember
205
+ mind palace to enhance your learning and memory retention. Choose to enter a topic or upload a document,
206
+ select your preferred learning style, and enter your personal preferences. We'll generate a unique,
207
+ unforgettable mind palace tailored just for you.
208
+ """)
209
+
210
+ input_method = st.radio("Choose your input method:", ["Enter a topic", "Upload a document"])
211
+
212
+ if input_method == "Enter a topic":
213
+ topic = st.text_input("Enter the topic you want to learn:")
214
+ uploaded_file = None
215
+ else:
216
+ topic = None
217
+ uploaded_file = st.file_uploader("Upload a document to memorize", type=['txt', 'md', 'pdf', 'docx'])
218
+
219
+ learning_style = st.selectbox("Choose your preferred learning style:",
220
+ ["Visual", "Auditory", "Kinesthetic", "Reading/Writing"])
221
+
222
+ st.write("""
223
+ Learning Styles:
224
+ - Visual: You learn best through images, diagrams, and spatial understanding. We'll create vivid mental pictures.
225
+ - Auditory: You prefer learning through listening and speaking. We'll focus on memorable sounds and verbal associations.
226
+ - Kinesthetic: You learn by doing, moving, and touching. We'll incorporate imaginary physical sensations and movements.
227
+ - Reading/Writing: You learn best through words. We'll use powerful written descriptions and word associations.
228
+ """)
229
+
230
+ user_preferences = st.text_area("Enter your personal preferences (e.g., favorite places, hobbies, movies, or anything that resonates with you):")
231
+
232
+ if st.button("Generate Memorable Mind Palace"):
233
+ with st.spinner("Crafting your unforgettable mind palace..."):
234
+ content = None
235
+ if uploaded_file is not None:
236
+ vectorstore, content = process_document(uploaded_file)
237
+ if vectorstore is None:
238
+ st.error("Failed to process the uploaded document. Please try again with a different file.")
239
+ return
240
+ topic = "Document Content"
241
+ elif topic is None or topic.strip() == "":
242
+ st.error("Please enter a topic or upload a document.")
243
+ return
244
+
245
+ try:
246
+ mind_palace_data = generate_mind_palace(topic, learning_style, user_preferences, content)
247
+ if mind_palace_data is None:
248
+ st.error("Failed to generate the mind palace. Please try again.")
249
+ return
250
+
251
+ st.session_state.mind_palace = mind_palace_data
252
+ st.session_state.chat_history = []
253
+
254
+ # Generate audio description with selected voice
255
+ with st.spinner("Creating a vivid audio guide for your mind palace..."):
256
+ audio_fp = generate_audio_description(mind_palace_data)
257
+ st.session_state.mind_palace_audio = audio_fp
258
+ except Exception as e:
259
+ logger.error(f"An error occurred while generating the mind palace: {str(e)}")
260
+ st.error(f"An error occurred while generating the mind palace. Please try again.")
261
+ return
262
+
263
+ if 'mind_palace' in st.session_state:
264
+ mind_palace_data = st.session_state.mind_palace
265
+
266
+ st.subheader(f"Your Memorable Mind Palace: {mind_palace_data['palace_name']}")
267
+
268
+ # Audio player
269
+ if 'mind_palace_audio' in st.session_state:
270
+ try:
271
+ st.audio(st.session_state.mind_palace_audio, format='audio/mp3')
272
+ st.write("πŸ‘† Listen to the vivid audio guide and imagine your mind palace. Close your eyes and immerse yourself in this mental journey.")
273
+ except Exception as e:
274
+ logger.error(f"Error playing audio: {str(e)}")
275
+ st.warning("There was an issue playing the audio. You can still explore the text description of your mind palace.")
276
+
277
+ # Text description
278
+ for room in mind_palace_data['rooms']:
279
+ with st.expander(f"Room: {room['name']}", expanded=True):
280
+ st.markdown(f"**Description:** {room['description']}")
281
+ st.markdown("**Key Elements:**")
282
+ for element in room['elements']:
283
+ st.markdown(f"- **{element['name']}:** {element['description']}")
284
+ st.markdown(f" *Memory Technique:* {element['memory_technique']}")
285
+
286
+ st.success("Your memorable mind palace has been generated successfully! Take some time to walk through it mentally, focusing on the vivid details and connections.")
287
+
288
+ # Chat interface
289
+ st.subheader("Reinforce Your Mind Palace")
290
+
291
+ # Initialize input key if not present
292
+ if 'input_key' not in st.session_state:
293
+ st.session_state.input_key = 0
294
+
295
+ # Display chat history
296
+ if 'chat_history' not in st.session_state:
297
+ st.session_state.chat_history = []
298
+
299
+ for i, (sender, message_text) in enumerate(st.session_state.chat_history):
300
+ if sender == "user":
301
+ message(message_text, is_user=True, key=f"{i}_user")
302
+ else:
303
+ message(message_text, key=f"{i}_assistant")
304
+
305
+ # User input text box with dynamic key
306
+ user_input = st.text_input("Ask a question or request a memory reinforcement exercise:", key=f"user_input_{st.session_state.input_key}")
307
+
308
+ # Ask button below the text input
309
+ ask_button = st.button("Ask")
310
+
311
+ if ask_button and user_input:
312
+ with st.spinner("Generating response to enhance your memory..."):
313
+ # Prepare context for the AI
314
+ context = f"Mind Palace Data: {json.dumps(mind_palace_data)}\n\n"
315
+ if 'uploaded_content' in st.session_state:
316
+ context += f"Uploaded Document Content: {st.session_state.uploaded_content}\n\n"
317
+
318
+ system_message = f"""
319
+ You are an AI assistant helping the user understand and remember their personalized mind palace.
320
+ Use the following context to provide responses that reinforce the vivid imagery and memory techniques used in the mind palace.
321
+ If asked about specific content from the uploaded document, refer to it in your response.
322
+
323
+ {context}
324
+ """
325
+
326
+ response = chat.invoke([
327
+ SystemMessage(content=system_message),
328
+ HumanMessage(content=user_input)
329
+ ])
330
+
331
+ st.session_state.chat_history.append(("user", user_input))
332
+ st.session_state.chat_history.append(("assistant", response.content))
333
+
334
+ # Increment the input key to reset the input field
335
+ st.session_state.input_key += 1
336
+
337
+ # Force a rerun to update the chat history display and reset the input
338
+ st.experimental_rerun()
339
+
340
+ if __name__ == "__main__":
341
+ main()
pages/mnemonics_generation.py ADDED
@@ -0,0 +1,265 @@
1
+ import streamlit as st
2
+ import random
3
+ from langchain_community.chat_models import ChatOpenAI
4
+ from langchain.schema import HumanMessage, SystemMessage
5
+ from langchain_community.document_loaders import PyPDFLoader, TextLoader, UnstructuredMarkdownLoader, UnstructuredWordDocumentLoader
6
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
7
+ from langchain_huggingface import HuggingFaceEmbeddings
8
+ from langchain_community.vectorstores import FAISS
9
+ from langchain.chains import RetrievalQA
10
+ import os
11
+ from dotenv import load_dotenv
12
+ import tempfile
13
+ from PIL import Image
14
+ import io
15
+
16
+ # Load environment variables
17
+ load_dotenv()
18
+
19
+ AI71_BASE_URL = "https://api.ai71.ai/v1/"
20
+ AI71_API_KEY = os.getenv('AI71_API_KEY')
21
+
22
+ # Initialize the Falcon model
23
+ chat = ChatOpenAI(
24
+ model="tiiuae/falcon-180B-chat",
25
+ api_key=AI71_API_KEY,
26
+ base_url=AI71_BASE_URL,
27
+ streaming=True,
28
+ )
29
+
30
+ # Initialize embeddings
31
+ embeddings = HuggingFaceEmbeddings()
32
+
33
+ def process_documents(uploaded_files):
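+ # For each uploaded file: pick a loader by extension, load it, split the text into
+ # 1000-character chunks (200 overlap), index them in a FAISS vector store, and return
+ # a RetrievalQA chain that answers questions over the top-5 retrieved chunks.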
34
+ documents = []
35
+ for uploaded_file in uploaded_files:
36
+ file_extension = os.path.splitext(uploaded_file.name)[1].lower()
37
+ with tempfile.NamedTemporaryFile(delete=False, suffix=file_extension) as temp_file:
38
+ temp_file.write(uploaded_file.getvalue())
39
+ temp_file_path = temp_file.name
40
+
41
+ if file_extension == '.pdf':
42
+ loader = PyPDFLoader(temp_file_path)
43
+ elif file_extension == '.txt':
44
+ loader = TextLoader(temp_file_path)
45
+ elif file_extension == '.md':
46
+ loader = UnstructuredMarkdownLoader(temp_file_path)
47
+ elif file_extension in ['.doc', '.docx']:
48
+ loader = UnstructuredWordDocumentLoader(temp_file_path)
49
+ else:
50
+ st.warning(f"Unsupported file type: {file_extension}")
51
+ continue
52
+
53
+ documents.extend(loader.load())
54
+ os.unlink(temp_file_path)
55
+
56
+ text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
57
+ texts = text_splitter.split_documents(documents)
58
+
59
+ vectorstore = FAISS.from_documents(texts, embeddings)
60
+ retriever = vectorstore.as_retriever(search_kwargs={"k": 5})
61
+
62
+ qa_chain = RetrievalQA.from_chain_type(
63
+ llm=chat,
64
+ chain_type="stuff",
65
+ retriever=retriever,
66
+ return_source_documents=True
67
+ )
68
+
69
+ return qa_chain
70
+
71
+ def generate_mnemonic(topic, user_preferences):
72
+ prompt = f"""
73
+ Generate a memorable mnemonic for the topic: {topic}.
74
+ Consider the user's preferences: {user_preferences}.
75
+ The mnemonic should be easy to remember and relate to the topic.
76
+ Also provide a brief explanation of how the mnemonic relates to the topic.
77
+ """
78
+ response = chat.invoke([HumanMessage(content=prompt)])
79
+ return response.content
80
+
81
+ def generate_quiz_question(mnemonic):
82
+ quiz_prompt = f"""
83
+ Create a quiz question based on the mnemonic: {mnemonic}
84
+ Format your response as follows:
85
+ Question: [Your question here]
86
+ Answer: [Your answer here]
87
+ """
88
+ quiz_response = chat.invoke([HumanMessage(content=quiz_prompt)])
89
+ content = quiz_response.content.strip()
90
+
91
+ try:
92
+ question_part, answer_part = content.split("Answer:", 1)
93
+ question = question_part.replace("Question:", "").strip()
94
+ answer = answer_part.strip()
95
+ except ValueError:
96
+ question = content
97
+ answer = "Unable to generate a specific answer. Please refer to the mnemonic."
98
+
99
+ return question, answer
100
+
101
+ def generate_image_prompt(mnemonic):
102
+ prompt = f"""
103
+ Create a detailed image prompt for Midjourney based on the mnemonic: {mnemonic}
104
+ The image should visually represent the key elements of the mnemonic.
105
+ """
106
+ response = chat.invoke([HumanMessage(content=prompt)])
107
+ return response.content
108
+
109
+ def main():
110
+ st.set_page_config(page_title="S.H.E.R.L.O.C.K. Mnemonic Generator", page_icon="🧠", layout="wide")
111
+
112
+ # Custom CSS
113
+ st.markdown("""
114
+ <style>
115
+ .stApp {
116
+ background-color: #f0f2f6;
117
+ }
118
+ .main .block-container {
119
+ padding-top: 2rem;
120
+ }
121
+ .stButton>button {
122
+ background-color: #4CAF50;
123
+ color: white;
124
+ font-weight: bold;
125
+ }
126
+ .stTextInput>div>div>input {
127
+ background-color: #ffffff;
128
+ }
129
+ </style>
130
+ """, unsafe_allow_html=True)
131
+
132
+ st.title("🧠 S.H.E.R.L.O.C.K. Mnemonic Generator")
133
+
134
+ # Initialize session state
135
+ if 'generated_mnemonic' not in st.session_state:
136
+ st.session_state.generated_mnemonic = None
137
+ if 'quiz_question' not in st.session_state:
138
+ st.session_state.quiz_question = None
139
+ if 'quiz_answer' not in st.session_state:
140
+ st.session_state.quiz_answer = None
141
+ if 'image_prompt' not in st.session_state:
142
+ st.session_state.image_prompt = None
143
+
144
+ # Sidebar
145
+ with st.sidebar:
146
+ st.header("οΏ½οΏ½οΏ½οΏ½ Document Upload")
147
+ uploaded_files = st.file_uploader("Upload documents (optional)", type=["pdf", "txt", "md", "doc", "docx"], accept_multiple_files=True)
148
+ if uploaded_files:
149
+ qa_chain = process_documents(uploaded_files)
150
+ st.success(f"{len(uploaded_files)} document(s) processed successfully!")
151
+ else:
152
+ qa_chain = None
153
+
154
+ st.header("🎨 User Preferences")
155
+ user_preferences = st.text_area("Enter your interests or preferences:")
156
+
157
+ # Main area
158
+ col1, col2 = st.columns([2, 1])
159
+
160
+ with col1:
161
+ st.header("πŸ” Generate Mnemonic")
162
+ topic = st.text_input("Enter the topic for your mnemonic:")
163
+
164
+ if st.button("Generate Mnemonic"):
165
+ if topic:
166
+ with st.spinner("Generating mnemonic..."):
167
+ mnemonic = generate_mnemonic(topic, user_preferences)
168
+ st.session_state.generated_mnemonic = mnemonic
169
+
170
+ with st.spinner("Generating quiz question..."):
171
+ question, answer = generate_quiz_question(mnemonic)
172
+ st.session_state.quiz_question = question
173
+ st.session_state.quiz_answer = answer
174
+
175
+ with st.spinner("Generating image prompt..."):
176
+ image_prompt = generate_image_prompt(mnemonic)
177
+ st.session_state.image_prompt = image_prompt
178
+ else:
179
+ st.warning("Please enter a topic to generate a mnemonic.")
180
+
181
+ with col2:
182
+ if st.session_state.generated_mnemonic:
183
+ st.header("πŸ“ Generated Mnemonic")
184
+ st.write(st.session_state.generated_mnemonic)
185
+
186
+ # Quiz section
187
+ if st.session_state.quiz_question:
188
+ st.header("🧠 Mnemonic Quiz")
189
+ st.write(st.session_state.quiz_question)
190
+ user_answer = st.text_input("Your answer:")
191
+ if st.button("Submit Answer"):
192
+ if user_answer.lower() == st.session_state.quiz_answer.lower():
193
+ st.success("πŸŽ‰ Correct! Well done.")
194
+ else:
195
+ st.error(f"❌ Not quite. The correct answer is: {st.session_state.quiz_answer}")
196
+
197
+ # Image prompt section
198
+ if st.session_state.image_prompt:
199
+ st.header("πŸ–ΌοΈ Image Prompt")
200
+ st.write(st.session_state.image_prompt)
201
+ st.info("You can use this prompt with Midjourney or other image generation tools to create a visual representation of your mnemonic.")
202
+
203
+ # Document Q&A section
204
+ if qa_chain:
205
+ st.header("πŸ“š Document Q&A")
206
+ user_question = st.text_input("Ask a question about the uploaded document(s):")
207
+ if st.button("Get Answer"):
208
+ with st.spinner("Searching for the answer..."):
209
+ result = qa_chain({"query": user_question})
210
+ st.subheader("Answer:")
211
+ st.write(result["result"])
212
+ st.subheader("Sources:")
213
+ for source in result["source_documents"]:
214
+ st.write(source.page_content)
215
+
216
+ # Mnemonic visualization
217
+ if st.session_state.generated_mnemonic:
218
+ st.header("🎨 Mnemonic Visualization")
219
+ visualization_type = st.selectbox("Choose visualization type:", ["Word Cloud", "Mind Map"])
220
+ if st.button("Generate Visualization"):
221
+ with st.spinner("Generating visualization..."):
222
+ visualization_prompt = f"""
223
+ Create a detailed description of a {visualization_type} based on the mnemonic:
224
+ {st.session_state.generated_mnemonic}
225
+ Describe the layout, key elements, and their relationships.
226
+ """
227
+ visualization_description = chat.invoke([HumanMessage(content=visualization_prompt)]).content
228
+ st.write(visualization_description)
229
+ st.info("You can use this description to create a visual representation of your mnemonic using tools like Canva or Mindmeister.")
230
+
231
+ # Export options
232
+ if st.session_state.generated_mnemonic:
233
+ st.header("πŸ“€ Export Options")
234
+ export_format = st.selectbox("Choose export format:", ["Text", "PDF", "Markdown"])
235
+ if st.button("Export Mnemonic"):
236
+ export_content = f"""
237
+ Topic: {topic}
238
+
239
+ Mnemonic:
240
+ {st.session_state.generated_mnemonic}
241
+
242
+ Quiz Question:
243
+ {st.session_state.quiz_question}
244
+
245
+ Quiz Answer:
246
+ {st.session_state.quiz_answer}
247
+
248
+ Image Prompt:
249
+ {st.session_state.image_prompt}
250
+ """
251
+
252
+ if export_format == "Text":
253
+ st.download_button("Download Text", export_content, file_name="mnemonic_export.txt")
254
+ elif export_format == "PDF":
255
+ # You'd need to implement PDF generation here, for example using reportlab
256
+ st.warning("PDF export not implemented in this example.")
257
+ elif export_format == "Markdown":
258
+ st.download_button("Download Markdown", export_content, file_name="mnemonic_export.md")
259
+
260
+ # Footer
261
+ st.sidebar.markdown("---")
262
+ st.sidebar.markdown("Powered by Falcon-180B and Streamlit")
263
+
264
+ if __name__ == "__main__":
265
+ main()
pages/notes_generation.py ADDED
@@ -0,0 +1,213 @@
1
+ import streamlit as st
2
+ import os
3
+ from dotenv import load_dotenv
4
+ from langchain_community.chat_models import ChatOpenAI
5
+ from langchain_community.document_loaders import PyPDFLoader, TextLoader, UnstructuredMarkdownLoader, Docx2txtLoader
6
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
7
+ from langchain_community.embeddings import HuggingFaceEmbeddings
8
+ from langchain_community.vectorstores import FAISS
9
+ from langchain.chains import RetrievalQA
10
+ from langchain.prompts import PromptTemplate
11
+ import tempfile
12
+ from typing import List, Dict
13
+ import json
14
+ from datetime import datetime
15
+
16
+ # Load environment variables
17
+ load_dotenv()
18
+
19
+ AI71_BASE_URL = "https://api.ai71.ai/v1/"
20
+ AI71_API_KEY = os.getenv('AI71_API_KEY')
21
+
22
+ # Initialize the Falcon model
23
+ @st.cache_resource
24
+ def get_llm():
25
+ return ChatOpenAI(
26
+ model="tiiuae/falcon-180B-chat",
27
+ api_key=AI71_API_KEY,
28
+ base_url=AI71_BASE_URL,
29
+ streaming=True,
30
+ )
31
+
32
+ # Initialize embeddings
33
+ @st.cache_resource
34
+ def get_embeddings():
35
+ return HuggingFaceEmbeddings()
36
+
37
+ def process_document(file_content, file_type):
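+ # Writes the upload (or pasted text) to a temporary file, loads it with the loader
+ # matching the file type, splits it into 1000-character chunks (200 overlap), and
+ # returns a FAISS retriever over the top-5 matching chunks.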
38
+ with tempfile.NamedTemporaryFile(delete=False, suffix=f'.{file_type}') as tmp_file:
39
+ if isinstance(file_content, str):
40
+ tmp_file.write(file_content.encode('utf-8'))
41
+ else:
42
+ tmp_file.write(file_content)
43
+ tmp_file_path = tmp_file.name
44
+
45
+ if file_type == 'pdf':
46
+ loader = PyPDFLoader(tmp_file_path)
47
+ elif file_type == 'txt':
48
+ loader = TextLoader(tmp_file_path)
49
+ elif file_type == 'md':
50
+ loader = UnstructuredMarkdownLoader(tmp_file_path)
51
+ elif file_type == 'docx':
52
+ loader = Docx2txtLoader(tmp_file_path)
53
+ else:
54
+ raise ValueError(f"Unsupported file type: {file_type}")
55
+
56
+ documents = loader.load()
57
+ text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
58
+ texts = text_splitter.split_documents(documents)
59
+
60
+ vectorstore = FAISS.from_documents(texts, get_embeddings())
61
+ retriever = vectorstore.as_retriever(search_kwargs={"k": 5})
62
+
63
+ os.unlink(tmp_file_path)
64
+ return retriever
65
+
66
+ def generate_notes(retriever, topic, style, length):
67
+ prompt_template = f"""
68
+ You are an expert note-taker and summarizer. Your task is to create {style} and {length} notes on the given topic.
69
+ Use the following guidelines:
70
+ 1. Focus on key concepts and important details.
71
+ 2. Use bullet points or numbered lists for clarity.
72
+ 3. Include relevant examples or explanations where necessary.
73
+ 4. Organize the information in a logical and easy-to-follow structure.
74
+ 5. Aim for clarity without sacrificing important information.
75
+
76
+ Context: {{context}}
77
+ Topic: {{question}}
78
+
79
+ Notes:
80
+ """
81
+
82
+ PROMPT = PromptTemplate(
83
+ template=prompt_template,
84
+ input_variables=["context", "question"]
85
+ )
86
+
87
+ chain_type_kwargs = {"prompt": PROMPT}
88
+ qa_chain = RetrievalQA.from_chain_type(
89
+ llm=get_llm(),
90
+ chain_type="stuff",
91
+ retriever=retriever,
92
+ chain_type_kwargs=chain_type_kwargs
93
+ )
94
+
95
+ result = qa_chain({"query": topic})
96
+ return result['result']
97
+
98
+ def save_notes(notes: str, topic: str):
99
+ notes_data = load_notes_data()
100
+ timestamp = datetime.now().isoformat()
101
+ notes_data.append({"topic": topic, "notes": notes, "timestamp": timestamp})
102
+ with open("saved_notes.json", "w") as f:
103
+ json.dump(notes_data, f)
104
+
105
+ def load_notes_data() -> List[Dict]:
106
+ try:
107
+ with open("saved_notes.json", "r") as f:
108
+ return json.load(f)
109
+ except FileNotFoundError:
110
+ return []
111
+
112
+ def main():
113
+ st.set_page_config(page_title="S.H.E.R.L.O.C.K. Notes Generator", layout="wide")
114
+
115
+ st.title("S.H.E.R.L.O.C.K. Notes Generator")
116
+
117
+ st.markdown("""
118
+ This tool helps you generate concise and relevant notes on specific topics.
119
+ You can upload a document or enter text directly.
120
+ """)
121
+
122
+ # Sidebar content
123
+ st.sidebar.title("About S.H.E.R.L.O.C.K.")
124
+ st.sidebar.markdown("""
125
+ S.H.E.R.L.O.C.K. (Summarizing Helper & Effective Research Liaison for Organizing Comprehensive Knowledge)
126
+ is an advanced AI-powered tool designed to assist you in generating comprehensive notes from various sources.
127
+
128
+ Key Features:
129
+ - Multi-format support (PDF, TXT, MD, DOCX)
130
+ - Customizable note generation
131
+ - Intelligent text processing
132
+ - Save and retrieve notes
133
+
134
+ How to use:
135
+ 1. Choose your input method
136
+ 2. Process your document or text
137
+ 3. Enter a topic and customize note style
138
+ 4. Generate and save your notes
139
+
140
+ Enjoy your enhanced note-taking experience!
141
+ """)
142
+
143
+ input_method = st.radio("Choose input method:", ("Upload Document", "Enter Text"))
144
+
145
+ if input_method == "Upload Document":
146
+ uploaded_file = st.file_uploader("Upload a document", type=["pdf", "txt", "md", "docx"])
147
+ if uploaded_file:
148
+ file_type = uploaded_file.name.split('.')[-1].lower()
149
+ file_content = uploaded_file.read()
150
+ st.success("Document uploaded successfully!")
151
+
152
+ with st.spinner("Processing document..."):
153
+ retriever = process_document(file_content, file_type)
154
+ st.session_state.retriever = retriever
155
+ st.success("Document processed!")
156
+ elif input_method == "Enter Text":
157
+ text_input = st.text_area("Enter your text here:", height=200)
158
+ if text_input:
159
+ with st.spinner("Processing text..."):
160
+ retriever = process_document(text_input, 'txt')
161
+ st.session_state.retriever = retriever
162
+ st.success("Text processed!")
163
+
164
+ topic = st.text_input("Enter the topic for note generation:")
165
+
166
+ col1, col2 = st.columns(2)
167
+ with col1:
168
+ style = st.selectbox("Note Style", ["Concise", "Detailed", "Academic", "Casual"])
169
+ with col2:
170
+ length = st.selectbox("Note Length", ["Short", "Medium", "Long"])
171
+
172
+ if st.button("Generate Notes"):
173
+ if topic and hasattr(st.session_state, 'retriever'):
174
+ with st.spinner("Generating notes..."):
175
+ try:
176
+ notes = generate_notes(st.session_state.retriever, topic, style, length)
177
+ st.subheader("Generated Notes:")
178
+ st.markdown(notes)
179
+
180
+ # Download button for the generated notes
181
+ st.download_button(
182
+ label="Download Notes",
183
+ data=notes,
184
+ file_name=f"{topic.replace(' ', '_')}_notes.txt",
185
+ mime="text/plain"
186
+ )
187
+
188
+ # Save notes
189
+ if st.button("Save Notes"):
190
+ save_notes(notes, topic)
191
+ st.success("Notes saved successfully!")
192
+ except Exception as e:
193
+ st.error(f"An error occurred while generating notes: {str(e)}")
194
+ else:
195
+ st.warning("Please upload a document or enter text, and specify a topic before generating notes.")
196
+
197
+ # Display saved notes
198
+ st.sidebar.subheader("Saved Notes")
199
+ saved_notes = load_notes_data()
200
+ for i, note in enumerate(saved_notes):
201
+ if st.sidebar.button(f"{note['topic']} - {note['timestamp'][:10]}", key=f"saved_note_{i}"):
202
+ st.subheader(f"Saved Notes: {note['topic']}")
203
+ st.markdown(note['notes'])
204
+
205
+ st.sidebar.markdown("---")
206
+ st.sidebar.markdown("Powered by Falcon-180B and Streamlit")
207
+
208
+ # Add a footer
209
+ st.markdown("---")
210
+ st.markdown("Created by Your Team Name | Β© 2024")
211
+
212
+ if __name__ == "__main__":
213
+ main()
pages/research_paper_finder.py ADDED
@@ -0,0 +1,135 @@
1
+ import streamlit as st
2
+ import requests
3
+ from datetime import datetime, timedelta
4
+ import pandas as pd
5
+ from dotenv import load_dotenv
6
+ import os
7
+
8
+ # Load environment variables
9
+ load_dotenv()
10
+
11
+ # Scopus API key
12
+ SCOPUS_API_KEY = "8e94f85eb6044ef1cde06e8b5a426a09"
13
+
14
+ def search_scopus(query, start_year, end_year, max_results=50):
15
+ base_url = "https://api.elsevier.com/content/search/scopus"
16
+
17
+ params = {
18
+ "query": query,
19
+ "date": f"{start_year}-{end_year}",
20
+ "count": max_results,
21
+ "sort": "citedby-count desc",
22
+ "field": "title,author,year,publicationName,description,citedby-count,doi,eid"
23
+ }
24
+
25
+ headers = {
26
+ "X-ELS-APIKey": SCOPUS_API_KEY,
27
+ "Accept": "application/json"
28
+ }
29
+
30
+ try:
31
+ response = requests.get(base_url, params=params, headers=headers)
32
+ response.raise_for_status()
33
+ return response.json()["search-results"]["entry"]
34
+ except requests.exceptions.RequestException as e:
35
+ st.error(f"An error occurred while searching Scopus: {e}")
36
+ return []
37
+
38
+ def format_authors(author_info):
39
+ if isinstance(author_info, list):
40
+ return ", ".join([author.get("authname", "") for author in author_info])
41
+ elif isinstance(author_info, dict):
42
+ return author_info.get("authname", "")
43
+ else:
44
+ return "N/A"
45
+
46
+ def safe_get(dictionary, keys, default="N/A"):
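+ # Walk the given key path through nested dictionaries, returning the default value as soon as a key is missing.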
47
+ for key in keys:
48
+ if isinstance(dictionary, dict) and key in dictionary:
49
+ dictionary = dictionary[key]
50
+ else:
51
+ return default
52
+ return dictionary
53
+
54
+ def get_paper_link(paper):
55
+ doi = safe_get(paper, ["prism:doi"])
56
+ if doi != "N/A":
57
+ return f"https://doi.org/{doi}"
58
+ eid = safe_get(paper, ["eid"])
59
+ if eid != "N/A":
60
+ return f"https://www.scopus.com/record/display.uri?eid={eid}&origin=resultslist"
61
+ return "#"
62
+
63
+ def main():
64
+ st.set_page_config(page_title="S.H.E.R.L.O.C.K. Research Assistant", page_icon="πŸ”¬", layout="wide")
65
+
66
+ st.sidebar.title("S.H.E.R.L.O.C.K.")
67
+ st.sidebar.markdown("""
68
+ **S**ystematic **H**olistic **E**ducational **R**esource for **L**iterature and **O**ptimizing **C**ognitive **K**nowledge
69
+
70
+ Enhance your research capabilities with AI-powered literature search and analysis.
71
+ """)
72
+
73
+ query = st.sidebar.text_input("What topic would you like to research?", "")
74
+
75
+ current_year = datetime.now().year
76
+ start_year, end_year = st.sidebar.slider(
77
+ "Publication Year Range",
78
+ min_value=1900,
79
+ max_value=current_year,
80
+ value=(current_year-5, current_year)
81
+ )
82
+
83
+ max_results = st.sidebar.slider("Maximum number of results", 10, 100, 50)
84
+
85
+ search_button = st.sidebar.button("Search for Research Papers")
86
+
87
+ st.title("Research Papers and Articles")
88
+
89
+ if search_button and query:
90
+ with st.spinner("Searching for the most relevant research papers..."):
91
+ results = search_scopus(query, start_year, end_year, max_results)
92
+
93
+ if results:
94
+ papers = []
95
+ for paper in results:
96
+ papers.append({
97
+ "Title": safe_get(paper, ["dc:title"]),
98
+ "Authors": format_authors(safe_get(paper, ["author"])),
99
+ "Year": safe_get(paper, ["prism:coverDate"])[:4],
100
+ "Journal": safe_get(paper, ["prism:publicationName"]),
101
+ "Abstract": safe_get(paper, ["dc:description"]),
102
+ "Citations": safe_get(paper, ["citedby-count"], "0"),
103
+ "Link": get_paper_link(paper)
104
+ })
105
+
106
+ df = pd.DataFrame(papers)
107
+
108
+ st.markdown(f"### Found {len(results)} papers on '{query}'")
109
+
110
+ for _, paper in df.iterrows():
111
+ with st.container():
112
+ col1, col2 = st.columns([3, 1])
113
+ with col1:
114
+ st.markdown(f"#### [{paper['Title']}]({paper['Link']})")
115
+ st.markdown(f"**Authors:** {paper['Authors']}")
116
+ st.markdown(f"**Published in:** {paper['Journal']} ({paper['Year']})")
117
+ st.markdown(f"**Abstract:** {paper['Abstract']}")
118
+ with col2:
119
+ st.metric("Citations", paper["Citations"])
120
+
121
+ st.markdown("---")
122
+
123
+ # Download results as CSV
124
+ csv = df.to_csv(index=False).encode('utf-8')
125
+ st.download_button(
126
+ label="Download results as CSV",
127
+ data=csv,
128
+ file_name=f"{query.replace(' ', '_')}_research_papers.csv",
129
+ mime="text/csv",
130
+ )
131
+ else:
132
+ st.warning("No results found. Please try a different search query or adjust the year range.")
133
+
134
+ if __name__ == "__main__":
135
+ main()
pages/resume_generator.py ADDED
@@ -0,0 +1,454 @@
1
+ import streamlit as st
2
+ import base64
3
+ from io import BytesIO
4
+ from datetime import datetime
5
+ import json
6
+ from docx import Document
7
+ from docx.shared import Inches, Pt
8
+ from docx.enum.text import WD_ALIGN_PARAGRAPH
9
+ from reportlab.lib.pagesizes import letter
10
+ from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer, Table, TableStyle, Image
11
+ from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
12
+ from reportlab.lib.enums import TA_CENTER, TA_JUSTIFY
13
+ from reportlab.lib import colors
14
+ from langchain.chat_models import ChatOpenAI
15
+ from langchain.schema import HumanMessage
16
+ from PIL import Image as PILImage
17
+ import os
+ from dotenv import load_dotenv
+
+ # Load environment variables
+ load_dotenv()
+
18
+ AI71_BASE_URL = "https://api.ai71.ai/v1/"
19
+ AI71_API_KEY = os.getenv('AI71_API_KEY')
20
+
21
+ def get_llm():
22
+ return ChatOpenAI(
23
+ model="tiiuae/falcon-180B-chat",
24
+ api_key=AI71_API_KEY,
25
+ base_url=AI71_BASE_URL,
26
+ streaming=True,
27
+ )
28
+
29
+ def generate_resume_content(resume_data):
30
+ llm = get_llm()
31
+
32
+ prompt = f"""
33
+ Generate a highly professional and ATS-optimized resume based on the following information:
34
+
35
+ Name: {resume_data['name']}
36
+ Email: {resume_data['email']}
37
+ Phone: {resume_data['phone']}
38
+ Location: {resume_data['location']}
39
+
40
+ Work Experience:
41
+ {json.dumps(resume_data['work_experience'], indent=2)}
42
+
43
+ Education:
44
+ {json.dumps(resume_data['education'], indent=2)}
45
+
46
+ Skills: {', '.join(resume_data['skills'])}
47
+
48
+ Please generate a compelling professional summary and enhance the job descriptions.
49
+ Use action verbs, quantify achievements where possible, and highlight key skills.
50
+ Ensure the content is tailored for ATS optimization.
51
+ The output should be in JSON format with the following structure:
52
+ {{
53
+ "summary": "Professional summary here",
54
+ "work_experience": [
55
+ {{
56
+ "title": "Job title",
57
+ "company": "Company name",
58
+ "start_date": "Start date",
59
+ "end_date": "End date",
60
+ "description": "Enhanced job description with bullet points"
61
+ }}
62
+ ]
63
+ }}
64
+ """
65
+
66
+ try:
67
+ response = llm([HumanMessage(content=prompt)])
68
+ enhanced_content = json.loads(response.content)
69
+
70
+ resume_data['summary'] = enhanced_content['summary']
71
+ resume_data['work_experience'] = enhanced_content['work_experience']
72
+
73
+ return resume_data
74
+ except Exception as e:
75
+ st.error(f"An error occurred while generating AI content: {str(e)}")
76
+ return resume_data
77
+
78
+ def create_docx(resume_data):
79
+ doc = Document()
80
+
81
+ # Styles
82
+ styles = doc.styles
83
+ style = styles.add_style('Name', 1)
84
+ style.font.name = 'Calibri'
85
+ style.font.size = Pt(24)
86
+ style.font.bold = True
87
+ style.paragraph_format.alignment = WD_ALIGN_PARAGRAPH.CENTER
88
+
89
+ # Add photo if provided
90
+ if 'photo' in resume_data and resume_data['photo']:
91
+ image_stream = BytesIO(resume_data['photo'])
92
+ doc.add_picture(image_stream, width=Inches(2.0))
93
+
94
+ # Add name
95
+ doc.add_paragraph(resume_data['name'], style='Name')
96
+
97
+ # Add contact information
98
+ contact_info = doc.add_paragraph()
99
+ contact_info.alignment = WD_ALIGN_PARAGRAPH.CENTER
100
+ contact_info.add_run(f"{resume_data['email']} | {resume_data['phone']} | {resume_data['location']}")
101
+
102
+ # Add summary
103
+ doc.add_heading('Professional Summary', level=1)
104
+ doc.add_paragraph(resume_data['summary'])
105
+
106
+ # Add work experience
107
+ doc.add_heading('Work Experience', level=1)
108
+ for job in resume_data['work_experience']:
109
+ p = doc.add_paragraph(f"{job['title']} at {job['company']}", style='Heading 2')
110
+ p.add_run(f"\n{job['start_date']} - {job['end_date']}")
111
+ for bullet in job['description'].split('\n'):
112
+ if bullet.strip():
113
+ doc.add_paragraph(bullet.strip(), style='List Bullet')
114
+
115
+ # Add education
116
+ doc.add_heading('Education', level=1)
117
+ for edu in resume_data['education']:
118
+ p = doc.add_paragraph(f"{edu['degree']} in {edu['field']}", style='Heading 2')
119
+ p.add_run(f"\n{edu['institution']}, {edu['graduation_date']}")
120
+
121
+ # Add skills
122
+ doc.add_heading('Skills', level=1)
123
+ doc.add_paragraph(', '.join(resume_data['skills']))
124
+
125
+ buffer = BytesIO()
126
+ doc.save(buffer)
127
+ buffer.seek(0)
128
+ return buffer
129
+
130
+ def create_pdf(resume_data):
131
+ buffer = BytesIO()
132
+ doc = SimpleDocTemplate(buffer, pagesize=letter, rightMargin=72, leftMargin=72, topMargin=72, bottomMargin=18)
133
+
134
+ styles = getSampleStyleSheet()
135
+ styles.add(ParagraphStyle(name='Justify', alignment=TA_JUSTIFY))
136
+ styles.add(ParagraphStyle(name='Center', alignment=TA_CENTER))
137
+
138
+ story = []
139
+
140
+ # Add photo if provided
141
+ if 'photo' in resume_data and resume_data['photo']:
142
+ image_stream = BytesIO(resume_data['photo'])
143
+ img = Image(image_stream, width=100, height=100)
144
+ story.append(img)
145
+
146
+ # Add name
147
+ story.append(Paragraph(resume_data['name'], styles['Title']))
148
+
149
+ # Add contact information
150
+ story.append(Paragraph(f"{resume_data['email']} | {resume_data['phone']} | {resume_data['location']}", styles['Center']))
151
+ story.append(Spacer(1, 12))
152
+
153
+ # Add summary
154
+ story.append(Paragraph('Professional Summary', styles['Heading1']))
155
+ story.append(Paragraph(resume_data['summary'], styles['Justify']))
156
+ story.append(Spacer(1, 12))
157
+
158
+ # Add work experience
159
+ story.append(Paragraph('Work Experience', styles['Heading1']))
160
+ for job in resume_data['work_experience']:
161
+ story.append(Paragraph(f"{job['title']} at {job['company']}", styles['Heading2']))
162
+ story.append(Paragraph(f"{job['start_date']} - {job['end_date']}", styles['Normal']))
163
+ for bullet in job['description'].split('\n'):
164
+ if bullet.strip():
165
+ story.append(Paragraph(f"β€’ {bullet.strip()}", styles['Normal']))
166
+ story.append(Spacer(1, 12))
167
+
168
+ # Add education
169
+ story.append(Paragraph('Education', styles['Heading1']))
170
+ for edu in resume_data['education']:
171
+ story.append(Paragraph(f"{edu['degree']} in {edu['field']}", styles['Heading2']))
172
+ story.append(Paragraph(f"{edu['institution']}, {edu['graduation_date']}", styles['Normal']))
173
+ story.append(Spacer(1, 12))
174
+
175
+ # Add skills
176
+ story.append(Paragraph('Skills', styles['Heading1']))
177
+ story.append(Paragraph(', '.join(resume_data['skills']), styles['Normal']))
178
+
179
+ doc.build(story)
180
+ buffer.seek(0)
181
+ return buffer
182
+
183
+ def create_txt(resume_data):
184
+ txt_content = f"{resume_data['name']}\n"
185
+ txt_content += f"{resume_data['email']} | {resume_data['phone']} | {resume_data['location']}\n\n"
186
+
187
+ txt_content += "Professional Summary\n"
188
+ txt_content += f"{resume_data['summary']}\n\n"
189
+
190
+ txt_content += "Work Experience\n"
191
+ for job in resume_data['work_experience']:
192
+ txt_content += f"{job['title']} at {job['company']}\n"
193
+ txt_content += f"{job['start_date']} - {job['end_date']}\n"
194
+ for bullet in job['description'].split('\n'):
195
+ if bullet.strip():
196
+ txt_content += f"β€’ {bullet.strip()}\n"
197
+ txt_content += "\n"
198
+
199
+ txt_content += "Education\n"
200
+ for edu in resume_data['education']:
201
+ txt_content += f"{edu['degree']} in {edu['field']}\n"
202
+ txt_content += f"{edu['institution']}, {edu['graduation_date']}\n\n"
203
+
204
+ txt_content += "Skills\n"
205
+ txt_content += ', '.join(resume_data['skills'])
206
+
207
+ return txt_content.encode()
208
+
209
+ def calculate_ats_score(resume_data):
210
+ score = 0
211
+ max_score = 100
212
+
213
+ # Check for key sections
214
+ if resume_data['name']: score += 5
215
+ if resume_data['email']: score += 5
216
+ if resume_data['phone']: score += 5
217
+ if resume_data['location']: score += 5
218
+ if resume_data['summary']: score += 10
219
+ if resume_data['work_experience']: score += 20
220
+ if resume_data['education']: score += 15
221
+ if resume_data['skills']: score += 15
222
+
223
+ # Check content quality
224
+ if len(resume_data['summary'].split()) >= 50: score += 5
225
+ if len(resume_data['work_experience']) >= 2: score += 5
226
+ if len(resume_data['skills']) >= 5: score += 5
227
+
228
+ # Check for keywords (this is a simplified version, in reality, you'd want to check against job-specific keywords)
229
+ keywords = ['experience', 'skills', 'project', 'team', 'leadership', 'communication', 'achieved', 'improved', 'managed', 'developed']
230
+ resume_text = ' '.join([str(value) for value in resume_data.values() if isinstance(value, str)])
231
+ for keyword in keywords:
232
+ if keyword in resume_text.lower():
233
+ score += 1
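+ # A hypothetical extension (not implemented here): score against keywords pulled from a
+ # specific job description supplied by the user instead of the fixed list above, e.g.
+ #     job_keywords = {w.lower().strip('.,') for w in job_description.split() if len(w) > 4}
+ #     score += sum(1 for kw in job_keywords if kw in resume_text.lower())
+ # where job_description would be a new text_area input collected earlier in the form.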
234
+
235
+ return min(score, max_score)
236
+
237
+ def main():
238
+ st.set_page_config(page_title="AI-Enhanced Resume Builder", page_icon="πŸ“„", layout="wide")
239
+
240
+ st.markdown("""
241
+ <style>
242
+ .big-font {
243
+ font-size:30px !important;
244
+ font-weight: bold;
245
+ }
246
+ .stButton>button {
247
+ width: 100%;
248
+ }
249
+ </style>
250
+ """, unsafe_allow_html=True)
251
+
252
+ # Add sidebar
253
+ st.sidebar.title("About This Project")
254
+ st.sidebar.write("""
255
+ Welcome to the AI-Enhanced Resume Builder!
256
+
257
+ This project helps you create a professional, ATS-optimized resume with the power of AI. Here's what you can do:
258
+
259
+ 1. Input your personal information
260
+ 2. Add your work experience
261
+ 3. Include your education details
262
+ 4. List your skills
263
+ 5. Optionally upload a photo
264
+ 6. Generate AI-enhanced content
265
+ 7. Review and download your resume
266
+
267
+ The AI will help improve your resume content and provide an ATS compatibility score.
268
+
269
+ Get started by filling out the form and clicking 'Next' at each step!
270
+ """)
271
+
272
+ st.markdown('<p class="big-font">AI-Enhanced Resume Builder</p>', unsafe_allow_html=True)
273
+ st.write("Create a professional, ATS-optimized resume with AI-powered content enhancement")
274
+
275
+ # Initialize session state
276
+ if 'step' not in st.session_state:
277
+ st.session_state.step = 1
278
+
279
+ if 'resume_data' not in st.session_state:
280
+ st.session_state.resume_data = {
281
+ 'name': '', 'email': '', 'phone': '', 'location': '',
282
+ 'summary': '', 'work_experience': [], 'education': [], 'skills': [], 'photo': None
283
+ }
284
+
285
+ # Step 1: Personal Information
286
+ if st.session_state.step == 1:
287
+ st.subheader("Step 1: Personal Information")
288
+ name = st.text_input("Full Name", st.session_state.resume_data['name'])
289
+ email = st.text_input("Email", st.session_state.resume_data['email'])
290
+ phone = st.text_input("Phone", st.session_state.resume_data['phone'])
291
+ location = st.text_input("Location", st.session_state.resume_data['location'])
292
+
293
+ photo_upload = st.file_uploader("Upload a photo (optional)", type=['jpg', 'jpeg', 'png'])
294
+ if photo_upload:
295
+ image = PILImage.open(photo_upload)
296
+ st.image(image, caption='Uploaded Image', use_column_width=True)
297
+ buffered = BytesIO()
298
+ image.save(buffered, format="PNG")
299
+ st.session_state.resume_data['photo'] = buffered.getvalue()
300
+
301
+ if st.button("Next"):
302
+ if name and email and phone and location:
303
+ st.session_state.resume_data.update({
304
+ 'name': name,
305
+ 'email': email,
306
+ 'phone': phone,
307
+ 'location': location
308
+ })
309
+ st.session_state.step = 2
310
+ else:
311
+ st.error("Please fill in all required fields before proceeding.")
312
+
313
+ # Step 2: Work Experience
314
+ elif st.session_state.step == 2:
315
+ st.subheader("Step 2: Work Experience")
316
+ num_jobs = st.number_input("Number of jobs to add", min_value=1, max_value=10, value=len(st.session_state.resume_data['work_experience']) or 1)
317
+
318
+ work_experience = []
319
+ for i in range(num_jobs):
320
+ st.write(f"Job {i+1}")
321
+ job = {}
322
+ job['title'] = st.text_input(f"Job Title {i+1}", st.session_state.resume_data['work_experience'][i]['title'] if i < len(st.session_state.resume_data['work_experience']) else '')
323
+ job['company'] = st.text_input(f"Company {i+1}", st.session_state.resume_data['work_experience'][i]['company'] if i < len(st.session_state.resume_data['work_experience']) else '')
324
+ job['start_date'] = st.date_input(f"Start Date {i+1}", value=datetime.strptime(st.session_state.resume_data['work_experience'][i]['start_date'] if i < len(st.session_state.resume_data['work_experience']) else '2020-01-01', '%Y-%m-%d')).strftime('%Y-%m-%d')
325
+ job['end_date'] = st.date_input(f"End Date {i+1}", value=datetime.strptime(st.session_state.resume_data['work_experience'][i]['end_date'] if i < len(st.session_state.resume_data['work_experience']) else '2023-01-01', '%Y-%m-%d')).strftime('%Y-%m-%d')
326
+ job['description'] = st.text_area(f"Job Description {i+1}", st.session_state.resume_data['work_experience'][i]['description'] if i < len(st.session_state.resume_data['work_experience']) else '', height=100)
327
+ work_experience.append(job)
328
+
329
+ col1, col2 = st.columns(2)
330
+ if col1.button("Previous"):
331
+ st.session_state.step = 1
332
+ if col2.button("Next"):
333
+ if all(job['title'] and job['company'] and job['description'] for job in work_experience):
334
+ st.session_state.resume_data['work_experience'] = work_experience
335
+ st.session_state.step = 3
336
+ else:
337
+ st.error("Please fill in all required fields for each job before proceeding.")
338
+
339
+ # Step 3: Education
340
+ elif st.session_state.step == 3:
341
+ st.subheader("Step 3: Education")
342
+ num_edu = st.number_input("Number of education entries", min_value=1, max_value=5, value=len(st.session_state.resume_data['education']) or 1)
343
+
344
+ education = []
345
+ for i in range(num_edu):
346
+ st.write(f"Education {i+1}")
347
+ edu = {}
348
+ edu['degree'] = st.text_input(f"Degree {i+1}", st.session_state.resume_data['education'][i]['degree'] if i < len(st.session_state.resume_data['education']) else '')
349
+ edu['field'] = st.text_input(f"Field of Study {i+1}", st.session_state.resume_data['education'][i]['field'] if i < len(st.session_state.resume_data['education']) else '')
350
+ edu['institution'] = st.text_input(f"Institution {i+1}", st.session_state.resume_data['education'][i]['institution'] if i < len(st.session_state.resume_data['education']) else '')
351
+ edu['graduation_date'] = st.date_input(f"Graduation Date {i+1}", value=datetime.strptime(st.session_state.resume_data['education'][i]['graduation_date'] if i < len(st.session_state.resume_data['education']) else '2023-01-01', '%Y-%m-%d')).strftime('%Y-%m-%d')
352
+ education.append(edu)
353
+
354
+ col1, col2 = st.columns(2)
355
+ if col1.button("Previous"):
356
+ st.session_state.step = 2
357
+ if col2.button("Next"):
358
+ if all(edu['degree'] and edu['field'] and edu['institution'] for edu in education):
359
+ st.session_state.resume_data['education'] = education
360
+ st.session_state.step = 4
361
+ else:
362
+ st.error("Please fill in all required fields for each education entry before proceeding.")
363
+
364
+ # Step 4: Skills and Generation
365
+ elif st.session_state.step == 4:
366
+ st.subheader("Step 4: Skills and Resume Generation")
367
+ skills_input = st.text_input("Skills (comma-separated)", ', '.join(st.session_state.resume_data['skills']))
368
+
369
+ if st.button("Generate Resume"):
370
+ if skills_input.strip():
371
+ st.session_state.resume_data['skills'] = [skill.strip() for skill in skills_input.split(',') if skill.strip()]
372
+ with st.spinner("Generating AI-enhanced resume content..."):
373
+ st.session_state.resume_data = generate_resume_content(st.session_state.resume_data)
374
+ st.session_state.step = 5
375
+ st.experimental_rerun()
376
+ else:
377
+ st.error("Please enter at least one skill before generating the resume.")
378
+
379
+ # Step 5: Review and Download
380
+ elif st.session_state.step == 5:
381
+ st.subheader("Generated Resume")
382
+
383
+ # Display resume content for review
384
+ st.write("### Personal Information")
385
+ st.write(f"**Name:** {st.session_state.resume_data['name']}")
386
+ st.write(f"**Email:** {st.session_state.resume_data['email']}")
387
+ st.write(f"**Phone:** {st.session_state.resume_data['phone']}")
388
+ st.write(f"**Location:** {st.session_state.resume_data['location']}")
389
+
390
+ if st.session_state.resume_data['photo']:
391
+ st.image(st.session_state.resume_data['photo'], caption='Your Photo', width=200)
392
+
393
+ st.write("### Professional Summary")
394
+ st.write(st.session_state.resume_data['summary'])
395
+
396
+ st.write("### Work Experience")
397
+ for job in st.session_state.resume_data['work_experience']:
398
+ st.write(f"**{job['title']} at {job['company']}**")
399
+ st.write(f"{job['start_date']} - {job['end_date']}")
400
+ st.write(job['description'])
401
+
402
+ st.write("### Education")
403
+ for edu in st.session_state.resume_data['education']:
404
+ st.write(f"**{edu['degree']} in {edu['field']}**")
405
+ st.write(f"{edu['institution']}, {edu['graduation_date']}")
406
+
407
+ st.write("### Skills")
408
+ st.write(', '.join(st.session_state.resume_data['skills']))
409
+
410
+ # Calculate and display ATS score
411
+ ats_score = calculate_ats_score(st.session_state.resume_data)
412
+ st.write(f"### ATS Compatibility Score: {ats_score}%")
413
+
414
+ # Download options
415
+ st.write("### Download Options")
416
+ col1, col2, col3 = st.columns(3)
417
+
418
+ docx_buffer = create_docx(st.session_state.resume_data)
419
+ col1.download_button(
420
+ label="Download as DOCX",
421
+ data=docx_buffer,
422
+ file_name="resume.docx",
423
+ mime="application/vnd.openxmlformats-officedocument.wordprocessingml.document"
424
+ )
425
+
426
+ pdf_buffer = create_pdf(st.session_state.resume_data)
427
+ col2.download_button(
428
+ label="Download as PDF",
429
+ data=pdf_buffer,
430
+ file_name="resume.pdf",
431
+ mime="application/pdf"
432
+ )
433
+
434
+ txt_content = create_txt(st.session_state.resume_data)
435
+ col3.download_button(
436
+ label="Download as TXT",
437
+ data=txt_content,
438
+ file_name="resume.txt",
439
+ mime="text/plain"
440
+ )
441
+
442
+ if st.button("Edit Resume"):
443
+ st.session_state.step = 1
+ st.experimental_rerun()
444
+
445
+ if st.button("Start Over"):
446
+ st.session_state.step = 1
447
+ st.session_state.resume_data = {
448
+ 'name': '', 'email': '', 'phone': '', 'location': '',
449
+ 'summary': '', 'work_experience': [], 'education': [], 'skills': [], 'photo': None
450
+ }
451
+ st.experimental_rerun()
452
+
453
+ if __name__ == "__main__":
454
+ main()
pages/sherlock_observation.py ADDED
@@ -0,0 +1,191 @@
1
+ import streamlit as st
2
+ import random
3
+ from langchain_community.chat_models import ChatOpenAI
4
+ from langchain.schema import HumanMessage, SystemMessage
5
+ from langchain_community.document_loaders import PyPDFLoader, TextLoader
6
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
7
+ from langchain_huggingface import HuggingFaceEmbeddings
8
+ from langchain_community.vectorstores import FAISS
9
+ from langchain.chains import RetrievalQA
10
+ import os
11
+ from dotenv import load_dotenv
12
+ import tempfile
13
+
14
+ # Load environment variables
15
+ load_dotenv()
16
+
17
+ AI71_BASE_URL = "https://api.ai71.ai/v1/"
18
+ AI71_API_KEY = os.getenv('AI71_API_KEY')
19
+
20
+ # Initialize the Falcon model
21
+ chat = ChatOpenAI(
22
+ model="tiiuae/falcon-180B-chat",
23
+ api_key=AI71_API_KEY,
24
+ base_url=AI71_BASE_URL,
25
+ streaming=True,
26
+ )
27
+
28
+ # Initialize embeddings
29
+ embeddings = HuggingFaceEmbeddings()
30
+
31
+ # Expanded list of predefined topics
32
+ PREDEFINED_TOPICS = [
33
+ "Quantum Computing", "Artificial Intelligence Ethics", "Blockchain Technology",
34
+ "Neuroscience", "Climate Change Mitigation", "Space Exploration",
35
+ "Renewable Energy", "Genetic Engineering", "Cybersecurity",
36
+ "Machine Learning", "Nanotechnology", "Robotics",
37
+ "Virtual Reality", "Augmented Reality", "Internet of Things",
38
+ "5G Technology", "Autonomous Vehicles", "Bioinformatics",
39
+ "Cloud Computing", "Data Science", "Artificial General Intelligence",
40
+ "Quantum Cryptography", "3D Printing", "Smart Cities",
41
+ "Biotechnology", "Fusion Energy", "Sustainable Agriculture",
42
+ "Space Tourism", "Quantum Sensors", "Brain-Computer Interfaces",
43
+ "Personalized Medicine", "Synthetic Biology", "Exoplanets",
44
+ "Dark Matter", "CRISPR Technology", "Quantum Internet",
45
+ "Deep Learning", "Edge Computing", "Humanoid Robots",
46
+ "Drone Technology", "Quantum Supremacy", "Neuromorphic Computing",
47
+ "Asteroid Mining", "Bionic Implants", "Smart Materials",
48
+ "Quantum Dots", "Lab-grown Meat", "Vertical Farming",
49
+ "Hyperloop Transportation", "Molecular Nanotechnology", "Quantum Metrology",
50
+ "Artificial Photosynthesis", "Cognitive Computing", "Swarm Robotics",
51
+ "Metamaterials", "Neuroplasticity", "Quantum Machine Learning",
52
+ "Green Hydrogen", "Organ-on-a-Chip", "Bioprinting",
53
+ "Plasma Physics", "Quantum Simulation", "Soft Robotics",
54
+ "Geoengineering", "Exoskeletons", "Programmable Matter",
55
+ "Graphene Applications", "Quantum Sensing", "Neuralink",
56
+ "Holographic Displays", "Quantum Error Correction", "Synthetic Genomes",
57
+ "Carbon Capture and Storage", "Quantum Memory", "Organoids",
58
+ "Artificial Synapses", "Quantum Imaging", "Biosensors",
59
+ "Memristors", "Quantum Annealing", "DNA Data Storage",
60
+ "Cultured Meat", "Quantum Radar", "Neuromorphic Hardware",
61
+ "Quantum Entanglement", "Phytomining", "Biohacking",
62
+ "Topological Quantum Computing", "Neuroprosthetics", "Optogenetics",
63
+ "Quantum Gravity", "Molecular Machines", "Biomimicry",
64
+ "Quantum Teleportation", "Neurogenesis", "Bioelectronics",
65
+ "Quantum Tunneling", "Tissue Engineering", "Bioremediation",
66
+ "Quantum Photonics", "Synthetic Neurobiology", "Nanomedicine",
67
+ "Quantum Biology", "Biogeochemistry", "Molecular Gastronomy",
68
+ "Quantum Thermodynamics", "Nutrigenomics", "Biomechatronics",
69
+ "Quantum Chemistry", "Psychoneuroimmunology", "Nanophotonics",
70
+ "Quantum Optics", "Neuroeconomics", "Bionanotechnology"
71
+ ]
72
+
73
+ def process_document(file):
74
+ with tempfile.NamedTemporaryFile(delete=False, suffix=os.path.splitext(file.name)[1]) as temp_file:
75
+ temp_file.write(file.getvalue())
76
+ temp_file_path = temp_file.name
77
+
78
+ if file.name.endswith('.pdf'):
79
+ loader = PyPDFLoader(temp_file_path)
80
+ else:
81
+ loader = TextLoader(temp_file_path)
82
+
83
+ documents = loader.load()
84
+ text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
85
+ texts = text_splitter.split_documents(documents)
86
+
87
+ vectorstore = FAISS.from_documents(texts, embeddings)
88
+ retriever = vectorstore.as_retriever(search_kwargs={"k": 5})
89
+
90
+ qa_chain = RetrievalQA.from_chain_type(
91
+ llm=chat,
92
+ chain_type="stuff",
93
+ retriever=retriever,
94
+ return_source_documents=True
95
+ )
96
+
97
+ os.unlink(temp_file_path)
98
+ return qa_chain
99
+
100
+ def get_sherlock_analysis(topic, qa_chain=None):
101
+ system_prompt = """
102
+ You are Sherlock Holmes, the world's greatest detective and master of observation and deduction.
103
+ Your task is to provide an in-depth analysis of the given topic, offering unique insights on how to approach learning it from the ground up.
104
+ Your analysis should:
105
+ 1. Break down the topic into its fundamental components.
106
+ 2. Identify key concepts and their relationships.
107
+ 3. Suggest a structured approach to learning, starting from first principles.
108
+ 4. Highlight potential challenges and how to overcome them.
109
+ 5. Provide a unique point of view that encourages critical thinking.
110
+ Your response should be detailed, insightful, and encourage a deep understanding of the subject.
111
+ """
112
+
113
+ if qa_chain:
114
+ result = qa_chain({"query": f"Provide a Sherlock Holmes style analysis of the topic: {topic}"})
115
+ response = result['result']
116
+ else:
117
+ messages = [
118
+ SystemMessage(content=system_prompt),
119
+ HumanMessage(content=f"Analyze the following topic: {topic}")
120
+ ]
121
+ response = chat.invoke(messages).content
122
+
123
+ return response
124
+
125
+ def chunk_text(text, max_chunk_size=4000):
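+ # Split long model output into roughly max_chunk_size-character pieces on sentence
+ # boundaries so Streamlit can render it in manageable chunks.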
126
+ chunks = []
127
+ current_chunk = ""
128
+ for sentence in text.split(". "):
129
+ if len(current_chunk) + len(sentence) < max_chunk_size:
130
+ current_chunk += sentence + ". "
131
+ else:
132
+ chunks.append(current_chunk)
133
+ current_chunk = sentence + ". "
134
+ if current_chunk:
135
+ chunks.append(current_chunk)
136
+ return chunks
137
+
138
+ def main():
139
+ st.set_page_config(page_title="S.H.E.R.L.O.C.K. Observation", page_icon="πŸ”", layout="wide")
140
+
141
+ st.title("πŸ•΅οΈ S.H.E.R.L.O.C.K. Observation")
142
+ st.markdown("*Uncover the depths of any subject with the keen insight of Sherlock Holmes*")
143
+
144
+ col1, col2 = st.columns([2, 1])
145
+
146
+ with col2:
147
+ st.subheader("Choose Your Method")
148
+ method = st.radio("Select input method:", ["Enter Topic", "Upload Document", "Choose from List"])
149
+
150
+ if method == "Enter Topic":
151
+ topic = st.text_input("Enter your topic of interest:")
152
+ elif method == "Upload Document":
153
+ uploaded_file = st.file_uploader("Upload a document (PDF or TXT)", type=["pdf", "txt"])
154
+ if uploaded_file:
155
+ topic = uploaded_file.name
156
+ else:
157
+ topic = st.selectbox("Choose a topic:", PREDEFINED_TOPICS)
158
+
159
+ if st.button("Analyze", key="analyze_button"):
160
+ if method == "Upload Document" and uploaded_file:
161
+ qa_chain = process_document(uploaded_file)
162
+ analysis = get_sherlock_analysis(topic, qa_chain)
163
+ elif topic:
164
+ analysis = get_sherlock_analysis(topic)
165
+ else:
166
+ st.warning("Please provide a topic or upload a document.")
167
+ return
168
+
169
+ col1.markdown("## Sherlock's Analysis")
170
+ chunks = chunk_text(analysis)
171
+ for chunk in chunks:
172
+ col1.markdown(chunk)
173
+
174
+ st.sidebar.image("https://upload.wikimedia.org/wikipedia/commons/c/cd/Sherlock_Holmes_Portrait_Paget.jpg", use_column_width=True)
175
+ st.sidebar.title("About S.H.E.R.L.O.C.K. Observation")
176
+ st.sidebar.markdown("""
177
+ S.H.E.R.L.O.C.K. Observation is your personal detective for any subject.
178
+ It provides:
179
+ - In-depth analysis of topics
180
+ - Unique perspectives on learning approaches
181
+ - First principles breakdown of subjects
182
+ - Critical thinking encouragement
183
+
184
+ Let Sherlock guide you through the intricacies of any field of study!
185
+ """)
186
+
187
+ st.sidebar.markdown("---")
188
+ st.sidebar.markdown("Powered by Falcon-180B and Streamlit")
189
+
190
+ if __name__ == "__main__":
191
+ main()
pages/study_roadmap.py ADDED
@@ -0,0 +1,481 @@
1
+ import streamlit as st
2
+ import networkx as nx
3
+ import plotly.graph_objects as go
4
+ from dotenv import load_dotenv
5
+ from langchain.chat_models import ChatOpenAI
6
+ from langchain.prompts import ChatPromptTemplate
7
+ from langchain.output_parsers import PydanticOutputParser
8
+ from pydantic import BaseModel, Field
9
+ from typing import List, Dict, Optional
10
+ import json
11
+ import pandas as pd
12
+ import time
13
+ from datetime import datetime
14
+ import random
15
+ import re
16
+ from PIL import Image
17
+ import logging
+ import os
18
+
19
+ # Set up logging
20
+ logging.basicConfig(level=logging.INFO)
21
+ logger = logging.getLogger(__name__)
22
+
23
+ # Load environment variables
24
+ load_dotenv()
25
+
26
+ AI71_BASE_URL = "https://api.ai71.ai/v1/"
27
+ AI71_API_KEY = "api71-api-92fc2ef9-9f3c-47e5-a019-18e257b04af2"
28
+
29
+ # Initialize the Falcon model
30
+ chat = ChatOpenAI(
31
+ model="tiiuae/falcon-180B-chat",
32
+ api_key=AI71_API_KEY,
33
+ base_url=AI71_BASE_URL,
34
+ temperature=0.7,
35
+ )
36
+
37
+ class RoadmapStep(BaseModel):
38
+ title: str
39
+ description: str
40
+ resources: List[Dict[str, str]] = Field(default_factory=list)
41
+ estimated_time: str
42
+ how_to_use: Optional[str] = None
43
+
44
+ class Roadmap(BaseModel):
45
+ steps: Dict[str, RoadmapStep] = Field(default_factory=dict)
46
+
47
+ def clean_json(content):
48
+ # Remove any leading or trailing whitespace
49
+ content = content.strip()
50
+
51
+ # Ensure the content starts and ends with curly braces
52
+ if not content.startswith('{'):
53
+ content = '{' + content
54
+ if not content.endswith('}'):
55
+ content = content + '}'
56
+
57
+ # Remove any newline characters and extra spaces
58
+ content = ' '.join(content.split())
59
+
60
+ # Escape any unescaped double quotes within string values
61
+ content = re.sub(r'(?<!\\)"(?=(?:(?:[^"]*"){2})*[^"]*$)', r'\"', content)
62
+
63
+ return content
64
+
65
+ def ensure_valid_json(content):
66
+ # First, apply our existing cleaning function
67
+ content = clean_json(content)
68
+
69
+ # Use regex to find and fix unquoted property names
70
+ pattern = r'(\{|\,)\s*([a-zA-Z_][a-zA-Z0-9_]*)\s*:'
71
+ content = re.sub(pattern, r'\1 "\2":', content)
72
+
73
+ # Replace single quotes with double quotes
74
+ content = content.replace("'", '"')
75
+
76
+ # Attempt to parse the JSON to catch any remaining issues
77
+ try:
78
+ json_obj = json.loads(content)
79
+ return json.dumps(json_obj) # Return a properly formatted JSON string
80
+ except json.JSONDecodeError as e:
81
+ # If we still can't parse it, log the error and return None
82
+ logger.error(f"Failed to parse JSON after cleaning: {str(e)}")
83
+ logger.debug(f"Problematic JSON: {content}")
84
+ return None
85
+
+ def generate_roadmap(topic):
+     levels = [
+         "knowledge",
+         "comprehension",
+         "application",
+         "analysis",
+         "synthesis",
+         "evaluation"
+     ]
+ 
+     roadmap = Roadmap()
+ 
+     for level in levels:
+         try:
+             logger.info(f"Generating roadmap step for topic: {topic} at {level} level")
+             step = generate_simplified_step(topic, level, chat)
+             roadmap.steps[level] = step
+             logger.info(f"Added step for {level} level")
+ 
+         except Exception as e:
+             logger.error(f"Error in generate_roadmap for {level}: {str(e)}")
+             step = create_fallback_step(topic, level, chat)
+             roadmap.steps[level] = step
+ 
+     logger.info("Roadmap generation complete")
+     return roadmap
+ 
+ def generate_diverse_resources(topic, level):
+     encoded_topic = topic.replace(' ', '+')
+     encoded_level = level.replace(' ', '+')
+ 
+     resource_templates = [
+         {"title": "Wikipedia", "url": f"https://en.wikipedia.org/wiki/{topic.replace(' ', '_')}"},
+         {"title": "YouTube Overview", "url": f"https://www.youtube.com/results?search_query={encoded_topic}+{encoded_level}"},
+         {"title": "Coursera Courses", "url": f"https://www.coursera.org/search?query={encoded_topic}"},
+         {"title": "edX Courses", "url": f"https://www.edx.org/search?q={encoded_topic}"},
+         {"title": "Brilliant", "url": f"https://brilliant.org/search/?q={encoded_topic}"},
+         {"title": "Google Scholar", "url": f"https://scholar.google.com/scholar?q={encoded_topic}"},
+         {"title": "MIT OpenCourseWare", "url": f"https://ocw.mit.edu/search/?q={encoded_topic}"},
+         {"title": "Khan Academy", "url": f"https://www.khanacademy.org/search?query={encoded_topic}"},
+         {"title": "TED Talks", "url": f"https://www.ted.com/search?q={encoded_topic}"},
+         {"title": "arXiv Papers", "url": f"https://arxiv.org/search/?query={encoded_topic}&searchtype=all"},
+         {"title": "ResearchGate", "url": f"https://www.researchgate.net/search/publication?q={encoded_topic}"},
+         {"title": "Academic Earth", "url": f"https://academicearth.org/search/?q={encoded_topic}"},
+     ]
+ 
+     # Randomly select 5-7 resources
+     num_resources = random.randint(5, 7)
+     selected_resources = random.sample(resource_templates, num_resources)
+ 
+     return selected_resources
+ 
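`generate_diverse_resources` only assembles search URLs from the templates above, so it needs no network access and is cheap to call. A short usage sketch (the topic is arbitrary):

```python
resources = generate_diverse_resources("linear algebra", "knowledge")
for r in resources:
    print(f"{r['title']}: {r['url']}")
# Prints 5-7 randomly sampled entries, for example:
#   Khan Academy: https://www.khanacademy.org/search?query=linear+algebra
```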
+ def create_fallback_step(topic, level, chat):
+     def generate_component(prompt, default_value):
+         try:
+             response = chat.invoke([{"role": "system", "content": prompt}])
+             return response.content.strip() or default_value
+         except Exception as e:
+             logger.error(f"Error generating component: {str(e)}")
+             return default_value
+ 
+     # Generate title
+     title_prompt = f"Create a concise title (max 10 words) for a study step about {topic} at the {level} level of Bloom's Taxonomy."
+     default_title = f"{level.capitalize()} Step for {topic}"
+     title = generate_component(title_prompt, default_title)
+ 
+     # Generate description
+     description_prompt = f"""Write a detailed description (500-700 words) for a study step about {topic} at the {level} level of Bloom's Taxonomy.
+     Explain what this step entails, how the user should approach it, and why it's important for mastering the topic at this level.
+     The description should be specific to {topic} and not a generic explanation of the Bloom's Taxonomy level."""
+     default_description = f"In this step, you will focus on {topic} at the {level} level. This involves understanding key concepts and theories related to {topic}. Engage with the provided resources to build a strong foundation."
+     description = generate_component(description_prompt, default_description)
+ 
+     # Generate estimated time
+     time_prompt = f"Estimate the time needed to complete a study step about {topic} at the {level} level of Bloom's Taxonomy. Provide the answer in a format like '3-4 days' or '1-2 weeks'."
+     default_time = "3-4 days"
+     estimated_time = generate_component(time_prompt, default_time)
+ 
+     # Generate how to use
+     how_to_use_prompt = f"""Write a paragraph (100-150 words) on how to effectively use the {level} level of Bloom's Taxonomy when studying {topic}.
+     Include tips and strategies specific to {topic} at this {level} level."""
+     default_how_to_use = f"Explore the provided resources and take notes on key concepts related to {topic}. Practice explaining these concepts in your own words to reinforce your understanding at the {level} level."
+     how_to_use = generate_component(how_to_use_prompt, default_how_to_use)
+ 
+     return RoadmapStep(
+         title=title,
+         description=description,
+         resources=generate_diverse_resources(topic, level),
+         estimated_time=estimated_time,
+         how_to_use=how_to_use
+     )
+ 
+ def create_interactive_graph(roadmap):
+     G = nx.DiGraph()
+     color_map = {
+         'Knowledge': '#FF6B6B',
+         'Comprehension': '#4ECDC4',
+         'Application': '#45B7D1',
+         'Analysis': '#FFA07A',
+         'Synthesis': '#98D8C8',
+         'Evaluation': '#F9D56E'
+     }
+ 
+     # Add one node per step and connect consecutive levels so the roadmap reads as a path
+     prev_title = None
+     for i, (level, step) in enumerate(roadmap.steps.items()):
+         G.add_node(step.title, level=level.capitalize(), pos=(i, -i))
+         if prev_title is not None:
+             G.add_edge(prev_title, step.title)
+         prev_title = step.title
+ 
+     pos = nx.get_node_attributes(G, 'pos')
+ 
+     # Build the edge coordinates from the graph so the connections are actually drawn
+     edge_x, edge_y = [], []
+     for source, target in G.edges():
+         x0, y0 = pos[source]
+         x1, y1 = pos[target]
+         edge_x += [x0, x1, None]
+         edge_y += [y0, y1, None]
+ 
+     edge_trace = go.Scatter(
+         x=edge_x, y=edge_y,
+         line=dict(width=2, color='#888'),
+         hoverinfo='none',
+         mode='lines')
+ 
+     node_trace = go.Scatter(
+         x=[], y=[],
+         mode='markers+text',
+         hoverinfo='text',
+         marker=dict(
+             showscale=False,
+             color=[],
+             size=30,
+             line_width=2
+         ),
+         text=[],
+         textposition="top center"
+     )
+ 
+     for node in G.nodes():
+         x, y = pos[node]
+         node_trace['x'] += (x,)
+         node_trace['y'] += (y,)
+         node_info = f"{node}<br>Level: {G.nodes[node]['level']}"
+         node_trace['text'] += (node_info,)
+         node_trace['marker']['color'] += (color_map.get(G.nodes[node]['level'], '#CCCCCC'),)
+ 
+     fig = go.Figure(data=[edge_trace, node_trace],
+                     layout=go.Layout(
+                         title='Interactive Study Roadmap',
+                         title_font_size=16,
+                         showlegend=False,
+                         hovermode='closest',
+                         margin=dict(b=20, l=5, r=5, t=40),
+                         annotations=[dict(
+                             text="",
+                             showarrow=False,
+                             xref="paper", yref="paper",
+                             x=0.005, y=-0.002
+                         )],
+                         xaxis=dict(showgrid=False, zeroline=False, showticklabels=False),
+                         yaxis=dict(showgrid=False, zeroline=False, showticklabels=False),
+                         plot_bgcolor='rgba(0,0,0,0)',
+                         paper_bgcolor='rgba(0,0,0,0)'
+                     ))
+ 
+     # Add a color legend
+     for level, color in color_map.items():
+         fig.add_trace(go.Scatter(
+             x=[None], y=[None],
+             mode='markers',
+             marker=dict(size=10, color=color),
+             showlegend=True,
+             name=level
+         ))
+ 
+     # Re-enable the legend so the per-level color entries added above are visible
+     fig.update_layout(showlegend=True, legend=dict(
+         orientation="h",
+         yanchor="bottom",
+         y=1.02,
+         xanchor="right",
+         x=1
+     ))
+ 
+     return fig
+ 
+ def get_user_progress(roadmap):
+     if 'user_progress' not in st.session_state:
+         st.session_state.user_progress = {}
+ 
+     for level, step in roadmap.steps.items():
+         if step.title not in st.session_state.user_progress:
+             st.session_state.user_progress[step.title] = 0
+ 
+     return st.session_state.user_progress
+ 
+ def update_user_progress(step_title, progress):
+     st.session_state.user_progress[step_title] = progress
+ 
+ def calculate_overall_progress(progress_dict):
+     if not progress_dict:
+         return 0
+     total_steps = len(progress_dict)
+     completed_steps = sum(1 for progress in progress_dict.values() if progress == 100)
+     return (completed_steps / total_steps) * 100
+ 
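Worth noting: `calculate_overall_progress` counts only steps at exactly 100%, so partial progress on a step does not move the overall bar. A quick illustration with made-up step titles:

```python
progress = {"Knowledge step": 100, "Comprehension step": 50, "Application step": 0}
calculate_overall_progress(progress)  # -> 33.33...; only the fully completed step counts
```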
+ def generate_simplified_step(topic, level, chat):
+     prompt = f"""Create a detailed study step for the topic: {topic} at the {level} level of Bloom's Taxonomy.
+ 
+     Provide:
+     1. A descriptive title (max 10 words)
+     2. A detailed description (500-700 words) explaining what this step entails, how the user should approach it, and why it's important for mastering the topic at this level. The description should be specific to {topic} and not a generic explanation of the Bloom's Taxonomy level.
+     3. Estimated time for completion (e.g., 3-4 days, 1-2 weeks, etc.)
+     4. A paragraph (100-150 words) on how to use this level effectively, including tips and strategies specific to {topic} at this {level} level
+ 
+     Format your response as a valid JSON object with the following structure:
+     {{
+         "title": "Step title",
+         "description": "Step description",
+         "estimated_time": "Estimated time",
+         "how_to_use": "Paragraph on how to use this level effectively"
+     }}
+     """
+ 
+     try:
+         response = chat.invoke([{"role": "system", "content": prompt}])
+         valid_json = ensure_valid_json(response.content)
+         if valid_json is None:
+             raise ValueError("Failed to create valid JSON")
+ 
+         step_dict = json.loads(valid_json)
+ 
+         # Generate diverse resources
+         resources = generate_diverse_resources(topic, level)
+ 
+         return RoadmapStep(
+             title=step_dict["title"],
+             description=step_dict["description"],
+             resources=resources,
+             estimated_time=step_dict["estimated_time"],
+             how_to_use=step_dict["how_to_use"]
+         )
+     except Exception as e:
+         logger.error(f"Error in generate_simplified_step for {level}: {str(e)}")
+         return create_fallback_step(topic, level, chat)
+ 
+ def display_step(step, level, user_progress):
+     with st.expander(f"{level.capitalize()}: {step.title}"):
+         st.write(f"**Description:** {step.description}")
+         st.write(f"**Estimated Time:** {step.estimated_time}")
+         st.write("**Resources:**")
+         for resource in step.resources:
+             st.markdown(f"- [{resource['title']}]({resource['url']})")
+             if 'contribution' in resource:
+                 st.write(f" *{resource['contribution']}*")
+ 
+         # Check if how_to_use exists before displaying it
+         if step.how_to_use:
+             st.write("**How to use this level effectively:**")
+             st.write(step.how_to_use)
+ 
+         progress = st.slider(f"Progress for {step.title}", 0, 100, user_progress.get(step.title, 0), key=f"progress_{level}")
+         update_user_progress(step.title, progress)
+ 
+ def main():
+     st.set_page_config(page_title="S.H.E.R.L.O.C.K. Study Roadmap Generator", layout="wide")
+ 
+     # Custom CSS for dark theme
+     st.markdown("""
+     <style>
+     .stApp {
+         background-color: #1E1E1E;
+         color: #FFFFFF;
+     }
+     .stButton>button {
+         background-color: #4CAF50;
+         color: white;
+         border-radius: 5px;
+     }
+     .stProgress > div > div > div > div {
+         background-color: #4CAF50;
+     }
+     .streamlit-expanderHeader {
+         background-color: #2E2E2E;
+         color: #FFFFFF;
+     }
+     .streamlit-expanderContent {
+         background-color: #2E2E2E;
+         color: #FFFFFF;
+     }
+     </style>
+     """, unsafe_allow_html=True)
+ 
+     st.title("🧠 S.H.E.R.L.O.C.K. Study Roadmap Generator")
+     st.write("Generate a comprehensive study roadmap based on first principles for any topic.")
+ 
+     # Sidebar
+     with st.sidebar:
+         st.image("https://placekitten.com/300/200", caption="S.H.E.R.L.O.C.K.", use_column_width=True)
+         st.markdown("""
+         ## About S.H.E.R.L.O.C.K.
+         **S**tudy **H**elper for **E**fficient **R**oadmaps and **L**earning **O**ptimization using **C**omprehensive **K**nowledge
+ 
+         S.H.E.R.L.O.C.K. is your AI-powered study companion, designed to create personalized learning roadmaps for any topic. It breaks down complex subjects into manageable steps, ensuring a comprehensive understanding from fundamentals to advanced concepts.
+         """)
+ 
+         st.subheader("📋 Todo List")
+         if 'todos' not in st.session_state:
+             st.session_state.todos = []
+ 
+         new_todo = st.text_input("Add a new todo:")
+         if st.button("Add Todo", key="add_todo"):
+             if new_todo:
+                 st.session_state.todos.append({"task": new_todo, "completed": False})
+                 st.success("Todo added successfully!")
+             else:
+                 st.warning("Please enter a todo item.")
+ 
+         for i, todo in enumerate(st.session_state.todos):
+             col1, col2, col3 = st.columns([0.05, 0.8, 0.15])
+             with col1:
+                 todo['completed'] = st.checkbox("", todo['completed'], key=f"todo_{i}")
+             with col2:
+                 st.write(todo['task'], key=f"todo_text_{i}")
+             with col3:
+                 if st.button("🗑️", key=f"delete_{i}", help="Delete todo"):
+                     st.session_state.todos.pop(i)
+                     st.experimental_rerun()
+ 
+         st.subheader("⏱️ Pomodoro Timer")
+         pomodoro_duration = st.slider("Pomodoro Duration (minutes)", 1, 60, 25)
+         if st.button("Start Pomodoro"):
+             progress_bar = st.progress(0)
+             for i in range(pomodoro_duration * 60):
+                 time.sleep(1)
+                 progress_bar.progress((i + 1) / (pomodoro_duration * 60))
+             st.success("Pomodoro completed!")
+             if 'achievements' not in st.session_state:
+                 st.session_state.achievements = set()
+             st.session_state.achievements.add("Consistent Learner")
+ 
+     topic = st.text_input("📚 Enter the topic you want to master:")
+ 
+     if st.button("🚀 Generate Roadmap"):
+         if topic:
+             with st.spinner("🧠 Generating your personalized study roadmap..."):
+                 try:
+                     logger.info(f"Starting roadmap generation for topic: {topic}")
+                     roadmap = generate_roadmap(topic)
+                     if roadmap and roadmap.steps:
+                         logger.info("Roadmap generated successfully")
+                         st.session_state.current_roadmap = roadmap
+                         st.session_state.current_topic = topic
+                         st.success("Roadmap generated successfully!")
+                     else:
+                         logger.warning("Generated roadmap is empty or invalid")
+                         st.error("Failed to generate a valid roadmap. Please try again with a different topic.")
+                 except Exception as e:
+                     logger.error(f"Error during roadmap generation: {str(e)}", exc_info=True)
+                     st.error(f"An error occurred while generating the roadmap: {str(e)}")
+ 
+     if 'current_roadmap' in st.session_state:
+         st.subheader(f"📊 Study Roadmap for: {st.session_state.current_topic}")
+ 
+         roadmap = st.session_state.current_roadmap
+         fig = create_interactive_graph(roadmap)
+         fig.update_layout(
+             plot_bgcolor='rgba(0,0,0,0)',
+             paper_bgcolor='rgba(0,0,0,0)',
+             font_color='#FFFFFF'
+         )
+         st.plotly_chart(fig, use_container_width=True)
+ 
+         user_progress = get_user_progress(roadmap)
+ 
+         levels_description = {
+             "knowledge": "Understanding and remembering basic facts and concepts",
+             "comprehension": "Grasping the meaning and interpreting information",
+             "application": "Using knowledge in new situations",
+             "analysis": "Breaking information into parts and examining relationships",
+             "synthesis": "Combining elements to form a new whole",
+             "evaluation": "Making judgments based on criteria and standards"
+         }
+ 
+         for level, step in roadmap.steps.items():
+             st.header(f"{level.capitalize()} Level")
+             st.write(f"**Description:** {levels_description[level]}")
+             st.write("**How to master this level:**")
+             st.write(f"To master the {level} level, focus on {levels_description[level].lower()}. Engage with the resources provided, practice applying the concepts, and gradually build your understanding. Remember that mastery at this level is crucial before moving to the next.")
+             display_step(step, level, user_progress)
+ 
+         overall_progress = calculate_overall_progress(user_progress)
+         st.progress(overall_progress / 100)
+         st.write(f"Overall progress: {overall_progress:.2f}%")
+ 
+         roadmap_json = json.dumps(roadmap.dict(), indent=2)
+         st.download_button(
+             label="📥 Download Roadmap as JSON",
+             data=roadmap_json,
+             file_name="study_roadmap.json",
+             mime="application/json"
+         )
+ 
+ if __name__ == "__main__":
+     main()
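Because of the `__main__` guard, the page can also be launched on its own for quick testing, for example with `streamlit run pages/study_roadmap.py` from the repository root (the file path is an assumption based on the module name) and with `AI71_API_KEY` available in the environment, as noted above.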