AdithyaSNair committed on
Commit
cae8015
·
verified ·
1 Parent(s): f78a406

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +150 -107
app.py CHANGED
@@ -1,11 +1,11 @@
 
 
1
  import streamlit as st
2
  from streamlit_option_menu import option_menu
3
  from langchain_groq import ChatGroq
4
- from langchain_core.prompts import PromptTemplate
5
  import fitz # PyMuPDF
6
  import requests
7
  from bs4 import BeautifulSoup
8
- import uuid
9
  import plotly.express as px
10
  import re
11
  import pandas as pd
@@ -22,7 +22,6 @@ llm = ChatGroq(
22
  model_name="llama-3.1-70b-versatile"
23
  )
24
 
25
-
26
  def extract_text_from_pdf(pdf_file):
27
  """
28
  Extracts text from an uploaded PDF file.
@@ -48,6 +47,7 @@ def extract_job_description(job_link):
48
  response = requests.get(job_link, headers=headers)
49
  response.raise_for_status()
50
  soup = BeautifulSoup(response.text, 'html.parser')
 
51
  job_description = soup.get_text(separator='\n')
52
  return job_description.strip()
53
  except Exception as e:
@@ -58,7 +58,7 @@ def extract_requirements(job_description):
58
  """
59
  Uses Groq to extract job requirements from the job description.
60
  """
61
- prompt_text = f"""
62
  The following is a job description:
63
 
64
  {job_description}
@@ -68,18 +68,19 @@ def extract_requirements(job_description):
68
  Requirements:
69
  """
70
 
71
- prompt = PromptTemplate.from_template(prompt_text)
72
- chain = prompt | llm
73
- response = chain.invoke(prompt) # Changed from dict to string
74
-
75
- requirements = response.content.strip()
76
- return requirements
 
77
 
78
  def generate_email(job_description, requirements, resume_text):
79
  """
80
  Generates a personalized cold email using Groq based on the job description, requirements, and resume.
81
  """
82
- prompt_text = f"""
83
  You are Adithya S Nair, a recent Computer Science graduate specializing in Artificial Intelligence and Machine Learning. Craft a concise and professional cold email to a potential employer based on the following information:
84
 
85
  **Job Description:**
@@ -100,18 +101,19 @@ def generate_email(job_description, requirements, resume_text):
100
  **Email:**
101
  """
102
 
103
- prompt = PromptTemplate.from_template(prompt_text)
104
- chain = prompt | llm
105
- response = chain.invoke(prompt) # Changed from dict to string
106
-
107
- email_text = response.content.strip()
108
- return email_text
 
109
 
110
  def generate_cover_letter(job_description, requirements, resume_text):
111
  """
112
  Generates a personalized cover letter using Groq based on the job description, requirements, and resume.
113
  """
114
- prompt_text = f"""
115
  You are Adithya S Nair, a recent Computer Science graduate specializing in Artificial Intelligence and Machine Learning. Compose a personalized and professional cover letter based on the following information:
116
 
117
  **Job Description:**
@@ -136,18 +138,19 @@ def generate_cover_letter(job_description, requirements, resume_text):
136
  **Cover Letter:**
137
  """
138
 
139
- prompt = PromptTemplate.from_template(prompt_text)
140
- chain = prompt | llm
141
- response = chain.invoke(prompt) # Changed from dict to string
142
-
143
- cover_letter = response.content.strip()
144
- return cover_letter
 
145
 
146
  def extract_skills(text):
147
  """
148
  Extracts a list of skills from the resume text using Groq.
149
  """
150
- prompt_text = f"""
151
  Extract a comprehensive list of technical and soft skills from the following resume text. Provide the skills as a comma-separated list.
152
 
153
  Resume Text:
@@ -156,20 +159,21 @@ def extract_skills(text):
156
  Skills:
157
  """
158
 
159
- prompt = PromptTemplate.from_template(prompt_text)
160
- chain = prompt | llm
161
- response = chain.invoke(prompt) # Changed from dict to string
162
-
163
- skills = response.content.strip()
164
- # Clean and split the skills
165
- skills_list = [skill.strip() for skill in re.split(',|\n', skills) if skill.strip()]
166
- return skills_list
 
167
 
168
  def suggest_keywords(resume_text, job_description=None):
169
  """
170
  Suggests additional relevant keywords to enhance resume compatibility with ATS.
171
  """
172
- prompt_text = f"""
173
  Analyze the following resume text and suggest additional relevant keywords that can enhance its compatibility with Applicant Tracking Systems (ATS). If a job description is provided, tailor the keywords to align with the job requirements.
174
 
175
  Resume Text:
@@ -181,13 +185,14 @@ def suggest_keywords(resume_text, job_description=None):
181
  Suggested Keywords:
182
  """
183
 
184
- prompt = PromptTemplate.from_template(prompt_text)
185
- chain = prompt | llm
186
- response = chain.invoke(prompt) # Changed from dict to string
187
-
188
- keywords = response.content.strip()
189
- keywords_list = [keyword.strip() for keyword in re.split(',|\n', keywords) if keyword.strip()]
190
- return keywords_list
 
191
 
192
  def get_job_recommendations(job_title, location="India"):
193
  """
@@ -195,8 +200,8 @@ def get_job_recommendations(job_title, location="India"):
195
  """
196
  url = "https://jsearch.p.rapidapi.com/estimated-salary"
197
  querystring = {
198
- "job_title": job_title,
199
- "location": location,
200
  "radius": "100" # Adjust radius as needed
201
  }
202
 
@@ -220,6 +225,9 @@ def get_job_recommendations(job_title, location="India"):
220
  "avg_salary": avg_salary,
221
  "max_salary": max_salary
222
  }
 
 
 
223
  except Exception as e:
224
  st.error(f"Error fetching salary data: {e}")
225
  return {}
@@ -240,7 +248,7 @@ def create_experience_timeline(resume_text):
240
  Creates an experience timeline from the resume text.
241
  """
242
  # Extract work experience details using Groq
243
- prompt_text = f"""
244
  From the following resume text, extract the job titles, companies, and durations of employment. Provide the information in a table format with columns: Job Title, Company, Duration (in years).
245
 
246
  Resume Text:
@@ -249,32 +257,33 @@ def create_experience_timeline(resume_text):
249
  Table:
250
  """
251
 
252
- prompt = PromptTemplate.from_template(prompt_text)
253
- chain = prompt | llm
254
- response = chain.invoke(prompt) # Changed from dict to string
255
-
256
- table_text = response.content.strip()
257
- # Parse the table_text to create a DataFrame
258
- data = []
259
- for line in table_text.split('\n'):
260
- if line.strip() and not line.lower().startswith("job title"):
261
- parts = line.split('|')
262
- if len(parts) == 3:
263
- job_title = parts[0].strip()
264
- company = parts[1].strip()
265
- duration = parts[2].strip()
266
- # Convert duration to a float representing years
267
- duration_years = parse_duration(duration)
268
- data.append({"Job Title": job_title, "Company": company, "Duration (years)": duration_years})
269
- df = pd.DataFrame(data)
270
- if not df.empty:
271
- # Create a cumulative duration for timeline
272
- df['Start Year'] = df['Duration (years)'].cumsum() - df['Duration (years)']
273
- df['End Year'] = df['Duration (years)'].cumsum()
274
- fig = px.timeline(df, x_start="Start Year", x_end="End Year", y="Job Title", color="Company", title="Experience Timeline")
275
- fig.update_yaxes(categoryorder="total ascending")
276
- return fig
277
- else:
 
278
  return None
279
 
280
  def parse_duration(duration_str):
@@ -350,7 +359,7 @@ def fetch_applications():
350
  "Notes": app[6],
351
  "Job Description": app[7],
352
  "Resume Text": app[8],
353
- "Skills": app[9].split(', ')
354
  })
355
  return applications
356
 
@@ -390,9 +399,13 @@ def generate_learning_path(career_goal, current_skills):
390
  **Learning Path:**
391
  """
392
 
393
- response = llm.invoke(prompt) # Changed from dict to string
394
- learning_path = response.content.strip()
395
- return learning_path
 
 
 
 
396
 
397
  # -------------------------------
398
  # Page Functions
@@ -517,13 +530,16 @@ def resume_analysis_page():
517
  st.subheader("Extracted Information")
518
  # Extracted skills
519
  skills = extract_skills(resume_text)
520
- st.write("**Skills:**", ', '.join(skills))
521
  # Extract keywords
522
  keywords = suggest_keywords(resume_text)
523
- st.write("**Suggested Keywords for ATS Optimization:**", ', '.join(keywords))
524
  # Provide optimization suggestions
525
  st.subheader("Optimization Suggestions")
526
- st.write("- **Keyword Optimization:** Incorporate the suggested keywords to improve ATS compatibility.")
 
 
 
527
  st.write("- **Formatting:** Ensure consistent formatting for headings and bullet points to enhance readability.")
528
  st.write("- **Experience Details:** Provide specific achievements and quantify your accomplishments where possible.")
529
 
@@ -534,6 +550,8 @@ def resume_analysis_page():
534
  st.write("**Skill Distribution:**")
535
  fig_skills = create_skill_distribution_chart(skills)
536
  st.plotly_chart(fig_skills)
 
 
537
 
538
  # Experience Timeline (if applicable)
539
  fig_experience = create_experience_timeline(resume_text)
@@ -653,23 +671,29 @@ def interview_preparation_module():
653
  prompt = f"""
654
  Generate a list of 10 interview questions for a {job_title} position at {company}. Include a mix of technical and behavioral questions.
655
  """
656
- questions = llm.invoke(prompt).content.strip() # Changed from dict to string
657
- st.subheader("Mock Interview Questions:")
658
- st.write(questions)
659
-
660
- # Optionally, provide sample answers or tips
661
- if st.checkbox("Show Sample Answers"):
662
- sample_prompt = f"""
663
- Provide sample answers for the following interview questions for a {job_title} position at {company}.
664
-
665
- Questions:
666
- {questions}
667
-
668
- Sample Answers:
669
- """
670
- sample_answers = llm.invoke(sample_prompt).content.strip() # Changed from dict to string
671
- st.subheader("Sample Answers:")
672
- st.write(sample_answers)
 
 
 
 
 
 
673
 
674
  def personalized_learning_paths_module():
675
  st.header("Personalized Learning Paths")
@@ -688,8 +712,11 @@ def personalized_learning_paths_module():
688
  return
689
  with st.spinner("Generating your personalized learning path..."):
690
  learning_path = generate_learning_path(career_goal, current_skills)
691
- st.subheader("Your Personalized Learning Path:")
692
- st.write(learning_path)
 
 
 
693
 
694
  def networking_opportunities_module():
695
  st.header("Networking Opportunities")
@@ -710,9 +737,12 @@ def networking_opportunities_module():
710
  prompt = f"""
711
  Based on the following skills: {user_skills}, and industry: {industry}, suggest relevant LinkedIn groups, professional organizations, and industry events for networking.
712
  """
713
- suggestions = llm.invoke(prompt).content.strip() # Changed from dict to string
714
- st.subheader("Recommended Networking Groups and Events:")
715
- st.write(suggestions)
 
 
 
716
 
717
  def salary_estimation_module():
718
  st.header("Salary Estimation and Negotiation Tips")
@@ -761,9 +791,12 @@ def salary_estimation_module():
761
  tips_prompt = f"""
762
  Provide a list of 5 effective tips for negotiating a salary for a {job_title} position in {location}.
763
  """
764
- tips = llm.invoke(tips_prompt).content.strip() # Changed from dict to string
765
- st.subheader("Negotiation Tips:")
766
- st.write(tips)
 
 
 
767
  else:
768
  st.error("Failed to retrieve salary data.")
769
 
@@ -797,6 +830,9 @@ def gamification_module():
797
  Stay motivated by earning badges and tracking your progress!
798
  """)
799
 
 
 
 
800
  # Example achievements
801
  applications = fetch_applications()
802
  num_apps = len(applications)
@@ -815,10 +851,10 @@ def gamification_module():
815
  st.info(f"🔜 {achievement}")
816
 
817
  # Progress Bar
818
- progress = min(num_apps / 10, 1.0) # Changed from num_apps / 10 * 100 to num_apps / 10
819
  st.write("**Overall Progress:**")
820
  st.progress(progress)
821
- st.write(f"{progress * 100:.0f}% complete") # Adjusted percentage display
822
 
823
  def resource_library_page():
824
  st.header("Resource Library")
@@ -857,7 +893,7 @@ def resource_library_page():
857
  mime="application/octet-stream"
858
  )
859
  except FileNotFoundError:
860
- st.error(f"File {resource['file']} not found.")
861
  st.write("---")
862
 
863
  def success_stories_page():
@@ -917,8 +953,12 @@ def chatbot_support_page():
917
 
918
  {user_input}
919
  """
920
- response = llm.invoke(prompt).content.strip() # Changed from dict to string
921
- st.session_state['chat_history'].append(f"Assistant: {response}")
 
 
 
 
922
 
923
  # Display chat history
924
  for message in st.session_state['chat_history']:
@@ -934,6 +974,9 @@ def chatbot_support_page():
934
  def main():
935
  st.set_page_config(page_title="Job Application Assistant", layout="wide")
936
 
 
 
 
937
  # Sidebar Navigation
938
  with st.sidebar:
939
  selected = option_menu(
 
1
+ # app.py
2
+
3
  import streamlit as st
4
  from streamlit_option_menu import option_menu
5
  from langchain_groq import ChatGroq
 
6
  import fitz # PyMuPDF
7
  import requests
8
  from bs4 import BeautifulSoup
 
9
  import plotly.express as px
10
  import re
11
  import pandas as pd
 
22
  model_name="llama-3.1-70b-versatile"
23
  )
24
 
 
25
  def extract_text_from_pdf(pdf_file):
26
  """
27
  Extracts text from an uploaded PDF file.
 
47
  response = requests.get(job_link, headers=headers)
48
  response.raise_for_status()
49
  soup = BeautifulSoup(response.text, 'html.parser')
50
+ # You might need to adjust the selectors based on the website's structure
51
  job_description = soup.get_text(separator='\n')
52
  return job_description.strip()
53
  except Exception as e:
 
58
  """
59
  Uses Groq to extract job requirements from the job description.
60
  """
61
+ prompt = f"""
62
  The following is a job description:
63
 
64
  {job_description}
 
68
  Requirements:
69
  """
70
 
71
+ try:
72
+ response = llm.invoke(prompt)
73
+ requirements = response.content.strip()
74
+ return requirements
75
+ except Exception as e:
76
+ st.error(f"Error extracting requirements: {e}")
77
+ return ""
78
 
79
  def generate_email(job_description, requirements, resume_text):
80
  """
81
  Generates a personalized cold email using Groq based on the job description, requirements, and resume.
82
  """
83
+ prompt = f"""
84
  You are Adithya S Nair, a recent Computer Science graduate specializing in Artificial Intelligence and Machine Learning. Craft a concise and professional cold email to a potential employer based on the following information:
85
 
86
  **Job Description:**
 
101
  **Email:**
102
  """
103
 
104
+ try:
105
+ response = llm.invoke(prompt)
106
+ email_text = response.content.strip()
107
+ return email_text
108
+ except Exception as e:
109
+ st.error(f"Error generating email: {e}")
110
+ return ""
111
 
112
  def generate_cover_letter(job_description, requirements, resume_text):
113
  """
114
  Generates a personalized cover letter using Groq based on the job description, requirements, and resume.
115
  """
116
+ prompt = f"""
117
  You are Adithya S Nair, a recent Computer Science graduate specializing in Artificial Intelligence and Machine Learning. Compose a personalized and professional cover letter based on the following information:
118
 
119
  **Job Description:**
 
138
  **Cover Letter:**
139
  """
140
 
141
+ try:
142
+ response = llm.invoke(prompt)
143
+ cover_letter = response.content.strip()
144
+ return cover_letter
145
+ except Exception as e:
146
+ st.error(f"Error generating cover letter: {e}")
147
+ return ""
148
 
149
  def extract_skills(text):
150
  """
151
  Extracts a list of skills from the resume text using Groq.
152
  """
153
+ prompt = f"""
154
  Extract a comprehensive list of technical and soft skills from the following resume text. Provide the skills as a comma-separated list.
155
 
156
  Resume Text:
 
159
  Skills:
160
  """
161
 
162
+ try:
163
+ response = llm.invoke(prompt)
164
+ skills = response.content.strip()
165
+ # Clean and split the skills
166
+ skills_list = [skill.strip() for skill in re.split(',|\n', skills) if skill.strip()]
167
+ return skills_list
168
+ except Exception as e:
169
+ st.error(f"Error extracting skills: {e}")
170
+ return []
171
 
172
  def suggest_keywords(resume_text, job_description=None):
173
  """
174
  Suggests additional relevant keywords to enhance resume compatibility with ATS.
175
  """
176
+ prompt = f"""
177
  Analyze the following resume text and suggest additional relevant keywords that can enhance its compatibility with Applicant Tracking Systems (ATS). If a job description is provided, tailor the keywords to align with the job requirements.
178
 
179
  Resume Text:
 
185
  Suggested Keywords:
186
  """
187
 
188
+ try:
189
+ response = llm.invoke(prompt)
190
+ keywords = response.content.strip()
191
+ keywords_list = [keyword.strip() for keyword in re.split(',|\n', keywords) if keyword.strip()]
192
+ return keywords_list
193
+ except Exception as e:
194
+ st.error(f"Error suggesting keywords: {e}")
195
+ return []
196
 
197
  def get_job_recommendations(job_title, location="India"):
198
  """
 
200
  """
201
  url = "https://jsearch.p.rapidapi.com/estimated-salary"
202
  querystring = {
203
+ "job_title": job_title.strip(),
204
+ "location": location.strip(),
205
  "radius": "100" # Adjust radius as needed
206
  }
207
 
 
225
  "avg_salary": avg_salary,
226
  "max_salary": max_salary
227
  }
228
+ except requests.exceptions.HTTPError as http_err:
229
+ st.error(f"HTTP error occurred: {http_err}")
230
+ return {}
231
  except Exception as e:
232
  st.error(f"Error fetching salary data: {e}")
233
  return {}
 
248
  Creates an experience timeline from the resume text.
249
  """
250
  # Extract work experience details using Groq
251
+ prompt = f"""
252
  From the following resume text, extract the job titles, companies, and durations of employment. Provide the information in a table format with columns: Job Title, Company, Duration (in years).
253
 
254
  Resume Text:
 
257
  Table:
258
  """
259
 
260
+ try:
261
+ response = llm.invoke(prompt)
262
+ table_text = response.content.strip()
263
+ # Parse the table_text to create a DataFrame
264
+ data = []
265
+ for line in table_text.split('\n'):
266
+ if line.strip() and not line.lower().startswith("job title"):
267
+ parts = line.split('|')
268
+ if len(parts) == 3:
269
+ job_title = parts[0].strip()
270
+ company = parts[1].strip()
271
+ duration = parts[2].strip()
272
+ # Convert duration to a float representing years
273
+ duration_years = parse_duration(duration)
274
+ data.append({"Job Title": job_title, "Company": company, "Duration (years)": duration_years})
275
+ df = pd.DataFrame(data)
276
+ if not df.empty:
277
+ # Create a cumulative duration for timeline
278
+ df['Start Year'] = df['Duration (years)'].cumsum() - df['Duration (years)']
279
+ df['End Year'] = df['Duration (years)'].cumsum()
280
+ fig = px.timeline(df, x_start="Start Year", x_end="End Year", y="Job Title", color="Company", title="Experience Timeline")
281
+ fig.update_yaxes(categoryorder="total ascending")
282
+ return fig
283
+ else:
284
+ return None
285
+ except Exception as e:
286
+ st.error(f"Error creating experience timeline: {e}")
287
  return None
288
 
289
  def parse_duration(duration_str):
 
359
  "Notes": app[6],
360
  "Job Description": app[7],
361
  "Resume Text": app[8],
362
+ "Skills": app[9].split(', ') if app[9] else []
363
  })
364
  return applications
365
 
 
399
  **Learning Path:**
400
  """
401
 
402
+ try:
403
+ response = llm.invoke(prompt)
404
+ learning_path = response.content.strip()
405
+ return learning_path
406
+ except Exception as e:
407
+ st.error(f"Error generating learning path: {e}")
408
+ return ""
409
 
410
  # -------------------------------
411
  # Page Functions
 
530
  st.subheader("Extracted Information")
531
  # Extracted skills
532
  skills = extract_skills(resume_text)
533
+ st.write("**Skills:**", ', '.join(skills) if skills else "No skills extracted.")
534
  # Extract keywords
535
  keywords = suggest_keywords(resume_text)
536
+ st.write("**Suggested Keywords for ATS Optimization:**", ', '.join(keywords) if keywords else "No keywords suggested.")
537
  # Provide optimization suggestions
538
  st.subheader("Optimization Suggestions")
539
+ if keywords:
540
+ st.write("- **Keyword Optimization:** Incorporate the suggested keywords to improve ATS compatibility.")
541
+ else:
542
+ st.write("- **Keyword Optimization:** No keywords suggested.")
543
  st.write("- **Formatting:** Ensure consistent formatting for headings and bullet points to enhance readability.")
544
  st.write("- **Experience Details:** Provide specific achievements and quantify your accomplishments where possible.")
545
 
 
550
  st.write("**Skill Distribution:**")
551
  fig_skills = create_skill_distribution_chart(skills)
552
  st.plotly_chart(fig_skills)
553
+ else:
554
+ st.write("**Skill Distribution:** No skills to display.")
555
 
556
  # Experience Timeline (if applicable)
557
  fig_experience = create_experience_timeline(resume_text)
 
671
  prompt = f"""
672
  Generate a list of 10 interview questions for a {job_title} position at {company}. Include a mix of technical and behavioral questions.
673
  """
674
+ try:
675
+ questions = llm.invoke(prompt).content.strip()
676
+ st.subheader("Mock Interview Questions:")
677
+ st.write(questions)
678
+
679
+ # Optionally, provide sample answers or tips
680
+ if st.checkbox("Show Sample Answers"):
681
+ sample_prompt = f"""
682
+ Provide sample answers for the following interview questions for a {job_title} position at {company}.
683
+
684
+ Questions:
685
+ {questions}
686
+
687
+ Sample Answers:
688
+ """
689
+ try:
690
+ sample_answers = llm.invoke(sample_prompt).content.strip()
691
+ st.subheader("Sample Answers:")
692
+ st.write(sample_answers)
693
+ except Exception as e:
694
+ st.error(f"Error generating sample answers: {e}")
695
+ except Exception as e:
696
+ st.error(f"Error generating interview questions: {e}")
697
 
698
  def personalized_learning_paths_module():
699
  st.header("Personalized Learning Paths")
 
712
  return
713
  with st.spinner("Generating your personalized learning path..."):
714
  learning_path = generate_learning_path(career_goal, current_skills)
715
+ if learning_path:
716
+ st.subheader("Your Personalized Learning Path:")
717
+ st.write(learning_path)
718
+ else:
719
+ st.error("Failed to generate learning path.")
720
 
721
  def networking_opportunities_module():
722
  st.header("Networking Opportunities")
 
737
  prompt = f"""
738
  Based on the following skills: {user_skills}, and industry: {industry}, suggest relevant LinkedIn groups, professional organizations, and industry events for networking.
739
  """
740
+ try:
741
+ suggestions = llm.invoke(prompt).content.strip()
742
+ st.subheader("Recommended Networking Groups and Events:")
743
+ st.write(suggestions)
744
+ except Exception as e:
745
+ st.error(f"Error fetching networking opportunities: {e}")
746
 
747
  def salary_estimation_module():
748
  st.header("Salary Estimation and Negotiation Tips")
 
791
  tips_prompt = f"""
792
  Provide a list of 5 effective tips for negotiating a salary for a {job_title} position in {location}.
793
  """
794
+ try:
795
+ tips = llm.invoke(tips_prompt).content.strip()
796
+ st.subheader("Negotiation Tips:")
797
+ st.write(tips)
798
+ except Exception as e:
799
+ st.error(f"Error generating negotiation tips: {e}")
800
  else:
801
  st.error("Failed to retrieve salary data.")
802
 
 
830
  Stay motivated by earning badges and tracking your progress!
831
  """)
832
 
833
+ # Initialize database
834
+ init_db()
835
+
836
  # Example achievements
837
  applications = fetch_applications()
838
  num_apps = len(applications)
 
851
  st.info(f"🔜 {achievement}")
852
 
853
  # Progress Bar
854
+ progress = min(num_apps / 10, 1.0) # Ensure progress is between 0.0 and 1.0
855
  st.write("**Overall Progress:**")
856
  st.progress(progress)
857
+ st.write(f"{progress * 100:.0f}% complete")
858
 
859
  def resource_library_page():
860
  st.header("Resource Library")
 
893
  mime="application/octet-stream"
894
  )
895
  except FileNotFoundError:
896
+ st.error(f"File {resource['file']} not found. Please ensure the file is in the correct directory.")
897
  st.write("---")
898
 
899
  def success_stories_page():
 
953
 
954
  {user_input}
955
  """
956
+ try:
957
+ response = llm.invoke(prompt).content.strip()
958
+ st.session_state['chat_history'].append(f"Assistant: {response}")
959
+ except Exception as e:
960
+ st.session_state['chat_history'].append(f"Assistant: Sorry, I encountered an error while processing your request.")
961
+ st.error(f"Error in chatbot: {e}")
962
 
963
  # Display chat history
964
  for message in st.session_state['chat_history']:
 
974
  def main():
975
  st.set_page_config(page_title="Job Application Assistant", layout="wide")
976
 
977
+ # Initialize database early to ensure tables exist
978
+ init_db()
979
+
980
  # Sidebar Navigation
981
  with st.sidebar:
982
  selected = option_menu(