bsiddhharth committed
Commit c626607 · 1 parent: 50c3f7b

Updated the groq_api setup to deploy on Streamlit; commented out the logger calls

Files changed (4):
  1. app.py (+8 -8)
  2. cv_analyzer_search.py (+17 -17)
  3. cv_short.py (+16 -15)
  4. resume_advance_analysis.py (+10 -10)
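The commit message points at deploying on Streamlit with the Groq API key coming from st.secrets (visible as context in cv_analyzer_search.py below). A minimal sketch of that pattern; the fallback to an environment variable is an assumption for local runs, not something in this commit:

import os
import streamlit as st
from groq import Groq

def get_groq_api_key() -> str:
    # Prefer Streamlit secrets (set in the app's Secrets settings on Streamlit Cloud);
    # fall back to an environment variable for local development (assumed convenience,
    # not part of this commit).
    if "GROQ_API_KEY" in st.secrets:
        return st.secrets["GROQ_API_KEY"]
    key = os.getenv("GROQ_API_KEY")
    if not key:
        raise RuntimeError("GROQ_API_KEY not found in st.secrets or the environment")
    return key

client = Groq(api_key=get_groq_api_key())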
app.py CHANGED
@@ -3,7 +3,7 @@ import streamlit as st
 import cv_question
 import cv_short
 import cv_analyzer_search
-from logger import setup_logger
+# from logger import setup_logger
 
 # def initialize_session_state():
 #     """Initialize all session state variables with default values."""
@@ -31,27 +31,27 @@ def clear_session_state():
 
 def main():
     # Setup logger for app
-    app_logger = setup_logger('app_logger', 'app.log')
+    # app_logger = setup_logger('app_logger', 'app.log')
 
     # initialize_session_state()
 
     # Sidebar
     st.sidebar.title("Navigation")
-    app_logger.info("Sidebar navigation displayed")
+    # app_logger.info("Sidebar navigation displayed")
 
     # Add reset button in sidebar
     if st.sidebar.button("Reset All Data"):
         clear_session_state()
         st.sidebar.success("All data has been reset!")
-        app_logger.info("Session state reset")
+        # app_logger.info("Session state reset")
 
     # Navigation
     page = st.sidebar.radio("Go to", ["CV Shortlisting", "Interview Questions","CV Analyser + JobSearch"])
-    app_logger.info(f"Page selected: {page}")
+    # app_logger.info(f"Page selected: {page}")
 
     try:
         if page == "CV Shortlisting":
-            app_logger.info("Navigating to CV Shortlisting")
+            # app_logger.info("Navigating to CV Shortlisting")
             cv_short.create_cv_shortlisting_page()
 
         elif page == "Interview Questions":
@@ -60,14 +60,14 @@ def main():
             # st.warning("Please complete the CV shortlisting process first.")
             # app_logger.warning("Attempted to access Interview Questions without completing CV shortlisting")
             # else:
-            app_logger.info("Navigating to Interview Questions")
+            # app_logger.info("Navigating to Interview Questions")
             cv_question.create_interview_questions_page()
 
         elif page == "CV Analyser + JobSearch":
             cv_analyzer_search.Job_assistant()
 
     except Exception as e:
-        app_logger.error(f"Error occurred: {e}")
+        # app_logger.error(f"Error occurred: {e}")
         st.error(f"An error occurred: {e}")
 
 if __name__ == "__main__":
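For context on what is being disabled here: setup_logger comes from a local logger module that is not part of this diff. A hypothetical sketch of what such a helper typically looks like, shown only to clarify the commented-out calls:

# Hypothetical reconstruction of logger.setup_logger; the real logger.py is not in this diff.
import logging

def setup_logger(name: str, log_file: str, level: int = logging.INFO) -> logging.Logger:
    handler = logging.FileHandler(log_file)  # writes to local disk
    handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
    logger = logging.getLogger(name)
    logger.setLevel(level)
    if not logger.handlers:  # avoid duplicate handlers on Streamlit reruns
        logger.addHandler(handler)
    return logger

A file handler like this writes to the local filesystem, which is a common reason to drop logging when deploying to Streamlit Community Cloud, where the container filesystem is ephemeral.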
cv_analyzer_search.py CHANGED
@@ -29,8 +29,8 @@ def make_clickable_link(link):
 groq_api_key = st.secrets["GROQ_API_KEY"]
 
 # Configure logging
-logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s')
-logger = logging.getLogger(__name__)
+# logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s')
+# logger = logging.getLogger(__name__)
 
 class JobSuggestionEngine:
     def __init__(self):
@@ -48,7 +48,7 @@ class JobSuggestionEngine:
         Extracting JSON from LLM
         """
         try:
-            logger.debug("Extracting JSON from LLM response")
+            # logger.debug("Extracting JSON from LLM response")
             # Clean and extract JSON
             json_match = re.search(r'\{.*\}', text, re.DOTALL)
             if json_match:
@@ -57,12 +57,12 @@ class JobSuggestionEngine:
 
         except Exception as e:
             st.error(f"JSON Extraction Error: {e}")
-            logger.error(f"JSON Extraction Error: {e}")
+            # logger.error(f"JSON Extraction Error: {e}")
             return {}
 
     def generate_job_suggestions(self, resume_data: cv) -> List[Dict[str, str]]:
 
-        logger.info("Generating job suggestions based on resume")
+        # logger.info("Generating job suggestions based on resume")
 
         prompt = f"""Based on the following resume details, provide job suggestions:
 
@@ -91,7 +91,7 @@ class JobSuggestionEngine:
         """
         try:
 
-            logger.debug(f"Calling Groq API with prompt: {prompt[:100]}...")  # start of api call
+            # logger.debug(f"Calling Groq API with prompt: {prompt[:100]}...")  # start of api call
 
             # API call to the Groq client for chat completions
             chat_completion = self.client.chat.completions.create(
@@ -111,14 +111,14 @@ class JobSuggestionEngine:
             response_text = chat_completion.choices[0].message.content
             suggestions_data = self._extract_json(response_text)
 
-            logger.info(f"Job suggestions generated: {len(suggestions_data.get('job_suggestions', []))} found")
+            # logger.info(f"Job suggestions generated: {len(suggestions_data.get('job_suggestions', []))} found")
 
             # Return job suggestions, if not found -> empty list
             return suggestions_data.get('job_suggestions', [])
 
         except Exception as e:
             st.error(f"Job Suggestion Error: {e}")
-            logger.error(f"Job Suggestion Error: {e}")
+            # logger.error(f"Job Suggestion Error: {e}")
             return []
 
 def Job_assistant():
@@ -181,14 +181,14 @@ def Job_assistant():
         try:
             # Extract resume text
             resume_text = process_file(uploaded_resume)
-            logger.info("Resume extracted successfully")
+            # logger.info("Resume extracted successfully")
 
             # Extract structured CV data
             candidates = extract_cv_data(resume_text)
 
             if not candidates:
                 st.error("Could not extract resume data")
-                logger.error("No candidates extracted from resume")
+                # logger.error("No candidates extracted from resume")
                 st.stop()
 
             st.session_state.resume_data = candidates[0]
@@ -201,17 +201,17 @@ def Job_assistant():
 
         except Exception as e:
             st.error(f"Resume Processing Error: {e}")
-            logger.error(f"Resume Processing Error: {e}")
+            # logger.error(f"Resume Processing Error: {e}")
             st.stop()
 
     # Initialize Job Suggestion Engine
     if st.session_state.resume_data:
         suggestion_engine = JobSuggestionEngine()
-        logger.info("Job_Suggestion_Engine initialized")
+        # logger.info("Job_Suggestion_Engine initialized")
 
         # Generate Job Suggestions
         job_suggestions = suggestion_engine.generate_job_suggestions(resume_data)
-        logger.info(f"Generated {len(job_suggestions)} job suggestions")
+        # logger.info(f"Generated {len(job_suggestions)} job suggestions")
 
         st.session_state.job_suggestions = job_suggestions
 
@@ -227,14 +227,14 @@ def Job_assistant():
         try:
             # Extract resume text
             resume_text = process_file(uploaded_resume)
-            logger.info("Resume text extracted again for improvement suggestions")
+            # logger.info("Resume text extracted again for improvement suggestions")
 
             # Initialize Resume Improvement Engine
             improvement_engine = ResumeImprovementEngine()
 
             # Generate Improvement Suggestions
             improvement_suggestions = improvement_engine.generate_resume_improvement_suggestions(resume_text)
-            logger.info("Resume improvement suggestions generated")
+            # logger.info("Resume improvement suggestions generated")
             st.session_state.improvement_suggestions = improvement_suggestions
 
             # Display Suggestions
@@ -304,7 +304,7 @@ def Job_assistant():
 
         except Exception as e:
             st.error(f"Resume Improvement Analysis Error: {e}")
-            logger.error(f"Resume Improvement Analysis Error: {e}")
+            # logger.error(f"Resume Improvement Analysis Error: {e}")
 
 
     with tab2:
@@ -386,7 +386,7 @@ def Job_assistant():
 
         except Exception as e:
             st.error(f"Job Search Error: {e}")
-            logger.error(f"Job Search Error: {e}")
+            # logger.error(f"Job Search Error: {e}")
         # col1, col2, col3, col4 = st.columns(4)
 
         # with col1:
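Both _extract_json helpers in this repo (here and in resume_advance_analysis.py) keep the same technique: grab the first {...} span from the model reply and parse it. A self-contained sketch of that technique; the function name and sample reply are illustrative only:

import json
import re

def extract_json_block(text: str) -> dict:
    """Pull the first JSON object out of an LLM reply that may contain extra prose."""
    match = re.search(r'\{.*\}', text, re.DOTALL)
    if not match:
        return {}
    try:
        return json.loads(match.group(0))
    except json.JSONDecodeError:
        return {}

reply = 'Here are your results:\n{"job_suggestions": [{"title": "Data Analyst"}]}'
print(extract_json_block(reply))  # {'job_suggestions': [{'title': 'Data Analyst'}]}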
cv_short.py CHANGED
@@ -5,26 +5,26 @@ import streamlit as st
 import pandas as pd
 
 # Configure logging
-logging.basicConfig(level=logging.DEBUG , format='%(asctime)s - %(levelname)s - %(message)s')
-logger = logging.getLogger(__name__)
+# logging.basicConfig(level=logging.DEBUG , format='%(asctime)s - %(levelname)s - %(message)s')
+# logger = logging.getLogger(__name__)
 
 
 class CVAnalyzer:
 
     def __init__(self):
         # Initialize Groq LLM
-        logger.info("Initializing CVAnalyzer")
+        # logger.info("Initializing CVAnalyzer")
 
         self.llm = extr.initialize_llm()  # Updated to use the new function
 
-        logger.info(" LLM initialized")
+        # logger.info(" LLM initialized")
         # Initialize embeddings (if needed)
         # self.embeddings = HuggingFaceEmbeddings(
         #     model_name="sentence-transformers/all-mpnet-base-v2"
         # )
 
     def load_document(self, file_path: str) -> str:
-        logger.info(f"Loading document from file: {file_path}")
+        # logger.info(f"Loading document from file: {file_path}")
 
         """Load document based on file type."""
 
@@ -34,22 +34,22 @@ class CVAnalyzer:
         loader = TextLoader(file_path)
         documents = loader.load()
 
-        logger.info(f"Document loaded from {file_path}")
+        # logger.info(f"Document loaded from {file_path}")
 
         return " ".join([doc.page_content for doc in documents])
 
     def extract_cv_info(self, cv_text: str) -> list[extr.cv]:  # referring to cv class in extraction.py
-        logger.info("Extracting CV information")
+        # logger.info("Extracting CV information")
 
         """Extract structured information from CV text using new extraction method."""
 
         extracted_data = extr.extract_cv_data(cv_text)
-        logger.info(f"Extracted {len(extracted_data)} candidate(s) from CV")
+        # logger.info(f"Extracted {len(extracted_data)} candidate(s) from CV")
         return extracted_data
         # return extr.extract_cv_data(cv_text)
 
     def calculate_match_score(self, cv_info: dict, jd_requirements: dict) -> dict:
-        logger.info(f"Calculating match score for CV: {cv_info.get('name', 'Unknown')}")
+        # logger.info(f"Calculating match score for CV: {cv_info.get('name', 'Unknown')}")
 
         """Calculate match score between CV and job requirements."""
 
@@ -80,7 +80,7 @@ class CVAnalyzer:
             if component != "overall_score"
         )
 
-        logger.debug(f"Match score for {cv_info.get('name', 'Unknown')}: {score_components['overall_score']:.2%}")
+        # logger.debug(f"Match score for {cv_info.get('name', 'Unknown')}: {score_components['overall_score']:.2%}")
 
         return score_components
 
@@ -209,7 +209,7 @@ class CVAnalyzer:
 
 
 def create_cv_shortlisting_page():
-    logger.info("Starting CV shortlisting system")
+    # logger.info("Starting CV shortlisting system")
 
     # Initialize session state if not already initialized
     if 'jd_text' not in st.session_state:
@@ -261,7 +261,7 @@ def create_cv_shortlisting_page():
     st.session_state.uploaded_files = uploaded_files
 
     if st.button("Analyze CVs") and uploaded_files and jd_text:
-        logger.info("Analyzing uploaded CVs")
+        # logger.info("Analyzing uploaded CVs")
         with st.spinner('Analyzing CVs...'):
             analyzer = CVAnalyzer()
 
@@ -301,10 +301,11 @@ def create_cv_shortlisting_page():
                     st.session_state.results.append(result)
 
                 except Exception as e:
-                    logger.error(f"Error processing CV: {str(e)}")
+                    st.error(f"Error processing CV: {str(e)}")
+                    # logger.error(f"Error processing CV: {str(e)}")
 
     # Display results
-    logger.info(f"Displaying analyzed results for {len(results)} candidate(s)")
+    # logger.info(f"Displaying analyzed results for {len(results)} candidate(s)")
 
     if st.session_state.results:
         df = pd.DataFrame(st.session_state.results)
@@ -312,6 +313,6 @@ def create_cv_shortlisting_page():
         st.dataframe(df)
         st.session_state.analysis_complete = True
     else:
-        logger.warning("No valid candidates found in uploaded CVs")
+        # logger.warning("No valid candidates found in uploaded CVs")
         st.error("No valid results found from CV analysis")
         st.session_state.analysis_complete = False
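The calculate_match_score hunk above shows overall_score being built as a sum over weighted components. A small sketch of that aggregation pattern; the component names and weights below are assumptions for illustration, not values from cv_short.py:

def overall_match_score(score_components: dict[str, float],
                        weights: dict[str, float]) -> float:
    # Weighted sum of every component except the aggregate itself,
    # mirroring the pattern in CVAnalyzer.calculate_match_score.
    return sum(
        score_components[component] * weights.get(component, 0.0)
        for component in score_components
        if component != "overall_score"
    )

components = {"skills": 0.8, "experience": 0.6, "education": 0.9}
weights = {"skills": 0.5, "experience": 0.3, "education": 0.2}
print(f"{overall_match_score(components, weights):.2%}")  # 76.00%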
resume_advance_analysis.py CHANGED
@@ -6,8 +6,8 @@ import re
 import os
 import logging
 
-logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s')
-logger = logging.getLogger(__name__)
+# logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s')
+# logger = logging.getLogger(__name__)
 
 
 os.environ['GROQ_API_KEY'] = os.getenv("GROQ_API_KEY")
@@ -22,7 +22,7 @@ class ResumeImprovementEngine:
         #     max_tokens=4096
         # )
         self.client = Groq(api_key=groq_api_key)
-        logger.info("ResumeImprovementEngine initialized with Groq API key.")
+        # logger.info("ResumeImprovementEngine initialized with Groq API key.")
 
     def generate_resume_improvement_suggestions(self, resume_text: str) -> dict[str, Any]:
         """
@@ -79,7 +79,7 @@ class ResumeImprovementEngine:
         """
 
         try:
-            logger.info("Sending request to Groq for resume improvement.")
+            # logger.info("Sending request to Groq for resume improvement.")
             # Make API call to generate improvement suggestions
             chat_completion = self.client.chat.completions.create(
                 messages=[
@@ -99,19 +99,19 @@ class ResumeImprovementEngine:
                 stream=False
             )
 
-            logger.info("Groq API response received.")
+            # logger.info("Groq API response received.")
 
             # Extract and parse the JSON response
             response_text = chat_completion.choices[0].message.content
             suggestions = self._extract_json(response_text)
 
-            logger.debug(f"Improvement suggestions received: {suggestions}")
+            # logger.debug(f"Improvement suggestions received: {suggestions}")
 
             return suggestions
 
         except Exception as e:
             st.error(f"Resume Improvement Error: {e}")
-            logger.error(f"Resume Improvement Error: {e}")
+            # logger.error(f"Resume Improvement Error: {e}")
             return {}
 
 
@@ -126,19 +126,19 @@ class ResumeImprovementEngine:
             Dict of extracted JSON or empty dict
         """
         try:
-            logger.debug("Extracting JSON from response text.")
+            # logger.debug("Extracting JSON from response text.")
 
             json_match = re.search(r'\{.*\}', text, re.DOTALL | re.MULTILINE)
             if json_match:
                 return json.loads(json_match.group(0))
 
-            logger.warning("No valid JSON found in response text.")
+            # logger.warning("No valid JSON found in response text.")
 
             return {}
 
         except Exception as e:
             st.error(f"JSON Extraction Error: {e}")
-            logger.error(f"JSON Extraction Error: {e}")
+            # logger.error(f"JSON Extraction Error: {e}")
             return {}
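One caveat worth flagging in resume_advance_analysis.py: the unchanged line os.environ['GROQ_API_KEY'] = os.getenv("GROQ_API_KEY") raises a TypeError when the variable is unset, because os.environ values must be strings. A hedged guard for that line (a sketch, not part of this commit):

import os
import streamlit as st

# Only write the key back into the environment if it can actually be resolved;
# otherwise surface a readable error instead of a TypeError. (Sketch, not part of this commit.)
groq_api_key = os.getenv("GROQ_API_KEY") or st.secrets.get("GROQ_API_KEY")
if not groq_api_key:
    raise RuntimeError("GROQ_API_KEY is not configured in the environment or st.secrets")
os.environ["GROQ_API_KEY"] = groq_api_key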