bsiddhharth committed
Commit 68cdd74 · 1 Parent(s): d914592

made some changes in short.py and cv_analyzer_search.py

Files changed (2):
  1. cv_analyzer_search.py +2 -102
  2. cv_short.py +0 -109
cv_analyzer_search.py CHANGED
@@ -1,17 +1,9 @@
  import streamlit as st
  import pandas as pd
- from langchain_groq import ChatGroq
  from groq import Groq
  from jobspy import scrape_jobs
  from resume_advance_analysis import *
  from extraction import *
- # (
- # cv,
- # extract_cv_data,
- # process_file, # File processing function
- # initialize_llm, # LLM initialization function
- # display_candidates_info # Candidate info display function
- # )
  from typing import List, Dict, Any
  import json
  import re
@@ -26,6 +18,7 @@ def make_clickable_link(link):

  # os.environ['GROQ_API_KEY'] = os.getenv("GROQ_API_KEY")
  # groq_api_key = os.getenv("GROQ_API_KEY")
+
  groq_api_key = st.secrets["GROQ_API_KEY"]

  # Configure logging
@@ -387,97 +380,4 @@ def Job_assistant():
      except Exception as e:
          st.error(f"Job Search Error: {e}")
          # logger.error(f"Job Search Error: {e}")
- # col1, col2, col3, col4 = st.columns(4)
-
- # with col1:
- # site_name = st.multiselect(
- # "Select Job Sites",
- # ["indeed", "linkedin", "zip_recruiter", "glassdoor", "google"],
- # default=st.session_state.site_name
- # # default=["indeed", "glassdoor"]
- # )
- # st.session_state.site_name = site_name
-
- # with col2:
- # search_term = st.text_input("Search Term", st.session_state.search_term)
- # st.session_state.search_term = search_term
-
- # with col3:
- # location = st.text_input("Location", st.session_state.location)
- # st.session_state.location = location
-
-
- # with col4:
- # results_wanted = st.number_input("Number of Results", min_value=1, max_value=100, value=st.session_state.results_wanted)
- # st.session_state.results_wanted = results_wanted
-
- # # Additional parameters
- # col5, col6 = st.columns(2)
-
- # with col5:
- # hours_old = st.number_input("Jobs Posted Within (hours)", min_value=1, max_value=168, value=st.session_state.hours_old)
- # st.session_state.hours_old = hours_old
-
- # with col6:
- # country_indeed = st.text_input("Country (for Indeed)", st.session_state.country_indeed)
- # st.session_state.country_indeed = country_indeed
-
- # search_button_clicked = st.button("Search Jobs")
-
- # # Search Button
- # # if st.button("Search Jobs"):
- # if search_button_clicked:
- # with st.spinner("Searching Jobs..."):
- # # Perform job search
- # try:
- # logger.info(f"Performing job search with {search_term} in {location}")
- # # jobs = scrape_jobs(
- # # site_name=site_name,
- # # search_term=search_term,
- # # google_search_term=f"{search_term} jobs near {location}",
- # # location=location,
- # # results_wanted=results_wanted,
- # # hours_old=hours_old,
- # # country_indeed=country_indeed,
- # # )
- # jobs = scrape_jobs(
- # site_name=st.session_state.site_name,
- # search_term=st.session_state.search_term,
- # google_search_term=f"{st.session_state.search_term} jobs near {st.session_state.location}",
- # location=st.session_state.location,
- # results_wanted=st.session_state.results_wanted,
- # hours_old=st.session_state.hours_old,
- # country_indeed=st.session_state.country_indeed,
- # )
- # st.session_state.job_search_results = jobs
-
- # if len(jobs) > 0:
- # st.success(f"Found {len(jobs)} jobs")
-
- # jobs_filtered = jobs[['site', 'job_url', 'title', 'company', 'location', 'date_posted']]
- # # Display job data in a table
- # # st.dataframe(jobs)
- # jobs_filtered['job_url'] = jobs_filtered['job_url'].apply(make_clickable_link)
- # st.write(jobs_filtered.to_html(escape=False), unsafe_allow_html=True)
-
- # # st.dataframe(jobs_filtered)
-
- # # Option to download jobs
- # csv_file = jobs.to_csv(index=False)
- # st.download_button(
- # label="Download Jobs as CSV",
- # data=csv_file,
- # file_name='job_search_results.csv',
- # mime='text/csv'
- # )
- # else:
- # st.warning("No jobs found")
-
- # except Exception as e:
- # st.error(f"Job Search Error: {e}")
- # logger.error(f"Job Search Error: {e}")
-
-
-
- # if __name__ == "__main__":
- # main()
+
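For context, the file reads its Groq API key from Streamlit's secrets store, as the unchanged context lines in the second hunk show. A minimal sketch of that lookup, assuming the standard .streamlit/secrets.toml location (the GROQ_API_KEY name appears in the diff; the Groq client construction is illustrative and not part of this commit):

# Sketch of the secrets-based key lookup visible in cv_analyzer_search.py.
# Assumes .streamlit/secrets.toml contains a line such as:
#   GROQ_API_KEY = "gsk-..."   # placeholder value
import streamlit as st
from groq import Groq

groq_api_key = st.secrets["GROQ_API_KEY"]   # st.secrets reads from secrets.toml; replaces os.getenv("GROQ_API_KEY")
client = Groq(api_key=groq_api_key)         # illustrative use of the key; the client setup is not shown in the diff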
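The bulk of the removal in cv_analyzer_search.py is an old, commented-out copy of the job-search flow; the live search path above it still drives python-jobspy the same way. Reduced to a standalone sketch for reference (the keyword arguments appear verbatim in the removed block, while the concrete values below are illustrative assumptions):

# Standalone sketch of the scrape_jobs call pattern shown in the removed block.
from jobspy import scrape_jobs

jobs = scrape_jobs(
    site_name=["indeed", "glassdoor"],                          # subset of the sites offered in the UI
    search_term="data scientist",                               # illustrative search term
    google_search_term="data scientist jobs near Bangalore",    # illustrative location
    location="Bangalore",
    results_wanted=20,
    hours_old=72,
    country_indeed="India",
)
# The removed block treats the result as a pandas DataFrame (column selection, to_csv, len)
print(f"Found {len(jobs)} jobs")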
cv_short.py CHANGED
@@ -86,115 +86,6 @@ class CVAnalyzer:


  def create_cv_shortlisting_page():
- # logger.info("Starting CV shortlisting system")
-
- # Initialize session state if not already initialized
- # if 'jd_text' not in st.session_state:
- # st.session_state.jd_text = ""
- # if 'min_years' not in st.session_state:
- # st.session_state.min_years = 3
- # if 'required_skills_list' not in st.session_state:
- # st.session_state.required_skills_list = []
- # if 'uploaded_files' not in st.session_state:
- # st.session_state.uploaded_files = None
- # if 'results' not in st.session_state:
- # st.session_state.results = []
- # if 'analysis_complete' not in st.session_state:
- # st.session_state.analysis_complete = False
-
- # st.title("CV Shortlisting System")
-
- # # Job Description Input
- # st.header("Job Description")
- # jd_text = st.text_area("Enter the job description", value=st.session_state.jd_text)
- # if jd_text:
- # st.session_state.jd_text = jd_text
-
- # # Job Requirements Input
- # st.header("Job Requirements")
- # min_years = st.number_input("Minimum years of experience",
- # min_value=0,
- # value=st.session_state.min_years,
- # )
-
- # required_skills = st.text_input("Required skills (comma-separated)",
- # value=','.join(st.session_state.required_skills_list) if st.session_state.required_skills_list else "")
-
- # required_skills_list = [skill.strip() for skill in required_skills.split(",") if skill.strip()]
-
- # if required_skills_list:
- # st.session_state.required_skills_list = required_skills_list
- # if min_years:
- # st.session_state.min_years = min_years
-
- # # CV Upload
- # st.header("Upload CVs")
- # uploaded_files = st.file_uploader("Choose CV files",
- # accept_multiple_files=True,
- # type=['pdf', 'txt'],
- # key="unique_cv_upload")
-
- # if uploaded_files:
- # st.session_state.uploaded_files = uploaded_files
-
- # if st.button("Analyze CVs") and uploaded_files and jd_text:
- # # logger.info("Analyzing uploaded CVs")
- # with st.spinner('Analyzing CVs...'):
- # analyzer = CVAnalyzer()
-
- # # Prepare job requirements
- # job_requirements = {
- # "min_years_experience": st.session_state.min_years,
- # "required_skills": st.session_state.required_skills_list
- # }
-
- # results = []
- # st.session_state.results = [] # Reset results for new analysis
-
- # # Process each CV
- # for uploaded_file in uploaded_files:
- # cv_text = extr.process_file(uploaded_file)
-
- # try:
- # candidates = analyzer.extract_cv_info(cv_text)
-
- # for candidate in candidates:
- # match_scores = analyzer.calculate_match_score(
- # candidate.__dict__,
- # job_requirements
- # )
-
- # result = {
- # "Name": candidate.name or "Unknown",
- # "Experience (Years)": candidate.years_of_exp or 0,
- # "Skills": ", ".join(candidate.skills) if candidate.skills else "None",
- # "Certifications": ", ".join(candidate.certifications) if candidate.certifications else "None",
- # "Skills Match": f"{match_scores['skills_match']:.2%}",
- # "Experience Match": f"{match_scores['experience_match']:.2%}",
- # "Overall Score": f"{match_scores['overall_score']:.2%}"
- # }
-
- # results.append(result)
- # st.session_state.results.append(result)
-
- # except Exception as e:
- # st.error(f"Error processing CV: {str(e)}")
- # # logger.error(f"Error processing CV: {str(e)}")
-
- # # Display results
- # # logger.info(f"Displaying analyzed results for {len(results)} candidate(s)")
-
- # if st.session_state.results:
- # df = pd.DataFrame(st.session_state.results)
- # df = df.sort_values("Overall Score", ascending=False)
- # st.dataframe(df)
- # st.session_state.analysis_complete = True
- # else:
- # # logger.warning("No valid candidates found in uploaded CVs")
- # st.error("No valid results found from CV analysis")
- # st.session_state.analysis_complete = False
-
-
      # Initialize session state variables if they don't exist
      if 'jd_text' not in st.session_state:
          st.session_state.jd_text = ''
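In cv_short.py the removed block is an older, fully commented-out body of create_cv_shortlisting_page; the live function keeps the explicit session-state initialisation visible in the remaining context lines. A compact sketch of that pattern, using the keys and defaults that appear in the removed block (the dict-and-loop form is an assumption; the file itself initialises each key with its own if statement):

import streamlit as st

# Keys and defaults mirror those in the removed commented-out block.
# The loop is a compact rewrite; cv_short.py uses one `if ... not in st.session_state` per key.
_DEFAULTS = {
    "jd_text": "",
    "min_years": 3,
    "required_skills_list": [],
    "uploaded_files": None,
    "results": [],
    "analysis_complete": False,
}

for key, default in _DEFAULTS.items():
    if key not in st.session_state:
        st.session_state[key] = default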