File size: 19,967 Bytes
0b0fa7c
 
19a9439
3019fd8
feca185
 
0b0fa7c
19a9439
 
3019fd8
0b0fa7c
 
 
 
19a9439
0b0fa7c
feca185
 
0b0fa7c
feca185
 
19a9439
3019fd8
19a9439
0b0fa7c
 
 
19a9439
3019fd8
 
 
 
 
 
 
 
19a9439
 
0b0fa7c
 
 
19a9439
0b0fa7c
 
 
 
19a9439
 
0b0fa7c
feca185
19a9439
 
 
 
 
feca185
0b0fa7c
 
 
feca185
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
0b0fa7c
 
 
feca185
0b0fa7c
19a9439
0b0fa7c
 
 
 
 
feca185
0b0fa7c
 
19a9439
0b0fa7c
 
 
 
 
19a9439
0b0fa7c
19a9439
feca185
 
 
 
 
 
 
19a9439
0b0fa7c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3019fd8
 
 
 
 
 
19a9439
 
0b0fa7c
19a9439
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
feca185
 
 
 
 
 
19a9439
 
 
 
 
 
 
feca185
19a9439
 
 
0b0fa7c
 
 
 
 
 
 
19a9439
 
 
0b0fa7c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3019fd8
0b0fa7c
 
3019fd8
 
 
 
 
 
 
 
 
 
0b0fa7c
3019fd8
 
0b0fa7c
 
 
3019fd8
 
0b0fa7c
3019fd8
 
0b0fa7c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3019fd8
 
 
 
 
 
 
 
 
 
 
 
 
 
 
0b0fa7c
 
 
 
 
 
3019fd8
 
 
0b0fa7c
 
3019fd8
0b0fa7c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3019fd8
 
 
 
 
 
 
 
 
 
 
0b0fa7c
 
3019fd8
 
 
 
 
 
0b0fa7c
 
3019fd8
 
 
 
0b0fa7c
 
3019fd8
19a9439
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
# app.py

import json
import re
import uuid
from collections import Counter

import fitz  # PyMuPDF
import pandas as pd
import plotly.express as px
import requests
import streamlit as st
from bs4 import BeautifulSoup
from langchain_core.prompts import PromptTemplate
from langchain_groq import ChatGroq
from streamlit_option_menu import option_menu

# Initialize the LLM with your Groq API key from Streamlit secrets.
# NOTE(review): this runs at import time, so a missing "groq_api_key"
# secret will fail the whole app on startup — confirm that is intended.
llm = ChatGroq(
    temperature=0,  # request least-random sampling for repeatable generations
    groq_api_key=st.secrets["groq_api_key"],
    model_name="llama-3.1-70b-versatile"
)


def extract_text_from_pdf(pdf_file):
    """
    Return the concatenated text of every page in an uploaded PDF.

    Shows a Streamlit error and returns an empty string when the file
    cannot be opened or read.
    """
    try:
        with fitz.open(stream=pdf_file.read(), filetype="pdf") as doc:
            pages = [page.get_text() for page in doc]
        return "".join(pages)
    except Exception as e:
        st.error(f"Error extracting text from resume: {e}")
        return ""

def extract_job_description(job_link):
    """
    Download a job-posting page and return its visible text.

    Shows a Streamlit error and returns an empty string on any
    network or parsing failure.
    """
    try:
        response = requests.get(
            job_link,
            headers={"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64)"},
        )
        response.raise_for_status()
        page = BeautifulSoup(response.text, 'html.parser')
        # Adjust selectors based on the website's structure for better extraction
        return page.get_text(separator='\n').strip()
    except Exception as e:
        st.error(f"Error fetching job description: {e}")
        return ""

def extract_requirements(job_description):
    """
    Use the Groq LLM to extract requirements from a job description.

    Args:
        job_description: Full text of the job posting.

    Returns:
        The requirements as a stripped, numbered-list string.
    """
    # Pass the job text as a template VARIABLE instead of f-string
    # interpolation: literal braces inside the scraped description would
    # otherwise be misread as placeholders by PromptTemplate.
    prompt = PromptTemplate.from_template(
        """
    The following is a job description:

    {job_description}

    Extract the list of job requirements, qualifications, and skills from the job description. Provide them as a numbered list.

    Requirements:
    """
    )
    chain = prompt | llm
    response = chain.invoke({"job_description": job_description})

    return response.content.strip()

def generate_email(job_description, requirements, resume_text):
    """
    Generate a personalized cold email with the Groq LLM.

    Args:
        job_description: Full text of the job posting.
        requirements: Requirements extracted by extract_requirements().
        resume_text: Plain text of the candidate's resume.

    Returns:
        The generated email body as a stripped string.
    """
    # Template variables (not an f-string) so literal braces in the job
    # description or resume cannot be misread as placeholders.
    prompt = PromptTemplate.from_template(
        """
    You are Adithya S Nair, a recent Computer Science graduate specializing in Artificial Intelligence and Machine Learning. Craft a concise and professional cold email to a potential employer based on the following information:

    **Job Description:**
    {job_description}

    **Extracted Requirements:**
    {requirements}

    **Your Resume:**
    {resume_text}

    **Email Requirements:**
    - **Introduction:** Briefly introduce yourself and mention the specific job you are applying for.
    - **Body:** Highlight your relevant skills, projects, internships, and leadership experiences that align with the job requirements.
    - **Value Proposition:** Explain how your fresh perspective and recent academic knowledge can add value to the company.
    - **Closing:** Express enthusiasm for the opportunity, mention your willingness for an interview, and thank the recipient for their time.

    **Email:**
    """
    )
    chain = prompt | llm
    response = chain.invoke({
        "job_description": job_description,
        "requirements": requirements,
        "resume_text": resume_text,
    })

    return response.content.strip()

def generate_cover_letter(job_description, requirements, resume_text):
    """
    Generate a personalized cover letter with the Groq LLM.

    Args:
        job_description: Full text of the job posting.
        requirements: Requirements extracted by extract_requirements().
        resume_text: Plain text of the candidate's resume.

    Returns:
        The generated cover letter as a stripped string.
    """
    # Template variables (not an f-string) so literal braces in the inputs
    # cannot be misread as placeholders by PromptTemplate.
    prompt = PromptTemplate.from_template(
        """
    You are Adithya S Nair, a recent Computer Science graduate specializing in Artificial Intelligence and Machine Learning. Compose a personalized and professional cover letter based on the following information:

    **Job Description:**
    {job_description}

    **Extracted Requirements:**
    {requirements}

    **Your Resume:**
    {resume_text}

    **Cover Letter Requirements:**
    1. **Greeting:** Address the hiring manager by name if available; otherwise, use a generic greeting such as "Dear Hiring Manager."
    2. **Introduction:** Begin with an engaging opening that mentions the specific position you are applying for and conveys your enthusiasm.
    3. **Body:**
       - **Skills and Experiences:** Highlight relevant technical skills, projects, internships, and leadership roles that align with the job requirements.
       - **Alignment:** Demonstrate how your academic background and hands-on experiences make you a suitable candidate for the role.
    4. **Value Proposition:** Explain how your fresh perspective, recent academic knowledge, and eagerness to learn can contribute to the company's success.
    5. **Conclusion:** End with a strong closing statement expressing your interest in an interview, your availability, and gratitude for the hiring manager’s time and consideration.
    6. **Professional Tone:** Maintain a respectful and professional tone throughout the letter.

    **Cover Letter:**
    """
    )
    chain = prompt | llm
    response = chain.invoke({
        "job_description": job_description,
        "requirements": requirements,
        "resume_text": resume_text,
    })

    return response.content.strip()

def extract_skills(text):
    """
    Extract a list of skills from resume (or job) text via the Groq LLM.

    Args:
        text: Free-form text to mine for skills.

    Returns:
        A list of non-empty skill strings, split on commas and newlines.
    """
    # Template variable (not an f-string) so literal braces in the text
    # cannot be misread as placeholders by PromptTemplate.
    prompt = PromptTemplate.from_template(
        """
    Extract a comprehensive list of technical and soft skills from the following resume text. Provide the skills as a comma-separated list.

    Resume Text:
    {text}

    Skills:
    """
    )
    chain = prompt | llm
    response = chain.invoke({"text": text})

    skills = response.content.strip()
    # The model may separate skills with commas or newlines; accept both.
    return [skill.strip() for skill in re.split(r',|\n', skills) if skill.strip()]

def suggest_keywords(resume_text, job_description=None):
    """
    Suggest ATS-friendly keywords for a resume via the Groq LLM.

    Args:
        resume_text: Plain text of the resume.
        job_description: Optional job-posting text to tailor keywords to.

    Returns:
        A list of non-empty keyword strings, split on commas and newlines.
    """
    # Template variables (not an f-string) so literal braces in the inputs
    # cannot be misread as placeholders by PromptTemplate.
    prompt = PromptTemplate.from_template(
        """
    Analyze the following resume text and suggest additional relevant keywords that can enhance its compatibility with Applicant Tracking Systems (ATS). If a job description is provided, tailor the keywords to align with the job requirements.

    Resume Text:
    {resume_text}

    Job Description:
    {job_description}

    Suggested Keywords:
    """
    )
    chain = prompt | llm
    response = chain.invoke({
        "resume_text": resume_text,
        "job_description": job_description if job_description else "N/A",
    })

    keywords = response.content.strip()
    return [keyword.strip() for keyword in re.split(r',|\n', keywords) if keyword.strip()]

def get_job_recommendations(resume_text, location="India"):
    """
    Query the JSearch API for jobs matching skills found in the resume.

    Args:
        resume_text: Plain text of the resume; skills are extracted from it.
        location: Location filter passed to the API (default "India").

    Returns:
        A list of {"title", "company", "link"} dicts; empty on error.
    """
    # Build the search query from the resume's skills, with a fallback.
    skills = extract_skills(resume_text)
    search_query = " ".join(skills) if skills else "Software Engineer"

    endpoint = "https://jsearch.p.rapidapi.com/search"
    request_headers = {
        "X-RapidAPI-Key": st.secrets["rapidapi_key"],  # Accessing RapidAPI key securely
        "X-RapidAPI-Host": "jsearch.p.rapidapi.com"
    }
    query_params = {
        "query": search_query,
        "page": "1",
        "num_pages": "1",
        "size": "20",
        "remote_filter": "false",
        "location": location,
        "sort": "relevance",
        "salary_min": "0",
        "salary_max": "0",
        "salary_currency": "INR",
        "radius": "0",
        "company_type": "",
        "job_type": "",
        "degree_level": "",
        "career_level": "",
        "include_remote": "false"
    }

    try:
        response = requests.get(endpoint, headers=request_headers, params=query_params)
        response.raise_for_status()
        payload = response.json()
        return [
            {
                "title": job.get("job_title"),
                "company": job.get("employer", {}).get("name"),
                "link": job.get("job_apply_link") or job.get("job_listing_url"),
            }
            for job in payload.get("data", [])
        ]
    except Exception as e:
        st.error(f"Error fetching job recommendations: {e}")
        return []

def create_skill_distribution_chart(skills):
    """
    Build a bar chart of how often each skill appears.

    Args:
        skills: Iterable of skill-name strings (duplicates allowed).

    Returns:
        A plotly Figure with one bar per distinct skill.
    """
    # Counter replaces the hand-rolled dict tally; it preserves first-seen
    # order just like the manual loop did.
    skill_counts = Counter(skills)
    df = pd.DataFrame(list(skill_counts.items()), columns=['Skill', 'Count'])
    fig = px.bar(df, x='Skill', y='Count', title='Skill Distribution')
    return fig

def create_experience_timeline(resume_text):
    """
    Build a plotly timeline of work experience extracted from a resume.

    Asks the LLM for a pipe-separated table of (job title, company,
    duration) rows, parses it, and lays the jobs end-to-end along a
    cumulative-years axis.

    Args:
        resume_text: Plain text of the resume.

    Returns:
        A plotly Figure, or None when no rows could be parsed.
    """
    # Template variable (not an f-string) so literal braces in the resume
    # text cannot be misread as placeholders by PromptTemplate.
    prompt = PromptTemplate.from_template(
        """
    From the following resume text, extract the job titles, companies, and durations of employment. Provide the information in a table format with columns: Job Title, Company, Duration (in years).

    Resume Text:
    {resume_text}

    Table:
    """
    )
    chain = prompt | llm
    response = chain.invoke({"resume_text": resume_text})

    table_text = response.content.strip()
    # Parse pipe-separated rows into records, skipping the header row.
    data = []
    for line in table_text.split('\n'):
        if line.strip() and not line.lower().startswith("job title"):
            parts = line.split('|')
            if len(parts) == 3:
                job_title = parts[0].strip()
                company = parts[1].strip()
                duration = parts[2].strip()
                duration_years = parse_duration(duration)
                data.append({"Job Title": job_title, "Company": company, "Duration (years)": duration_years})
    df = pd.DataFrame(data)
    if df.empty:
        return None
    # Lay jobs end-to-end: each job starts where the previous one ended.
    df['Start Year'] = df['Duration (years)'].cumsum() - df['Duration (years)']
    df['End Year'] = df['Duration (years)'].cumsum()
    # NOTE(review): px.timeline is documented for datetime-like x values;
    # confirm it renders correctly with these numeric year offsets.
    fig = px.timeline(df, x_start="Start Year", x_end="End Year", y="Job Title", color="Company", title="Experience Timeline")
    fig.update_yaxes(categoryorder="total ascending")
    return fig

def parse_duration(duration_str):
    """
    Parse a duration string like '2 years' or '6 months' into float years.

    Args:
        duration_str: Human-readable duration text.

    Returns:
        Duration in years, or 0 when no number is found or the unit is
        not recognized.
    """
    lowered = duration_str.lower()
    try:
        # First number in the string, e.g. "2" in "2 years", "1.5" in "1.5 yrs".
        value = float(re.findall(r'\d+\.?\d*', duration_str)[0])
    except (IndexError, ValueError):
        # Narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit; only parse failures are expected here.
        return 0
    if 'year' in lowered:
        return value
    if 'month' in lowered:
        return value / 12
    return 0

# -------------------------------
# Page Functions
# -------------------------------

def email_generator_page():
    """Streamlit page: generate a cold email from a job link and a resume PDF."""
    st.header("Automated Email Generator")

    st.write("""
    This application generates a personalized cold email based on a job posting and your resume.
    """)

    # Input fields
    job_link = st.text_input("Enter the job link:")
    uploaded_file = st.file_uploader("Upload your resume (PDF format):", type="pdf")

    if not st.button("Generate Email"):
        return

    # Validate inputs before doing any network or LLM work.
    if not job_link:
        st.error("Please enter a job link.")
        return
    if not uploaded_file:
        st.error("Please upload your resume.")
        return

    with st.spinner("Processing..."):
        job_description = extract_job_description(job_link)
        if not job_description:
            st.error("Failed to extract job description.")
            return

        requirements = extract_requirements(job_description)
        if not requirements:
            st.error("Failed to extract requirements.")
            return

        resume_text = extract_text_from_pdf(uploaded_file)
        if not resume_text:
            st.error("Failed to extract text from resume.")
            return

        email_text = generate_email(job_description, requirements, resume_text)
        if not email_text:
            st.error("Failed to generate email.")
            return

        st.subheader("Generated Email:")
        st.write(email_text)
        # Offer the generated text as a downloadable file
        st.download_button(
            label="Download Email",
            data=email_text,
            file_name="generated_email.txt",
            mime="text/plain"
        )

def cover_letter_generator_page():
    """Streamlit page: generate a cover letter from a job link and a resume PDF."""
    st.header("Automated Cover Letter Generator")

    st.write("""
    This application generates a personalized cover letter based on a job posting and your resume.
    """)

    # Input fields
    job_link = st.text_input("Enter the job link:")
    uploaded_file = st.file_uploader("Upload your resume (PDF format):", type="pdf")

    if not st.button("Generate Cover Letter"):
        return

    # Validate inputs before doing any network or LLM work.
    if not job_link:
        st.error("Please enter a job link.")
        return
    if not uploaded_file:
        st.error("Please upload your resume.")
        return

    with st.spinner("Processing..."):
        job_description = extract_job_description(job_link)
        if not job_description:
            st.error("Failed to extract job description.")
            return

        requirements = extract_requirements(job_description)
        if not requirements:
            st.error("Failed to extract requirements.")
            return

        resume_text = extract_text_from_pdf(uploaded_file)
        if not resume_text:
            st.error("Failed to extract text from resume.")
            return

        cover_letter = generate_cover_letter(job_description, requirements, resume_text)
        if not cover_letter:
            st.error("Failed to generate cover letter.")
            return

        st.subheader("Generated Cover Letter:")
        st.write(cover_letter)
        # Offer the generated text as a downloadable file
        st.download_button(
            label="Download Cover Letter",
            data=cover_letter,
            file_name="generated_cover_letter.txt",
            mime="text/plain"
        )

def resume_analysis_page():
    """
    Streamlit page: analyze an uploaded resume and suggest optimizations.

    Extracts skills and ATS keywords via the LLM, shows static
    optimization tips, and renders skill/experience charts.
    """
    # Removed a redundant function-local `import pandas as pd`; pandas is
    # already imported at module level and used by the chart helpers.
    st.header("Resume Analysis and Optimization")

    uploaded_file = st.file_uploader("Upload your resume (PDF format):", type="pdf")

    if uploaded_file:
        resume_text = extract_text_from_pdf(uploaded_file)
        if resume_text:
            st.success("Resume uploaded successfully!")
            # Perform analysis
            st.subheader("Extracted Information")
            # Extracted skills
            skills = extract_skills(resume_text)
            st.write("**Skills:**", ', '.join(skills))
            # Extract keywords
            keywords = suggest_keywords(resume_text)
            st.write("**Suggested Keywords for ATS Optimization:**", ', '.join(keywords))
            # Provide optimization suggestions
            st.subheader("Optimization Suggestions")
            st.write("- **Keyword Optimization:** Incorporate the suggested keywords to improve ATS compatibility.")
            st.write("- **Formatting:** Ensure consistent formatting for headings and bullet points to enhance readability.")
            st.write("- **Experience Details:** Provide specific achievements and quantify your accomplishments where possible.")

            # Visual Resume Analytics
            st.subheader("Visual Resume Analytics")
            # Skill Distribution Chart
            if skills:
                st.write("**Skill Distribution:**")
                fig_skills = create_skill_distribution_chart(skills)
                st.plotly_chart(fig_skills)

            # Experience Timeline (if applicable)
            fig_experience = create_experience_timeline(resume_text)
            if fig_experience:
                st.write("**Experience Timeline:**")
                st.plotly_chart(fig_experience)
            else:
                st.write("**Experience Timeline:** Not enough data to generate a timeline.")
        else:
            st.error("Failed to extract text from resume.")

def job_recommendations_page():
    """Streamlit page: list jobs matching the uploaded resume's skills."""
    st.header("Job Recommendations")

    uploaded_file = st.file_uploader("Upload your resume (PDF format):", type="pdf")

    if not uploaded_file:
        return

    resume_text = extract_text_from_pdf(uploaded_file)
    if not resume_text:
        st.error("Failed to extract text from resume.")
        return

    st.success("Resume uploaded successfully!")
    # Fetch and display job recommendations for the extracted skills.
    st.subheader("Recommended Jobs")
    jobs = get_job_recommendations(resume_text)
    if not jobs:
        st.write("No job recommendations found based on your skills.")
        return
    for job in jobs:
        st.write(f"**{job['title']}** at {job['company']}")
        st.markdown(f"[Apply Here]({job['link']})")

def skill_matching_page():
    """Streamlit page: compare resume skills against a pasted job description."""
    st.header("Skill Matching and Gap Analysis")

    job_description_input = st.text_area("Paste the job description here:")
    uploaded_file = st.file_uploader("Upload your resume (PDF format):", type="pdf")

    if not st.button("Analyze Skills"):
        return

    # Validate inputs before any extraction work.
    if not job_description_input:
        st.error("Please paste the job description.")
        return
    if not uploaded_file:
        st.error("Please upload your resume.")
        return

    with st.spinner("Analyzing..."):
        resume_text = extract_text_from_pdf(uploaded_file)
        if not resume_text:
            st.error("Failed to extract text from resume.")
            return

        # Extract skill sets from both sides (resume first, then job text).
        resume_skills = set(extract_skills(resume_text))
        job_skills = set(extract_skills(job_description_input))

        # Overlap and gaps via set algebra.
        matching_skills = resume_skills & job_skills
        missing_skills = job_skills - resume_skills

        st.subheader("Matching Skills")
        st.write(', '.join(matching_skills) if matching_skills else "No matching skills found.")

        st.subheader("Missing Skills")
        st.write(', '.join(missing_skills) if missing_skills else "No missing skills.")

# -------------------------------
# Main App with Sidebar Navigation
# -------------------------------

def main():
    """Entry point: configure the app and route to the selected tool page."""
    st.set_page_config(page_title="Job Application Assistant", layout="wide")

    # Menu label -> page renderer; insertion order defines menu order.
    pages = {
        "Email Generator": email_generator_page,
        "Cover Letter Generator": cover_letter_generator_page,
        "Resume Analysis": resume_analysis_page,
        "Job Recommendations": job_recommendations_page,
        "Skill Matching": skill_matching_page,
    }

    with st.sidebar:
        selected = option_menu(
            "Main Menu",
            list(pages),
            icons=["envelope", "file-earmark-text", "file-person", "briefcase", "bar-chart"],
            menu_icon="cast",
            default_index=0,
        )

    render = pages.get(selected)
    if render is not None:
        render()

if __name__ == "__main__":
    main()