import os
import tempfile
import streamlit as st
import docx
import textract
from transformers import pipeline
import threading
import numpy as np

#####################################
# Load Models - Optimized with Threading
#####################################
@st.cache_resource(show_spinner=False)
def load_models():
    """
    Load all models in parallel using threading to speed up initialization
    """
    models = {}
    
    def load_summarizer_thread():
        models['summarizer'] = pipeline("summarization", model="google/pegasus-xsum", device=0 if st.session_state.get('use_gpu', False) else -1)
    
    def load_similarity_thread():
        # Using sentence-similarity pipeline instead of SentenceTransformer
        models['similarity'] = pipeline("sentence-similarity", model="sentence-transformers/all-MiniLM-L6-v2", 
                                       device=0 if st.session_state.get('use_gpu', False) else -1)
    
    # Start threads to load models in parallel
    threads = [
        threading.Thread(target=load_summarizer_thread),
        threading.Thread(target=load_similarity_thread)
    ]
    
    for thread in threads:
        thread.start()
    
    for thread in threads:
        thread.join()
    
    return models

#####################################
# Function: Extract Text from File - Optimized
#####################################
def extract_text_from_file(file_obj):
    """
    Extract text from .doc and .docx files.
    Returns the extracted text or an error message if extraction fails.
    """
    filename = file_obj.name
    ext = os.path.splitext(filename)[1].lower()
    text = ""

    if ext == ".docx":
        try:
            document = docx.Document(file_obj)
            # Use a list comprehension and join for better performance
            text = "\n".join(para.text for para in document.paragraphs if para.text.strip())
        except Exception as e:
            text = f"Error processing DOCX file: {e}"
    elif ext == ".doc":
        try:
            # Use a context manager for better file handling
            with tempfile.NamedTemporaryFile(delete=False, suffix=".doc") as tmp:
                tmp.write(file_obj.read())
                tmp_filename = tmp.name
            text = textract.process(tmp_filename).decode("utf-8")
            # Clean up the temporary file immediately
            os.unlink(tmp_filename)
        except Exception as e:
            text = f"Error processing DOC file: {e}"
    else:
        text = "Unsupported file type."
    return text

#####################################
# Function: Summarize Resume Text - Optimized
#####################################
def summarize_resume_text(resume_text, models):
    """
    Generates a concise summary of the resume text using the pre-loaded summarization model.
    """
    summarizer = models['summarizer']

    # Keep the input manageable: break long text into character chunks and
    # summarize the most important parts instead of trimming blindly.
    max_input_length = 1024  # approximate character budget per chunk (pegasus-xsum accepts at most 512 tokens)
    
    if len(resume_text) > max_input_length:
        # Instead of trimming to a single chunk, summarize up to the first three chunks
        chunks = [resume_text[i:i+max_input_length] for i in range(0, min(len(resume_text), 3*max_input_length), max_input_length)]
        summaries = []
        
        for chunk in chunks:
            chunk_summary = summarizer(chunk, max_length=100, min_length=30, do_sample=False)[0]['summary_text']
            summaries.append(chunk_summary)
        
        candidate_summary = " ".join(summaries)
        # Summarize again if combined summary is too long
        if len(candidate_summary) > max_input_length:
            candidate_summary = summarizer(candidate_summary[:max_input_length], max_length=150, min_length=40, do_sample=False)[0]['summary_text']
    else:
        candidate_summary = summarizer(resume_text, max_length=150, min_length=40, do_sample=False)[0]['summary_text']
    
    return candidate_summary
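
# A minimal sketch (hypothetical helper, not called by the app) isolating the
# chunking arithmetic used above: with a 1,024-character budget and a cap of
# three chunks, a 2,600-character resume yields chunks of 1,024, 1,024 and 552
# characters, each summarized separately before the summaries are joined.
def _chunk_text_example(text, chunk_size=1024, max_chunks=3):
    """Split text into up to max_chunks consecutive character chunks."""
    limit = min(len(text), max_chunks * chunk_size)
    return [text[i:i + chunk_size] for i in range(0, limit, chunk_size)]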

#####################################
# Function: Compare Candidate Summary to Company Prompt - Using Pipeline
#####################################
def compute_suitability(candidate_summary, company_prompt, models):
    """
    Compute the similarity between the candidate summary and the company prompt.
    Embeds both texts with the feature-extraction pipeline, mean-pools the
    token embeddings, and returns their cosine similarity clipped to [0, 1].
    """
    embedder = models['similarity']

    def embed(text):
        # The feature-extraction pipeline returns token-level embeddings;
        # mean-pool them into a single sentence vector.
        token_embeddings = np.array(embedder(text)[0])
        return token_embeddings.mean(axis=0)

    candidate_vec = embed(candidate_summary)
    company_vec = embed(company_prompt)

    # Cosine similarity, clipped to [0, 1] so it can be fed to st.progress()
    cosine = float(np.dot(candidate_vec, company_vec) /
                   (np.linalg.norm(candidate_vec) * np.linalg.norm(company_vec)))
    score = max(0.0, min(1.0, cosine))
    return score
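
# A minimal sanity-check sketch (hypothetical helper, not called by the app):
# the same cosine formula on plain vectors, cos(a, b) = a.b / (|a| * |b|).
# For example, [1, 0] vs. [1, 1] gives 1 / sqrt(2), roughly 0.707.
def _cosine_similarity_example(vec_a, vec_b):
    """Return the cosine similarity of two 1-D numeric sequences."""
    vec_a = np.asarray(vec_a, dtype=float)
    vec_b = np.asarray(vec_b, dtype=float)
    return float(np.dot(vec_a, vec_b) / (np.linalg.norm(vec_a) * np.linalg.norm(vec_b)))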

#####################################
# Main Resume Processing Logic
#####################################
def process_resume(file_obj, models):
    """
    Extracts text from the uploaded file and then generates a summary
    using a text summarization model.
    """
    with st.status("Processing resume...") as status:
        status.update(label="Extracting text from resume...")
        resume_text = extract_text_from_file(file_obj)
        
        # Check if resume_text is valid
        if not resume_text or resume_text.strip() == "":
            status.update(label="Error: No text could be extracted", state="error")
            return ""
        
        status.update(label=f"Extracted {len(resume_text)} characters. Generating summary...")
        
        candidate_summary = summarize_resume_text(resume_text, models)
        status.update(label="Processing complete!", state="complete")
    
    return candidate_summary

#####################################
# Streamlit Interface - Optimized
#####################################
def main():
    st.set_page_config(page_title="Resume Analyzer", layout="wide")
    
    # Initialize session state for GPU usage
    if 'use_gpu' not in st.session_state:
        st.session_state.use_gpu = False
    
    # Sidebar settings (shown on every rerun)
    with st.sidebar:
        st.title("Settings")
        st.session_state.use_gpu = st.checkbox(
            "Use GPU (if available)", value=st.session_state.use_gpu
        )
        
        st.info("Using GPU can significantly speed up model inference if available")
    
    # Load models - this happens only once due to caching
    with st.spinner("Loading AI models..."):
        models = load_models()
    
    st.title("Resume Analyzer and Company Suitability Checker")
    st.markdown(
        """
    Upload your resume file in **.doc** or **.docx** format. The app performs the following tasks:
    1. Extracts text from the resume.
    2. Uses a transformer-based model to generate a concise candidate summary.
    3. Compares the candidate summary with a company profile to produce a suitability score.
    """
    )

    # Use columns for better layout
    col1, col2 = st.columns([1, 1])
    
    with col1:
        # File uploader for resume
        uploaded_file = st.file_uploader("Upload Resume", type=["doc", "docx"])
        
        # Button to process the resume
        if st.button("Process Resume", type="primary", use_container_width=True):
            if uploaded_file is None:
                st.error("Please upload a resume file first.")
            else:
                candidate_summary = process_resume(uploaded_file, models)
                if candidate_summary:  # only if summary is generated
                    st.session_state["candidate_summary"] = candidate_summary
        
        # Display candidate summary if available
        if "candidate_summary" in st.session_state:
            st.subheader("Candidate Summary")
            st.markdown(st.session_state["candidate_summary"])
    
    with col2:
        # Pre-defined company prompt for Google LLC.
        default_company_prompt = (
            "Google LLC, a global leader in technology and innovation, specializes in internet services, cloud computing, "
            "artificial intelligence, and software development. As part of Alphabet Inc., Google seeks candidates with strong "
            "problem-solving skills, adaptability, and collaboration abilities. Technical roles require proficiency in programming "
            "languages such as Python, Java, C++, Go, or JavaScript, with expertise in data structures, algorithms, and system design. "
            "Additionally, skills in AI, cybersecurity, UX/UI design, and digital marketing are highly valued. Google fosters a culture "
            "of innovation, expecting candidates to demonstrate creativity, analytical thinking, and a passion for cutting-edge technology."
        )

        # Company prompt text area.
        company_prompt = st.text_area(
            "Enter company details:",
            value=default_company_prompt,
            height=150,
        )

        # Button to compute the suitability score.
        if st.button("Compute Suitability Score", type="primary", use_container_width=True):
            if "candidate_summary" not in st.session_state:
                st.error("Please process the resume first!")
            else:
                candidate_summary = st.session_state["candidate_summary"]
                if candidate_summary.strip() == "":
                    st.error("Candidate summary is empty; please check your resume file.")
                elif company_prompt.strip() == "":
                    st.error("Please enter the company information.")
                else:
                    with st.spinner("Computing suitability score..."):
                        score = compute_suitability(candidate_summary, company_prompt, models)
                    
                    # Display score with a progress bar for visual feedback
                    st.success(f"Suitability Score: {score:.2f} (range 0 to 1)")
                    st.progress(score)
                    
                    # Add interpretation of score
                    if score > 0.75:
                        st.info("Excellent match! Your profile appears very well suited for this company.")
                    elif score > 0.5:
                        st.info("Good match. Your profile aligns with many aspects of the company's requirements.")
                    elif score > 0.3:
                        st.info("Moderate match. Consider highlighting more relevant skills or experience.")
                    else:
                        st.info("Low match. Your profile may need significant adjustments to better align with this company.")


if __name__ == "__main__":
    main()