import gradio as gr
import os
import io

import PyPDF2
from langchain_openai import ChatOpenAI
from langchain.chains import LLMChain
from langchain.memory import ConversationBufferMemory
from langchain import PromptTemplate
from gradio.components import File, Textbox, Slider  # noqa: F401 -- kept; may be used by other chunks of this project


def extract_text_from_pdf_binary(pdf_binary):
    """Extract and concatenate the text of every page of a PDF.

    Args:
        pdf_binary: Raw bytes of a PDF file (as produced by a Gradio
            ``gr.File(type="binary")`` upload).

    Returns:
        str: The concatenated text of all pages. Pages for which PyPDF2
        returns ``None``/empty (e.g. image-only pages) are skipped.
    """
    text = ""
    reader = PyPDF2.PdfReader(io.BytesIO(pdf_binary))
    for page in reader.pages:
        page_text = page.extract_text()
        if page_text:  # extract_text() may return None or "" for image-only pages
            text += page_text
    return text


def format_resume_to_yaml(api_key, file_content):
    """Format a PDF resume into a fixed YAML template using an LLM.

    Args:
        api_key: OpenAI API key; exported to the environment so the
            LangChain OpenAI client can pick it up.
        file_content: Raw bytes of the uploaded PDF resume.

    Returns:
        str: The model's YAML-formatted rendition of the resume.

    Raises:
        ValueError: If ``file_content`` is empty.
    """
    os.environ['OPENAI_API_KEY'] = api_key

    if not file_content:
        raise ValueError("The uploaded file is empty.")

    resume_text = extract_text_from_pdf_binary(file_content)

    template = """Format the provided resume to this YAML template:
        ---
    name: ''
    phoneNumbers:
    - ''
    websites:
    - ''
    emails:
    - ''
    dateOfBirth: ''
    addresses:
    - street: ''
      city: ''
      state: ''
      zip: ''
      country: ''
    summary: ''
    education:
    - school: ''
      degree: ''
      fieldOfStudy: ''
      startDate: ''
      endDate: ''
    workExperience:
    - company: ''
      position: ''
      startDate: ''
      endDate: ''
    skills:
    - name: ''
    certifications:
    - name: ''

    {chat_history}
    {human_input}"""

    prompt = PromptTemplate(
        input_variables=["chat_history", "human_input"],
        template=template,
    )

    memory = ConversationBufferMemory(memory_key="chat_history")

    llm_chain = LLMChain(
        llm=ChatOpenAI(model="gpt-3.5-turbo"),
        prompt=prompt,
        verbose=True,
        memory=memory,
    )

    return llm_chain.predict(human_input=resume_text)


def match_resume_to_job_description(api_key, resume_file_content, job_description):
    """Score how well a PDF resume matches a job description.

    Args:
        api_key: OpenAI API key; exported to the environment.
        resume_file_content: Raw bytes of the uploaded PDF resume.
        job_description: Free-text job description to compare against.

    Returns:
        str: The model's evaluation, including a 0-100 matching score.

    Raises:
        ValueError: If either the resume bytes or the job description
            is empty.
    """
    os.environ['OPENAI_API_KEY'] = api_key

    if not resume_file_content or not job_description:
        raise ValueError("The uploaded file or job description is empty.")

    resume_text = extract_text_from_pdf_binary(resume_file_content)

    prompt = (
        f"Given the following resume text:\n{resume_text}\n\n"
        f"And the job description:\n{job_description}\n\n"
        "Evaluate how well the resume matches the job description and provide "
        "a matching score from 0 to 100, where 100 is a perfect match."
    )

    llm = ChatOpenAI(model="gpt-3.5-turbo")
    # BUG FIX: ChatOpenAI.predict takes the text as its first (``text``)
    # argument; the previous ``predict(prompt=prompt)`` raised a TypeError.
    return llm.predict(prompt)


def main():
    """Build and launch the Gradio app with both tools as tabs.

    Each ``gr.Interface`` gets its own component instances -- sharing one
    component object between two interfaces is not supported by Gradio.
    Both interfaces are exposed through a ``TabbedInterface`` so the
    matcher is actually reachable (previously it was built but never
    launched).
    """
    format_resume_interface = gr.Interface(
        fn=format_resume_to_yaml,
        inputs=[
            gr.Textbox(label="Enter your OpenAI API Key"),
            gr.File(label="Upload your PDF resume", type="binary"),
        ],
        outputs=gr.Textbox(label="Formatted Resume in YAML"),
        title="Resume to YAML Formatter",
        description="Upload a PDF resume and enter your OpenAI API key to get it formatted to a YAML template.",
    )

    match_resume_interface = gr.Interface(
        fn=match_resume_to_job_description,
        inputs=[
            gr.Textbox(label="Enter your OpenAI API Key"),
            gr.File(label="Upload your PDF resume", type="binary"),
            gr.Textbox(label="Enter the job description", placeholder="Paste the job description here"),
        ],
        outputs=gr.Textbox(label="Resume Match Score"),
        title="Resume Matcher",
        description="Upload a PDF resume, enter your OpenAI API key and job description to get the matching score.",
    )

    gr.TabbedInterface(
        [format_resume_interface, match_resume_interface],
        tab_names=["Resume to YAML", "Resume Matcher"],
    ).launch(debug=True, share=True)


if __name__ == "__main__":
    main()