Update app.py
app.py
CHANGED
@@ -6,20 +6,20 @@ from langchain.memory import ConversationBufferMemory
 from langchain import PromptTemplate
 import os
 import tempfile
-
-# Updated imports for Gradio components
 from gradio.components import File, Textbox
 
-
 def format_resume_to_yaml(api_key, file):
     # Set the API key for OpenAI
     os.environ['OPENAI_API_KEY'] = api_key
+
+    if file is None:
+        return "Please upload a valid PDF resume."
 
     file_content = file.read()
 
     # Check if the file content is not empty
     if not file_content:
-
+        return "The uploaded file is empty."
 
     # Save the uploaded file content to a temporary file
     with tempfile.NamedTemporaryFile(delete=False, mode='wb+') as tmp_file:
@@ -28,51 +28,16 @@
         os.fsync(tmp_file.fileno())  # Ensure data is written to disk
         temp_file_path = tmp_file.name
 
-    # Now we can use PyPDFLoader with the path to the temporary file
     try:
         loader = PyPDFLoader(temp_file_path)
         docs = loader.load_and_split()  # This will return a list of text chunks from the PDF
-    except (IOError,
+    except (IOError, Exception) as e:  # Adjusted for a broader range of exceptions
        raise ValueError(f"An error occurred while processing the PDF: {e}")
 
     # Combine the text chunks into a single string
     resume_text = " ".join(docs)
 
-    template = """
-    ---
-    name: ''
-    phoneNumbers:
-    - ''
-    websites:
-    - ''
-    emails:
-    - ''
-    dateOfBirth: ''
-    addresses:
-    - street: ''
-      city: ''
-      state: ''
-      zip: ''
-      country: ''
-    summary: ''
-    education:
-    - school: ''
-      degree: ''
-      fieldOfStudy: ''
-      startDate: ''
-      endDate: ''
-    workExperience:
-    - company: ''
-      position: ''
-      startDate: ''
-      endDate: ''
-    skills:
-    - name: ''
-    certifications:
-    - name: ''
-
-    {chat_history}
-    {human_input}"""
+    template = """[Your YAML template here]"""
 
     prompt = PromptTemplate(
         input_variables=["chat_history", "human_input"],
@@ -91,27 +56,20 @@
     res = llm_chain.predict(human_input=resume_text)
     return res['output_text']
 
-def on_file_upload(filename, file_content):
-    if not file_content:
-        gr.Interface.alert(title="Error", message="Please upload a valid PDF resume.")
-
 def main():
     input_api_key = Textbox(label="Enter your OpenAI API Key")
     input_pdf_file = File(label="Upload your PDF resume")
-    output_yaml = Textbox(label="Formatted Resume in YAML")
+    output_yaml = Textbox(label="Formatted Resume in YAML", interactive=False)
 
     iface = gr.Interface(
         fn=format_resume_to_yaml,
-        inputs=[input_api_key,
+        inputs=[input_api_key, input_pdf_file],
         outputs=output_yaml,
         title="Resume to YAML Formatter",
         description="Upload a PDF resume and enter your OpenAI API key to get it formatted to a YAML template.",
     )
 
-    iface.on_data_received(on_file_upload)  # Attach the on_file_upload function to the interface's on_data_received event
-
     iface.launch(debug=True)
 
-
 if __name__ == "__main__":
     main()
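Note on `file_content = file.read()`: what Gradio hands to the callback depends on the File component's `type` setting and the Gradio version; it may be a file-like object (as the code assumes) or a plain filepath string. A hedged sketch that tolerates both, where `read_uploaded_pdf` is a hypothetical helper and not part of app.py:

import os

def read_uploaded_pdf(file):
    # Hypothetical helper, not in app.py: return the raw bytes whether Gradio
    # passes a file-like object (as app.py assumes) or a filepath string.
    if file is None:
        return None
    if isinstance(file, (str, os.PathLike)):  # some Gradio versions/configs pass a path
        with open(file, "rb") as fh:
            return fh.read()
    return file.read()  # file-like object exposing read()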
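Note on the new `except (IOError, Exception) as e:` line: `IOError` is already a subclass of `Exception` in Python 3, so the tuple adds nothing. A minimal sketch of the equivalent, narrower form, with a placeholder path standing in for the NamedTemporaryFile created above:

from langchain.document_loaders import PyPDFLoader

temp_file_path = "/tmp/resume.pdf"  # placeholder path for illustration only
try:
    loader = PyPDFLoader(temp_file_path)
    docs = loader.load_and_split()
except Exception as e:  # Exception already covers IOError
    raise ValueError(f"An error occurred while processing the PDF: {e}") from e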
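Note on `resume_text = " ".join(docs)`: `load_and_split()` returns LangChain `Document` objects rather than strings, so joining them directly raises a `TypeError`. A small sketch of the join using each chunk's `page_content`:

from langchain.document_loaders import PyPDFLoader

loader = PyPDFLoader("resume.pdf")  # placeholder path for illustration only
docs = loader.load_and_split()      # list of Document objects, not plain strings
resume_text = " ".join(doc.page_content for doc in docs)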
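Note on `return res['output_text']`: `LLMChain.predict()` returns the generated text as a plain string, so indexing it like a dict fails. A minimal sketch of the chain wiring and return value, assuming a `ChatOpenAI` model and the prompt variables shown in the diff (the actual model and template used by the Space are not shown here):

from langchain import PromptTemplate
from langchain.chains import LLMChain
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferMemory

prompt = PromptTemplate(
    input_variables=["chat_history", "human_input"],
    template="{chat_history}\n{human_input}",  # stand-in for the YAML template
)
memory = ConversationBufferMemory(memory_key="chat_history")
llm_chain = LLMChain(llm=ChatOpenAI(temperature=0), prompt=prompt, memory=memory)

res = llm_chain.predict(human_input="resume text here")
print(res)  # predict() already returns a string; no ['output_text'] indexing needed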
|