Update app.py
app.py
CHANGED
@@ -7,6 +7,7 @@ For more information on `huggingface_hub` Inference API support, please check th
 """
 client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
 
+
 def extract_text_from_pdf(pdf_path):
     # Open the provided PDF file
     doc = fitz.open(pdf_path)
@@ -19,14 +20,8 @@ def extract_text_from_pdf(pdf_path):
     doc.close()  # Ensure the PDF file is closed
     return text
 
-def respond(
-    message,
-    history: list[tuple[str, str]],
-    system_message,
-    max_tokens,
-    temperature,
-    top_p,
-):
+
+def respond(message, history, system_message, max_tokens, temperature, top_p):
     messages = [{"role": "system", "content": system_message}]
 
     for val in history:
@@ -51,6 +46,7 @@ def respond(
         print(f"Token: {token}")  # Debugging statement to trace tokens
         yield response  # Yield the complete response up to this point
 
+
 def process_resume_and_respond(pdf_file, message, history, system_message, max_tokens, temperature, top_p):
     # Extract text from the PDF file
     resume_text = extract_text_from_pdf(pdf_file.name)
@@ -61,23 +57,30 @@ def process_resume_and_respond(pdf_file, message, history, system_message, max_t
     response = "".join([token for token in response_gen])
     return response
 
+
 # Store the uploaded PDF content globally
 uploaded_resume_text = ""
 
+
 def upload_resume(pdf_file):
     global uploaded_resume_text
     uploaded_resume_text = extract_text_from_pdf(pdf_file.name)
     return "Resume uploaded successfully!"
 
+
 def respond_with_resume(message, history, system_message, max_tokens, temperature, top_p):
     global uploaded_resume_text
     # Combine the uploaded resume text with the user message
     combined_message = f"Resume:\n{uploaded_resume_text}\n\nUser message:\n{message}"
     # Respond using the combined message
     response_gen = respond(combined_message, history, system_message, max_tokens, temperature, top_p)
-
+    # Collect all tokens generated
+    response = ""
+    for token in response_gen:
+        response = token  # Update the response with the latest token
     return response
 
+
 """
 For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
 """
@@ -108,5 +111,6 @@ demo = gr.TabbedInterface(
     ["Upload Resume", "Chat with Job Advisor"]
 )
 
+
 if __name__ == "__main__":
     demo.launch()
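
The change in `respond_with_resume` works because `respond` yields the *cumulative* response after each token (per its own comment, "Yield the complete response up to this point"), so keeping only the last yielded value leaves the full reply. A minimal, self-contained sketch of that pattern — `fake_respond` here is a hypothetical stand-in for the app's `respond` generator, not code from the commit:

```python
def fake_respond():
    # Stand-in for respond(): yields the cumulative text after each token,
    # mirroring how the app's streaming generator behaves.
    response = ""
    for token in ["Dear", " hiring", " manager"]:
        response += token
        yield response  # complete response so far

response = ""
for chunk in fake_respond():
    response = chunk  # keep only the latest (longest) snapshot

print(response)  # -> "Dear hiring manager"
```

Note that joining the chunks instead (as `process_resume_and_respond` does with `"".join(...)`) would concatenate overlapping snapshots rather than tokens, which is presumably why this function overwrites `response` in a loop instead.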
|