Update app.py
Browse files
app.py
CHANGED
@@ -2,16 +2,15 @@ import openai
|
|
2 |
import os
|
3 |
from paperqa import Docs
|
4 |
import gradio as gr
|
5 |
-
from
|
6 |
-
from langchain.vectorstores import Chroma
|
7 |
-
from langchain.embeddings.openai import OpenAIEmbeddings
|
8 |
-
from langchain.document_loaders import UnstructuredPDFLoader
|
9 |
-
from langchain.llms import OpenAI
|
10 |
-
from langchain.chains.question_answering import load_qa_chain
|
11 |
-
from langchain.chat_models import ChatOpenAI
|
12 |
|
13 |
-
|
|
|
|
|
|
|
|
|
14 |
|
|
|
15 |
.gradio-container {
|
16 |
font-family: "IBM Plex Mono";
|
17 |
}
|
@@ -22,7 +21,6 @@ css_style = """
|
|
22 |
}
|
23 |
"""
|
24 |
|
25 |
-
|
26 |
def run(uploaded_files):
|
27 |
all_files = []
|
28 |
if uploaded_files is None:
|
@@ -33,18 +31,17 @@ def run(uploaded_files):
|
|
33 |
print(all_files)
|
34 |
return all_files
|
35 |
|
36 |
-
|
37 |
-
|
38 |
-
|
39 |
-
|
40 |
-
|
41 |
-
|
42 |
-
|
43 |
-
|
44 |
-
|
45 |
-
|
46 |
-
|
47 |
-
|
48 |
|
49 |
with gr.Blocks(css=css_style) as demo:
|
50 |
gr.Markdown(f"""
|
@@ -59,26 +56,28 @@ with gr.Blocks(css=css_style) as demo:
|
|
59 |
for a single query. That is $0.10-0.20 per query, so please be careful!
|
60 |
Porting it to Llama.cpp soon for saved cost.
|
61 |
|
62 |
-
1.
|
63 |
-
2.
|
64 |
-
3. Provide Designation for which you are hiring
|
65 |
""")
|
66 |
|
67 |
-
|
68 |
-
label='Your OpenAI Api Key', value="")
|
69 |
-
position = gr.Text(
|
70 |
-
label='Position/Designation for which you are hiring for', value="")
|
71 |
|
72 |
with gr.Tab('File Upload'):
|
73 |
-
uploaded_files = gr.File(
|
74 |
-
label="Resume Upload - ONLY PDF. (Doc File Support Coming Soon)", file_count="multiple", show_progress=True)
|
75 |
|
76 |
-
uploaded_files.change(
|
77 |
-
fn=run, inputs=[uploaded_files], outputs=[uploaded_files])
|
78 |
ask = gr.Button("Find Top Candidate")
|
79 |
-
answer = gr.Markdown(label="Result",
|
80 |
-
|
81 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
82 |
|
83 |
demo.queue(concurrency_count=20)
|
84 |
demo.launch(show_error=True)
|
|
|
2 |
import os
|
3 |
from paperqa import Docs
|
4 |
import gradio as gr
|
5 |
+
from dotenv import load_dotenv
|
|
|
|
|
|
|
|
|
|
|
|
|
6 |
|
7 |
+
# Load environment variables from .env file
|
8 |
+
load_dotenv()
|
9 |
+
|
10 |
+
# Get the OpenAI API key from the environment variable
|
11 |
+
openai_api_key = os.getenv('OPENAI_API_KEY')
|
12 |
|
13 |
+
css_style = """
|
14 |
.gradio-container {
|
15 |
font-family: "IBM Plex Mono";
|
16 |
}
|
|
|
21 |
}
|
22 |
"""
|
23 |
|
|
|
24 |
def run(uploaded_files):
|
25 |
all_files = []
|
26 |
if uploaded_files is None:
|
|
|
31 |
print(all_files)
|
32 |
return all_files
|
33 |
|
34 |
+
def createAnswer(files, designation):
    """Query the uploaded resume PDFs and name the best candidate for *designation*.

    Parameters
    ----------
    files : list | None
        Gradio file objects from the upload widget; each exposes a ``.name``
        filesystem path to a PDF.
    designation : str
        The position/role being hired for.

    Returns
    -------
    str
        The model's answer, or a human-readable error message (this function
        never raises — the Gradio UI displays whatever string it returns).
    """
    # Guard clauses: fail early with actionable messages instead of letting the
    # broad except below surface cryptic errors ("os.environ values must be str",
    # "'NoneType' object is not iterable").
    if not openai_api_key:
        return "An error occurred: OPENAI_API_KEY is not set (check your environment or .env file)."
    if not files:
        return "An error occurred: no resume files were uploaded."
    try:
        os.environ['OPENAI_API_KEY'] = openai_api_key
        docs = Docs(llm='gpt-3.5-turbo')
        for d in files:
            docs.add(d.name)
        answer = docs.query(
            f"Who is the best candidate to hire for {designation}. Provide a list with the candidate name. If you don't know, simply say None of the candidates are suited for the Job role.")
        return answer.answer
    except Exception as e:
        # Surface the failure in the UI rather than crashing the Gradio app.
        return f"An error occurred: {str(e)}"
|
|
|
45 |
|
46 |
with gr.Blocks(css=css_style) as demo:
|
47 |
gr.Markdown(f"""
|
|
|
56 |
for a single query. That is $0.10-0.20 per query, so please be careful!
|
57 |
Porting it to Llama.cpp soon for saved cost.
|
58 |
|
59 |
+
1. Upload your Resumes (Try a few resumes/cv to try < 5)
|
60 |
+
2. Provide Designation for which you are hiring
|
|
|
61 |
""")
|
62 |
|
63 |
+
position = gr.Text(label='Position/Designation for which you are hiring for', value="")
|
|
|
|
|
|
|
64 |
|
65 |
with gr.Tab('File Upload'):
|
66 |
+
uploaded_files = gr.File(label="Resume Upload - ONLY PDF. (Doc File Support Coming Soon)", file_count="multiple", show_progress=True)
|
|
|
67 |
|
68 |
+
uploaded_files.change(fn=run, inputs=[uploaded_files], outputs=[uploaded_files])
|
|
|
69 |
ask = gr.Button("Find Top Candidate")
|
70 |
+
answer = gr.Markdown(label="Result", elem_classes='answerText')
|
71 |
+
loading_indicator = gr.Markdown(label="Loading", visible=False)
|
72 |
+
|
73 |
+
def on_ask_click():
    """Show the loading indicator while the candidate search runs.

    Gradio applies component updates from a handler's RETURN value when the
    component is listed in ``outputs``. The original merely CALLED
    ``loading_indicator.update(...)`` and discarded the resulting update dict,
    so the indicator never became visible; returning the update fixes that
    (``outputs=[loading_indicator]`` is already wired on the ``.click``).
    """
    # Same classmethod-update style the file already uses in the final .then().
    return gr.Markdown.update(visible=True, value="Processing...")
|
75 |
+
|
76 |
+
ask.click(fn=on_ask_click, inputs=[], outputs=[loading_indicator]).then(
|
77 |
+
fn=createAnswer, inputs=[uploaded_files, position], outputs=[answer]
|
78 |
+
).then(
|
79 |
+
fn=lambda: gr.Markdown.update(visible=False), inputs=[], outputs=[loading_indicator]
|
80 |
+
)
|
81 |
|
82 |
demo.queue(concurrency_count=20)
|
83 |
demo.launch(show_error=True)
|