Update QnA.py
QnA.py CHANGED
@@ -1,6 +1,7 @@
 from langchain.chains.combine_documents import create_stuff_documents_chain
 from langchain_core.prompts import ChatPromptTemplate
 from langchain.chains import create_retrieval_chain
+from langchain.chains.summarize.chain import load_summarize_chain
 
 #from Api_Key import google_plam
 from langchain_groq import ChatGroq
@@ -11,14 +12,31 @@ load_dotenv()
 
 def prompt_template_to_analyze_resume():
     template = """
-    You are provided with the Resume of the Candidate in the context below . As an Talent Aquistion bot , your task is to provide insights about the
+    You are provided with the Resume of the Candidate in the context below . As an Talent Aquistion bot , your task is to provide insights about the
+    candidate in point wise. Mention his strength and wekaness in general.Do not make up answers.
+
+    \n\n:{context}
+    """
+    prompt = ChatPromptTemplate.from_messages(
+        [
+        ('system',template),
+        ('human','input'),
+        ]
+    )
+    return prompt
+
+def prompt_template_for_relaibility():
+    template ="""
+    You are provided with the Resume of the Candidate in the context below
+    If asked about reliability , check How frequently the candidate has switched from one company to another.
     Grade him on the given basis:
     If less than 2 Year - very less Reliable
     if more than 2 years but less than 5 years - Reliable
     if more than 5 Years - Highly Reliable
+    and generate verdict .
 
     \n\n:{context}
+
     """
     prompt = ChatPromptTemplate.from_messages(
         [
@@ -27,13 +45,26 @@ def prompt_template_to_analyze_resume():
         ]
     )
     return prompt
+
 
+def summarize(documents,llm):
+    summarize_chain = load_summarize_chain(llm=llm, chain_type='refine', verbose = True)
+    results = summarize_chain.invoke({'input_documents':documents})
+    return results['output_text']
+
+
+def Q_A(vectorstore,documents,question,API_KEY):
     os.environ["GROQ_API_KEY"] = API_KEY
     llm_groq = ChatGroq(model="llama3-8b-8192")
+
     # Create a retriever
     retriever = vectorstore.as_retriever(search_type = 'similarity',search_kwargs = {'k':2},)
+    if 'relaible' in question.lower() or 'relaibility' in question.lower():
+        question_answer_chain = create_stuff_documents_chain(llm_groq, prompt_template_for_relaibility())
+
+    else:
+        question_answer_chain = create_stuff_documents_chain(llm_groq, prompt_template_to_analyze_resume())
+
     chain = create_retrieval_chain(retriever, question_answer_chain)
     result = chain.invoke({'input':question})
     return result['answer']
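For context, here is a minimal sketch of how the updated Q_A and summarize entry points might be driven end to end. It is not part of the commit: the PDF loader, embedding model, and file path are assumptions, and the Space's actual app.py may build the vectorstore differently.

# Hypothetical driver for QnA.py; loader, embedding model and paths are assumed, not taken from the repo.
import os

from langchain_community.document_loaders import PyPDFLoader
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS
from langchain_groq import ChatGroq

from QnA import Q_A, summarize

# Load and embed the resume (hypothetical path and embedding model).
documents = PyPDFLoader("resume.pdf").load()
embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
vectorstore = FAISS.from_documents(documents, embeddings)

api_key = os.environ.get("GROQ_API_KEY", "")  # however the Space injects the key

# A question containing 'relaible'/'relaibility' routes to prompt_template_for_relaibility().
print(Q_A(vectorstore, documents, "How relaible is the candidate?", api_key))

# Any other question falls through to prompt_template_to_analyze_resume().
print(Q_A(vectorstore, documents, "What are the candidate's strengths?", api_key))

# Whole-resume summary through the refine chain added in this commit.
llm_groq = ChatGroq(model="llama3-8b-8192")
print(summarize(documents, llm_groq))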