import os

from dotenv import load_dotenv

from langchain.chains import create_retrieval_chain
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain.chains.summarize import load_summarize_chain
from langchain.retrievers import ContextualCompressionRetriever
from langchain.retrievers.document_compressors import LLMChainExtractor
from langchain_community.llms.huggingface_hub import HuggingFaceHub
from langchain_core.prompts import ChatPromptTemplate
from langchain_groq import ChatGroq

load_dotenv()


def prompt_template_to_analyze_resume():
    """Build the prompt used for general questions about the candidate's resume."""
    template = """
    You are provided with the resume of the candidate in the context below.
    As a Talent Acquisition bot, your task is to answer questions about the candidate from that context.

    \n\n:{context}
    """
    prompt = ChatPromptTemplate.from_messages(
        [
            ('system', template),
            ('human', '{input}'),  # '{input}' (not the literal word) so the user question is interpolated
        ]
    )
    return prompt
    
def prompt_template_for_reliability():
    """Build the prompt used when the question concerns the candidate's reliability."""
    template = """
    You are provided with the resume of the candidate in the context below.
    If asked about reliability, check how frequently the candidate has switched from one company to another.
    Grade the candidate on the following basis:
        less than 2 years per company           - not very reliable
        between 2 and 5 years per company       - reliable
        more than 5 years per company           - highly reliable
    and generate a verdict.

    \n\n:{context}
    """
    prompt = ChatPromptTemplate.from_messages(
        [
            ('system', template),
            ('human', '{input}'),
        ]
    )
    return prompt
    

def summarize(documents, llm):
    """Summarize the resume documents with a refine-style summarization chain."""
    summarize_chain = load_summarize_chain(llm=llm, chain_type='refine', verbose=True)
    results = summarize_chain.invoke({'input_documents': documents})
    return results['output_text']


def get_hugging_face_model(model_id='mistralai/Mistral-7B-Instruct-v0.2', temperature=0.01, max_tokens=4096, api_key=None):
    """Instantiate a Hugging Face Hub LLM (defaults to Mistral-7B-Instruct)."""
    llm = HuggingFaceHub(
        huggingfacehub_api_token=api_key,
        repo_id=model_id,
        model_kwargs={'temperature': temperature, 'max_new_tokens': max_tokens},
    )
    return llm

def get_groq_model(api_key):
    """Instantiate a Groq chat model (Llama 3 8B)."""
    os.environ['GROQ_API_KEY'] = api_key
    llm = ChatGroq(model='llama3-8b-8192')  # alternative: model='gemma2-9b-it'
    return llm
    

def Q_A(vectorstore, question, API_KEY, compressor=False):
    """Answer a question about the resume stored in the vectorstore.

    The provider is inferred from the API key prefix: 'gsk' -> Groq, 'hf' -> Hugging Face.
    """
    if API_KEY.startswith('gsk'):
        chat_llm = get_groq_model(api_key=API_KEY)
    elif API_KEY.startswith('hf'):
        chat_llm = get_hugging_face_model(api_key=API_KEY)
    else:
        raise ValueError('Unrecognized API key: expected a Groq ("gsk...") or Hugging Face ("hf...") key.')

    # Create a similarity retriever over the resume chunks
    retriever = vectorstore.as_retriever(search_type='similarity', search_kwargs={'k': 2})

    if compressor:
        # Wrap the retriever in a contextual compressor so only the relevant
        # parts of each retrieved chunk are passed to the LLM
        extractor = LLMChainExtractor.from_llm(chat_llm)
        retriever = ContextualCompressionRetriever(base_compressor=extractor, base_retriever=retriever)

    # Route reliability questions ('reliable', 'reliability', ...) to the dedicated prompt
    if 'reliab' in question.lower():
        prompt = prompt_template_for_reliability()
    else:
        prompt = prompt_template_to_analyze_resume()

    question_answer_chain = create_stuff_documents_chain(chat_llm, prompt)
    chain = create_retrieval_chain(retriever, question_answer_chain)
    result = chain.invoke({'input': question})
    return result['answer']
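

# ---------------------------------------------------------------------------
# Usage sketch (assumption-laden): this module never builds the vectorstore
# itself, so the loader, splitter, embeddings, and 'resume.pdf' path below are
# illustrative placeholders, not part of the original pipeline. Any standard
# LangChain loader/embedding combination producing a vectorstore with an
# as_retriever() method should work.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    from langchain_community.document_loaders import PyPDFLoader          # assumed loader
    from langchain_community.embeddings import HuggingFaceEmbeddings      # assumed embeddings
    from langchain_community.vectorstores import FAISS                    # assumed vector store
    from langchain_text_splitters import RecursiveCharacterTextSplitter

    api_key = os.getenv('GROQ_API_KEY', '')  # a 'gsk...' (Groq) or 'hf...' (Hugging Face) key

    # Load and chunk a resume PDF (path is a placeholder)
    documents = PyPDFLoader('resume.pdf').load()
    chunks = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100).split_documents(documents)

    # Embed the chunks into an in-memory FAISS index
    vectorstore = FAISS.from_documents(chunks, HuggingFaceEmbeddings())

    print(summarize(documents, get_groq_model(api_key)))
    print(Q_A(vectorstore, 'How reliable is this candidate?', api_key, compressor=True))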