File size: 1,943 Bytes
c7e94cf
6827c6b
 
55a5dbd
 
 
 
 
3e98f5c
55a5dbd
 
a0ffd43
42db5c5
6827c6b
 
 
 
42db5c5
 
 
6827c6b
 
 
 
 
 
 
 
42db5c5
6827c6b
 
 
 
 
55a5dbd
e550816
55a5dbd
9af4b16
55a5dbd
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6827c6b
55a5dbd
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
import os
from logger import log_response
from custom_agent import CustomHfAgent 

   
from langchain.vectorstores import FAISS
from langchain.memory import ConversationBufferMemory
from langchain.chains import ConversationalRetrievalChain, ConversationChain
from langchain.llms import HuggingFaceHub


# Module-level list; shadowed by the `image` parameter below, so it is never
# read inside this module. Kept because external importers may reference it.
# NOTE(review): consider removing once confirmed unused by callers.
image = []
def handle_submission(user_message, selected_tools, url_endpoint, document, image, context):
    """Run the custom HF agent on one user turn and return its reply.

    Logs every input, builds a CustomHfAgent pointed at `url_endpoint`
    (authenticated via the HF_token environment variable — raises KeyError
    if unset), and forwards the message plus document/image/context to
    `agent.chat`.
    """
    # Log each incoming argument under its label for traceability.
    inputs = (
        ("User input", user_message),
        ("selected_tools", selected_tools),
        ("url_endpoint", url_endpoint),
        ("document", document),
        ("image", image),
        ("context", context),
    )
    for label, value in inputs:
        log_response("{} \n {}".format(label, value))

    agent = CustomHfAgent(
        url_endpoint=url_endpoint,
        token=os.environ['HF_token'],
        additional_tools=selected_tools,
        input_params={"max_new_tokens": 192},
    )

    response = agent.chat(user_message,document=document,image=image, context = context)

    log_response("Agent Response\n {}".format(response))

    return response

def handle_submission_chat(user_message, response):
    """Feed the concatenated user message + agent response into the
    conversation chain and return the chain's text prediction.

    The chain comes from `get_conversation_chain`, so a new (memory-fresh)
    chain is built on every call.
    """
    #os.environ['HUGGINGFACEHUB_API_TOKEN'] = os.environ['HF_token']
    chain = get_conversation_chain()
    combined_input = user_message + response
    text = chain.predict(input=combined_input)
    print(text)
    return text
 
def get_conversation_chain():
    """Build and return a ConversationChain backed by a HuggingFace Hub LLM.

    Returns:
        ConversationChain: a verbose chain using Mixtral-8x7B-Instruct with a
        fresh ConversationBufferMemory (default ``history`` memory key, which
        matches ConversationChain's default prompt).
    """
    llm = HuggingFaceHub(
        repo_id="mistralai/Mixtral-8x7B-Instruct-v0.1",
        model_kwargs={"temperature": 0.5, "max_length": 1048},
    )
    # llm = ChatOpenAI(temperature=0, model="gpt-3.5-turbo-0613")

    # Fix: a ConversationBufferMemory(memory_key="chat_history", ...) was
    # previously constructed here and then discarded (the chain below was
    # given a separate default memory). Passing that configured memory in
    # would actually break ConversationChain, whose default prompt expects
    # the "history" key — so the dead assignment is removed instead.
    conversation_chain = ConversationChain(
        llm=llm, verbose=True, memory=ConversationBufferMemory()
    )
    return conversation_chain