File size: 3,982 Bytes
0394b1d
f9d1bd8
 
6dacbc2
 
 
 
 
0394b1d
6dacbc2
 
0394b1d
6dacbc2
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
0cf95f2
ceaa0db
6f0d563
0cf95f2
ceaa0db
f9d1bd8
 
52548bf
0cf95f2
f9d1bd8
 
6dacbc2
ceaa0db
6dacbc2
f9d1bd8
6fea818
0cf95f2
6dacbc2
 
 
 
 
f9d1bd8
6dacbc2
0cf95f2
6fea818
0cf95f2
6dacbc2
 
 
 
 
7207714
6dacbc2
0cf95f2
6dacbc2
 
 
 
 
 
 
 
 
 
f9d1bd8
6dacbc2
f9d1bd8
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
c98d37d
0cf95f2
f9d1bd8
c98d37d
c2f639d
0394b1d
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
import gradio as gr
import requests
from IPython.display import Image, display
from langchain.embeddings import SentenceTransformerEmbeddings
from langchain.vectorstores import FAISS
from langchain_community.chat_models.huggingface import ChatHuggingFace
from langchain.schema import SystemMessage, HumanMessage, AIMessage
from langchain_community.llms import HuggingFaceEndpoint

# Embedding model used to vectorise queries for the FAISS similarity lookup.
model_name = "sentence-transformers/all-mpnet-base-v2"
embedding_llm = SentenceTransformerEmbeddings(model_name=model_name)

# Load the pre-built FAISS index from the local "faiss_index" directory.
# NOTE(review): allow_dangerous_deserialization=True unpickles the stored
# index — only acceptable because the artefact is locally produced; never
# point this at an untrusted path.
db = FAISS.load_local("faiss_index", embedding_llm, allow_dangerous_deserialization=True)

# Set up Hugging Face model
# Remote text-generation endpoint; sampling is enabled (do_sample=True) with
# moderate temperature plus nucleus/top-k filtering and a repetition penalty.
llm = HuggingFaceEndpoint(
    repo_id="HuggingFaceH4/starchat2-15b-v0.1",
    task="text-generation",
    max_new_tokens=4096,
    temperature=0.6,
    top_p=0.9,
    top_k=40,
    repetition_penalty=1.2,
    do_sample=True,
)
chat_model = ChatHuggingFace(llm=llm)

# Shared, mutable conversation history used by the handler functions below.
# Index 0 holds the system prompt; handlers append user/assistant turns.
messages = [
    SystemMessage(content="You are a helpful assistant."),
    HumanMessage(content="Hi AI, how are you today?"),
    AIMessage(content="I'm great thank you. How can I help you?")
]

def handle_message(message: str, mode: str) -> str:
    """Route a user message to the handler selected by *mode*.

    Parameters
    ----------
    message : str
        Raw user input; blank/whitespace-only input is rejected.
    mode : str
        One of "Chat"/"Chat-Message", "Web-Search", or "Chart-Generator".

    Returns
    -------
    str
        The assistant's reply, or an error string for invalid input/mode.
    """
    # Reject empty or whitespace-only messages up front.
    if not message.strip():
        return "Enter a valid message."
    # BUG FIX: the Gradio radio offers the label "Chat" while this function
    # previously only accepted "Chat-Message", so plain chat always fell
    # through to the "Select a valid mode." branch.  Accept both labels.
    if mode in ("Chat", "Chat-Message"):
        return chat_message(message)
    elif mode == "Web-Search":
        return web_search(message)
    elif mode == "Chart-Generator":
        return chart_generator(message)
    else:
        return "Select a valid mode."

def chat_message(message: str) -> str:
    """Answer *message* in plain-chat mode, updating the rolling history.

    Appends the user turn and the model's reply to the shared global
    ``messages`` list, trimming it so the prompt does not grow unboundedly.
    """
    global messages
    messages.append(HumanMessage(content=message))
    response = chat_model.invoke(messages)
    # BUG FIX: the original appended ``response.content`` (a bare str) to the
    # history, so later invocations saw an untyped string instead of an
    # AIMessage.  Append the message object itself.
    messages.append(response)
    # Bound the history.  Keep the SystemMessage at index 0 — the original
    # ``messages[-6:]`` slice silently dropped the system prompt.
    if len(messages) > 6:
        messages = [messages[0], *messages[-5:]]
    return f"IT-Assistant: {response.content}"

def web_search(message: str) -> str:
    """Answer *message* grounded in the local FAISS index ("web search").

    Retrieves the top-3 most similar documents, injects them into an
    augmented prompt, and asks the chat model to answer only from them.
    """
    global messages
    similar_docs = db.similarity_search(message, k=3)
    # Join retrieved passages; an empty result list naturally yields "",
    # so no separate branch is needed.
    source_knowledge = "\n".join(doc.page_content for doc in similar_docs)
    augmented_prompt = f"""
    If the answer to the next query is not contained in the Web Search, say 'No Answer Is Available' and then just give guidance for the query.

    Query: {message}

    Web Search:
    {source_knowledge}
    """
    messages.append(HumanMessage(content=augmented_prompt))
    response = chat_model.invoke(messages)
    # BUG FIX: append the AIMessage object, not its ``.content`` string, so
    # the history stays a homogeneous list of message objects.
    messages.append(response)
    # Bound the history while preserving the SystemMessage at index 0
    # (the original ``messages[-6:]`` slice dropped it).
    if len(messages) > 6:
        messages = [messages[0], *messages[-5:]]
    return f"IT-Assistant: {response.content}"

def chart_generator(message: str) -> str:
    """Render *message* as a QuickChart natural-language chart and describe it.

    Returns the model's analysis of the requested chart, or an error string
    when QuickChart cannot produce an image for the request.
    """
    # BUG FIX: the original omitted ``global messages`` yet reassigned
    # ``messages`` below, which makes the name local to the function and
    # raises UnboundLocalError on the very first ``messages.append``.
    global messages

    # QuickChart's natural-language endpoint turns plain text into a chart.
    # NOTE(review): *message* is interpolated unescaped into the URL —
    # presumably it should be percent-encoded; confirm against the API.
    chart_url = f"https://quickchart.io/natural/{message}"

    # Guard clause: bail out early when the chart service rejects the request.
    chart_response = requests.get(chart_url)
    if chart_response.status_code != 200:
        return "Can't generate this image. Please provide valid chart details."

    # Ask the model to analyse the chart the user requested.
    prompt = HumanMessage(content=f"Describe and analyse the content of this chart: {message}")
    messages.append(prompt)
    response = chat_model.invoke(messages)
    # BUG FIX: append the AIMessage object rather than its raw ``.content``.
    messages.append(response)
    # Bound the history, keeping the SystemMessage at index 0.
    if len(messages) > 6:
        messages = [messages[0], *messages[-5:]]

    # Show the chart inline (only effective in notebook environments).
    display(Image(url=chart_url, width=500, height=300))

    return f"IT-Assistant: {response.content}"
        
# Wire up the Gradio UI.
# BUG FIX: the radio previously offered the label "Chat" while
# handle_message only recognised "Chat-Message", so the default chat mode
# always returned "Select a valid mode.".  The choice now matches the
# string the handler checks.
demo = gr.Interface(
    fn=handle_message,
    inputs=[
        "text",
        gr.Radio(
            ["Chat-Message", "Web-Search", "Chart-Generator"],
            label="mode",
            info="Choose a mode and enter your message, then click submit to interact.",
        ),
    ],
    outputs="text",
    title="IT Assistant",
)
demo.launch()