import gradio as gr
import requests
from langchain.embeddings import SentenceTransformerEmbeddings
from langchain.vectorstores import FAISS
from langchain_community.chat_models.huggingface import ChatHuggingFace
from langchain.schema import SystemMessage, HumanMessage, AIMessage
from langchain_community.llms import HuggingFaceEndpoint
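# Load the sentence-transformers embedding model and the pre-built FAISS index
# (the "faiss_index" directory must exist locally; allow_dangerous_deserialization
# is required because FAISS indexes are stored with pickle).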
model_name = "sentence-transformers/all-mpnet-base-v2"
embedding_llm = SentenceTransformerEmbeddings(model_name=model_name)
db = FAISS.load_local("faiss_index", embedding_llm, allow_dangerous_deserialization=True)
# Set up Hugging Face model
llm = HuggingFaceEndpoint(
    repo_id="HuggingFaceH4/starchat2-15b-v0.1",
    task="text-generation",
    max_new_tokens=4096,
    temperature=0.6,
    top_p=0.9,
    top_k=40,
    repetition_penalty=1.2,
    do_sample=True,
)
chat_model = ChatHuggingFace(llm=llm)
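# Conversation history shared by all modes, seeded with a system prompt and a
# short example exchange.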
messages = [
    SystemMessage(content="You are a helpful assistant."),
    HumanMessage(content="Hi AI, how are you today?"),
    AIMessage(content="I'm great thank you. How can I help you?"),
]
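# Route the user message to the selected mode and return (text, optional image).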
def handle_message(message: str, mode: str):
    result_text, result_image = "", None
    if not message.strip():
        return "Enter a valid message.", None
    if mode == "Chat-Message":
        result_text = chat_message(message)
    elif mode == "Web-Search":
        result_text = web_search(message)
    elif mode == "Chart-Generator":
        result_text, result_image = chart_generator(message)
    else:
        result_text = "Select a valid mode."
    return result_text, result_image
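# Plain chat: append the user message, query the model, and trim the history
# to the most recent six messages.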
def chat_message(message: str):
    global messages
    prompt = HumanMessage(content=message)
    messages.append(prompt)
    response = chat_model.invoke(messages)
    # Store the AIMessage itself so the history stays a list of message objects.
    messages.append(response)
    if len(messages) >= 6:
        messages = messages[-6:]
    return f"IT-Assistant: {response.content}"
def web_search(message: str):
    global messages
    similar_docs = db.similarity_search(message, k=3)
    if similar_docs:
        source_knowledge = "\n".join([x.page_content for x in similar_docs])
    else:
        source_knowledge = ""
    augmented_prompt = f"""
If the answer to the next query is not contained in the Search, say 'No Answer Is Available' and then just give guidance for the query.
Query: {message}
Search:
{source_knowledge}
"""
    prompt = HumanMessage(content=augmented_prompt)
    messages.append(prompt)
    response = chat_model.invoke(messages)
    # Keep the AIMessage object in the history, not just its text.
    messages.append(response)
    if len(messages) >= 6:
        messages = messages[-6:]
    return f"IT-Assistant: {response.content}"
def chart_generator(message: str):
    global messages
    chart_url = f"https://quickchart.io/natural/{message}"
    response = requests.get(chart_url)
    if response.status_code == 200:
        message_with_description = f"Describe and analyse the content of this chart: {message}"
        prompt = HumanMessage(content=message_with_description)
        messages.append(prompt)
        chat_response = chat_model.invoke(messages)
        # Append the AIMessage object so the history format stays consistent.
        messages.append(chat_response)
        if len(messages) >= 6:
            messages = messages[-6:]
        return f"IT-Assistant: {chat_response.content}", chart_url
    else:
        return "Can't generate this image. Please provide valid chart details.", None
demo = gr.Interface(
    fn=handle_message,
    inputs=[
        "text",
        gr.Radio(["Chat-Message", "Web-Search", "Chart-Generator"], label="mode",
                 info="Choose a mode and enter your message, then click submit to interact."),
    ],
    outputs=[gr.Textbox(label="Response"), gr.Image(label="Chart", type="filepath")],
    theme=gr.themes.Soft(),
    title="IT Assistant",
)
demo.launch(show_api=False)