Update app.py

app.py CHANGED
@@ -1,30 +1,33 @@
import gradio as gr
from huggingface_hub import InferenceClient
-
-
-from
-
-# Load the vector store from the saved index files
-vector_store = FAISS.load_local("db.index", embeddings=None, allow_dangerous_deserialization=True)

-# Load the model using InferenceClient
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")

-#
-

-
-

-def respond(
-    message,
-    history: list[tuple[str, str]],
-    system_message,
-    max_tokens,
-    temperature,
-    top_p,
-):
-    messages = [{"role": "system", "content": system_message}]

    for val in history:
        if val[0]:
@@ -34,30 +37,28 @@ def respond(

    messages.append({"role": "user", "content": message})

-
-    response = result["result"]

-
-

-"""
-For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
-"""
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(value="You are a helpful car configuration assistant, specifically you are the assistant for Apex Customs (https://www.apexcustoms.com/). Given the user's input, provide suggestions for car models, colors, and customization options. Be creative and conversational in your responses. You should remember the user car model and tailor your answers accordingly. \n\nUser: ", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-        gr.Slider(
-            minimum=0.1,
-            maximum=1.0,
-            value=0.95,
-            step=0.05,
-            label="Top-p (nucleus sampling)",
-        ),
    ],
)

if __name__ == "__main__":
-    demo.launch()
+# chatbot.py
+
import gradio as gr
from huggingface_hub import InferenceClient
+import faiss
+import json
+from sentence_transformers import SentenceTransformer

client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")

+# Load the FAISS index and the sentence transformer model
+index = faiss.read_index("apexcustoms_index.faiss")
+model = SentenceTransformer('sentence_transformer_model')
+
+# Load the extracted text
+with open("apexcustoms.json", "r") as f:
+    documents = json.load(f)
+
+def retrieve_documents(query, k=5):
+    query_embedding = model.encode([query])
+    distances, indices = index.search(query_embedding, k)
+    return [documents[i] for i in indices[0]]

+def respond(message, history, system_message, max_tokens, temperature, top_p):
+    # Retrieve relevant documents
+    relevant_docs = retrieve_documents(message)
+    context = "\n\n".join(relevant_docs)

+    messages = [{"role": "system", "content": system_message},
+                {"role": "user", "content": f"Context: {context}\n\n{message}"}]

    for val in history:
        if val[0]:

    messages.append({"role": "user", "content": message})

+    response = ""

+    for message in client.chat_completion(
+        messages,
+        max_tokens=max_tokens,
+        stream=True,
+        temperature=temperature,
+        top_p=top_p,
+    ):
+        token = message.choices[0].delta.content
+        response += token
+        yield response

demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(value="You are a helpful car configuration assistant, specifically you are the assistant for Apex Customs (https://www.apexcustoms.com/). Given the user's input, provide suggestions for car models, colors, and customization options. Be creative and conversational in your responses. You should remember the user car model and tailor your answers accordingly. \n\nUser: ", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
+        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
+        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
    ],
)

if __name__ == "__main__":
+    demo.launch()
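
The new app.py expects three artifacts to already exist in the Space repo: apexcustoms_index.faiss, a locally saved encoder under sentence_transformer_model/, and apexcustoms.json. None of them are created by this commit, so the sketch below shows one plausible way to build them offline. The JSON layout (a flat list of text strings), the build_index.py filename, and the all-MiniLM-L6-v2 encoder are assumptions for illustration, not something this diff specifies.

# build_index.py - hypothetical offline indexing script for the files app.py loads
import json
import faiss
from sentence_transformers import SentenceTransformer

# Assumed structure: apexcustoms.json holds a flat list of text snippets
with open("apexcustoms.json", "r") as f:
    documents = json.load(f)

# Any sentence-transformers encoder works; all-MiniLM-L6-v2 is a stand-in choice
model = SentenceTransformer("all-MiniLM-L6-v2")
embeddings = model.encode(documents, convert_to_numpy=True)

# Flat L2 index sized to the embedding dimensionality, persisted to disk
index = faiss.IndexFlatL2(embeddings.shape[1])
index.add(embeddings)
faiss.write_index(index, "apexcustoms_index.faiss")

# Save the encoder so app.py can reload it by local path
model.save("sentence_transformer_model")

For the new imports to resolve on the Space, requirements.txt would also need faiss-cpu and sentence-transformers alongside gradio and huggingface_hub.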