Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -1,20 +1,48 @@
|
|
1 |
-
|
2 |
-
|
3 |
import gradio as gr
|
4 |
-
from huggingface_hub import InferenceClient
|
5 |
import faiss
|
6 |
-
import
|
|
|
|
|
7 |
from sentence_transformers import SentenceTransformer
|
8 |
|
9 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
10 |
|
11 |
-
#
|
12 |
-
|
13 |
-
model = SentenceTransformer('
|
|
|
14 |
|
15 |
-
|
16 |
-
|
17 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
18 |
|
19 |
def retrieve_documents(query, k=5):
|
20 |
query_embedding = model.encode([query])
|
@@ -22,7 +50,6 @@ def retrieve_documents(query, k=5):
|
|
22 |
return [documents[i] for i in indices[0]]
|
23 |
|
24 |
def respond(message, history, system_message, max_tokens, temperature, top_p):
|
25 |
-
# Retrieve relevant documents
|
26 |
relevant_docs = retrieve_documents(message)
|
27 |
context = "\n\n".join(relevant_docs)
|
28 |
|
@@ -53,13 +80,15 @@ def respond(message, history, system_message, max_tokens, temperature, top_p):
|
|
53 |
demo = gr.ChatInterface(
|
54 |
respond,
|
55 |
additional_inputs=[
|
56 |
-
gr.Textbox(
|
|
|
|
|
|
|
57 |
gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
|
58 |
-
gr.Slider(minimum=0.1, maximum=4.0, value=0.
|
59 |
gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
|
60 |
],
|
61 |
)
|
62 |
|
63 |
if __name__ == "__main__":
|
64 |
demo.launch()
|
65 |
-
|
|
|
1 |
+
import os
|
2 |
+
import json
|
3 |
import gradio as gr
|
|
|
4 |
import faiss
|
5 |
+
import fitz # PyMuPDF
|
6 |
+
import numpy as np
|
7 |
+
from huggingface_hub import InferenceClient
|
8 |
from sentence_transformers import SentenceTransformer
|
9 |
|
10 |
+
# Extract text from PDF
def extract_text_from_pdf(pdf_path):
    """Extract the plain text of every page in a PDF and split it into chunks.

    Args:
        pdf_path: Filesystem path to the PDF file.

    Returns:
        list[str]: Paragraph-sized text chunks (the full document text split
        on blank lines), used later as the retrieval corpus.
    """
    doc = fitz.open(pdf_path)
    try:
        # Concatenate the text of all pages in document order.
        text = "".join(page.get_text() for page in doc)
    finally:
        # fitz keeps an open file handle; close it explicitly instead of
        # leaking it (the original never closed the document).
        doc.close()
    return text.split("\n\n")
|
18 |
|
19 |
+
# Build FAISS index
def build_faiss_index(documents,
                      model_name='paraphrase-MiniLM-L6-v2',
                      index_path="apexcustoms_index.faiss",
                      model_path="sentence_transformer_model"):
    """Encode *documents* and build (and persist) a FAISS L2 index over them.

    Args:
        documents: Sequence of text chunks to index.
        model_name: SentenceTransformer checkpoint to encode with
            (default matches the original hard-coded value).
        index_path: Where the FAISS index is written on disk.
        model_path: Where the encoder model is saved on disk.

    Returns:
        tuple: ``(index, model)`` — the in-memory FAISS index and the
        SentenceTransformer used to build it.
    """
    model = SentenceTransformer(model_name)
    # FAISS requires contiguous float32 input; encode() usually returns
    # float32 already, but cast defensively.
    embeddings = np.asarray(model.encode(documents), dtype=np.float32)

    index = faiss.IndexFlatL2(embeddings.shape[1])
    index.add(embeddings)

    # Persist both artifacts so later runs can skip the slow encoding step.
    faiss.write_index(index, index_path)
    model.save(model_path)

    return index, model
|
31 |
+
|
32 |
+
# Ensure that text extraction and FAISS index building is done.
# All THREE artifacts (index, saved encoder, document chunks) must exist to
# skip the build step — the original only checked two, so a missing
# apexcustoms.json crashed json.load() with FileNotFoundError.
if (os.path.exists("apexcustoms_index.faiss")
        and os.path.exists("sentence_transformer_model")
        and os.path.exists("apexcustoms.json")):
    # Fast path: load everything previously persisted to disk.
    index = faiss.read_index("apexcustoms_index.faiss")
    model = SentenceTransformer('sentence_transformer_model')
    with open("apexcustoms.json", "r") as f:
        documents = json.load(f)
else:
    # Slow path: extract the corpus from the PDF, cache it as JSON, and
    # build the FAISS index (which also persists index + encoder).
    documents = extract_text_from_pdf("apexcustoms.pdf")
    with open("apexcustoms.json", "w") as f:
        json.dump(documents, f)
    index, model = build_faiss_index(documents)
|
43 |
+
|
44 |
+
# Hugging Face Inference API client; used by respond() to generate chat
# completions with the zephyr-7b-beta model.
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
|
46 |
|
47 |
def retrieve_documents(query, k=5):
|
48 |
query_embedding = model.encode([query])
|
|
|
50 |
return [documents[i] for i in indices[0]]
|
51 |
|
52 |
def respond(message, history, system_message, max_tokens, temperature, top_p):
|
|
|
53 |
relevant_docs = retrieve_documents(message)
|
54 |
context = "\n\n".join(relevant_docs)
|
55 |
|
|
|
80 |
demo = gr.ChatInterface(
|
81 |
respond,
|
82 |
additional_inputs=[
|
83 |
+
gr.Textbox(
|
84 |
+
value="You are a helpful car configuration assistant, specifically you are the assistant for Apex Customs (https://www.apexcustoms.com/). Given the user's input, provide suggestions for car models, colors, and customization options. Be creative and conversational in your responses. You should remember the user car model and tailor your answers accordingly. \n\nUser: ",
|
85 |
+
label="System message"
|
86 |
+
),
|
87 |
gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
|
88 |
+
gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
|
89 |
gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
|
90 |
],
|
91 |
)
|
92 |
|
93 |
# Launch the Gradio server only when run as a script (not on import).
if __name__ == "__main__":
    demo.launch()
|
|