Update app.py
app.py CHANGED

@@ -9,15 +9,8 @@ For more information on `huggingface_hub` Inference API support, please check th
 client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
 
 
-tableau_de_mots=mot_cle("mots_clés.txt")
-mots_a_verifier = tableau_de_mots
-docs_text, docs_embeddings = load_data()
-question="give me the advantage of djezzy legend for 2500 DA ?"
-prompt=pip(question,docs_text, docs_embeddings,mots_a_verifier,vector_db)
-print(prompt)
-
 def respond(
-
+    message,
     history: list[tuple[str, str]],
     system_message,
     max_tokens,
@@ -32,7 +25,7 @@ def respond(
         if val[1]:
             messages.append({"role": "assistant", "content": val[1]})
 
-    messages.append({"role": "user", "content":
+    messages.append({"role": "user", "content": message})
 
     response = ""
 
@@ -54,7 +47,7 @@ For information on how to customize the ChatInterface, peruse the gradio docs: h
 tableau_de_mots=mot_cle("mots_clés.txt")
 mots_a_verifier = tableau_de_mots
 docs_text, docs_embeddings = load_data()
-question=
+question=message
 prompt=pip(question,docs_text, docs_embeddings,mots_a_verifier,vector_db)
 print(prompt)
 
@@ -71,6 +64,7 @@ demo = gr.ChatInterface(
             step=0.05,
             label="Top-p (nucleus sampling)",
         ),
+        gr.Textbox(value=prompt, label="Prompt"),
     ],
 )
 
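Taken together, the change drops the hard-coded test question that was built at import time, reintroduces `message` as the first parameter of `respond` so the user's chat input drives the retrieval prompt, and exposes the computed prompt through an extra `gr.Textbox` input. Below is a rough sketch of how the pieces named in this diff could fit together. It assumes `mot_cle`, `load_data`, `pip` and `vector_db` are defined elsewhere in this Space (their definitions are not part of this diff), and it places the `question = message` / `prompt = pip(...)` lines inside `respond` so that `message` is in scope; the streaming loop is the stock `InferenceClient.chat_completion` pattern from the Zephyr chat template, not something shown in this diff.

# Sketch only: one possible arrangement of the pieces this diff touches.
# Assumptions: mot_cle(), load_data(), pip() and vector_db come from elsewhere
# in this Space; the prompt is built inside respond() so `message` is in scope.
from huggingface_hub import InferenceClient

client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")

# Keyword list, document corpus and embeddings, loaded once at startup.
tableau_de_mots = mot_cle("mots_clés.txt")    # assumed helper from this Space
mots_a_verifier = tableau_de_mots
docs_text, docs_embeddings = load_data()      # assumed helper from this Space


def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    # Build the retrieval prompt from the user's chat input instead of the
    # hard-coded question this commit deletes.
    question = message
    prompt = pip(question, docs_text, docs_embeddings, mots_a_verifier, vector_db)
    print(prompt)  # kept for inspection, as in the committed file

    messages = [{"role": "system", "content": system_message}]
    for user_turn, assistant_turn in history:
        if user_turn:
            messages.append({"role": "user", "content": user_turn})
        if assistant_turn:
            messages.append({"role": "assistant", "content": assistant_turn})
    # The committed change sends the raw message; passing `prompt` here instead
    # would feed the retrieved context to the model.
    messages.append({"role": "user", "content": message})

    # Standard streaming loop from the Zephyr chat template.
    response = ""
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        response += chunk.choices[0].delta.content or ""
        yield response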