Update app.py
app.py CHANGED
@@ -9,10 +9,15 @@ For more information on `huggingface_hub` Inference API support, please check th
 client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
 
 
-
+tableau_de_mots=mot_cle("mots_clés.txt")
+mots_a_verifier = tableau_de_mots
+docs_text, docs_embeddings = load_data()
+question=str(message)
+prompt=pip(question,docs_text, docs_embeddings,mots_a_verifier,vector_db)
+print(prompt)
 
 def respond(
-    message,
+    prompt,
     history: list[tuple[str, str]],
     system_message,
     max_tokens,
@@ -27,7 +32,7 @@ def respond(
         if val[1]:
             messages.append({"role": "assistant", "content": val[1]})
 
-    messages.append({"role": "user", "content": message})
+    messages.append({"role": "user", "content": prompt})
 
     response = ""
 
@@ -49,7 +54,7 @@ For information on how to customize the ChatInterface, peruse the gradio docs: h
 tableau_de_mots=mot_cle("mots_clés.txt")
 mots_a_verifier = tableau_de_mots
 docs_text, docs_embeddings = load_data()
-question=
+question=stt(message)
 prompt=pip(question,docs_text, docs_embeddings,mots_a_verifier,vector_db)
 print(prompt)
 
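For context: the commit moves the retrieval/prompt-building block up to module level and renames respond()'s first parameter from message to prompt. The sketch below is not the committed code; it shows one way the same pieces could be wired so the prompt is built inside respond() from the live user message that gr.ChatInterface passes in. It assumes the stock Zephyr chat template used by the Space scaffold, and mot_cle, load_data, pip and vector_db are stand-ins for the author's own retrieval helpers.

import gradio as gr
from huggingface_hub import InferenceClient

client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")

# Stand-ins for the author's retrieval helpers (defined elsewhere in the real
# app.py); included only so this sketch is self-contained.
def mot_cle(path):
    # Stand-in: the real helper reads a keyword list from `path`.
    return []

def load_data():
    # Stand-in: the real helper returns (docs_text, docs_embeddings).
    return [], []

def pip(question, docs_text, docs_embeddings, mots_a_verifier, vector_db):
    # Stand-in: the real helper assembles the retrieval-augmented prompt.
    return question

vector_db = None

# Loaded once at import time and reused for every request.
tableau_de_mots = mot_cle("mots_clés.txt")
mots_a_verifier = tableau_de_mots
docs_text, docs_embeddings = load_data()


def respond(message, history, system_message, max_tokens, temperature, top_p):
    # Build the retrieval-augmented prompt from the incoming user message.
    prompt = pip(message, docs_text, docs_embeddings, mots_a_verifier, vector_db)

    messages = [{"role": "system", "content": system_message}]
    for user_msg, assistant_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": prompt})

    # Stream tokens back to the UI, as in the stock Zephyr template.
    response = ""
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        response += chunk.choices[0].delta.content or ""
        yield response


demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
    ],
)

if __name__ == "__main__":
    demo.launch()

One design note, hedged: in the committed version the module-level question=str(message) appears to run before any user message exists, so building the prompt inside respond(), as sketched, keeps the prompt construction tied to the message actually received from the chat UI.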