File size: 2,457 Bytes
f15301b 025c907 7a36a59 1a63093 f15301b 8f24ce9 f15301b ba0af9a f15301b 484ec3d f15301b d363607 f15301b d363607 f15301b 45ce6f2 edd5d4e 7d85d47 f15301b ba0af9a 6887d64 3658d2d 6887d64 f15301b 6887d64 d304264 f15301b a6a346a 9ba883b f15301b 7c859bf f15301b 6dd57f6 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 |
import gradio as gr
from huggingface_hub import InferenceClient
from djezzy import load_data,mot_cle,pip,vector_db
"""
For more djdj information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
"""
# Hosted inference client for the Zephyr-7B chat model.
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
# Keywords loaded from a local file — presumably used to filter/route questions
# relevant to Djezzy; verify against `mot_cle` in djezzy.py.
tableau_de_mots=mot_cle("mots_clés.txt")
# Alias kept for readability ("words to check"); same object as above.
mots_a_verifier = tableau_de_mots
# Document texts and their precomputed embeddings for retrieval (RAG context).
docs_text, docs_embeddings = load_data()
def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    """Stream a chat completion for *message*, augmented with retrieved context.

    Parameters
    ----------
    message : str
        The user's latest input.
    history : list[tuple[str, str]]
        Prior (user, assistant) exchanges replayed into the conversation.
    system_message : str
        System prompt placed first in the message list.
    max_tokens, temperature, top_p
        Sampling parameters forwarded to ``client.chat_completion``.

    Yields
    ------
    str
        The accumulated assistant response after each streamed token.
    """
    messages = [{"role": "system", "content": system_message}]
    # Replay prior turns, skipping empty sides of a pair.
    for user_turn, assistant_turn in history:
        if user_turn:
            messages.append({"role": "user", "content": user_turn})
        if assistant_turn:
            messages.append({"role": "assistant", "content": assistant_turn})
    # Enrich the raw question with retrieved document context before sending.
    prompt = pip(message, docs_text, docs_embeddings, mots_a_verifier, vector_db)
    messages.append({"role": "user", "content": prompt})
    response = ""
    # Note: loop variable renamed (was `message`) so it no longer shadows the
    # function parameter.
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content
        # Streamed deltas can carry content=None (e.g. the final chunk);
        # guard so we never concatenate None into the response.
        if token:
            response += token
        yield response
# NOTE(review): leftover debug — this prints the function object's repr
# (e.g. "<function respond at 0x...>") at import time, not useful output.
# Candidate for removal once confirmed nothing depends on it.
repo=respond
print(repo)
"""
For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
"""
#question={"role": "user", "content": message}
#prompt=pip(question,docs_text, docs_embeddings,mots_a_verifier,vector_db)
#print(prompt)
custom_css = """
.gradio-container {
background-color: #EEE7DA;
}
.gradio-title {
color: #EF4040;
}
"""
# Chat UI wiring: `respond` streams the answer; the additional inputs expose
# the system prompt and sampling knobs to the user.
demo = gr.ChatInterface(
    respond,
    title="Djezzy Chatbot",
    css=custom_css,
    # Fix: placeholder previously misspelled the brand as "Dezzy".
    textbox=gr.Textbox(placeholder="What would you like to know about Djezzy?", container=False, scale=7),
    additional_inputs=[
        gr.Textbox(value="You are a friendly Chatbot.", placeholder="What would you like to know about Djezzy "),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(
            minimum=0.1,
            maximum=1.0,
            value=0.95,
            step=0.05,
            label="Top-p (nucleus sampling)",
        ),
    ],
)
if __name__ == "__main__":
demo.launch(share=True) |