Spaces:
Runtime error
updated app
app.py CHANGED
@@ -12,6 +12,34 @@ def format_prompt(message, history):
     return prompt
 
 
+description = """Rapid TGI (Text Generation Inference) has been developed for learning purposes.
+<h3>Source Code:</h3>
+<ul><li><a id='link' href='https://www.facebook.com'>Github Repository</a></li>
+<li><a id='link' href=''>Github Repository</a></li></ul>"""
+
+title = "<span id='logo'></span> Rapid TGI"
+
+css = """
+.gradio-container {
+    background: rgb(131,58,180);
+    background: linear-gradient(90deg, rgba(131,58,180,1) 0%, rgba(253,29,29,1) 50%, rgba(252,176,69,1) 100%);
+}
+
+#logo {
+    content: url('https://i.ibb.co/6vz9WjL/chat-bot.png');
+    width: 42px;
+    height: 42px;
+    margin-right: 10px;
+    margin-top: 3px;
+    display: inline-block;
+}
+
+#link {
+    color: #fff;
+    background-color: transparent;
+}
+"""
+
 def inference(message, history, model="mistralai/Mixtral-8x7B-Instruct-v0.1", Temperature=0.3, tokens=512,top_p=0.95, r_p=0.93):
 
     Temperature = float(Temperature)
@@ -35,21 +63,22 @@ def inference(message, history, model="mistralai/Mixtral-8x7B-Instruct-v0.1", Temperature=0.3, tokens=512,top_p=0.95, r_p=0.93):
         yield partial_message
 
 
-chatbot = gr.Chatbot(avatar_images=["/
+chatbot = gr.Chatbot(avatar_images=["https://i.ibb.co/kGd6XrM/user.png", "https://i.ibb.co/6vz9WjL/chat-bot.png"],
                      bubble_full_width=False, show_label=False, show_copy_button=True, likeable=True,)
 
 
 UI= gr.ChatInterface(
     inference,
     chatbot=chatbot,
-    description=
-    title=
+    description=description,
+    title=title,
     additional_inputs_accordion=gr.Accordion(label="Additional Configuration to get better response",open=False),
-    retry_btn=
-    undo_btn=
+    retry_btn="Retry Again",
+    undo_btn="Undo",
     clear_btn="Clear",
     theme="soft",
     submit_btn="Send",
+    css=css,
     additional_inputs=[
         gr.Dropdown(value="mistralai/Mixtral-8x7B-Instruct-v0.1",
                     choices =["mistralai/Mixtral-8x7B-Instruct-v0.1","HuggingFaceH4/zephyr-7b-beta",
@@ -61,5 +90,6 @@ UI= gr.ChatInterface(
         gr.Slider(value=0.93, maximum=1.0,label="Repetition Penalty"),
     ],
     examples=[["Hello"], ["Can I know about generative AI?"], ["How can I deploy an LLM to a Hugging Face Inference Endpoint?"]],
+
 )
-UI.queue().launch(debug=True)
+UI.queue().launch(debug=True, show_api=False, share=True, max_threads=50)
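For context: the hunk headers reference a format_prompt helper and a streaming inference generator whose bodies are not included in this diff. The sketch below shows how such a handler is commonly wired up with huggingface_hub's InferenceClient; the prompt template and the client calls are assumptions for illustration, not the Space's exact code.

# Hypothetical sketch -- the Space's real format_prompt/inference bodies are not
# shown in this diff. Assumes huggingface_hub's InferenceClient and a
# Mixtral-style [INST] ... [/INST] prompt template.
from huggingface_hub import InferenceClient

def format_prompt(message, history):
    # Fold previous (user, assistant) turns plus the new message into one prompt string.
    prompt = "<s>"
    for user_turn, bot_turn in history:
        prompt += f"[INST] {user_turn} [/INST] {bot_turn}</s> "
    prompt += f"[INST] {message} [/INST]"
    return prompt

def inference(message, history, model="mistralai/Mixtral-8x7B-Instruct-v0.1",
              Temperature=0.3, tokens=512, top_p=0.95, r_p=0.93):
    Temperature = float(Temperature)
    client = InferenceClient(model)
    stream = client.text_generation(
        format_prompt(message, history),
        temperature=Temperature,
        max_new_tokens=int(tokens),
        top_p=float(top_p),
        repetition_penalty=float(r_p),
        stream=True,
        details=True,
        return_full_text=False,
    )
    partial_message = ""
    for response in stream:
        partial_message += response.token.text
        yield partial_message  # gr.ChatInterface renders each yield as streamed output

Because gr.ChatInterface streams whatever the generator yields, yielding the growing partial_message string produces the token-by-token typing effect in the chat UI.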