Commit 7a08028

Duplicate from olivierdehaene/chat-llm-streaming

Co-authored-by: Olivier Dehaene <[email protected]>
- .gitattributes +34 -0
- README.md +13 -0
- app.py +319 -0
- requirements.txt +2 -0
.gitattributes
ADDED
@@ -0,0 +1,34 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
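Each pattern above routes matching files through Git LFS instead of storing them in the repository directly. As a rough illustration (not part of the commit), Python's fnmatch approximates how these globs select files:

```
from fnmatch import fnmatch

# A few of the patterns from the .gitattributes above; fnmatch is only an
# approximation of git's glob semantics, but close enough for suffix rules.
lfs_patterns = ["*.bin", "*.safetensors", "*.zip", "*tfevents*"]

for name in ["model.safetensors", "app.py", "logs/tfevents.123"]:
    tracked = any(fnmatch(name, pattern) for pattern in lfs_patterns)
    print(f"{name}: {'LFS' if tracked else 'regular git'}")
```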
README.md
ADDED
@@ -0,0 +1,13 @@
+---
+title: Chat Llm Streaming
+emoji: 📊
+colorFrom: blue
+colorTo: gray
+sdk: gradio
+sdk_version: 3.20.1
+app_file: app.py
+pinned: false
+duplicated_from: olivierdehaene/chat-llm-streaming
+---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py
ADDED
@@ -0,0 +1,319 @@
+import os
+
+import gradio as gr
+
+from text_generation import Client, InferenceAPIClient
+
+openchat_preprompt = (
+    "\n<human>: Hi!\n<bot>: My name is Bot, model version is 0.15, part of an open-source kit for "
+    "fine-tuning new bots! I was created by Together, LAION, and Ontocord.ai and the open-source "
+    "community. I am not human, not evil and not alive, and thus have no thoughts and feelings, "
+    "but I am programmed to be helpful, polite, honest, and friendly.\n"
+)
+
+
+def get_client(model: str):
+    if model == "togethercomputer/GPT-NeoXT-Chat-Base-20B":
+        return Client(os.getenv("OPENCHAT_API_URL"))
+    return InferenceAPIClient(model, token=os.getenv("HF_TOKEN", None))
+
+
+def get_usernames(model: str):
+    """
+    Returns:
+        (str, str, str, str): pre-prompt, username, bot name, separator
+    """
+    if model in ("OpenAssistant/oasst-sft-1-pythia-12b", "OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5"):
+        return "", "<|prompter|>", "<|assistant|>", "<|endoftext|>"
+    if model == "togethercomputer/GPT-NeoXT-Chat-Base-20B":
+        return openchat_preprompt, "<human>: ", "<bot>: ", "\n"
+    return "", "User: ", "Assistant: ", "\n"
+
+
+def predict(
+    model: str,
+    inputs: str,
+    typical_p: float,
+    top_p: float,
+    temperature: float,
+    top_k: int,
+    repetition_penalty: float,
+    watermark: bool,
+    chatbot,
+    history,
+):
+    client = get_client(model)
+    preprompt, user_name, assistant_name, sep = get_usernames(model)
+
+    history.append(inputs)
+
+    past = []
+    for data in chatbot:
+        user_data, model_data = data
+
+        if not user_data.startswith(user_name):
+            user_data = user_name + user_data
+        if not model_data.startswith(sep + assistant_name):
+            model_data = sep + assistant_name + model_data
+
+        past.append(user_data + model_data.rstrip() + sep)
+
+    if not inputs.startswith(user_name):
+        inputs = user_name + inputs
+
+    total_inputs = preprompt + "".join(past) + inputs + sep + assistant_name.rstrip()
+
+    partial_words = ""
+
+    if model in ("OpenAssistant/oasst-sft-1-pythia-12b", "OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5"):
+        iterator = client.generate_stream(
+            total_inputs,
+            typical_p=typical_p,
+            truncate=1000,
+            watermark=watermark,
+            max_new_tokens=500,
+        )
+    else:
+        iterator = client.generate_stream(
+            total_inputs,
+            top_p=top_p if top_p < 1.0 else None,
+            top_k=top_k,
+            truncate=1000,
+            repetition_penalty=repetition_penalty,
+            watermark=watermark,
+            temperature=temperature,
+            max_new_tokens=500,
+            stop_sequences=[user_name.rstrip(), assistant_name.rstrip()],
+        )
+
+    for i, response in enumerate(iterator):
+        if response.token.special:
+            continue
+
+        partial_words = partial_words + response.token.text
+        if partial_words.endswith(user_name.rstrip()):
+            partial_words = partial_words.rstrip(user_name.rstrip())
+        if partial_words.endswith(assistant_name.rstrip()):
+            partial_words = partial_words.rstrip(assistant_name.rstrip())
+
+        if i == 0:
+            history.append(" " + partial_words)
+        elif response.token.text not in user_name:
+            history[-1] = partial_words
+
+        chat = [
+            (history[i].strip(), history[i + 1].strip())
+            for i in range(0, len(history) - 1, 2)
+        ]
+        yield chat, history
+
+
+def reset_textbox():
+    return gr.update(value="")
+
+
+def radio_on_change(
+    value: str,
+    disclaimer,
+    typical_p,
+    top_p,
+    top_k,
+    temperature,
+    repetition_penalty,
+    watermark,
+):
+    if value in ("OpenAssistant/oasst-sft-1-pythia-12b", "OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5"):
+        typical_p = typical_p.update(value=0.2, visible=True)
+        top_p = top_p.update(visible=False)
+        top_k = top_k.update(visible=False)
+        temperature = temperature.update(visible=False)
+        disclaimer = disclaimer.update(visible=False)
+        repetition_penalty = repetition_penalty.update(visible=False)
+        watermark = watermark.update(False)
+    elif value == "togethercomputer/GPT-NeoXT-Chat-Base-20B":
+        typical_p = typical_p.update(visible=False)
+        top_p = top_p.update(value=0.25, visible=True)
+        top_k = top_k.update(value=50, visible=True)
+        temperature = temperature.update(value=0.6, visible=True)
+        repetition_penalty = repetition_penalty.update(value=1.01, visible=True)
+        watermark = watermark.update(False)
+        disclaimer = disclaimer.update(visible=True)
+    else:
+        typical_p = typical_p.update(visible=False)
+        top_p = top_p.update(value=0.95, visible=True)
+        top_k = top_k.update(value=4, visible=True)
+        temperature = temperature.update(value=0.5, visible=True)
+        repetition_penalty = repetition_penalty.update(value=1.03, visible=True)
+        watermark = watermark.update(True)
+        disclaimer = disclaimer.update(visible=False)
+    return (
+        disclaimer,
+        typical_p,
+        top_p,
+        top_k,
+        temperature,
+        repetition_penalty,
+        watermark,
+    )
+
+
+title = """<h1 align="center">Large Language Model Chat API</h1>"""
+description = """Language models can be conditioned to act like dialogue agents through a conversational prompt that typically takes the form:
+
+```
+User: <utterance>
+Assistant: <utterance>
+User: <utterance>
+Assistant: <utterance>
+...
+```
+
+In this app, you can explore the outputs of multiple LLMs when prompted in this way.
+"""
+
+text_generation_inference = """
+<div align="center">Powered by: <a href=https://github.com/huggingface/text-generation-inference>Text Generation Inference</a></div>
+"""
+
+openchat_disclaimer = """
+<div align="center">Checkout the official <a href=https://huggingface.co/spaces/togethercomputer/OpenChatKit>OpenChatKit feedback app</a> for the full experience.</div>
+"""
+
+with gr.Blocks(
+    css="""#col_container {margin-left: auto; margin-right: auto;}
+    #chatbot {height: 520px; overflow: auto;}"""
+) as demo:
+    gr.HTML(title)
+    gr.Markdown(text_generation_inference, visible=True)
+    with gr.Column(elem_id="col_container"):
+        model = gr.Radio(
+            value="OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5",
+            choices=[
+                "OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5",
+                "OpenAssistant/oasst-sft-1-pythia-12b",
+                # "togethercomputer/GPT-NeoXT-Chat-Base-20B",
+                "google/flan-t5-xxl",
+                "google/flan-ul2",
+                "bigscience/bloom",
+                "bigscience/bloomz",
+                "EleutherAI/gpt-neox-20b",
+            ],
+            label="Model",
+            interactive=True,
+        )
+
+        chatbot = gr.Chatbot(elem_id="chatbot")
+        inputs = gr.Textbox(
+            placeholder="Hi there!", label="Type an input and press Enter"
+        )
+        disclaimer = gr.Markdown(openchat_disclaimer, visible=False)
+        state = gr.State([])
+        b1 = gr.Button()
+
+        with gr.Accordion("Parameters", open=False):
+            typical_p = gr.Slider(
+                minimum=-0,
+                maximum=1.0,
+                value=0.2,
+                step=0.05,
+                interactive=True,
+                label="Typical P mass",
+            )
+            top_p = gr.Slider(
+                minimum=-0,
+                maximum=1.0,
+                value=0.25,
+                step=0.05,
+                interactive=True,
+                label="Top-p (nucleus sampling)",
+                visible=False,
+            )
+            temperature = gr.Slider(
+                minimum=-0,
+                maximum=5.0,
+                value=0.6,
+                step=0.1,
+                interactive=True,
+                label="Temperature",
+                visible=False,
+            )
+            top_k = gr.Slider(
+                minimum=1,
+                maximum=50,
+                value=50,
+                step=1,
+                interactive=True,
+                label="Top-k",
+                visible=False,
+            )
+            repetition_penalty = gr.Slider(
+                minimum=0.1,
+                maximum=3.0,
+                value=1.03,
+                step=0.01,
+                interactive=True,
+                label="Repetition Penalty",
+                visible=False,
+            )
+            watermark = gr.Checkbox(value=False, label="Text watermarking")
+
+        model.change(
+            lambda value: radio_on_change(
+                value,
+                disclaimer,
+                typical_p,
+                top_p,
+                top_k,
+                temperature,
+                repetition_penalty,
+                watermark,
+            ),
+            inputs=model,
+            outputs=[
+                disclaimer,
+                typical_p,
+                top_p,
+                top_k,
+                temperature,
+                repetition_penalty,
+                watermark,
+            ],
+        )
+
+        inputs.submit(
+            predict,
+            [
+                model,
+                inputs,
+                typical_p,
+                top_p,
+                temperature,
+                top_k,
+                repetition_penalty,
+                watermark,
+                chatbot,
+                state,
+            ],
+            [chatbot, state],
+        )
+        b1.click(
+            predict,
+            [
+                model,
+                inputs,
+                typical_p,
+                top_p,
+                temperature,
+                top_k,
+                repetition_penalty,
+                watermark,
+                chatbot,
+                state,
+            ],
+            [chatbot, state],
+        )
+        b1.click(reset_textbox, [], [inputs])
+        inputs.submit(reset_textbox, [], [inputs])
+
+    gr.Markdown(description)
+demo.queue(concurrency_count=16).launch(debug=True)
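The heart of `predict()` is the prompt assembly followed by the `generate_stream` loop. Below is a standalone sketch of that flow for the default OpenAssistant model, using the same client API the app imports; it assumes a valid `HF_TOKEN` in the environment and that the Inference API still serves this model:

```
import os

from text_generation import InferenceAPIClient

# Same client the app builds in get_client() for non-OpenChat models.
client = InferenceAPIClient(
    "OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5",
    token=os.getenv("HF_TOKEN", None),
)

# Prompt assembled the way predict() does for the OpenAssistant models:
# user turn, separator, then the assistant tag the model should complete.
prompt = "<|prompter|>Hi there!<|endoftext|><|assistant|>"

reply = ""
for response in client.generate_stream(
    prompt,
    typical_p=0.2,       # the app's default for these models
    truncate=1000,
    max_new_tokens=500,
):
    if response.token.special:  # skip <|endoftext|> etc., as predict() does
        continue
    reply += response.token.text
    print(response.token.text, end="", flush=True)
```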
requirements.txt
ADDED
@@ -0,0 +1,2 @@
+text-generation==0.5.0
+gradio==3.20.1
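These pins match the README's `sdk_version: 3.20.1`; presumably `pip install -r requirements.txt` followed by `python app.py` reproduces the Space locally, provided the environment variables the app reads (`HF_TOKEN`, and `OPENCHAT_API_URL` for the OpenChat model) are set.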