Update app.py
app.py (CHANGED)
@@ -19,38 +19,6 @@ FIM_SUFFIX = "<fim_suffix>"
 
 FIM_INDICATOR = "<FILL_HERE>"
 
-FORMATS = """## Model Formats
-The model is pretrained on code and is formatted with special tokens in addition to the pure code data,\
-such as prefixes specifying the source of the file or tokens separating code from a commit message.\
-Use these templates to explore the model's capacities:
-### 1. Prefixes 🏷️
-For pure code files, use any combination of the following prefixes:
-```
-<reponame>REPONAME<filename>FILENAME<gh_stars>STARS\ncode<|endoftext|>
-```
-STARS can be one of: 0, 1-10, 10-100, 100-1000, 1000+
-### 2. Commits 💾
-The commits data is formatted as follows:
-```
-<commit_before>code<commit_msg>text<commit_after>code<|endoftext|>
-```
-### 3. Jupyter Notebooks 📓
-The model is trained on Jupyter notebooks as Python scripts and structured formats like:
-```
-<start_jupyter><jupyter_text>text<jupyter_code>code<jupyter_output>output<jupyter_text>
-```
-### 4. Issues 🐛
-We also trained on GitHub issues using the following formatting:
-```
-<issue_start><issue_comment>text<issue_comment>...<issue_closed>
-```
-### 5. Fill-in-the-middle 🧩
-Fill in the middle requires rearranging the model inputs. The playground handles this for you - all you need is to specify where to fill:
-```
-code before<FILL_HERE>code after
-```
-"""
-
 theme = gr.themes.Monochrome(
     primary_hue="indigo",
     secondary_hue="blue",
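The removed FORMATS text above documents the fill-in-the-middle template (`code before<FILL_HERE>code after`) and says the playground rearranges the inputs for you. As a minimal sketch of what that rearranging step presumably does, assuming the standard StarCoder FIM tokens for the FIM_PREFIX and FIM_MIDDLE constants that are not visible in this hunk:

```python
FIM_PREFIX = "<fim_prefix>"    # assumed counterpart to FIM_SUFFIX in the hunk header
FIM_MIDDLE = "<fim_middle>"    # assumed; marks where the model should generate
FIM_SUFFIX = "<fim_suffix>"    # shown in the hunk header above
FIM_INDICATOR = "<FILL_HERE>"  # defined in the hunk above


def build_fim_prompt(prompt: str) -> str:
    """Rearrange 'code before<FILL_HERE>code after' into FIM token order."""
    if FIM_INDICATOR not in prompt:
        return prompt  # no fill marker: plain left-to-right generation
    before, after = prompt.split(FIM_INDICATOR, 1)
    # The model sees the prefix and suffix, then generates the middle span.
    return f"{FIM_PREFIX}{before}{FIM_SUFFIX}{after}{FIM_MIDDLE}"


print(build_fim_prompt("def add(a, b):\n    return <FILL_HERE>\n"))
```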
@@ -284,19 +252,17 @@ css += share_btn_css + monospace_css + ".gradio-container {color: black}"
 
 description = """
 <div style="text-align: center;">
-    <h1>
+    <h1> TRL + TextEnvironment </h1>
+    <h2> Teaching Language Models to use tools. </h2>
 </div>
 <div style="text-align: left;">
     <p>This is a demo to generate text and code with the following StarCoderBase models:</p>
     <ul>
-        <li><a href="https://huggingface.co/
-        <li><a href="https://huggingface.co/
+        <li><a href="https://huggingface.co/vwxyzjn/starcoderbase-triviaqa" style='color: #e6b800;'>StarCoderBase TriviaQA</a>: A finetuned version of StarCoderBase on the TriviaQA dataset using reinforcement learning via TRL's [TextEnvironment](https://github.com/huggingface/trl/pull/424).</li>
+        <li><a href="https://huggingface.co/lvwerra/starcoderbase-gsm8k" style='color: #e6b800;'>StarCoderBase GSM8K</a>: A finetuned version of StarCoderBase on the GSM8K dataset using reinforcement learning via TRL's [TextEnvironment](https://github.com/huggingface/trl/pull/424).</li>
     </ul>
-    <p><b>Please note:</b> These models are not designed for instruction purposes. If you're looking for instruction or want to chat with a fine-tuned model, you can visit the <a href="https://huggingface.co/spaces/HuggingFaceH4/starchat-playground">StarChat Playground</a>.</p>
 </div>
 """
-disclaimer = """⚠️<b>Any use or sharing of this demo constitues your acceptance of the BigCode [OpenRAIL-M](https://huggingface.co/spaces/bigcode/bigcode-model-license-agreement) License Agreement and the use restrictions included within.</b>\
-<br>**Intended Use**: this app and its [supporting model](https://huggingface.co/bigcode) are provided for demonstration purposes; not to serve as replacement for human expertise. For more details on the model's limitations in terms of factuality and biases, see the [model card.](hf.co/bigcode)"""
 
 with gr.Blocks(theme=theme, analytics_enabled=False, css=css) as demo:
     with gr.Column():
@@ -379,33 +345,18 @@ with gr.Blocks(theme=theme, analytics_enabled=False, css=css) as demo:
         )
 
         gr.Markdown(disclaimer)
-
-        community_icon = gr.HTML(community_icon_html, visible=True)
-        loading_icon = gr.HTML(loading_icon_html, visible=True)
-        share_button = gr.Button(
-            "Share to community", elem_id="share-btn", visible=True
-        )
+
         gr_examples = gr.Examples(
            examples=[example for client in clients.values() for example in client[3]],
            inputs=[instruction],
            cache_examples=False,
        )
 
-        # def update(version):
-        #     return clients[version][2],
-
-        # version.select(
-        #     lambda x: (clients[x][2], clients[x][3]),
-        #     inputs=[version],
-        #     outputs=[system_prompt, gr_examples],
-        # )
 
-        # gr.Markdown(FORMATS)
 
        submit.click(
            generate,
            inputs=[instruction, system_prompt, version, temperature, max_new_tokens, top_p, repetition_penalty],
            outputs=[output, output2],
        )
-        share_button.click(None, [], [], _js=share_js)
 demo.queue(concurrency_count=16).launch(debug=True)
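The kept lines above index a module-level `clients` mapping by position: `client[3]` supplies the example prompts, and the commented-out handler reads `clients[version][2]` as a system prompt. The mapping itself is defined outside this diff; the sketch below is only a plausible shape inferred from those two uses. The model repos come from the description hunk, but the client type, the extra slots, and all prompt text are placeholders.

```python
from huggingface_hub import InferenceClient

# Hypothetical reconstruction of `clients` -- only indices [2] (system prompt)
# and [3] (example prompts) are implied by the diff; everything else is assumed.
clients = {
    "StarCoderBase TriviaQA": (
        InferenceClient(model="vwxyzjn/starcoderbase-triviaqa"),  # [0] assumed: inference client
        "triviaqa",                                               # [1] assumed: task tag
        "Answer the question using the wiki search tool.",        # [2] system prompt (placeholder text)
        ["Which US state has the largest population?"],           # [3] example prompts (placeholder text)
    ),
    "StarCoderBase GSM8K": (
        InferenceClient(model="lvwerra/starcoderbase-gsm8k"),
        "gsm8k",
        "Solve the problem step by step, using the calculator tool when needed.",
        ["What is 12 * 17 - 5?"],
    ),
}

# Mirrors the kept line in the hunk: flatten every client's examples into one list.
all_examples = [example for client in clients.values() for example in client[3]]
print(all_examples)
```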