Update app.py
app.py CHANGED
@@ -4,8 +4,8 @@ import numpy as np
 import gradio as gr
 import spaces
 
-tokenizer = AutoTokenizer.from_pretrained("
-model = AutoModelForCausalLM.from_pretrained("
+tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.2")
+model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-Instruct-v0.2")
 
 print("Loading finished.")
 
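
For context, these two `from_pretrained` calls give the tokenizer/model pair the rest of the app decodes with. A minimal smoke test of that pair might look like the sketch below; it is not part of the Space's code, and the half-precision dtype, prompt, and `max_new_tokens` value are illustrative assumptions.

    import torch
    from transformers import AutoModelForCausalLM, AutoTokenizer

    # Assumption: load in float16 to roughly halve memory versus the default float32.
    tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.2")
    model = AutoModelForCausalLM.from_pretrained(
        "mistralai/Mistral-7B-Instruct-v0.2", torch_dtype=torch.float16
    )

    # Greedily decode a few tokens to confirm the pair round-trips text.
    inputs = tokenizer("The capital of France is", return_tensors="pt")
    output_ids = model.generate(**inputs, max_new_tokens=5, do_sample=False)
    print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
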
@@ -147,7 +147,7 @@ a:before {
 text-decoration-line: none;
 border-radius: 5px;
 transition: .5s;
-width:
+width: 280px;
 display: flex;
 align-items: center;
 justify-content: space-around;
@@ -207,7 +207,8 @@ def generate_nodes(token_ix, node, step):
     """Recursively generate HTML for the tree nodes."""
     token = tokenizer.decode([token_ix])
     html_content = f" <li> <a href='#' class='{('chosen' if node.table is None else '')}'> <span> <b>{token_ix}:<br>{clean(token)}</b> </span> "
-
+    if node.table is not None:
+        html_content += node.table
     html_content += "</a>"
     if len(node.children.keys()) > 0:
         html_content += "<ul> "
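
For readers without app.py open, the hunk above only makes sense against the node structure the function walks. The sketch below is a simplified, hypothetical reconstruction based solely on the attributes used in the diff (`table`, `children`); the real class in app.py almost certainly differs and also decodes the token text.

    from dataclasses import dataclass, field
    from typing import Dict, Optional

    @dataclass
    class Node:
        table: Optional[str] = None   # pre-rendered HTML score table; None marks a chosen node
        children: Dict[int, "Node"] = field(default_factory=dict)  # next token id -> child node

    def render_node(token_ix: int, node: Node) -> str:
        # Recursively build nested <li>/<ul> markup, mirroring the structure in the diff.
        html = f"<li><a href='#' class='{'chosen' if node.table is None else ''}'><b>{token_ix}</b>"
        if node.table is not None:
            html += node.table        # the change in this hunk: show the table when present
        html += "</a>"
        if node.children:
            html += "<ul>" + "".join(render_node(ix, child) for ix, child in node.children.items()) + "</ul>"
        return html + "</li>"
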
@@ -371,7 +372,7 @@ def get_beam_search_html(input_text, number_steps, number_beams, length_penalty)
         output_scores=True,
         do_sample=False,
     )
-    markdown = "
+    markdown = "Output sequences:"
    decoded_sequences = tokenizer.batch_decode(outputs.sequences)
     for i, sequence in enumerate(decoded_sequences):
         markdown += f"\n- {sequence} ( score {outputs.sequences_scores[i]:.2f})"
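
The `outputs` object read here is what `transformers` returns from a beam-search `generate` call once structured output and score reporting are requested. The call itself sits above this hunk and is not shown in the diff; a hedged sketch of what it plausibly looks like, with illustrative parameter mappings:

    inputs = tokenizer(input_text, return_tensors="pt")
    outputs = model.generate(
        **inputs,
        max_new_tokens=number_steps,        # assumption: "steps" maps to generated tokens
        num_beams=number_beams,
        num_return_sequences=number_beams,  # return every beam, not just the best one
        length_penalty=length_penalty,
        return_dict_in_generate=True,       # structured output instead of a bare tensor
        output_scores=True,                 # needed for outputs.sequences_scores
        do_sample=False,
    )
    # outputs.sequences holds the token ids and outputs.sequences_scores the per-beam
    # scores consumed by the loop in the hunk above.
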
@@ -402,11 +403,11 @@ Play with the parameters below to understand how beam search decoding works!
 - **Number of beams**: the number of beams to use
 - **Length penalty**: the length penalty to apply to outputs. `length_penalty` > 0.0 promotes longer sequences, while `length_penalty` < 0.0 encourages shorter sequences.
 """)
-    text = gr.Textbox(label="Sentence to decode from", value="
+    text = gr.Textbox(label="Sentence to decode from", value="Conclusion: thanks a lot. This article was originally published on")
     with gr.Row():
         steps = gr.Slider(label="Number of steps", minimum=1, maximum=8, step=1, value=4)
         beams = gr.Slider(label="Number of beams", minimum=2, maximum=4, step=1, value=3)
-        length_penalty = gr.Slider(label="Length penalty", minimum=-
+        length_penalty = gr.Slider(label="Length penalty", minimum=-4, maximum=4, step=0.5, value=1)
     button = gr.Button()
     out_html = gr.Markdown()
     out_markdown = gr.Markdown()
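
The length-penalty bullet in this hunk matches how `transformers` ranks finished beam hypotheses: each candidate's summed log-probability is divided by its length raised to `length_penalty`. Because those sums are negative, a positive exponent penalises longer sequences less and so favours them, while a negative exponent does the opposite. A small numeric illustration with invented scores:

    def beam_score(sum_logprobs: float, length: int, length_penalty: float) -> float:
        # Length-normalised score used to rank finished beam hypotheses.
        return sum_logprobs / (length ** length_penalty)

    short = (-4.0, 4)   # invented: 4-token hypothesis with total log-prob -4.0
    long = (-7.0, 8)    # invented: 8-token hypothesis with total log-prob -7.0

    for lp in (-1.0, 0.0, 1.0):
        s, l = beam_score(*short, lp), beam_score(*long, lp)
        print(f"length_penalty={lp:+.1f}: short={s:.2f}, long={l:.2f} -> "
              f"{'long' if l > s else 'short'} hypothesis wins")
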
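
Not visible in the diff is the wiring that ties these components together. In a Gradio Blocks app it would typically be a single `click` handler along the lines of the fragment below; the exact call in app.py may differ, and it assumes `get_beam_search_html` returns the HTML tree and the markdown summary in that order.

    # Hypothetical wiring, reusing the component names defined above in app.py.
    button.click(
        fn=get_beam_search_html,
        inputs=[text, steps, beams, length_penalty],
        outputs=[out_html, out_markdown],
    )
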