Commit 868a505 · 1 Parent(s): 1a21a35
Update app.py
app.py CHANGED
@@ -16,7 +16,7 @@ example_contexts=page_py.text.split(f"\n")
 for i in range(len(example_contexts)):
     example_contexts[i]=re.sub(f'\n'," ", example_contexts[i])
 # Recommended words for users to choose from
-recommended_words = [
+recommended_words = word_list = [
     "did",
     "what",
     "how",
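The added chained assignment binds both recommended_words and word_list to the same list object, so later code can refer to it under either name. A minimal standalone illustration of that aliasing (not taken from app.py):

    recommended_words = word_list = ["did", "what", "how"]
    # Both names point at one list; a change made through either name is visible through both.
    word_list.append("why")
    print(recommended_words)               # ['did', 'what', 'how', 'why']
    print(recommended_words is word_list)  # True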
@@ -75,10 +75,8 @@ def generate_qa(context, recommended_word, temperature, top_p,num_seq, num_sampl
     #
     generated_text = tokenizer.batch_decode(output, skip_special_tokens=True)

-    formatted_output = "\n\n".join(
+    formatted_output = "\n\n".join(set(generated_text))
     return formatted_output
-
-# Create the Gradio interface with sliders for temperature and top-p
 iface = gr.Interface(
     fn=generate_qa,
     inputs=[
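The rewritten formatted_output line de-duplicates the decoded outputs before they are shown. A short, self-contained sketch of the effect, with generated_text hard-coded here instead of coming from tokenizer.batch_decode: set() removes exact string duplicates but does not preserve the original generation order.

    # Stand-in for the decoded model outputs (in app.py these come from tokenizer.batch_decode).
    generated_text = [
        "What is parsed? The page text.",
        "How is the text cleaned? With re.sub.",
        "What is parsed? The page text.",  # exact duplicate
    ]

    # Joining the set drops duplicates; the ordering of the survivors is arbitrary.
    formatted_output = "\n\n".join(set(generated_text))
    print(formatted_output)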
@@ -86,13 +84,11 @@ iface = gr.Interface(
         gr.inputs.Radio(recommended_words, label="Choose a Recommended Word"),
         gr.inputs.Slider(minimum=0.0, maximum=5, default=2.1, step=0.01, label="Temperature"),
         gr.inputs.Slider(minimum=0.0, maximum=1, default=0.5, step=0.01, label="Top-p"),
-        gr.inputs.Slider(minimum=1, maximum=20, default=3, step=1, label="num of sequance"
-
+        gr.inputs.Slider(minimum=1, maximum=20, default=3, step=1, label="num of sequance"
     ],
     outputs=gr.outputs.Textbox(label="Generated Output"),
     title="Question Generation and Answering",
     description="Select an example context, choose a recommended word, adjust temperature and top-p. The model will generate questions and answers.",
 )
-
 # Launch the interface
 iface.launch()
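The re-added slider supplies the num_seq argument visible in the generate_qa signature in the hunk header above. The body of generate_qa is not part of this diff, so the following is only a hypothetical sketch of how such a slider value is typically forwarded to Hugging Face generation: model, the prompt format, and max_new_tokens are assumptions, while tokenizer.batch_decode and the gr.inputs.* names come from the diff itself (gr.inputs/gr.outputs are the legacy pre-4.0 Gradio namespaces).

    def generate_qa(context, recommended_word, temperature, top_p, num_seq, num_samples=1):
        # num_samples appears in the signature shown in the hunk header; unused in this sketch.
        # Hypothetical prompt format; the real one is not shown in this diff.
        prompt = f"{recommended_word}: {context}"
        inputs = tokenizer(prompt, return_tensors="pt")
        output = model.generate(
            **inputs,
            do_sample=True,                      # sampling lets num_return_sequences > 1 without beam search
            temperature=temperature,             # value from the Temperature slider
            top_p=top_p,                         # value from the Top-p slider
            num_return_sequences=int(num_seq),   # value from the added "num of sequance" slider (Gradio passes a number)
            max_new_tokens=64,                   # assumed cap; not part of the diff
        )
        generated_text = tokenizer.batch_decode(output, skip_special_tokens=True)
        return "\n\n".join(set(generated_text))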
|