Commit e8a3ed8
Parent(s): 3292ea1
Update app.py
app.py CHANGED
@@ -2,7 +2,7 @@ import gradio as gr
 from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
 
 # Load the tokenizer and model
-model_name = "mohamedemam/
+model_name = "mohamedemam/QA_GeneraToR"
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
 
@@ -44,7 +44,9 @@ def generate_qa(context, recommended_word, temperature, top_p, num_samples=3):
     )
 
     generated_text = tokenizer.batch_decode(output, skip_special_tokens=True)
-
+
+    formatted_output = "\n\n".join([f"Original Context: {context}", "Generated Sentences:"] + generated_text)
+    return formatted_output
 
 # Create the Gradio interface with sliders for temperature and top-p
 iface = gr.Interface(
@@ -52,13 +54,13 @@ iface = gr.Interface(
     inputs=[
         gr.inputs.Dropdown(example_contexts, label="Choose an Example"),
         gr.inputs.Radio(recommended_words, label="Choose a Recommended Word"),
-        gr.inputs.Slider(minimum=0.0, maximum=2, default=2.1, step=0.
-        gr.inputs.Slider(minimum=0.0, maximum=1, default=0.5, step=0.
+        gr.inputs.Slider(minimum=0.0, maximum=2, default=2.1, step=0.01, label="Temperature"),
+        gr.inputs.Slider(minimum=0.0, maximum=1, default=0.5, step=0.01, label="Top-p")
     ],
-    outputs=gr.outputs.
+    outputs=gr.outputs.Textbox(label="Generated Output"),
     title="Question Generation and Answering",
    description="Select an example context, choose a recommended word, adjust temperature and top-p. The model will generate questions and answers.",
 )
 
 # Launch the interface
-iface.launch()
+iface.launch(shring=True)
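
For context, the first two hunks pin the checkpoint to mohamedemam/QA_GeneraToR and make generate_qa return a formatted string instead of the raw batch_decode list. Below is a minimal sketch of that function; the prompt format and the max_new_tokens cap are assumptions, since the diff only shows the tail of the function.

from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Same checkpoint as in the committed change
model_name = "mohamedemam/QA_GeneraToR"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)

def generate_qa(context, recommended_word, temperature, top_p, num_samples=3):
    # Assumed prompt format; the commit does not show how the model input is built
    prompt = f"{recommended_word}: {context}"
    inputs = tokenizer(prompt, return_tensors="pt")
    output = model.generate(
        **inputs,
        do_sample=True,
        temperature=temperature,
        top_p=top_p,
        num_return_sequences=num_samples,
        max_new_tokens=64,  # assumed cap, not shown in the diff
    )
    generated_text = tokenizer.batch_decode(output, skip_special_tokens=True)
    # Formatting added by this commit: a context header, a label, then each sampled sequence
    formatted_output = "\n\n".join(
        [f"Original Context: {context}", "Generated Sentences:"] + generated_text
    )
    return formatted_output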
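
The interface hunk still uses the legacy gr.inputs / gr.outputs namespaces, which were removed in newer Gradio releases; the Temperature slider's committed default of 2.1 also lies above its maximum of 2, and shring=True in the launch line looks like a typo for share=True. Below is a sketch of the same interface in current Gradio syntax, with stand-in example lists and a stub generate_qa, since those parts sit outside the diff.

import gradio as gr

# Stand-ins for example_contexts, recommended_words and generate_qa,
# which are defined earlier in the real app.py
example_contexts = ["The Nile is the longest river in Africa."]
recommended_words = ["what", "where", "who"]

def generate_qa(context, recommended_word, temperature, top_p):
    return f"Original Context: {context}\n\nGenerated Sentences:\n({recommended_word} ...)"

iface = gr.Interface(
    fn=generate_qa,
    inputs=[
        gr.Dropdown(example_contexts, label="Choose an Example"),
        gr.Radio(recommended_words, label="Choose a Recommended Word"),
        # value= replaces the legacy default=; 1.0 keeps the default inside the slider range
        gr.Slider(minimum=0.0, maximum=2.0, value=1.0, step=0.01, label="Temperature"),
        gr.Slider(minimum=0.0, maximum=1.0, value=0.5, step=0.01, label="Top-p"),
    ],
    outputs=gr.Textbox(label="Generated Output"),
    title="Question Generation and Answering",
    description="Select an example context, choose a recommended word, adjust temperature and top-p. The model will generate questions and answers.",
)

# share=True is presumably what the committed shring=True intended
iface.launch(share=True)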