Commit 35e7167
Parent(s): 212a5f9
Update app.py

app.py CHANGED
@@ -6,10 +6,21 @@ model_name = "mohamedemam/QA_GeneraTor"
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
 
+# Recommended words for users to choose from
+recommended_words = [
+    "which", "how", "when", "where", "who", "whom", "whose", "why",
+    "which", "who", "whom", "whose", "whereas",
+    "can", "could", "may", "might", "will", "would", "shall", "should",
+    "do", "does", "did", "is", "are", "am", "was", "were", "be", "being", "been",
+    "have", "has", "had", "if", "is", "are", "am", "was", "were", "do", "does", "did", "can", "could",
+    "will", "would", "shall", "should", "might", "may", "must",
+    "may", "might", "must",
+]
+
 # Function to generate questions and answers with configurable parameters
-def generate_qa(context, temperature, top_p):
-    input_text = f"
-    input_ids = tokenizer(input_text,max_length=400,truncation=True,padding="max_length",return_tensors='pt')
+def generate_qa(context, recommended_word, temperature, top_p):
+    input_text = f"{recommended_word}: {context}"
+    input_ids = tokenizer(input_text, max_length=400, truncation=True, padding="max_length", return_tensors='pt')
 
 # Generate with configurable parameters
 output = model.generate(
@@ -24,11 +35,15 @@ def generate_qa(context, temperature, top_p):
 # Create the Gradio interface with sliders for temperature and top-p
 iface = gr.Interface(
     fn=generate_qa,
-    inputs=[
-
+    inputs=[
+        "text",
+        gr.inputs.Dropdown(recommended_words, label="Choose a Recommended Word"),
+        gr.inputs.Slider(minimum=0, maximum=4, default=2.2, step=0.5, label="Temperature"),
+        gr.inputs.Slider(minimum=0.1, maximum=1, default=0.2, step=0.5, label="Top-p")
+    ],
     outputs="text",
     title="Question Generation and Answering",
-    description="Enter a context, adjust temperature and top-p
+    description="Enter a context, choose a recommended word, and adjust temperature and top-p. The model will generate a question and answer.",
 )
 
 # Launch the interface
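
Both hunks cut off at the model.generate( call, so the sampling arguments wired to the sliders are not visible in this view. A minimal sketch of how the body of generate_qa plausibly continues with standard transformers sampling arguments (illustrative only, not the commit's actual code):

    # Hypothetical continuation inside generate_qa (not shown in the diff).
    # tokenizer(...) returned a BatchEncoding, so unpack it with ** to pass
    # input_ids and attention_mask through to generate().
    output = model.generate(
        **input_ids,
        max_length=400,
        do_sample=True,          # temperature/top_p only take effect when sampling
        temperature=temperature,
        top_p=top_p,
    )
    return tokenizer.decode(output[0], skip_special_tokens=True)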
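
The recommended_words list added in the first hunk repeats several entries ("which", "who", "whom", "whose", "may", "might", "must", and others appear more than once), so the dropdown will show duplicate choices. If that is unwanted, an order-preserving de-duplication is a one-liner (hypothetical cleanup, not part of the commit):

    # dict.fromkeys preserves insertion order (Python 3.7+), so this drops
    # duplicate choices without reshuffling the list.
    recommended_words = list(dict.fromkeys(recommended_words))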
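
One compatibility note: gr.inputs.Dropdown and gr.inputs.Slider come from Gradio's legacy pre-3.0 namespace, which was deprecated in Gradio 3 and removed in Gradio 4, where the default= keyword is also spelled value=. A sketch of the same interface with the current top-level components, assuming the Space runs a recent Gradio (the API is swapped; everything else mirrors the commit):

    import gradio as gr

    iface = gr.Interface(
        fn=generate_qa,
        inputs=[
            gr.Textbox(label="Context"),
            gr.Dropdown(choices=recommended_words, label="Choose a Recommended Word"),
            gr.Slider(minimum=0, maximum=4, value=2.2, step=0.5, label="Temperature"),
            # step=0.5 over the 0.1-1 range leaves only 0.1 and 0.6 selectable;
            # a finer step (e.g. 0.05) would make the Top-p slider usable.
            gr.Slider(minimum=0.1, maximum=1, value=0.2, step=0.5, label="Top-p"),
        ],
        outputs="text",
        title="Question Generation and Answering",
        description="Enter a context, choose a recommended word, and adjust temperature and top-p. The model will generate a question and answer.",
    )

    iface.launch()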