import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Load the tokenizer and model
model_name = "mohamedemam/QA_GeneraTor"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
# Recommended question and auxiliary words for users to choose from
recommended_words = [
    "which", "how", "when", "where", "who", "whom", "whose", "why", "whereas",
    "can", "could", "may", "might", "will", "would", "shall", "should", "must",
    "do", "does", "did", "is", "are", "am", "was", "were", "be", "being", "been",
    "have", "has", "had", "if",
]
# Function to generate questions and answers with configurable sampling parameters
def generate_qa(context, recommended_word, temperature, top_p, num_sequences):
    input_text = f"{recommended_word}: {context.lower()}"
    input_ids = tokenizer(input_text, max_length=400, truncation=True, padding="max_length", return_tensors="pt")
    # Sampling must be enabled, otherwise temperature/top_p are ignored and
    # num_return_sequences > 1 is rejected by greedy decoding
    output = model.generate(
        **input_ids,
        do_sample=True,
        temperature=temperature,
        top_p=top_p,
        num_return_sequences=num_sequences,
    )
    generated_text = tokenizer.batch_decode(output, skip_special_tokens=True)
    # Join multiple generated sequences into a single text output
    return "\n".join(generated_text)
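# Illustrative sanity check (example values are assumptions, not part of the original app);
# uncomment to exercise generate_qa directly before the UI is built:
# print(generate_qa("the nile is the longest river in africa.", "which", 1.0, 0.9, 1))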
# Create the Gradio interface with sliders for temperature, top-p, and the number of sequences
iface = gr.Interface(
    fn=generate_qa,
    inputs=[
        gr.Textbox(label="Context"),
        gr.Radio(recommended_words, label="Choose a Recommended Word"),
        gr.Slider(minimum=0.05, maximum=4, value=1, step=0.05, label="Temperature"),
        gr.Slider(minimum=0.1, maximum=1, value=0.2, step=0.05, label="Top-p"),
        gr.Slider(minimum=1, maximum=50, value=1, step=1, label="num_return_sequences"),
    ],
    outputs="text",
    title="Question Generation and Answering",
    description="Enter a context, choose a recommended word, and adjust temperature, top-p, and the number of returned sequences. The model will generate a question and answer.",
)
# Launch the interface
iface.launch()
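# Optional, as a sketch: once the app is running, the same endpoint can be queried
# programmatically with the gradio_client package (the URL and api_name below are
# assumptions for a local launch; adjust them for the deployed Space):
#   from gradio_client import Client
#   client = Client("http://127.0.0.1:7860")
#   print(client.predict("some context text", "which", 1.0, 0.9, 1, api_name="/predict"))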