# QA_GeneraToR / app.py
import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
# Load the tokenizer and model
model_name = "mohamedemam/QA_GeneraTor"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
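# Inference runs on CPU by default; if a GPU is available, the model could be
# moved over with model.to("cuda") (input tensors would need .to("cuda") too).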
# Function to generate a question and answer with configurable sampling parameters
def generate_qa(context, temperature, top_p):
    # Task prefix prepended to the context
    input_text = f"Question answer: {context}"
    # Tokenize; .input_ids extracts the tensor that generate() expects
    input_ids = tokenizer(
        input_text,
        max_length=400,
        truncation=True,
        padding="max_length",
        return_tensors="pt",
    ).input_ids
    # Generate with configurable parameters; do_sample=True is required
    # for temperature and top_p to have any effect
    output = model.generate(
        input_ids,
        max_length=150,
        num_return_sequences=1,
        no_repeat_ngram_size=2,
        do_sample=True,
        temperature=temperature,
        top_p=top_p,
    )
    generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
    return generated_text
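# Illustrative direct call, outside the UI (the context sentence here is an
# arbitrary example; output varies with the model weights and sampling settings):
# generate_qa("The Nile is the longest river in Africa.", temperature=1.0, top_p=0.8)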
# Create the Gradio interface with sliders for temperature and top-p
iface = gr.Interface(
    fn=generate_qa,
    inputs=[
        gr.Textbox(label="Context"),
        gr.Slider(minimum=0.2, maximum=2, value=1, step=0.1, label="Temperature"),
        gr.Slider(minimum=0.1, maximum=1, value=0.8, step=0.1, label="Top-p"),
    ],
    outputs="text",
    title="Question Generation and Answering",
    description="Enter a context, adjust temperature and top-p, and the model will generate a question and answer.",
)
# Launch the interface
iface.launch()
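# launch() serves on localhost by default; iface.launch(share=True) would also
# create a temporary public link when running outside a hosted Space.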