import gradio as gr
import torch
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
# Load the model and tokenizer from Hugging Face
model_name = "ambrosfitz/history-qa-t5-base"
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)
# Set device
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
def generate_qa(text, max_length=512):
    input_text = f"Generate question: {text}"
    input_ids = tokenizer(input_text, return_tensors="pt", max_length=max_length, truncation=True).input_ids.to(device)

    # Sample a single sequence; do_sample with a moderate temperature adds some variety
    with torch.no_grad():
        outputs = model.generate(input_ids, max_length=max_length, num_return_sequences=1, do_sample=True, temperature=0.7)
    generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)

    # Parse the generated text, which is expected to follow the pattern:
    # "Question: ... Options: ... Correct Answer: ... Explanation: ..."
    try:
        parts = generated_text.split("Question: ")
        if len(parts) > 1:
            qa_parts = parts[1].split("Options:")
            question = qa_parts[0].strip()
            options_and_answer = qa_parts[1].split("Correct Answer:")
            options = options_and_answer[0].strip()
            answer_and_explanation = options_and_answer[1].split("Explanation:")
            correct_answer = answer_and_explanation[0].strip()
            explanation = answer_and_explanation[1].strip() if len(answer_and_explanation) > 1 else "No explanation provided."
            return f"Question: {question}\n\nOptions: {options}\n\nCorrect Answer: {correct_answer}\n\nExplanation: {explanation}"
    except IndexError:
        # The model output was missing one of the expected sections; fall through to the error message
        pass
    return "Unable to generate a proper question and answer. Please try again with a different input."
# Define the Gradio interface
iface = gr.Interface(
    fn=generate_qa,
    inputs=gr.Textbox(lines=5, label="Enter historical text"),
    outputs=gr.Textbox(label="Generated Q&A"),
    title="History Q&A Generator",
    description="Enter a piece of historical text, and the model will generate a related question, answer options, correct answer, and explanation."
)
# Launch the app
if __name__ == "__main__":
    iface.launch()