# app.py — Hugging Face Space by Jaehan (commit 3e46a2a, 790 bytes).
# (Provenance header reconstructed from the HF web-UI chrome captured in the scrape.)
# AutoModelForSeq2SeqLM is the current auto class for encoder-decoder models
# like T5; AutoModelWithLMHead is deprecated and removed in newer transformers.
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
import gradio as gr

# T5 fine-tuned for answer-aware question generation: given a context passage
# and an answer span, it generates a question whose answer is that span.
model_name = "mrm8488/t5-base-finetuned-question-generation-ap"
text2text_tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
def text2text(context, answer):
    """Generate a question from *context* whose answer is *answer*.

    Uses the T5 question-generation prompt format expected by this
    checkpoint: "answer: ... context: ...".

    Args:
        context: Passage of English text to ask about.
        answer: The answer span the generated question should target.

    Returns:
        The generated question as a string.
    """
    input_text = f"answer: {answer} context: {context}"
    features = text2text_tokenizer([input_text], return_tensors="pt")
    output = model.generate(
        input_ids=features["input_ids"],
        attention_mask=features["attention_mask"],
        max_length=100,
    )
    # skip_special_tokens strips <pad>/</s> markers from the decoded text.
    response = text2text_tokenizer.decode(output[0], skip_special_tokens=True)
    # BUG FIX: the original computed `response` but never returned it,
    # so the Gradio output box would always be empty.
    return response
# Build the Gradio UI: two inputs (context passage + answer span) and one
# output box for the generated question.
context = gr.Textbox(lines=10, label="English", placeholder="Context")
answer = gr.Textbox(lines=1, label="Answer")
# BUG FIX: `out` was originally used on its own definition line (NameError),
# and the app was constructed with gr.Textbox instead of gr.Interface, so it
# could never launch. Define the output component first, then wire the
# Interface around the text2text function.
out = gr.Textbox(lines=1, label="Generated question")
gr.Interface(fn=text2text, inputs=[context, answer], outputs=out).launch()