import gradio as gr
import nltk
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

nltk.download('punkt')

# Load the tokenizer and the fine-tuned BART model once at startup
# rather than on every request.
model_name = "anukvma/bart-aiml-question-answer-v2"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)


def generate_answer(question):
    # Wrap the question in the prompt format the model expects.
    inputs = ["Answer this AIML Question: " + question]
    inputs = tokenizer(inputs, max_length=256, truncation=True, return_tensors="pt")
    # Generate with beam sampling, then decode back to text.
    output = model.generate(**inputs, num_beams=8, do_sample=True, min_length=1, max_length=512)
    decoded_output = tokenizer.batch_decode(output, skip_special_tokens=True)[0]
    # Keep only the first sentence of the generated answer.
    answer = nltk.sent_tokenize(decoded_output.strip())[0]
    return answer


# Gradio UI: a multi-line question textbox in, an answer textbox out.
iface = gr.Interface(
    fn=generate_answer,
    inputs=[gr.Textbox(lines=5, label="Question")],
    outputs=gr.Textbox(label="Answer"),
)

iface.launch()
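
# Optional local sanity check, bypassing the Gradio UI. The question below is a
# made-up example and the module name assumes this file is saved as app.py:
#   from app import generate_answer
#   print(generate_answer("What is overfitting in machine learning?"))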