# ArpitTestBert / app.py
from transformers import AutoModelForQuestionAnswering, AutoTokenizer, pipeline
import gradio as gr
import torch
# Metadata for the DialoGPT chat demo below (currently disabled; unused by the QA app).
title = "🤖AI ChatBot"
description = "A State-of-the-Art Large-scale Pretrained Response generation model (DialoGPT)"
examples = [["How are you?"]]
# Load a BERT model fine-tuned on SQuAD and build an extractive question-answering pipeline.
model = AutoModelForQuestionAnswering.from_pretrained(
    "bert-large-uncased-whole-word-masking-finetuned-squad"
)
tokenizer = AutoTokenizer.from_pretrained("bert-large-uncased-whole-word-masking-finetuned-squad")
nlp = pipeline("question-answering", model=model, tokenizer=tokenizer)
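# Quick sanity check of the pipeline (hypothetical inputs, run locally):
#   nlp(question="Who wrote Hamlet?",
#       context="Hamlet is a tragedy written by William Shakespeare.")
# returns a dict like {"score": ..., "start": ..., "end": ..., "answer": "William Shakespeare"}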
# DialoGPT chat path (disabled); predict() below expects this model/tokenizer:
# from transformers import AutoModelForCausalLM
# tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-large")
# model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-large")
def predict(input, history=[]):
    # NOTE: this function is not wired into the Gradio app and will fail with the
    # BERT QA model loaded above (BERT's tokenizer defines no eos_token).
    # Tokenize the new user message, terminated by the end-of-sequence token.
    new_user_input_ids = tokenizer.encode(
        input + tokenizer.eos_token, return_tensors="pt"
    )
    # Append the new user input tokens to the chat history.
    bot_input_ids = torch.cat([torch.LongTensor(history), new_user_input_ids], dim=-1)
    # Generate a response (DialoGPT's context window is 1024 tokens, so a
    # max_length of 4000 cannot actually be reached).
    history = model.generate(
        bot_input_ids, max_length=4000, pad_token_id=tokenizer.eos_token_id
    ).tolist()
    # Decode the full token history and split it on the end-of-sequence marker.
    response = tokenizer.decode(history[0]).split("<|endoftext|>")
    # Pair consecutive turns into (user, bot) tuples for display.
    response = [
        (response[i], response[i + 1]) for i in range(0, len(response) - 1, 2)
    ]
    return response, history
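# Minimal sketch of wiring predict() into a chat UI if the DialoGPT path is
# re-enabled (assumes Gradio's "state" component to round-trip the token
# history and a "chatbot" output to render the (user, bot) tuples):
# chat_app = gr.Interface(
#     fn=predict,
#     inputs=["text", "state"],
#     outputs=["chatbot", "state"],
#     title=title,
#     description=description,
#     examples=examples,
# )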
def func(context, question):
    # Run extractive QA over the supplied context and return only the answer span.
    result = nlp(question=question, context=context)
    return result["answer"]
app = gr.Interface(
    fn=func,
    inputs=["textbox", "text"],
    outputs="textbox",
    title="Question Answering bot",
    theme="dark-grass",
    description="Input context and question, then get answers!",
)
app.launch(inline=False)