from transformers import AutoModelForCausalLM, AutoModelForQuestionAnswering, AutoTokenizer, pipeline
import gradio as gr
import torch


# UI copy for the DialoGPT chat demo (see the chat interface sketch below).
title = "🤖AI ChatBot"
description = "A State-of-the-Art Large-Scale Pretrained Response Generation Model (DialoGPT)"
examples = [["How are you?"]]

# Extractive question answering: BERT large (whole-word masking) fine-tuned on
# SQuAD. The PyTorch auto class is used instead of the original
# TFAutoModelForQuestionAnswering so the script depends on a single framework
# (torch is already required by predict() below).
qa_model = AutoModelForQuestionAnswering.from_pretrained(
    "bert-large-uncased-whole-word-masking-finetuned-squad"
)
qa_tokenizer = AutoTokenizer.from_pretrained(
    "bert-large-uncased-whole-word-masking-finetuned-squad"
)
nlp = pipeline("question-answering", model=qa_model, tokenizer=qa_tokenizer)
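
# A quick illustration of the pipeline's output shape (a sketch, not part of
# the original script; the exact score and character offsets depend on the
# model):
#   nlp(question="Where does Sarah live?",
#       context="My name is Sarah and I live in London.")
#   -> {"score": ..., "start": ..., "end": ..., "answer": "London"}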

# Response generation for the chat demo. The original script reused the BERT
# QA model here, but that model has no generate() method and its tokenizer has
# no EOS token, so a dedicated causal LM is needed. The checkpoint size is an
# assumption: the description above only says "DialoGPT".
chat_tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-large")
chat_model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-large")


def predict(message, history=[]):
    # Tokenize the new user message, appending DialoGPT's EOS token as the
    # turn separator.
    new_user_input_ids = chat_tokenizer.encode(
        message + chat_tokenizer.eos_token, return_tensors="pt"
    )

    # Prepend the running conversation; on the first turn, history=[] becomes
    # an empty 1-D tensor, which torch.cat accepts alongside the 2-D input.
    bot_input_ids = torch.cat([torch.LongTensor(history), new_user_input_ids], dim=-1)

    # Generate the reply. DialoGPT's context window is 1024 tokens, so
    # max_length=4000 is only a loose cap for short conversations.
    history = chat_model.generate(
        bot_input_ids, max_length=4000, pad_token_id=chat_tokenizer.eos_token_id
    ).tolist()

    # Decode the full conversation, split it on the EOS token, and pair
    # alternating turns as (user, bot) tuples for Gradio's Chatbot component.
    response = chat_tokenizer.decode(history[0]).split("<|endoftext|>")
    response = [
        (response[i], response[i + 1]) for i in range(0, len(response) - 1, 2)
    ]
    return response, history
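
# A sketch of how predict() could be exposed as its own chat UI (an addition,
# not in the original script, and not launched here). Gradio's "chatbot" and
# "state" components match predict()'s (response, history) return values;
# the examples list above is left out of the sketch for simplicity.
chat_app = gr.Interface(
    fn=predict,
    inputs=["text", "state"],
    outputs=["chatbot", "state"],
    title=title,
    description=description,
)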


def func(context, question):
    # Run extractive QA; the pipeline returns a dict with "answer", "score",
    # "start", and "end" keys.
    result = nlp(question=question, context=context)
    return result["answer"]


app = gr.Interface(
    fn=func,
    inputs=["textbox", "text"],
    outputs="textbox",
    title="Question Answering Bot",
    theme="dark-grass",
    description="Input a context and a question, then get an answer!",
)

# inline=False keeps the interface from rendering inline in a notebook cell.
app.launch(inline=False)