Jhoeel committed on
Commit
8b1b93d
·
1 Parent(s): b2c7643

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +31 -0
app.py ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
3
+
4
# Load the DialoGPT conversational checkpoint once at module import so the
# predict() handler can reuse the same tokenizer/model across requests.
checkpoint = "microsoft/DialoGPT-medium"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForCausalLM.from_pretrained(checkpoint)
6
+
7
def predict(input, history=None):
    """Generate the next DialoGPT chat reply.

    Parameters
    ----------
    input : str
        The new user message.
    history : list | None
        Token-id history from the previous turn (the nested list produced by
        ``model.generate(...).tolist()``), or ``None``/``[]`` on the first turn.

    Returns
    -------
    tuple
        ``(pairs, history)`` where ``pairs`` is a list of ``(user, bot)``
        message tuples for the Chatbot widget and ``history`` is the updated
        token-id history to store back into ``gr.State``.
    """
    # NOTE: `input` shadows the builtin; the name is kept so existing callers
    # (including keyword callers) are unaffected.
    # `history=None` instead of `history=[]` avoids the shared
    # mutable-default-argument pitfall.
    if history is None:
        history = []

    # Tokenize the new user message, terminated by the EOS token.
    new_user_input_ids = tokenizer.encode(input + tokenizer.eos_token, return_tensors='pt')

    # Append the new tokens to the running history. On the first turn the
    # history is empty, and torch.cat of an empty 1-D tensor with a 2-D
    # tensor is rejected by modern PyTorch — skip the concat in that case.
    if history:
        bot_input_ids = torch.cat([torch.LongTensor(history), new_user_input_ids], dim=-1)
    else:
        bot_input_ids = new_user_input_ids

    # Generate a response (default decoding, capped at 1000 total tokens).
    history = model.generate(bot_input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id).tolist()

    # Decode the whole conversation and split it on the EOS marker into
    # alternating user/bot utterances.
    response = tokenizer.decode(history[0]).split("<|endoftext|>")

    # Pair the utterances up as (user, bot) tuples for the Chatbot component.
    response = [(response[i], response[i + 1]) for i in range(0, len(response) - 1, 2)]
    return response, history
21
+
22
# Build the Gradio chat UI. Requires `import gradio as gr` at module top —
# the name `gr` is not defined anywhere in this block itself.
with gr.Blocks() as demo:
    chatbot = gr.Chatbot()   # renders the (user, bot) pairs returned by predict()
    state = gr.State([])     # per-session token-id history threaded through predict()

    with gr.Row():
        # NOTE(review): `.style(container=False)` is Gradio 3.x API and was
        # removed in Gradio 4.x — confirm the pinned gradio version.
        txt = gr.Textbox(show_label=False, placeholder="Enter text and press enter").style(container=False)

    # On Enter: predict(txt, state) -> (chatbot, state)
    txt.submit(predict, [txt, state], [chatbot, state])

demo.launch()