carblacac committed on
Commit
a3df692
1 Parent(s): 6c6b22d

Add application file

Files changed (1)
  1. app.py +72 -0
app.py ADDED
@@ -0,0 +1,72 @@
+ from transformers import BlenderbotTokenizer, BlenderbotForConditionalGeneration
+ import torch
+
+ import gradio as gr
+
+ mname = "facebook/blenderbot-400M-distill"
+ model = BlenderbotForConditionalGeneration.from_pretrained(mname)
+ tokenizer = BlenderbotTokenizer.from_pretrained(mname)
+
+
+ def take_last_tokens(inputs, note_history, history):
+     """Keep only the last 128 tokens; drop the oldest turn when the context is longer."""
+     if inputs['input_ids'].shape[1] > 128:
+         inputs['input_ids'] = torch.tensor([inputs['input_ids'][0][-128:].tolist()])
+         inputs['attention_mask'] = torch.tensor([inputs['attention_mask'][0][-128:].tolist()])
+         note_history = ['</s> <s>'.join(note_history[0].split('</s> <s>')[2:])]
+         history = history[1:]
+
+     return inputs, note_history, history
+
+
+ def add_note_to_history(note, note_history):
+     """Add a note to the conversation history, joined with the '</s> <s>' separator."""
+     note_history.append(note)
+     note_history = '</s> <s>'.join(note_history)
+     return [note_history]
+
+
+ title = "Maintain a conversation with the bot"
+ description = """
+ <p style="text-align:center">The bot has been trained to chat with you about whatever you want. Let's talk!</p>
+
+ <center><img src="https://user-images.githubusercontent.com/105242658/176054244-525c6530-1e78-42c7-8688-91dfedf8db58.png" width=300px></center>
+ <p style="text-align:center">Image generated from text using DALL·E mini</p>
+ """
+ # https://user-images.githubusercontent.com/105242658/176054244-525c6530-1e78-42c7-8688-91dfedf8db58.png
+ # https://www.craiyon.com/
+
+ def chat(message, history):
+     history = history or []
+     if history:
+         history_useful = ['</s> <s>'.join([str(a[0]) + '</s> <s>' + str(a[1]) for a in history])]
+     else:
+         history_useful = []
+
+     history_useful = add_note_to_history(message, history_useful)
+     # Generate the bot's response and add it to the history
+     inputs = tokenizer(history_useful, return_tensors="pt")
+     inputs, history_useful, history = take_last_tokens(inputs, history_useful, history)
+
+     reply_ids = model.generate(**inputs)
+     response = tokenizer.batch_decode(reply_ids, skip_special_tokens=True)[0]
+     history_useful = add_note_to_history(response, history_useful)
+
+     # Split the joined string back into turns and store the latest (user, bot) pair
+     list_history = history_useful[0].split('</s> <s>')
+     history.append((list_history[-2], list_history[-1]))
+
+     return history, history
+
+
+ iface = gr.Interface(
+     chat,
+     ["text", "state"],
+     ["chatbot", "state"],
+     theme="huggingface",
+     title=title,
+     description=description,
+     examples=[["What are you doing?"], ["Where would you like to travel?"]],
+     allow_flagging="never",
+ )
+ iface.launch()
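
The heart of chat() above is a single tokenize → generate → decode round-trip. A minimal standalone sketch of that loop (not part of this commit; it duplicates the facebook/blenderbot-400M-distill setup from app.py, and the prompt string is just an example) can be used as a quick sanity check of the model outside Gradio:

from transformers import BlenderbotTokenizer, BlenderbotForConditionalGeneration

# Same model and tokenizer as app.py
mname = "facebook/blenderbot-400M-distill"
model = BlenderbotForConditionalGeneration.from_pretrained(mname)
tokenizer = BlenderbotTokenizer.from_pretrained(mname)

# One round-trip: tokenize the prompt, generate reply ids, decode them back to text
inputs = tokenizer(["What are you doing?"], return_tensors="pt")
reply_ids = model.generate(**inputs)
print(tokenizer.batch_decode(reply_ids, skip_special_tokens=True)[0])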