thenHung committed

Commit e7ea662 · Parent(s): 252dc03

Update app.py

Files changed (1):
  1. app.py (+30 -32)
app.py CHANGED
@@ -1,51 +1,49 @@
-from transformers import AutoModelForCausalLM, AutoTokenizer
 import gradio as gr
+from transformers import AutoModelForCausalLM, AutoTokenizer
 import torch
 
-
-title = "🤖AI ChatBot"
+title = "🤖 AI ChatBot"
 description = "A State-of-the-Art Large-scale Pretrained Response generation model (DialoGPT)"
 examples = [["How are you?"]]
 
-
 tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-large")
 model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-large")
 
-# tokenizer = AutoTokenizer.from_pretrained("Salesforce/xgen-7b-8k-base", trust_remote_code=True)
-# model = AutoModelForCausalLM.from_pretrained("Salesforce/xgen-7b-8k-base", torch_dtype=torch.bfloat16)
+def generate_response(input_text, chat_history=[]):
+    # Tokenize the new input sentence
+    new_user_input_ids = tokenizer.encode(input_text + tokenizer.eos_token, return_tensors="pt")
 
+    # Append the new user input tokens to the chat history
+    bot_input_ids = torch.cat([torch.tensor(chat_history), new_user_input_ids], dim=-1)
 
-def predict(input, history=[]):
-    # tokenize the new input sentence
-    new_user_input_ids = tokenizer.encode(
-        input + tokenizer.eos_token, return_tensors="pt"
-    )
+    # Generate a response
+    chat_output = model.generate(bot_input_ids, max_length=4000, pad_token_id=tokenizer.eos_token_id)
 
-    # append the new user input tokens to the chat history
-    bot_input_ids = torch.cat([torch.LongTensor(history), new_user_input_ids], dim=-1)
+    # Decode the response tokens into text
+    response = tokenizer.decode(chat_output[:, bot_input_ids.shape[-1]:][0], skip_special_tokens=True)
 
-    # generate a response
-    history = model.generate(
-        bot_input_ids, max_length=4000, pad_token_id=tokenizer.eos_token_id
-    ).tolist()
+    return response
 
-    # convert the tokens to text, and then split the responses into lines
-    response = tokenizer.decode(history[0]).split("<|endoftext|>")
-    # print('decoded_response-->>'+str(response))
-    response = [
-        (response[i], response[i + 1]) for i in range(0, len(response) - 1, 2)
-    ]  # convert to tuples of list
-    # print('response-->>'+str(response))
-    return response, history
+def chatbot_interface(input_text):
+    # Generate response based on input text and chat history
+    response = generate_response(input_text, chat_history)
 
+    # Append the input and response to the chat history
+    chat_history.append(tokenizer.encode(input_text + response))
 
-gr.Interface(
-    fn=predict,
+    return response
+
+chat_history = []  # Initialize chat history
+
+iface = gr.Interface(
+    fn=chatbot_interface,
+    inputs=gr.inputs.Textbox(lines=2, label="Chat"),
+    outputs=gr.outputs.Textbox(label="Response"),
+    layout="vertical",
     title=title,
     description=description,
     examples=examples,
-    inputs=["text", "state"],
-    outputs=["chatbot", "state"],
-    # theme="finlaymacklon/boxy_violet",
-    theme="london",
-).launch()
+    theme="london"
+)
+
+iface.launch()
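
Note on the new decode step: a minimal toy sketch of the slicing idiom generate_response uses, where model.generate returns the prompt ids followed by the newly generated ids, so slicing at the prompt length keeps only the reply. The tensor values below are made-up stand-ins, not real token ids:

    import torch

    # generate() output = prompt ids followed by new ids, so slicing at the
    # prompt length isolates just the reply tokens.
    bot_input_ids = torch.tensor([[101, 102, 103]])         # stand-in prompt ids
    chat_output = torch.tensor([[101, 102, 103, 7, 8, 9]])  # stand-in generate() output
    reply_ids = chat_output[:, bot_input_ids.shape[-1]:]    # tensor([[7, 8, 9]])
    print(reply_ids[0].tolist())                            # [7, 8, 9]

One caveat: gr.inputs.Textbox and gr.outputs.Textbox come from Gradio's legacy namespaced API (newer releases expose these directly as gr.Textbox), so the file assumes the older gradio version this Space was built against.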