Andrey Vorozhko committed on
Commit
8571554
·
1 Parent(s): 7ea8290

Exact working version in Colab

Browse files
Files changed (1) hide show
  1. app.py +10 -8
app.py CHANGED
@@ -1,15 +1,17 @@
 
 
1
  import torch
2
  import gradio as gr
3
  from transformers import AutoModelForCausalLM, AutoTokenizer
4
- from util_funcs import getLengthParam, calcAnswerLengthByProbability, cropContext
5
 
6
  def chat_function(Message, History): # model, tokenizer
7
 
8
  input_user = Message
9
 
10
- History = History or []
11
 
12
- chat_history_ids = torch.zeros((1, 0), dtype=torch.int) if History == [] else torch.tensor(history[-1][2], dtype=torch.long)
13
 
14
  # encode the new user input, add parameters and return a tensor in Pytorch
15
  lengthId = getLengthParam(input_user, tokenizer)
@@ -78,7 +80,7 @@ def chat_function(Message, History): # model, tokenizer
78
  # Случай когда надо перегенерировать ответ наступил, берем изначальный тензор
79
  chat_history_ids = chat_history_ids_initial
80
 
81
- History.append((input_user, answer, chat_history_ids.tolist()))
82
  html = "<div class='chatbot'>"
83
  for user_msg, resp_msg, _ in history:
84
  if user_msg != '-':
@@ -86,7 +88,7 @@ def chat_function(Message, History): # model, tokenizer
86
  if resp_msg != '-':
87
  html += f"<div class='resp_msg'>{resp_msg}</div>"
88
  html += "</div>"
89
- return html, History
90
 
91
  # Download checkpoint:
92
 
@@ -105,8 +107,8 @@ description = """
105
  article = "<p style='text-align: center'><a href='https://huggingface.co/avorozhko/ruDialoGpt3-medium-finetuned-context'>Бот на основе дообученной GPT-3</a></p>"
106
 
107
  iface = gr.Interface(fn=chat_function,
108
- inputs=[gr.inputs.Textbox(lines=3, placeholder="Что вы хотите сказать боту..."), 'state'],
109
- outputs=['html', 'state'],
110
  title=title, description=description, article=article,
111
  theme='dark-grass',
112
  css= """
@@ -122,4 +124,4 @@ iface = gr.Interface(fn=chat_function,
122
  )
123
 
124
  if __name__ == "__main__":
125
- iface.launch(debug=False, share=True)
 
1
+ # app.py
2
+ import random
3
  import torch
4
  import gradio as gr
5
  from transformers import AutoModelForCausalLM, AutoTokenizer
6
+ # from util_funcs import getLengthParam, calcAnswerLengthByProbability, cropContext
7
 
8
  def chat_function(Message, History): # model, tokenizer
9
 
10
  input_user = Message
11
 
12
+ history = History or []
13
 
14
+ chat_history_ids = torch.zeros((1, 0), dtype=torch.int) if history == [] else torch.tensor(history[-1][2], dtype=torch.long)
15
 
16
  # encode the new user input, add parameters and return a tensor in Pytorch
17
  lengthId = getLengthParam(input_user, tokenizer)
 
80
  # Случай когда надо перегенерировать ответ наступил, берем изначальный тензор
81
  chat_history_ids = chat_history_ids_initial
82
 
83
+ history.append((input_user, answer, chat_history_ids.tolist()))
84
  html = "<div class='chatbot'>"
85
  for user_msg, resp_msg, _ in history:
86
  if user_msg != '-':
 
88
  if resp_msg != '-':
89
  html += f"<div class='resp_msg'>{resp_msg}</div>"
90
  html += "</div>"
91
+ return html, history
92
 
93
  # Download checkpoint:
94
 
 
107
  article = "<p style='text-align: center'><a href='https://huggingface.co/avorozhko/ruDialoGpt3-medium-finetuned-context'>Бот на основе дообученной GPT-3</a></p>"
108
 
109
  iface = gr.Interface(fn=chat_function,
110
+ inputs=[gr.inputs.Textbox(lines=3, placeholder="Что вы хотите сказать боту..."), "state"],
111
+ outputs=["html", "state"],
112
  title=title, description=description, article=article,
113
  theme='dark-grass',
114
  css= """
 
124
  )
125
 
126
  if __name__ == "__main__":
127
+ iface.launch(debug=True)