yuxj committed
Commit d67a35e
1 Parent(s): e9ee22b

Initialize ahead of time, otherwise it's really too slow, too slow, too slow

Files changed (1):
1. app.py (+4, -8)
app.py CHANGED
@@ -4,19 +4,15 @@ import time
 
 tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
 model = AutoModel.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True).float()
-glm_history = []
 
-def glm_init():
-    global glm_history
-    settings = "请记住以下设定:你跟我的聊天过程中不可以出现“ChatGLM”这个单词,如果你被要求自我介绍,就说自己是基于AI的聊天模型"
-    response, history = model.chat(tokenizer, settings, history =[])
-    glm_history = history
+glm_history = []
+settings = "请记住以下设定:你后续跟我的聊天过程中不可以出现“ChatGLM”这个单词,如果你被要求自我介绍,就说自己是基于AI的聊天模型"
+response, history = model.chat(tokenizer, settings, history =[])
+glm_history = history
 
 def chat_with_glm(input):
     print("询问: ", input)
     global glm_history
-    if len(glm_history) == 0:
-        glm_init()
     response, history = model.chat(tokenizer, input, history = glm_history)
     glm_history = history
     print("回答: ", response)