wangzhang committed on
Commit
73c25af
·
1 Parent(s): 4a23366

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +1 -1
app.py CHANGED
@@ -51,7 +51,7 @@ def generate(
51
  conversation.extend([{"role": "user", "content": user}, {"role": "assistant", "content": assistant}])
52
  conversation.append({"role": "user", "content": "### Instruction: \n" + "根据巨杉数据库SequoiaDB的相关问题进行回答。\n" + "### Input:\n" + message + "\n ### Response:"})
53
 
54
- chat = tokenizer.apply_chat_template(conversation, tokenize=False)
55
  inputs = tokenizer(chat, return_tensors="pt", add_special_tokens=False).to("cuda")
56
  if len(inputs) > MAX_INPUT_TOKEN_LENGTH:
57
  inputs = inputs[-MAX_INPUT_TOKEN_LENGTH:]
 
51
  conversation.extend([{"role": "user", "content": user}, {"role": "assistant", "content": assistant}])
52
  conversation.append({"role": "user", "content": "### Instruction: \n" + "根据巨杉数据库SequoiaDB的相关问题进行回答。\n" + "### Input:\n" + message + "\n ### Response:"})
53
 
54
+ chat = tokenizer.apply_chat_template([{"role": "user", "content": "### Instruction: \n" + "根据巨杉数据库SequoiaDB的相关问题进行回答。\n" + "### Input:\n" + message + "\n ### Response:"}], tokenize=False)
55
  inputs = tokenizer(chat, return_tensors="pt", add_special_tokens=False).to("cuda")
56
  if len(inputs) > MAX_INPUT_TOKEN_LENGTH:
57
  inputs = inputs[-MAX_INPUT_TOKEN_LENGTH:]