JiangYH committed
Commit 2b259f0 · verified · 1 Parent(s): 0339b60

Upload folder using huggingface_hub

Files changed (2)
  1. ChatWorld/ChatWorld.py +3 -2
  2. ChatWorld/models.py +18 -4
ChatWorld/ChatWorld.py CHANGED
@@ -66,8 +66,9 @@ class ChatWorld:
         self.model_role_nickname = role_nick_name
 
     def getSystemPrompt(self, role_name, role_nick_name):
-        assert self.model_role_name and self.model_role_nickname, "Please set model role name first"
-        return self.prompt.render(model_role_name=self.model_role_name, model_role_nickname=self.model_role_nickname, role_name=role_name, role_nickname=role_nick_name)
+        assert self.model_role_name, "Please set model role name first"
+
+        return {"role": "system", "content": self.prompt.render(model_role_name=self.model_role_name, model_role_nickname=self.model_role_nickname, role_name=role_name, role_nickname=role_nick_name)}
 
     def chat(self, user_role_name: str, text: str, user_role_nick_name: str = None, use_local_model=False):
         message = [self.getSystemPrompt(
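With this change, getSystemPrompt returns an OpenAI-style message dict ({"role": "system", "content": ...}) instead of a bare string, so chat() can place it directly into its message list, and only the role name (not the nickname) is asserted. A minimal sketch of the new return shape; the constructor call and attribute assignments here are assumptions for illustration, not part of the commit:

    world = ChatWorld()  # assumes a no-argument constructor
    world.model_role_name, world.model_role_nickname = "Alice", "Ally"  # illustrative setup
    msg = world.getSystemPrompt(role_name="Bob", role_nick_name="Bobby")
    # msg == {"role": "system", "content": "<prompt rendered from the Jinja template>"}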
ChatWorld/models.py CHANGED
@@ -3,9 +3,23 @@ from transformers import AutoTokenizer, AutoModelForCausalLM
 
 class qwen_model:
     def __init__(self, model_name):
-        self.tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
-        self.model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", trust_remote_code=True).eval()
+        self.tokenizer = AutoTokenizer.from_pretrained(
+            model_name, trust_remote_code=True)
+        self.model = AutoModelForCausalLM.from_pretrained(
+            model_name, device_map="auto", trust_remote_code=True).eval()
 
     def get_response(self, message):
-        self.tokenizer.apply_chat_template(message, tokenize=False, add_generation_prompt=True)
-        return "test"
+        message = self.tokenizer.apply_chat_template(
+            message, tokenize=False, add_generation_prompt=True)
+        model_inputs = self.tokenizer([message], return_tensors="pt")
+        generated_ids = self.model.generate(
+            model_inputs.input_ids,
+            max_new_tokens=512
+        )
+        generated_ids = [
+            output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
+        ]
+
+        response = self.tokenizer.batch_decode(
+            generated_ids, skip_special_tokens=True)[0]
+        return response
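The rewritten get_response replaces the stub return with the standard transformers chat-generation flow: apply_chat_template renders the message list into a prompt string, the tokenizer turns it into tensors, generate produces up to 512 new tokens, the echoed prompt tokens are sliced off, and batch_decode returns only the completion. A minimal usage sketch; the checkpoint name and message contents are assumptions for illustration, not from the commit:

    model = qwen_model("Qwen/Qwen1.5-1.8B-Chat")  # assumed chat-tuned checkpoint
    messages = [
        {"role": "system", "content": "You are a role-play assistant."},
        {"role": "user", "content": "Introduce yourself."},
    ]
    print(model.get_response(messages))

One caveat on the design: with device_map="auto" the model may be placed on a GPU while model_inputs stays on CPU, so multi-device callers would typically move the inputs first (e.g. model_inputs.to(self.model.device)); the sketch above assumes a single-device run.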