dar-tau committed on
Commit 497a54c · verified · 1 Parent(s): 90e0ba3

Update app.py

Files changed (1)
  1. app.py +1 -1
app.py CHANGED
@@ -12,6 +12,7 @@ from dataclasses import dataclass
 # chatml_template = """{% for message in messages %}
 # {{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}
 # {% endfor %}"""
+# pipe.tokenizer.chat_template = chatml_template # TheBloke says this is the right template for this model
 
 prompt_format = '''<|im_start|>system
 {system_message}<|im_end|>
@@ -50,7 +51,6 @@ device = "cpu"
 model_name = "TheBloke/OpenHermes-2.5-Mistral-7B-GPTQ"
 token = os.environ['hf_token']
 pipe = pipeline("text-generation", model=model_name, device=device)
-pipe.tokenizer.chat_template = chatml_template # TheBloke says this is the right template for this model
 generate_kwargs = {'max_new_tokens': 20}
 
 # '''
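
For context, the only functional change is that the explicit `pipe.tokenizer.chat_template = chatml_template` assignment is moved into the commented-out block, leaving the hard-coded `prompt_format` string as the prompt builder. Below is a minimal sketch (not the app itself) comparing the two ways of producing a ChatML prompt; it uses only the tokenizer, so no model weights are loaded, and the user/assistant continuation of `prompt_format` is an illustrative guess, since only the system turn appears in this diff.

# Sketch only: compares the commented-out chat_template path with a hand-written
# ChatML format string, using just the tokenizer for the model named in the diff.
from transformers import AutoTokenizer

model_name = "TheBloke/OpenHermes-2.5-Mistral-7B-GPTQ"
tokenizer = AutoTokenizer.from_pretrained(model_name)

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"},
]

# Path 1: the Jinja template the commit keeps commented out, applied explicitly.
chatml_template = """{% for message in messages %}
{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}
{% endfor %}"""
tokenizer.chat_template = chatml_template
prompt_a = tokenizer.apply_chat_template(messages, tokenize=False)

# Path 2: a hand-written format string in the style of the app's prompt_format.
# Only the system turn is visible in the diff; the remaining turns are hypothetical.
prompt_format = (
    "<|im_start|>system\n{system_message}<|im_end|>\n"
    "<|im_start|>user\n{prompt}<|im_end|>\n"
    "<|im_start|>assistant\n"
)
prompt_b = prompt_format.format(
    system_message=messages[0]["content"], prompt=messages[1]["content"]
)

print(prompt_a)
print(prompt_b)

Either string can then be passed to the text-generation pipeline created in app.py (e.g. pipe(prompt_b, **generate_kwargs)); the commit simply prefers the manual format string over the tokenizer-level template.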