JordiBayarri committed
Commit c315d6a (verified)
1 Parent(s): 8c4b287

Update README.md

Files changed (1):
  1. README.md +12 -8
README.md CHANGED
@@ -139,7 +139,7 @@ Use the code below to get started with the model. You can run conversational inf
 import transformers
 import torch
 
-model_id = "HPAI-BSC/Qwen2.5-Aloe-Beta-7B"
+model_id = "HPAI-BSC/Qwen2.5-Aloe-Beta-72B"
 
 pipeline = transformers.pipeline(
     "text-generation",
@@ -161,7 +161,7 @@ prompt = pipeline.tokenizer.apply_chat_template(
 
 terminators = [
     pipeline.tokenizer.eos_token_id,
-    pipeline.tokenizer.convert_tokens_to_ids("<|eot_id|>")
+    pipeline.tokenizer.convert_tokens_to_ids("<|im_end|>")
 ]
 
 outputs = pipeline(
@@ -169,8 +169,10 @@ outputs = pipeline(
     max_new_tokens=256,
     eos_token_id=terminators,
     do_sample=True,
-    temperature=0.6,
-    top_p=0.9,
+    temperature=0.7,
+    top_p=0.8,
+    top_k=20,
+    repetition_penalty=1.05
 )
 print(outputs[0]["generated_text"][len(prompt):])
 ```
@@ -181,7 +183,7 @@ print(outputs[0]["generated_text"][len(prompt):])
 from transformers import AutoTokenizer, AutoModelForCausalLM
 import torch
 
-model_id = "HPAI-BSC/Qwen2.5-Aloe-Beta-7B"
+model_id = "HPAI-BSC/Qwen2.5-Aloe-Beta-72B"
 
 tokenizer = AutoTokenizer.from_pretrained(model_id)
 model = AutoModelForCausalLM.from_pretrained(
@@ -203,7 +205,7 @@ input_ids = tokenizer.apply_chat_template(
 
 terminators = [
     tokenizer.eos_token_id,
-    tokenizer.convert_tokens_to_ids("<|eot_id|>")
+    tokenizer.convert_tokens_to_ids("<|im_end|>")
 ]
 
 outputs = model.generate(
@@ -211,8 +213,10 @@ outputs = model.generate(
     max_new_tokens=256,
     eos_token_id=terminators,
     do_sample=True,
-    temperature=0.6,
-    top_p=0.9,
+    temperature=0.7,
+    top_p=0.8,
+    top_k=20,
+    repetition_penalty=1.05
 )
 response = outputs[0][input_ids.shape[-1]:]
 print(tokenizer.decode(response, skip_special_tokens=True))
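
For reference, the pipeline-based snippet after this change would read roughly as follows. This is a sketch assembled from the diff: the `messages` content, `model_kwargs`, and `device_map` lines are assumptions based on the usual README layout and are not shown in this commit.

```python
# Sketch of the updated pipeline usage with the parameter values from this commit.
# The messages content and the loading kwargs below are illustrative assumptions.
import transformers
import torch

model_id = "HPAI-BSC/Qwen2.5-Aloe-Beta-72B"

pipeline = transformers.pipeline(
    "text-generation",
    model=model_id,
    model_kwargs={"torch_dtype": torch.bfloat16},  # assumed dtype
    device_map="auto",
)

# Illustrative conversation; replace with your own messages.
messages = [
    {"role": "system", "content": "You are a helpful medical assistant."},
    {"role": "user", "content": "Hello."},
]

prompt = pipeline.tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True,
)

# Qwen2.5's chat template ends turns with <|im_end|>, hence the updated terminator.
terminators = [
    pipeline.tokenizer.eos_token_id,
    pipeline.tokenizer.convert_tokens_to_ids("<|im_end|>"),
]

outputs = pipeline(
    prompt,
    max_new_tokens=256,
    eos_token_id=terminators,
    do_sample=True,
    temperature=0.7,
    top_p=0.8,
    top_k=20,
    repetition_penalty=1.05,
)
print(outputs[0]["generated_text"][len(prompt):])
```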
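
Likewise, the `AutoModelForCausalLM` snippet with the new terminator and sampling values would look roughly like this; the loading kwargs, `messages`, and `apply_chat_template` arguments are again assumptions not visible in the diff.

```python
# Sketch of the updated generate-based usage; loading kwargs and messages are assumed.
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

model_id = "HPAI-BSC/Qwen2.5-Aloe-Beta-72B"

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,  # assumed dtype
    device_map="auto",
)

# Illustrative conversation; replace with your own messages.
messages = [
    {"role": "system", "content": "You are a helpful medical assistant."},
    {"role": "user", "content": "Hello."},
]

input_ids = tokenizer.apply_chat_template(
    messages,
    add_generation_prompt=True,
    return_tensors="pt",
).to(model.device)

terminators = [
    tokenizer.eos_token_id,
    tokenizer.convert_tokens_to_ids("<|im_end|>"),
]

outputs = model.generate(
    input_ids,
    max_new_tokens=256,
    eos_token_id=terminators,
    do_sample=True,
    temperature=0.7,
    top_p=0.8,
    top_k=20,
    repetition_penalty=1.05,
)
response = outputs[0][input_ids.shape[-1]:]
print(tokenizer.decode(response, skip_special_tokens=True))
```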