DavidGF committed on
Commit
a725e00
1 Parent(s): cf015ac

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +5 -5
README.md CHANGED
@@ -58,7 +58,7 @@ tokenizer = model.tokenizer
58
  input_text = tokenizer.apply_chat_template(messages, tokenize=False)
59
  input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to(device)
60
  output_ids = model.generate(input_ids, max_length=250)
61
- print(model.expert_tokenizer(text=input_text).decode(output_ids[0], skip_special_tokens=True))
62
  ```
63
 
64
 
@@ -96,7 +96,7 @@ tokenizer = model.tokenizer
96
  input_text = tokenizer.apply_chat_template(messages, tokenize=False)
97
  input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to("cuda:0")
98
  output_ids = model.generate(input_ids ,temperature=0.1, do_sample=True, top_p=0.9,top_k=20, max_length=500)
99
- print(model.expert_tokenizer(text=input_text).decode(output_ids[0], skip_special_tokens=True))
100
  ```
101
 
102
  # Call the Python LoRA-Expert:
@@ -112,7 +112,7 @@ input_text = tokenizer.apply_chat_template(messages, tokenize=False)
112
  print(input_text)
113
  input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to("cuda:0")
114
  output_ids = model.generate(input_ids ,temperature=0.6, do_sample=True, top_p=0.9,top_k=20, max_length=400)
115
- print(model.expert_tokenizer(text=input_text).decode(output_ids[0], skip_special_tokens=True))
116
  ```
117
 
118
  # Call the SQL LoRA-expert:
@@ -129,7 +129,7 @@ tokenizer = model.tokenizer
129
  input_text = tokenizer.apply_chat_template(messages, tokenize=False)
130
  input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to(device)
131
  output_ids = model.generate(input_ids ,temperature=0.6, do_sample=True, top_p=0.9,top_k=20, max_length=500)
132
- print(model.expert_tokenizer(text=input_text).decode(output_ids[0], skip_special_tokens=True))
133
  ```
134
 
135
  # Call the German LoRA-expert:
@@ -143,7 +143,7 @@ tokenizer = model.tokenizer
143
  input_text = tokenizer.apply_chat_template(messages, tokenize=False)
144
  input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to("cuda:0")
145
  output_ids = model.generate(input_ids, max_length=150)
146
- print(model.expert_tokenizer(text=input_text).decode(output_ids[0], skip_special_tokens=True))
147
  ```
148
 
149
 
 
58
  input_text = tokenizer.apply_chat_template(messages, tokenize=False)
59
  input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to(device)
60
  output_ids = model.generate(input_ids, max_length=250)
61
+ print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
62
  ```
63
 
64
 
 
96
  input_text = tokenizer.apply_chat_template(messages, tokenize=False)
97
  input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to("cuda:0")
98
  output_ids = model.generate(input_ids ,temperature=0.1, do_sample=True, top_p=0.9,top_k=20, max_length=500)
99
+ print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
100
  ```
101
 
102
  # Call the Python LoRA-Expert:
 
112
  print(input_text)
113
  input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to("cuda:0")
114
  output_ids = model.generate(input_ids ,temperature=0.6, do_sample=True, top_p=0.9,top_k=20, max_length=400)
115
+ print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
116
  ```
117
 
118
  # Call the SQL LoRA-expert:
 
129
  input_text = tokenizer.apply_chat_template(messages, tokenize=False)
130
  input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to(device)
131
  output_ids = model.generate(input_ids ,temperature=0.6, do_sample=True, top_p=0.9,top_k=20, max_length=500)
132
+ print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
133
  ```
134
 
135
  # Call the German LoRA-expert:
 
143
  input_text = tokenizer.apply_chat_template(messages, tokenize=False)
144
  input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to("cuda:0")
145
  output_ids = model.generate(input_ids, max_length=150)
146
+ print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
147
  ```
148
 
149