Update README.md
README.md CHANGED
@@ -21,6 +21,7 @@ This llama model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth)
 
 [<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth)
 
+---
 
 # Intended to run on Google Colab
 
@@ -89,4 +90,6 @@ for dt in tqdm(datasets):
     outputs = model.generate(**inputs, max_new_tokens = 512, use_cache = True, do_sample=False, repetition_penalty=1.2)
     prediction = tokenizer.decode(outputs[0], skip_special_tokens=True).split('\n### 回答')[-1]
 
-    results.append({"task_id": dt["task_id"], "input": input, "output": prediction})
+    results.append({"task_id": dt["task_id"], "input": input, "output": prediction})
+
+---
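For context, the second hunk sits inside the README's inference loop over the evaluation tasks. Below is a minimal sketch of how that loop typically looks with Unsloth; the model name, dataset path, and the `### 指示`/`### 回答` prompt template are illustrative assumptions, not taken from this diff.

```python
# Sketch of the surrounding inference loop (assumed, not part of the diff):
# the model name, dataset path, and prompt template are placeholders.
import json
from tqdm import tqdm
from unsloth import FastLanguageModel

model, tokenizer = FastLanguageModel.from_pretrained(
    model_name="path/to/your-finetuned-model",  # placeholder
    max_seq_length=2048,
    dtype=None,          # auto-detect float16/bfloat16
    load_in_4bit=True,
)
FastLanguageModel.for_inference(model)  # switch Unsloth to its fast inference mode

# Each JSONL record is assumed to carry "task_id" and "input" fields.
with open("tasks.jsonl", encoding="utf-8") as f:  # placeholder path
    datasets = [json.loads(line) for line in f]

results = []
for dt in tqdm(datasets):
    input = dt["input"]  # the README reuses the name `input`, shadowing the builtin
    # Prompt template inferred from the '\n### 回答' split below.
    prompt = f"### 指示\n{input}\n### 回答\n"
    inputs = tokenizer([prompt], return_tensors="pt").to(model.device)
    outputs = model.generate(**inputs, max_new_tokens=512, use_cache=True,
                             do_sample=False, repetition_penalty=1.2)
    prediction = tokenizer.decode(outputs[0], skip_special_tokens=True).split('\n### 回答')[-1]
    results.append({"task_id": dt["task_id"], "input": input, "output": prediction})
```

Greedy decoding (`do_sample=False`) with `repetition_penalty=1.2` keeps the outputs deterministic across runs, which suits benchmark-style evaluation.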