Update README.md
README.md
---
license: llama3
---
- Foundation Model: [Bllossom 8B](https://huggingface.co/MLP-KTLim/llama-3-Korean-Bllossom-8B)
- Datasets (a loading sketch follows this list):
  - [jojo0217/korean_safe_conversation](https://huggingface.co/datasets/jojo0217/korean_safe_conversation)
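
The fine-tuning data can be inspected directly with the `datasets` library. A minimal sketch; the `train` split name is an assumption, so check the dataset card if it fails:

```python
from datasets import load_dataset

# Load the conversation dataset used for fine-tuning.
# The "train" split name is an assumption; verify it on the dataset card.
ds = load_dataset("jojo0217/korean_safe_conversation", split="train")

print(ds)     # row count and column names
print(ds[0])  # one raw example
```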
# Query

```python
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

BASE_MODEL = "sh2orc/llama-3-korean-8b"

model = AutoModelForCausalLM.from_pretrained(BASE_MODEL, device_map="auto")

tokenizer = AutoTokenizer.from_pretrained(BASE_MODEL)
tokenizer.pad_token = tokenizer.eos_token
tokenizer.padding_side = 'right'

# "Explain installment payments" (in Korean)
instruction = '할부 결제에 대해서 설명해줘'

pipe = pipeline("text-generation",
                model=model,
                tokenizer=tokenizer,
                max_new_tokens=1024)

messages = [
    {"role": "user", "content": instruction},
]

# Render the conversation with the model's chat template, appending
# the assistant header so generation starts a fresh reply.
prompt = pipe.tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)

outputs = pipe(
    prompt,
    do_sample=True,
    temperature=0.8,
    top_k=10,
    top_p=0.9,
    add_special_tokens=True,
    eos_token_id=[
        pipe.tokenizer.eos_token_id,
        pipe.tokenizer.convert_tokens_to_ids("<|eot_id|>")
    ]
)

# Strip the prompt from the returned text, leaving only the generated answer.
print(outputs[0]['generated_text'][len(prompt):])
```
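
The extra `<|eot_id|>` entry in `eos_token_id` matters for Llama 3 chat models: they end an assistant turn with `<|eot_id|>` rather than the plain EOS token, so omitting it tends to make generation run on past the answer.

The same query can also be run without `pipeline` by tokenizing the chat template directly. A minimal sketch, assuming the `model`, `tokenizer`, and `messages` objects from the snippet above:

```python
# Sketch: the same query via model.generate(), reusing model/tokenizer/messages
# from the snippet above.
input_ids = tokenizer.apply_chat_template(
    messages,
    add_generation_prompt=True,
    return_tensors="pt"
).to(model.device)

terminators = [
    tokenizer.eos_token_id,
    tokenizer.convert_tokens_to_ids("<|eot_id|>")
]

output = model.generate(
    input_ids,
    max_new_tokens=1024,
    do_sample=True,
    temperature=0.8,
    top_k=10,
    top_p=0.9,
    eos_token_id=terminators,
)

# Decode only the newly generated tokens, skipping the prompt.
print(tokenizer.decode(output[0][input_ids.shape[-1]:], skip_special_tokens=True))
```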
# Result

<pre>
Installment payment means repaying a loan in portions over a fixed period. For example, you can take out a 1,000,000 won loan and repay it over 10 payments of 100,000 won each. After the first payment of 100,000 won, 900,000 won remains. Then, after the second payment of 10 dollars, 800,000 won remains. Paying 100,000 won at a time for 10 more payments repays the loan in full.
</pre>