---
license: llama3
---

## How to use

The example below loads the model with the `transformers` library and generates a response to a Japanese prompt.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# System prompt (Japanese): "You are a sincere and excellent Japanese assistant.
# Unless otherwise instructed, always respond in Japanese."
DEFAULT_SYSTEM_PROMPT = "あなたは誠実で優秀な日本人のアシスタントです。特に指示が無い場合は、常に日本語で回答してください。"
# User prompt (Japanese): "What is an excellent AI? Also, list five points you
# consider important for an excellent AI."
text = "優秀なAIとはなんですか? またあなたの考える優秀なAIに重要なポイントを5つ挙げて下さい。"

model_name = "TeamDelta/llama3-8B-test"

# Load the tokenizer and model. device_map="auto" places the weights on the
# available device(s), and torch_dtype="auto" uses the checkpoint's native dtype.
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto",
)
model.eval()

# Build the conversation and render it with the model's chat template.
messages = [
    {"role": "system", "content": DEFAULT_SYSTEM_PROMPT},
    {"role": "user", "content": text},
]
prompt = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True,
)
# The chat template already includes the special tokens, so don't add them again.
token_ids = tokenizer.encode(
    prompt, add_special_tokens=False, return_tensors="pt"
)

with torch.no_grad():
    output_ids = model.generate(
        token_ids.to(model.device),
        max_new_tokens=1200,
        do_sample=True,
        temperature=0.6,
        top_p=0.9,
    )
# Decode only the newly generated tokens, dropping the echoed prompt.
output = tokenizer.decode(
    output_ids.tolist()[0][token_ids.size(1):], skip_special_tokens=True
)
print(output)
```
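
For interactive use, generation can also stream tokens to stdout as they are produced rather than printing the full completion at the end. The following is a minimal sketch using the `TextStreamer` helper from `transformers`, reusing the `tokenizer`, `model`, and `token_ids` defined above; the sampling parameters simply mirror the example and are not tuned recommendations.

```python
from transformers import TextStreamer

# Prints tokens as they are generated; skip_prompt suppresses the echoed input,
# and skip_special_tokens is forwarded to the decode calls.
streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

with torch.no_grad():
    model.generate(
        token_ids.to(model.device),
        max_new_tokens=1200,
        do_sample=True,
        temperature=0.6,
        top_p=0.9,
        streamer=streamer,
    )
```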