Update README.md
README.md CHANGED
@@ -10,14 +10,14 @@ datasets:
 import torch
 from transformers import AutoModelForCausalLM, AutoTokenizer
 
-device = "cuda:
+device = "cuda:0" if torch.cuda.is_available() else "cpu"
 
 model_id = "maywell/Synatra-7B-v0.3-Translation"
 tokenizer = AutoTokenizer.from_pretrained(model_id, revision=model_revision)
 model = AutoModelForCausalLM.from_pretrained(model_id, revision=model_revision, device_map=device, torch_dtype=torch.float16).eval()
 
 # Load the LoRA adapter
-model.load_adapter("heegyu/Synatra-7B-v0.3-Translation-glaive"
+model.load_adapter("heegyu/Synatra-7B-v0.3-Translation-glaive")
 
 
 def generate(prompt, *messages):
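For context, here is a minimal sketch of how the fixed snippet runs end to end. It is an illustration built on assumptions, not part of the diff: `model_revision` is defined elsewhere in the README and is pinned to `"main"` here as a placeholder, and the final generation call is generic because the README's own `generate(prompt, *messages)` body is not shown in this hunk.

```python
# Minimal sketch of the corrected README snippet; assumes the `peft`
# package is installed (required by model.load_adapter).
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Fall back to CPU when no GPU is available (the fix in this commit).
device = "cuda:0" if torch.cuda.is_available() else "cpu"

model_id = "maywell/Synatra-7B-v0.3-Translation"
model_revision = "main"  # placeholder; the README defines this elsewhere

tokenizer = AutoTokenizer.from_pretrained(model_id, revision=model_revision)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    revision=model_revision,
    device_map=device,
    torch_dtype=torch.float16,
).eval()

# Attach the LoRA adapter on top of the base weights
# (the missing closing parenthesis is the other fix in this commit).
model.load_adapter("heegyu/Synatra-7B-v0.3-Translation-glaive")

# Illustrative call only: the README's generate(prompt, *messages) helper
# is not shown in this diff, so a plain model.generate() is used instead.
inputs = tokenizer("Translate into Korean: Hello, world!", return_tensors="pt").to(model.device)
with torch.no_grad():
    outputs = model.generate(**inputs, max_new_tokens=128)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```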