Update README.md
Browse files
README.md
CHANGED
```diff
@@ -29,19 +29,16 @@ from peft import PeftModel
 from transformers import GenerationConfig, LlamaForCausalLM, LlamaTokenizer
 
 
-
-tokenizer = LlamaTokenizer.from_pretrained(base_model)
+tokenizer = LlamaTokenizer.from_pretrained("decapoda-research/llama-7b-hf")
 model = LlamaForCausalLM.from_pretrained(
-
+    "decapoda-research/llama-7b-hf",
     load_in_8bit=True,
     torch_dtype=torch.float16,
     device_map="auto"
-    max_memory=max_memory
 )
 model = PeftModel.from_pretrained(
     model,
-
-    torch_dtype=torch.float16
-    max_memory=max_memory
+    "DataAgent/llama-7b-alpaca-zh-20k",
+    torch_dtype=torch.float16
 )
 ```
```