Update README.md
README.md (CHANGED)
@@ -2,6 +2,7 @@
 license: apache-2.0
 datasets:
 - shibing624/alpaca-zh
+- yahma/alpaca-cleaned
 language:
 - zh
 tags:
@@ -29,19 +30,16 @@ from peft import PeftModel
 from transformers import GenerationConfig, LlamaForCausalLM, LlamaTokenizer
 
 
-
-tokenizer = LlamaTokenizer.from_pretrained(base_model)
+tokenizer = LlamaTokenizer.from_pretrained("decapoda-research/llama-7b-hf")
 model = LlamaForCausalLM.from_pretrained(
-
+    "decapoda-research/llama-7b-hf",
     load_in_8bit=True,
     torch_dtype=torch.float16,
     device_map="auto"
-    max_memory=max_memory
 )
 model = PeftModel.from_pretrained(
     model,
-
-    torch_dtype=torch.float16
-    max_memory=max_memory
+    "DataAgent/llama-7b-alpaca-zh-120k",
+    torch_dtype=torch.float16
 )
-```
+```
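The updated snippet stops after the LoRA adapter is attached. For completeness, here is a minimal inference sketch built on the same objects; the Alpaca-style prompt template, the Chinese example instruction, and the sampling parameters are assumptions for illustration, not something the model card specifies.

```python
import torch
from peft import PeftModel
from transformers import GenerationConfig, LlamaForCausalLM, LlamaTokenizer

# Load the base model and the LoRA adapter exactly as in the README above
# (requires a CUDA GPU and bitsandbytes for load_in_8bit=True).
tokenizer = LlamaTokenizer.from_pretrained("decapoda-research/llama-7b-hf")
model = LlamaForCausalLM.from_pretrained(
    "decapoda-research/llama-7b-hf",
    load_in_8bit=True,
    torch_dtype=torch.float16,
    device_map="auto"
)
model = PeftModel.from_pretrained(
    model,
    "DataAgent/llama-7b-alpaca-zh-120k",
    torch_dtype=torch.float16
)
model.eval()

# Assumed Alpaca-style prompt template; adjust if the training data used another format.
# The example instruction means "Introduce the giant panda in one sentence."
prompt = (
    "Below is an instruction that describes a task. "
    "Write a response that appropriately completes the request.\n\n"
    "### Instruction:\n用一句话介绍一下大熊猫。\n\n### Response:\n"
)

input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to("cuda")
generation_config = GenerationConfig(
    do_sample=True,
    temperature=0.7,
    top_p=0.9,
    max_new_tokens=128,
)
with torch.no_grad():
    output_ids = model.generate(input_ids=input_ids, generation_config=generation_config)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```

The sampling settings here (temperature 0.7, top_p 0.9) are only a starting point; greedy or beam-search decoding works just as well by changing the same GenerationConfig object.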