---
base_model:
- meta-llama/Llama-3.2-1B-Instruct
---

Llama 3.2 (1B) Instruct, quantized to 4-bit (INT4) weights with SparseGPT.

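Example usage with `transformers`: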
```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "Almheiri/Llama-3.2-1B-Instruct-SparseGPT-INT4"
tokenizer = AutoTokenizer.from_pretrained(model_id)
# device_map="auto" places the model on a GPU when one is available.
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype="auto", device_map="auto")

prompt = [
    {"role": "system", "content": "You are a helpful assistant that responds as a pirate."},
    {"role": "user", "content": "What's Deep Learning?"},
]
inputs = tokenizer.apply_chat_template(
    prompt,
    tokenize=True,
    add_generation_prompt=True,
    return_tensors="pt",
    return_dict=True,
).to("cuda")

# Sample up to 256 new tokens and print only the assistant's reply.
outputs = model.generate(**inputs, do_sample=True, max_new_tokens=256)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0].split("assistant")[-1])
```
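
The snippet above hard-codes `.to("cuda")`, so it assumes a CUDA GPU. As a minimal sketch for machines without one (an assumption, not part of the original card), the inputs can instead follow wherever `device_map="auto"` placed the model:

```python
# Hypothetical device-agnostic variant: send the inputs to the model's own
# device rather than assuming "cuda" is available. Reuses the tokenizer,
# prompt, and model defined in the snippet above.
inputs = tokenizer.apply_chat_template(
    prompt,
    tokenize=True,
    add_generation_prompt=True,
    return_tensors="pt",
    return_dict=True,
).to(model.device)
```

Generation then proceeds exactly as above.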