adamo1139 committed (verified)
Commit 472c60b · 1 Parent(s): eb6343c

Update README.md

Files changed (1): README.md (+30 -3)
README.md CHANGED
@@ -1,3 +1,30 @@
- ---
- license: apache-2.0
- ---
+ ---
+ license: apache-2.0
+ ---
+ Creation script
+
+ ```python
+ from llmcompressor.transformers import SparseAutoModelForCausalLM
+ from transformers import AutoTokenizer
+ from llmcompressor.transformers import oneshot
+ from llmcompressor.modifiers.quantization import QuantizationModifier
+
+
+ MODEL_ID = "teknium/OpenHermes-2.5-Mistral-7B"
+
+ model = SparseAutoModelForCausalLM.from_pretrained(
+     MODEL_ID, device_map="auto", torch_dtype="auto")
+ tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
+
+ # Configure the simple PTQ quantization
+ recipe = QuantizationModifier(
+     targets="Linear", scheme="FP8_DYNAMIC", ignore=["lm_head"])
+
+ # Apply the quantization algorithm.
+ oneshot(model=model, recipe=recipe)
+
+ # Save the model.
+ SAVE_DIR = MODEL_ID.split("/")[1] + "-FP8-Dynamic"
+ model.save_pretrained(SAVE_DIR)
+ tokenizer.save_pretrained(SAVE_DIR)
+ ```
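
The creation script above only quantizes and saves the checkpoint. For reference, here is a minimal sketch of loading the resulting FP8-Dynamic model for inference; vLLM as the serving engine, the prompt, and the local directory name are assumptions, not part of this commit.

```python
# Minimal usage sketch (assumption: vLLM is installed and can load this
# compressed-tensors FP8-Dynamic checkpoint from a local directory).
from vllm import LLM, SamplingParams

# Hypothetical local path: the SAVE_DIR written by the creation script above.
llm = LLM(model="OpenHermes-2.5-Mistral-7B-FP8-Dynamic")

# Generate a short completion from the quantized model.
params = SamplingParams(temperature=0.7, max_tokens=64)
outputs = llm.generate(["Explain FP8 dynamic quantization in one sentence."], params)
print(outputs[0].outputs[0].text)
```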