Update README.md
Browse files
README.md
CHANGED
@@ -40,14 +40,14 @@ from transformers import AutoModelForCausalLM, AutoTokenizer, TorchAoConfig
```diff
 40  model_id = "microsoft/Phi-4-mini-instruct"
 41
 42  from torchao.quantization import Int4WeightOnlyConfig
 43 -quant_config = Int4WeightOnlyConfig(group_size=128)
 44  quantization_config = TorchAoConfig(quant_type=quant_config)
 45  quantized_model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, quantization_config=quantization_config)
 46  tokenizer = AutoTokenizer.from_pretrained(model_id)
 47
 48  # Push to hub
 49  USER_ID = "YOUR_USER_ID"
 50 -save_to = f"{USER_ID}/{model_id}-int4wo"
 51  quantized_model.push_to_hub(save_to, safe_serialization=False)
 52  tokenizer.push_to_hub(save_to)
 53
```
```diff
 40  model_id = "microsoft/Phi-4-mini-instruct"
 41
 42  from torchao.quantization import Int4WeightOnlyConfig
 43 +quant_config = Int4WeightOnlyConfig(group_size=128, use_hqq=True)
 44  quantization_config = TorchAoConfig(quant_type=quant_config)
 45  quantized_model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, quantization_config=quantization_config)
 46  tokenizer = AutoTokenizer.from_pretrained(model_id)
 47
 48  # Push to hub
 49  USER_ID = "YOUR_USER_ID"
 50 +save_to = f"{USER_ID}/{model_id}-int4wo-hqq"
 51  quantized_model.push_to_hub(save_to, safe_serialization=False)
 52  tokenizer.push_to_hub(save_to)
 53
```