Upload MBartForConditionalGeneration
- config.json +1 -3
- pytorch_model.bin +1 -1
config.json
CHANGED
@@ -53,9 +53,7 @@
     "bits": 4,
     "block_name_to_quantize": "model.decoder.layers",
     "damp_percent": 0.1,
-    "dataset": [
-      "auto-gptq is an easy-to-use model quantization library with user-friendly apis, based on GPTQ algorithm."
-    ],
+    "dataset": "c4",
     "desc_act": false,
     "disable_exllama": true,
     "group_size": 128,
pytorch_model.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:bdee49d2ad1fcdcaa1656d7db7490c5c5fc08027158da26ab7479ced55481680
 size 1949361861