Upload folder using huggingface_hub (#5)
- 2c11922be9ad383e5dc272dfb3e3cea22860f979bea1ff804676ef5cfdeb9209 (a9f25ab515f7e1395b2cbc36854be8c024c895f0)
- config.json +1 -1
- smash_config.json +1 -1
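For reference, a folder upload like this one is typically performed with the `upload_folder` helper from the `huggingface_hub` library. A minimal sketch, assuming placeholder values for the repository id and local path (neither is taken from this commit):

```python
from huggingface_hub import upload_folder

# Minimal sketch of the kind of call behind this commit; repo_id and
# folder_path are placeholders, not values taken from this repository.
upload_folder(
    repo_id="user/repo",
    folder_path="/path/to/local/model",
    commit_message="Upload folder using huggingface_hub",
)
```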
config.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "_attn_implementation_autoset": true,
-  "_name_or_path": "/tmp/models/
+  "_name_or_path": "/tmp/models/tmpjid4erhu/tmpkn8zhsmc",
   "activation_function": "gelu_new",
   "architectures": [
     "GPT2LMHeadModel"
smash_config.json
CHANGED
@@ -11,7 +11,7 @@
   "quant_hqq_weight_bits": 8,
   "max_batch_size": 1,
   "device": "cuda",
-  "cache_dir": "/tmp/models/
+  "cache_dir": "/tmp/models/tmpjid4erhu",
   "task": "",
   "save_load_fn": "hqq",
   "save_load_fn_args": {},
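The `"save_load_fn": "hqq"` and `"quant_hqq_weight_bits": 8` entries above indicate the weights are stored with 8-bit HQQ quantization. As a hypothetical illustration only (the actual Pruna smash loading path may differ), an equivalent 8-bit HQQ setting in plain `transformers`, assuming the `hqq` package and a recent `transformers` release are installed, would look roughly like:

```python
from transformers import AutoModelForCausalLM, HqqConfig

# Hypothetical illustration: an 8-bit HQQ config mirroring
# "quant_hqq_weight_bits": 8; not the actual Pruna loading code.
quant_config = HqqConfig(nbits=8)

model = AutoModelForCausalLM.from_pretrained(
    "gpt2",  # base architecture named in config.json ("GPT2LMHeadModel")
    quantization_config=quant_config,
    device_map="cuda",  # matches "device": "cuda" in smash_config.json
)
```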