Upload folder using huggingface_hub

#2 · by sharpenb · opened
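For context: "Upload folder using huggingface_hub" is the default commit message written by `huggingface_hub`'s `upload_folder`, and the diff below only swaps the temporary cache paths baked into the two configs and the LFS hashes of the re-exported weight shards (the shard sizes are unchanged). A minimal sketch of the kind of call that produces such a commit; `folder_path` and `repo_id` are hypothetical placeholders:

```python
# Sketch of an upload that would produce this commit;
# folder_path and repo_id are hypothetical placeholders.
from huggingface_hub import HfApi

api = HfApi()
api.upload_folder(
    folder_path="/path/to/local/model",  # hypothetical local directory
    repo_id="user/model-repo",           # hypothetical target repo
    # "Upload folder using huggingface_hub" is the default commit message,
    # so this argument could be omitted entirely.
    commit_message="Upload folder using huggingface_hub",
)
```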
config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "/covalent/.cache/models/tmp1y8dm77tmmk3rdbh",
+  "_name_or_path": "/covalent/.cache/models/tmpc2mq9brc3w_v0eyt",
   "architectures": [
     "LlamaForCausalLM"
   ],
model-00001-of-00002.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:253e3df8d72fbb7c6270e2e3675452ed44504bf0fc0f843bc58c87a8e661faf0
+oid sha256:66d537ce1c300dddc10645b8d2e38bfd67f592ca900051a7be46103716b76cea
 size 4996772687
model-00002-of-00002.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:bbf52f0fffe48285ef3ade7dc63ee3b047797fc110b7f89ecd931b235c559144
+oid sha256:7d72f200dc6c6be06d6330da4e82f7d7f6710697c6ed99287bc396f610202dbc
 size 4090014257
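The weight shards are stored through Git LFS, so the diff shows only the pointer files: each `oid sha256:` changes while `size` stays identical, meaning the shards were re-exported with the same byte length but different contents. A small sketch for checking a downloaded shard against its pointer, using only the standard library; the expected hash is taken from the new pointer for shard 1 of 2:

```python
# Sketch: recompute the sha256 of a downloaded shard and compare it
# to the oid recorded in the LFS pointer shown in the diff above.
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# Expected value from the new pointer for model-00001-of-00002.safetensors.
expected = "66d537ce1c300dddc10645b8d2e38bfd67f592ca900051a7be46103716b76cea"
assert sha256_of("model-00001-of-00002.safetensors") == expected
```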
smash_config.json CHANGED
@@ -28,7 +28,7 @@
   "quant_llm-int8_weight_bits": 8,
   "max_batch_size": 1,
   "device": "cuda",
-  "cache_dir": "/covalent/.cache/models/tmp1y8dm77t",
+  "cache_dir": "/covalent/.cache/models/tmpc2mq9brc",
   "task": "",
   "save_load_fn": "bitsandbytes",
   "save_load_fn_args": {}