Upload folder using huggingface_hub (#3)
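
The commit message is the default emitted by `huggingface_hub`'s `upload_folder`. A minimal sketch of how a commit like this is produced (the folder path and `repo_id` below are placeholders, not values from this commit):

```python
from huggingface_hub import HfApi

api = HfApi()
api.upload_folder(
    folder_path="/path/to/local/model",  # placeholder local directory
    repo_id="user/quantized-llama",      # placeholder target repo
    commit_message="Upload folder using huggingface_hub",
)
```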
config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "/covalent/.cache/models/tmpvbdylqmr9atxqox4",
+  "_name_or_path": "/covalent/.cache/models/tmplmbeos846gp0qzq2",
   "architectures": [
     "LlamaForCausalLM"
   ],
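
Only the throwaway `_name_or_path` cache path changes in config.json; the architecture remains `LlamaForCausalLM`. A hedged sanity check with transformers (`user/quantized-llama` is a placeholder repo id, not a value from this commit):

```python
from transformers import AutoConfig

# Placeholder repo id; substitute the repo this commit belongs to.
cfg = AutoConfig.from_pretrained("user/quantized-llama")
print(cfg.architectures)  # expected: ['LlamaForCausalLM'], per the diff above
```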
model-00001-of-00002.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ae6ee1c6b4470100a855414089f6325565f16fa5fe7ddb01cd0ff28d61e8aeaf
+oid sha256:3c450c0cfaffe44b31d842dfeac14ad7ca7b56de0bbcd788110ad9f66627d78a
 size 4996772687
model-00002-of-00002.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6b1073c6df39131599f060af541ba66202dbf04b9758452b06519417f88a251a
+oid sha256:37a031abd3f6e20c15293ac0806631c538097cf36f8a5d6aff3a6ba1f5a5f144
 size 4090014257
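
Both `.safetensors` shards are Git LFS pointer files: each records the SHA-256 (`oid`) and byte `size` of the actual payload. In this commit only the oids change while both sizes stay identical, i.e. the re-exported shards have exactly the same length as before. A minimal sketch (not part of this repo) for verifying a downloaded shard against its pointer, using the new oid of the first shard above:

```python
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Hash the file in 1 MiB chunks so multi-GB shards never sit in memory."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# New oid of model-00001-of-00002.safetensors from the pointer diff above.
expected = "3c450c0cfaffe44b31d842dfeac14ad7ca7b56de0bbcd788110ad9f66627d78a"
assert sha256_of("model-00001-of-00002.safetensors") == expected
```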
smash_config.json CHANGED
@@ -28,7 +28,7 @@
   "quant_llm-int8_weight_bits": 8,
   "max_batch_size": 1,
   "device": "cuda",
-  "cache_dir": "/covalent/.cache/models/tmpvbdylqmr",
+  "cache_dir": "/covalent/.cache/models/tmplmbeos84",
   "task": "",
   "save_load_fn": "bitsandbytes",
   "save_load_fn_args": {}