sharpenb committed
Commit 63ecd10 (verified) · 1 parent: 341e6cd

Upload folder using huggingface_hub (#2)


- cc5946cd572e2fca7f19aecbce4ed6fafd97e381361469375dfc326a5d0cf85d (1933951869c65b2a8827636bdf90815734c6ef86)

Files changed (3):
  1. config.json +1 -1
  2. model.safetensors +1 -1
  3. smash_config.json +1 -1
config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "/covalent/.cache/models/tmplry4ywexorrdfclk",
+  "_name_or_path": "/covalent/.cache/models/tmpoa89dlwnedw9b2j8",
   "activation_function": "gelu_new",
   "architectures": [
     "GPTNeoForCausalLM"
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4f7391f1b072f6beccee06214d2533e5f57a326659f9c9bbfd4fe83e068679ff
+oid sha256:e48bef7ebfa67f2fdb2560665548b6579dd8e4901d4957945341e561d30bfb23
 size 165826272
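
The model.safetensors entry is a Git LFS pointer file, so this diff only swaps the sha256 oid; the 165826272-byte weights blob itself lives in LFS storage. A minimal sketch in Python for verifying a downloaded copy against the new pointer's oid (the local file path is an assumption, not part of this commit):

import hashlib

def sha256_of_file(path, chunk_size=1 << 20):
    # Stream the file in 1 MiB chunks and return its hex sha256 digest.
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Expected oid copied from the updated LFS pointer above.
EXPECTED_OID = "e48bef7ebfa67f2fdb2560665548b6579dd8e4901d4957945341e561d30bfb23"

actual = sha256_of_file("model.safetensors")  # hypothetical local path
print("OK" if actual == EXPECTED_OID else "mismatch: " + actual)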
smash_config.json CHANGED
@@ -28,7 +28,7 @@
   "quant_llm-int8_weight_bits": 8,
   "max_batch_size": 1,
   "device": "cuda",
-  "cache_dir": "/covalent/.cache/models/tmplry4ywex",
+  "cache_dir": "/covalent/.cache/models/tmpoa89dlwn",
   "task": "",
   "save_load_fn": "bitsandbytes",
   "save_load_fn_args": {}