sharpenb committed (verified)
Commit e20b201 · Parent(s): 4687991

Upload folder using huggingface_hub (#9)


- b2fb89ce8cec7827b61c1804549b4af41dc5fdb7e1f7b170decf4a5a4a725c9e (f1e84399b76d9c385c007c8b5fa2faf493ad89fd)
- 480c8d9b1c334387551e64e26e24d7e9f0bfbee5f5a22532adcb307e08ec6bd5 (af33719b3db8cf1f86e7ce820c12aa61c4282151)
- 0c8ad235131bccc5c445a3dd85077f2b3082106b01b1150720bd02d3d28703af (72e967f7c6e926f6c25b571b655c34492e47ed2f)

.gitattributes CHANGED
@@ -35,3 +35,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
 models--distributed--optimized-gpt2-1b/blobs/84bd38785e135d735130cb4633155169b1046946034dff6dd9275ef96649e85a filter=lfs diff=lfs merge=lfs -text
 models--distributed--optimized-gpt2-1b/blobs/2525dfc6bc73a28af3c0e9c91ec6e51d2d06ed5043373d6f34559ff9de54c7ff filter=lfs diff=lfs merge=lfs -text
+models--distributed--optimized-gpt2-1b/blobs/f817092111231c54370459c20d8f6f94f3c6227f32eff7230c38aac95a9e74e7 filter=lfs diff=lfs merge=lfs -text
.locks/models--distributed--optimized-gpt2-1b/f817092111231c54370459c20d8f6f94f3c6227f32eff7230c38aac95a9e74e7.lock ADDED
File without changes
config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "/covalent/.cache/models/tmp5unhuka7ffg0pvj0",
+  "_name_or_path": "/covalent/.cache/models/tmprzpy46pucpek6h9q",
   "activation_function": "gelu_new",
   "architectures": [
     "GPTOptim"
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9c1055227b3b7fef3dc30fd87ccb6d52f6cf001639b9936f7e9bcc70b35595b1
+oid sha256:aa8fe8365e4b2d57207d05cc02eb618b3dc8b0a9b06ac4cc0b80fb4c7034bd1c
 size 1207575528
models--distributed--optimized-gpt2-1b/.no_exist/f5b59fe9dd690b435c6925363bfbd10f2c354a55/adapter_config.json ADDED
File without changes
models--distributed--optimized-gpt2-1b/blobs/f817092111231c54370459c20d8f6f94f3c6227f32eff7230c38aac95a9e74e7 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f817092111231c54370459c20d8f6f94f3c6227f32eff7230c38aac95a9e74e7
+size 4040701744
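The blob added above is itself a git-lfs pointer, and its filename matches the sha256 oid it declares, consistent with huggingface_hub naming cache blobs by their content hash. A minimal sketch (paths taken from this diff, helper names hypothetical) for parsing such a pointer and verifying a downloaded payload against it:

import hashlib
from pathlib import Path

def parse_lfs_pointer(pointer_path: Path) -> dict:
    # A pointer file is "key value" lines: version, oid, size.
    fields = {}
    for line in pointer_path.read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

def verify_payload(payload_path: Path, expected_oid: str) -> bool:
    # Stream the file through sha256 and compare against the pointer's oid.
    digest = hashlib.sha256()
    with payload_path.open("rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest() == expected_oid

pointer = parse_lfs_pointer(Path(
    "models--distributed--optimized-gpt2-1b/blobs/"
    "f817092111231c54370459c20d8f6f94f3c6227f32eff7230c38aac95a9e74e7"
))
oid = pointer["oid"].removeprefix("sha256:")  # Python 3.9+
print(oid, pointer["size"])  # f8170921..., 4040701744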
models--distributed--optimized-gpt2-1b/refs/main CHANGED
@@ -1 +1 @@
-0f5f2c85309718087017f97adaabc63a05d6a493
+f5b59fe9dd690b435c6925363bfbd10f2c354a55
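This refs/main update is what repoints the vendored cache at the new revision: huggingface_hub reads refs/<ref> to get a commit hash, then serves files from snapshots/<commit>/, whose entries are typically symlinks into blobs/. A minimal sketch of that resolution, assuming the cache layout shown in this diff:

from pathlib import Path

cache = Path("models--distributed--optimized-gpt2-1b")

# refs/<ref> holds the commit hash the ref currently points to.
commit = (cache / "refs" / "main").read_text().strip()
print(commit)  # f5b59fe9dd690b435c6925363bfbd10f2c354a55 after this commit

# Files for that revision live under snapshots/<commit>/ and usually
# resolve (via symlink) to content-addressed files under blobs/.
config_path = cache / "snapshots" / commit / "config.json"
print(config_path.resolve())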
models--distributed--optimized-gpt2-1b/snapshots/f5b59fe9dd690b435c6925363bfbd10f2c354a55/config.json ADDED
@@ -0,0 +1,37 @@
+{
+  "_name_or_path": "distributed/optimized-gpt2-1b",
+  "activation_function": "gelu_new",
+  "architectures": [
+    "GPTOptim"
+  ],
+  "attn_pdrop": 0.1,
+  "auto_map": {
+    "AutoConfig": "distributed/optimized-gpt2-500m--configuration_gpt_optimized.GPTOptimConfig",
+    "AutoModelForCausalLM": "distributed/optimized-gpt2-500m--modeling_gpt_optimized.GPTOptim"
+  },
+  "block_size": 1024,
+  "bos_token_id": 50256,
+  "embd_pdrop": 0.1,
+  "eos_token_id": 50256,
+  "initializer_range": 0.02,
+  "layer_norm_epsilon": 1e-05,
+  "model_type": "gpt_optimized",
+  "n_embd": 1280,
+  "n_head": 32,
+  "n_inner": null,
+  "n_layer": 48,
+  "n_positions": 1024,
+  "reorder_and_upcast_attn": false,
+  "resid_pdrop": 0.1,
+  "scale_attn_by_inverse_layer_idx": false,
+  "scale_attn_weights": true,
+  "summary_activation": null,
+  "summary_first_dropout": 0.1,
+  "summary_proj_to_labels": true,
+  "summary_type": "cls_index",
+  "summary_use_proj": true,
+  "torch_dtype": "float32",
+  "transformers_version": "4.39.3",
+  "use_cache": true,
+  "vocab_size": 50257
+}
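Note the auto_map in this snapshot config: it routes AutoConfig and AutoModelForCausalLM to the custom GPTOptimConfig/GPTOptim classes hosted in distributed/optimized-gpt2-500m, so loading goes through remote code. A hedged sketch of what loading looks like, with the repo id taken from _name_or_path above (the surrounding toolchain may load it differently):

from transformers import AutoConfig, AutoModelForCausalLM

# auto_map points at custom classes in another repo, so
# trust_remote_code=True is required for both calls.
config = AutoConfig.from_pretrained(
    "distributed/optimized-gpt2-1b", trust_remote_code=True
)
model = AutoModelForCausalLM.from_pretrained(
    "distributed/optimized-gpt2-1b", trust_remote_code=True
)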
models--distributed--optimized-gpt2-1b/snapshots/f5b59fe9dd690b435c6925363bfbd10f2c354a55/model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f817092111231c54370459c20d8f6f94f3c6227f32eff7230c38aac95a9e74e7
+size 4040701744
smash_config.json CHANGED
@@ -28,7 +28,7 @@
   "quant_llm-int8_weight_bits": 8,
   "max_batch_size": 1,
   "device": "cuda",
-  "cache_dir": "/covalent/.cache/models/tmp5unhuka7",
+  "cache_dir": "/covalent/.cache/models/tmprzpy46pu",
   "task": "",
   "save_load_fn": "bitsandbytes",
   "save_load_fn_args": {