krish-emissary committed
Commit d187254 · verified · 1 Parent(s): fccf7d5

Add files using upload-large-folder tool

.gitattributes CHANGED
@@ -1,35 +1,2 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ckpt filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.npy filter=lfs diff=lfs merge=lfs -text
- *.npz filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pickle filter=lfs diff=lfs merge=lfs -text
- *.pkl filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
  *.safetensors filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tar filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.wasm filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zst filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
.ipynb_checkpoints/config-checkpoint.json ADDED
@@ -0,0 +1,29 @@
+ {
+   "architectures": [
+     "LlamaForCausalLM"
+   ],
+   "attention_bias": false,
+   "attention_dropout": 0.0,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "head_dim": 128,
+   "hidden_act": "silu",
+   "hidden_size": 8192,
+   "initializer_range": 0.02,
+   "intermediate_size": 28672,
+   "max_position_embeddings": 4096,
+   "mlp_bias": false,
+   "model_type": "llama",
+   "num_attention_heads": 64,
+   "num_hidden_layers": 80,
+   "num_key_value_heads": 8,
+   "pretraining_tp": 1,
+   "rms_norm_eps": 1e-05,
+   "rope_scaling": null,
+   "rope_theta": 10000,
+   "tie_word_embeddings": false,
+   "torch_dtype": "float16",
+   "transformers_version": "4.55.0",
+   "use_cache": true,
+   "vocab_size": 32016
+ }
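
For reference, the added config describes a LlamaForCausalLM with 80 layers, hidden size 8192, grouped-query attention (64 heads, 8 KV heads), and a 32016-token vocabulary. A minimal sketch of sanity-checking such a config with transformers' AutoConfig; the local directory name is hypothetical:

# Sketch: load and inspect the committed config with transformers.
# "./Python-Tab-Completion-CodeLlama-70b" is a hypothetical local checkout
# containing this config.json.
from transformers import AutoConfig

config = AutoConfig.from_pretrained("./Python-Tab-Completion-CodeLlama-70b")
print(config.model_type)           # llama
print(config.num_hidden_layers)    # 80
print(config.num_key_value_heads)  # 8
print(config.vocab_size)           # 32016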
.ipynb_checkpoints/upload-checkpoint.py ADDED
@@ -0,0 +1,17 @@
+ from huggingface_hub import HfApi
+ from concurrent.futures import ThreadPoolExecutor
+ import os
+
+ api = HfApi()
+ files = [f"model-000{i:02d}-of-00029.safetensors" for i in range(7, 30)]
+
+ def upload_file(filepath):
+     return api.upload_file(
+         path_or_fileobj=filepath,
+         path_in_repo=filepath,
+         repo_id="emissary-ai/Python-Tab-Completion-CodeLlama-70b",
+         repo_type="model"
+     )
+
+ with ThreadPoolExecutor(max_workers=50) as executor:
+     executor.map(upload_file, files)
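
One caveat with the script above: executor.map returns a lazy iterator, and because its results are never consumed, any exception raised inside api.upload_file is stored in a future and silently dropped. A sketch of the same parallel upload that surfaces per-file failures, using the same file list and repo id; the worker count here is a placeholder, not a recommendation:

# Sketch: same parallel upload, but report per-file failures instead of
# discarding them (the committed script never iterates the map results).
from concurrent.futures import ThreadPoolExecutor, as_completed
from huggingface_hub import HfApi

api = HfApi()
files = [f"model-000{i:02d}-of-00029.safetensors" for i in range(7, 30)]

def upload_file(filepath):
    return api.upload_file(
        path_or_fileobj=filepath,
        path_in_repo=filepath,
        repo_id="emissary-ai/Python-Tab-Completion-CodeLlama-70b",
        repo_type="model",
    )

with ThreadPoolExecutor(max_workers=8) as executor:
    futures = {executor.submit(upload_file, f): f for f in files}
    for future in as_completed(futures):
        name = futures[future]
        try:
            future.result()
            print(f"uploaded {name}")
        except Exception as exc:
            print(f"failed {name}: {exc}")

How many concurrent uploads are sensible depends on local bandwidth and Hub rate limits, so the original max_workers=50 may or may not be appropriate.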
tokenizer_config.json CHANGED
@@ -40,4 +40,4 @@
  "tokenizer_class": "LlamaTokenizer",
  "unk_token": "<unk>",
  "use_default_system_prompt": false
- }
+ }
upload.py ADDED
@@ -0,0 +1,17 @@
+ from huggingface_hub import HfApi
+ from concurrent.futures import ThreadPoolExecutor
+ import os
+
+ api = HfApi()
+ files = [f"model-000{i:02d}-of-00029.safetensors" for i in range(7, 30)]
+
+ def upload_file(filepath):
+     return api.upload_file(
+         path_or_fileobj=filepath,
+         path_in_repo=filepath,
+         repo_id="emissary-ai/Python-Tab-Completion-CodeLlama-70b",
+         repo_type="model"
+     )
+
+ with ThreadPoolExecutor(max_workers=50) as executor:
+     executor.map(upload_file, files)
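
The commit message says the files were added with the upload-large-folder tool; in recent huggingface_hub releases the same workflow is exposed as HfApi.upload_large_folder, which manages its own worker pool and can resume an interrupted transfer. A sketch under that assumption; the local folder path is hypothetical:

# Sketch: upload the whole shard folder with upload_large_folder
# (available in recent huggingface_hub releases). The folder path is hypothetical.
from huggingface_hub import HfApi

api = HfApi()
api.upload_large_folder(
    repo_id="emissary-ai/Python-Tab-Completion-CodeLlama-70b",
    repo_type="model",
    folder_path="./Python-Tab-Completion-CodeLlama-70b",
)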