Divyasreepat committed on
Commit
f5c9a12
·
verified ·
1 Parent(s): e131ea5

Upload folder using huggingface_hub

Browse files
README.md ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ library_name: keras-hub
3
+ ---
4
+ This is a [`Llama` model](https://keras.io/api/keras_hub/models/llama) uploaded using the KerasHub library and can be used with JAX, TensorFlow, and PyTorch backends.
5
+ Model config:
6
+ * **name:** llama_backbone_1
7
+ * **trainable:** True
8
+ * **vocabulary_size:** 32000
9
+ * **num_layers:** 32
10
+ * **num_query_heads:** 32
11
+ * **hidden_dim:** 4096
12
+ * **intermediate_dim:** 11008
13
+ * **rope_max_wavelength:** 10000.0
14
+ * **rope_scaling_factor:** 1.0
15
+ * **num_key_value_heads:** 32
16
+ * **layer_norm_epsilon:** 1e-05
17
+ * **dropout:** 0
18
+
19
+ This model card has been generated automatically and should be completed by the model author. See [Model Cards documentation](https://huggingface.co/docs/hub/model-cards) for more information.
assets/tokenizer/vocabulary.spm ADDED
Binary file (500 kB). View file
 
config.json ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "module": "keras_nlp.models.llama.llama_backbone",
3
+ "class_name": "LlamaBackbone",
4
+ "config": {
5
+ "name": "llama_backbone_1",
6
+ "trainable": true,
7
+ "vocabulary_size": 32000,
8
+ "num_layers": 32,
9
+ "num_query_heads": 32,
10
+ "hidden_dim": 4096,
11
+ "intermediate_dim": 11008,
12
+ "rope_max_wavelength": 10000.0,
13
+ "rope_scaling_factor": 1.0,
14
+ "num_key_value_heads": 32,
15
+ "layer_norm_epsilon": 1e-05,
16
+ "dropout": 0
17
+ },
18
+ "registered_name": "keras_nlp>LlamaBackbone",
19
+ "assets": [],
20
+ "weights": "model.weights.h5"
21
+ }
metadata.json ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ {
2
+ "keras_version": "3.0.5",
3
+ "keras_nlp_version": "0.9.0",
4
+ "parameter_count": 6738415616,
5
+ "date_saved": "2024-03-27@19:03:39"
6
+ }
model.weights.h5 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a021af2d1d0ea2a69b85347bfd3d1606f5cd5ed584dd125d4977834452c611ff
3
+ size 13477864216
tokenizer.json ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "module": "keras_nlp.models.llama.llama_tokenizer",
3
+ "class_name": "LlamaTokenizer",
4
+ "config": {
5
+ "name": "llama_tokenizer",
6
+ "trainable": true,
7
+ "dtype": "int32",
8
+ "proto": null,
9
+ "sequence_length": null
10
+ },
11
+ "registered_name": "keras_nlp>LlamaTokenizer",
12
+ "assets": [
13
+ "assets/tokenizer/vocabulary.spm"
14
+ ],
15
+ "weights": null
16
+ }