bytestorm committed
Commit 987a840 · verified · 1 Parent(s): 994bd13

Upload folder using huggingface_hub

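For context on the commit message above: a minimal sketch of the kind of `huggingface_hub` call that produces a commit like this one (the local folder path and token setup are assumptions):

```python
# Sketch of an upload_folder call matching the commit message above.
# The local folder path is an assumption; the repo id is this repository's.
from huggingface_hub import HfApi

api = HfApi()  # picks up the token from `huggingface-cli login`
api.upload_folder(
    folder_path="./SKIM-orcas-kdd25",  # hypothetical local checkpoint directory
    repo_id="bytestorm/SKIM-orcas-kdd25",
    repo_type="model",
    commit_message="Upload folder using huggingface_hub",
)
```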
README.md ADDED
@@ -0,0 +1,28 @@
+ ---
+ language: en
+ tags:
+ - llama-2
+ - lora
+ - ranking
+ license: apache-2.0
+ ---
+
+ # bytestorm/SKIM-orcas-kdd25
+
+ This is a LoRA-tuned checkpoint of Llama-2-7b for ranking tasks.
+
+ ## Model Details
+
+ - **Base Model:** Llama-2-7b
+ - **Training Type:** LoRA fine-tuning
+ - **Task:** Ranking/Retrieval
+ - **Framework:** PyTorch
+
+ ## Usage
+
+ ```python
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+
+ model = AutoModelForCausalLM.from_pretrained("bytestorm/SKIM-orcas-kdd25")
+ tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf")
+ ```
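Picking up where the README's snippet leaves off, a minimal sketch of running the loaded model on a query–document pair; the prompt format is an assumption, since the README does not document the template used for ranking:

```python
# Continues the README snippet: one generation step with the loaded model.
import torch

# Hypothetical prompt; the actual ranking prompt template is not documented here.
prompt = "Query: wireless earbuds\nDocument: A review of wireless earbuds...\nRelevant:"
inputs = tokenizer(prompt, return_tensors="pt")
with torch.no_grad():
    output = model.generate(**inputs, max_new_tokens=8)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```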
config.json ADDED
@@ -0,0 +1,10 @@
+ {
+   "architectures": [
+     "LlamaForCausalLM"
+   ],
+   "model_type": "llama",
+   "torch_dtype": "float16",
+   "transformers_version": "4.36.0",
+   "use_cache": true,
+   "_name_or_path": "meta-llama/Llama-2-7b-hf"
+ }
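Since `config.json` declares `torch_dtype: float16`, loading in half precision keeps the weights at roughly 13.5 GB instead of the ~27 GB a default float32 load would need; a sketch (device placement is an assumption and requires `accelerate`):

```python
# Load the checkpoint in the dtype declared by config.json.
import torch
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(
    "bytestorm/SKIM-orcas-kdd25",
    torch_dtype=torch.float16,   # matches "torch_dtype": "float16" above
    device_map="auto",           # assumption: needs the accelerate package
)
```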
generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "_from_model_config": true,
+   "pad_token_id": null,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "transformers_version": "4.36.0"
+ }
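Note that `pad_token_id` is null here, so batched generation has no pad id out of the box; reusing the eos id (2) is the usual workaround for Llama-2 checkpoints:

```python
# generation_config.json leaves pad_token_id null; set it before batched
# generation. Reusing eos (id 2) is the common choice for Llama-2.
model.generation_config.pad_token_id = model.generation_config.eos_token_id  # 2
```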
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:43ce4b4d57c45c98359f92e7be3c1e952db567ae806f47b565b85cc0cec4bd03
+ size 13476919178
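The pointer's size (13,476,919,178 bytes, about 13.5 GB) is consistent with ~7B parameters stored in float16 at 2 bytes each. To check a downloaded file against the pointer's oid (local path is an assumption):

```python
# Verify a downloaded pytorch_model.bin against the Git LFS pointer's sha256.
import hashlib

expected = "43ce4b4d57c45c98359f92e7be3c1e952db567ae806f47b565b85cc0cec4bd03"

h = hashlib.sha256()
with open("pytorch_model.bin", "rb") as f:            # hypothetical local path
    for chunk in iter(lambda: f.read(1 << 20), b""):  # read in 1 MiB chunks
        h.update(chunk)

assert h.hexdigest() == expected, "checksum mismatch"
```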
tokenizer_config.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "name_or_path": "meta-llama/Llama-2-7b-hf",
+   "pad_token": null,
+   "bos_token": "<s>",
+   "eos_token": "</s>",
+   "unk_token": "<unk>",
+   "clean_up_tokenization_spaces": true
+ }
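Mirroring the null `pad_token` above, the tokenizer needs a pad token assigned before batch encoding with padding; reusing `</s>` is the standard pattern for Llama-2:

```python
# tokenizer_config.json sets pad_token to null, so padding=True would fail
# without this. Reusing eos ("</s>") for padding is standard for Llama-2.
tokenizer.pad_token = tokenizer.eos_token  # "</s>"
batch = tokenizer(
    ["first query", "a somewhat longer second query"],
    padding=True,
    return_tensors="pt",
)
```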