buesst1 committed
Commit d8f5385 · 1 Parent(s): b01a5d6

Update of model components from run


Link to WandB run:
https://wandb.ai/t_buess/chatbot-qa/runs/ub5vv7gy

adapter_config.json CHANGED
@@ -1,6 +1,6 @@
 {
   "auto_mapping": null,
-  "base_model_name_or_path": "meta-llama/Llama-2-13b-hf",
+  "base_model_name_or_path": "flozi00/Llama-2-13b-german-assistant-v7",
   "bias": "none",
   "fan_in_fan_out": false,
   "inference_mode": true,
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:39f2eab86876842e52a22b6cb63202e4d73082b23b3c8827adfa50dfe420764a
+oid sha256:f2934dfbf73006ee7decef3c4114c0af23f357da5173526403c1cab68a7aefb2
 size 39343112
config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "meta-llama/Llama-2-13b-hf",
+  "_name_or_path": "flozi00/Llama-2-13b-german-assistant-v7",
   "architectures": [
     "LlamaForCausalLM"
   ],
@@ -15,6 +15,7 @@
   "num_attention_heads": 40,
   "num_hidden_layers": 40,
   "num_key_value_heads": 40,
+  "pad_token_id": 0,
   "pretraining_tp": 1,
   "quantization_config": {
     "bnb_4bit_compute_dtype": "bfloat16",
@@ -35,5 +36,5 @@
   "torch_dtype": "float16",
   "transformers_version": "4.34.0",
   "use_cache": true,
-  "vocab_size": 32000
+  "vocab_size": 37632
 }
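
Besides the renamed base checkpoint, this commit sets an explicit pad_token_id and enlarges vocab_size from 32000 to 37632, matching the extended tokenizer committed below. A small consistency check, again using a placeholder repo id:

# Sketch only: confirm config and tokenizer agree after the update.
from transformers import AutoConfig, AutoTokenizer

REPO = "path/to/this-adapter-repo"   # placeholder repo id
config = AutoConfig.from_pretrained(REPO)
tokenizer = AutoTokenizer.from_pretrained(REPO)

print(config.vocab_size)     # 37632 after this commit (was 32000)
print(config.pad_token_id)   # 0, i.e. the <unk> token
print(len(tokenizer))        # should not exceed config.vocab_size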
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
tokenizer.model CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
-size 499723
+oid sha256:f440c53d2cc6f14a7ed7124dea5f5a7402fb4fc95bccb5d8be6d0f7e74d327ed
+size 568229
tokenizer_config.json CHANGED
@@ -1,9 +1,11 @@
 {
+  "add_bos_token": true,
+  "add_eos_token": false,
   "added_tokens_decoder": {
     "0": {
       "content": "<unk>",
       "lstrip": false,
-      "normalized": false,
+      "normalized": true,
       "rstrip": false,
       "single_word": false,
       "special": true
@@ -11,7 +13,7 @@
     "1": {
       "content": "<s>",
       "lstrip": false,
-      "normalized": false,
+      "normalized": true,
       "rstrip": false,
       "single_word": false,
       "special": true
@@ -19,7 +21,7 @@
     "2": {
       "content": "</s>",
       "lstrip": false,
-      "normalized": false,
+      "normalized": true,
       "rstrip": false,
       "single_word": false,
       "special": true
@@ -32,8 +34,8 @@
   "legacy": false,
   "model_max_length": 1000000000000000019884624838656,
   "pad_token": "</p>",
-  "padding_side": "right",
   "sp_model_kwargs": {},
+  "spaces_between_special_tokens": false,
   "tokenizer_class": "LlamaTokenizer",
   "unk_token": "<unk>",
   "use_default_system_prompt": true