sharpenb committed
Commit 1f6c927 (verified) · 1 Parent(s): 90b0efc

Upload folder using huggingface_hub (#2)


- 6e982bd73ca2d59bfa4bbf1cb96f46a03a408e85855c4dd007bc7dc9d4188ef0 (f8782b1af0eeb657be95348b5427870a39c83e55)
- e3fab9c5f99a3144ea79c70f1e2a6858ed3e0e056d34cca26497f0708aec6f07 (54be2b0dc7d298e6b8c8e2a8dd5233ccc9ec35b6)
- 4d31e9a038c1a23e7162ea081bc02f611aa301021070fa7bdafdb39d4c7afff6 (cd4ae3e4463cfa9daba866bbe67444865af06749)

config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "google/codegemma-7b",
+  "_name_or_path": "google/codegemma-7b-it",
   "architectures": [
     "GemmaForCausalLM"
   ],
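The config.json change only updates the recorded source checkpoint: `_name_or_path` now points at the instruction-tuned `google/codegemma-7b-it` instead of the base `google/codegemma-7b`, while the architecture stays `GemmaForCausalLM`. A minimal sketch of reading that field from the file changed here (the relative path assumes a local clone of this repo):

```python
import json

# "config.json" is the file changed in this commit; the path is an
# assumption about where a local clone of the repository lives.
with open("config.json") as f:
    cfg = json.load(f)

print(cfg["_name_or_path"])   # "google/codegemma-7b-it" after this commit
print(cfg["architectures"])   # ["GemmaForCausalLM"] (unchanged)
```
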
model-00001-of-00002.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9aa1cd129682ffdc5068ea33bc1e502edd94d522afcd153277cf24b416e82ad6
+oid sha256:9c23880642fd4c3f4de0390f7d61f5275c877cb204e18b4e2676c9c7675ba96c
 size 4948559144
model-00002-of-00002.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5cb28005518ef2de3aa48f574ff1067e36eac832370ba144a60f505c2b73833d
+oid sha256:fd090e13ab421c1efd4083d9c52febd3e817ee26b0b5e2a4e2dfe305be86882c
 size 4383347016
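Both weight shards are Git LFS pointer files, so the diff shows only the pointer metadata: the `version` line, the `oid sha256:` content hash, and the `size` in bytes. The sizes are unchanged (4948559144 and 4383347016 bytes) while the oids differ, meaning the shard contents were replaced. A hedged sketch of checking a downloaded shard against the new pointer oid (the local filename is an assumption about where the first shard was saved):

```python
import hashlib

# New oid for model-00001-of-00002.safetensors introduced by this commit.
EXPECTED = "9c23880642fd4c3f4de0390f7d61f5275c877cb204e18b4e2676c9c7675ba96c"

# Hash the downloaded shard in 1 MiB chunks to avoid loading ~5 GB at once.
h = hashlib.sha256()
with open("model-00001-of-00002.safetensors", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)

assert h.hexdigest() == EXPECTED, "shard does not match the LFS pointer oid"
```
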
tokenizer_config.json CHANGED
@@ -1740,6 +1740,7 @@
     }
   },
   "bos_token": "<bos>",
+  "chat_template": "{{ bos_token }}{% if messages[0]['role'] == 'system' %}{{ raise_exception('System role not supported') }}{% endif %}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if (message['role'] == 'assistant') %}{% set role = 'model' %}{% else %}{% set role = message['role'] %}{% endif %}{{ '<start_of_turn>' + role + '\n' + message['content'] | trim + '<end_of_turn>\n' }}{% endfor %}{% if add_generation_prompt %}{{'<start_of_turn>model\n'}}{% endif %}",
   "clean_up_tokenization_spaces": false,
   "eos_token": "<eos>",
   "model_max_length": 1000000000000000019884624838656,