Update generation_config.json
Pull in upstream second stop token.
Fixes an issue where inference does not stop.
See upstream: https://huggingface.co/meta-llama/Meta-Llama-3-70B-Instruct/blob/main/generation_config.json
generation_config.json CHANGED (+1 -1)

@@ -2,6 +2,6 @@
   "_from_model_config": true,
   "bos_token_id": 128000,
   "do_sample": true,
-  "eos_token_id": 128001,
+  "eos_token_id": [128001, 128009],
   "transformers_version": "4.40.0"
 }
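For context, token 128001 is <|end_of_text|> and 128009 is <|eot_id|>, the end-of-turn token the Llama 3 instruct chat template actually emits; with only 128001 in the config, generation can run past the end of a turn. Below is a minimal sketch, assuming the transformers library (>= 4.40) and access to the gated meta-llama checkpoint, of how the stop tokens take effect. With this fix the list is read from generation_config.json automatically; on older revisions of the repo you can pass it explicitly, as shown.

# Minimal sketch: rely on the updated generation_config.json, or pass
# both stop tokens explicitly when using an older revision of the repo.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "meta-llama/Meta-Llama-3-70B-Instruct"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto")

messages = [{"role": "user", "content": "Write a haiku about rain."}]
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

# Stop on either token; mirrors the [128001, 128009] list in the fixed config.
terminators = [
    tokenizer.eos_token_id,                         # 128001, <|end_of_text|>
    tokenizer.convert_tokens_to_ids("<|eot_id|>"),  # 128009, end of turn
]

output = model.generate(input_ids, max_new_tokens=64, eos_token_id=terminators)
print(tokenizer.decode(output[0][input_ids.shape[-1]:], skip_special_tokens=True))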