Fix tokenizer.eos_token_id
```diff
app.py
@@ -47,7 +47,7 @@ def predict(message, history, system_prompt, temperature, max_tokens):
         temperature=temperature,
         max_new_tokens=DEFAULT_MAX_NEW_TOKENS,
         use_cache=True,
-        eos_token_id=
+        eos_token_id=tokenizer.eos_token_id # <|im_end|>
     )
     t = Thread(target=model.generate, kwargs=generate_kwargs)
     t.start()
@@ -61,7 +61,6 @@ def predict(message, history, system_prompt, temperature, max_tokens):
 if __name__ == "__main__":
     args = parse_args()
     tokenizer = AutoTokenizer.from_pretrained("lliu01/fortios_cli")
-    tokenizer = AutoTokenizer.from_pretrained("lliu01/fortios_cli")
     model = AutoModelForCausalLM.from_pretrained(
         "lliu01/fortios_cli",
         torch_dtype=torch.bfloat16,
```
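For context, below is a minimal sketch of the streaming-generation pattern this commit edits, assuming the usual `transformers` `TextIteratorStreamer` setup. Only the `generate_kwargs` entries shown in the diff come from the source; the streamer, prompt handling, `do_sample`, and the `DEFAULT_MAX_NEW_TOKENS` value are assumptions. The fix matters twice over: the dangling `eos_token_id=` was a syntax error, and once completed, passing `tokenizer.eos_token_id` lets `generate()` stop at the chat turn delimiter (`<|im_end|>` for this tokenizer, per the inline comment) instead of always running to `max_new_tokens`.

```python
# Minimal sketch, not the full app.py: threaded generation with a streamer.
# Assumed pieces: the streamer setup, prompt handling, do_sample, and the
# DEFAULT_MAX_NEW_TOKENS value; only generate_kwargs mirrors the diff.
from threading import Thread

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

DEFAULT_MAX_NEW_TOKENS = 512  # assumed; defined elsewhere in app.py

tokenizer = AutoTokenizer.from_pretrained("lliu01/fortios_cli")
model = AutoModelForCausalLM.from_pretrained(
    "lliu01/fortios_cli",
    torch_dtype=torch.bfloat16,
)


def predict(message, temperature=0.7):
    inputs = tokenizer(message, return_tensors="pt").to(model.device)
    # The streamer yields decoded text as generate() produces tokens.
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
    generate_kwargs = dict(
        **inputs,
        streamer=streamer,
        do_sample=True,  # assumed, so that temperature takes effect
        temperature=temperature,
        max_new_tokens=DEFAULT_MAX_NEW_TOKENS,
        use_cache=True,
        # The commit's fix: stop generation at the EOS token (<|im_end|>)
        # instead of always running to max_new_tokens.
        eos_token_id=tokenizer.eos_token_id,
    )
    # generate() blocks, so run it in a thread and stream text from here.
    t = Thread(target=model.generate, kwargs=generate_kwargs)
    t.start()
    partial = ""
    for chunk in streamer:
        partial += chunk
        yield partial
```

To check the inline comment's claim, `tokenizer.convert_ids_to_tokens(tokenizer.eos_token_id)` should return `<|im_end|>`, assuming the model's tokenizer config defines its EOS token that way.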