mitsu-koh committed on
Commit
8bfa4bc
1 Parent(s): f5b620a
Files changed (1) hide show
  1. app.py +0 -3
app.py CHANGED
@@ -83,9 +83,6 @@ def chat_llama3_8b(message: str,
83
 
84
  # Need to set add_generation_prompt=True to ensure the model generates the response.
85
  input_ids = tokenizer.apply_chat_template(conversation, add_generation_prompt=True, return_tensors="pt").to(model.device)
86
-
87
- print(input_ids)
88
- print(tokenizer.decode(input_ids.tolist()[0]))
89
 
90
  streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True)
91
 
 
83
 
84
  # Need to set add_generation_prompt=True to ensure the model generates the response.
85
  input_ids = tokenizer.apply_chat_template(conversation, add_generation_prompt=True, return_tensors="pt").to(model.device)
 
 
 
86
 
87
  streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True)
88