lunarflu (HF staff) committed
Commit 4a88289 · verified · 1 Parent(s): 66793d6

Update app.py

Files changed (1):
  app.py (+7 -5)
app.py CHANGED
@@ -6,10 +6,11 @@ from transformers import AutoTokenizer, AutoModelForCausalLM
 # Load the model and tokenizer
 tokenizer = AutoTokenizer.from_pretrained("TheBloke/Chronoboros-33B-GPTQ")
 model = AutoModelForCausalLM.from_pretrained("TheBloke/Chronoboros-33B-GPTQ", device_map="auto")
-model.eval()  # set model to evaluation mode
 
-# Optional: Use torch.compile() if you're on PyTorch 2.0+ for further speed-up
-# model = torch.compile(model)
+# Set a valid pad_token_id to avoid generation errors
+model.generation_config.pad_token_id = tokenizer.eos_token_id
+
+model.eval()  # Ensure the model is in evaluation mode
 
 @spaces.GPU
 def respond(message, history: list[tuple[str, str]], system_message, max_tokens, temperature, top_p):
@@ -33,12 +34,13 @@ def respond(message, history: list[tuple[str, str]], system_message, max_tokens, temperature, top_p):
         temperature=temperature,
         top_p=top_p,
         do_sample=True,
+        pad_token_id=tokenizer.eos_token_id,  # also pass it here to be safe
     )
 
-    # Extract the new tokens
+    # Extract the new tokens (tokens generated after the prompt)
     new_tokens = output_ids[0][input_ids.shape[1]:]
 
-    # Stream output in chunks (e.g., 5 tokens per chunk)
+    # Stream output in chunks (here yielding every 5 tokens)
     chunk_size = 5
     for i in range(0, new_tokens.shape[0], chunk_size):
        current_response = tokenizer.decode(new_tokens[: i + chunk_size], skip_special_tokens=True)
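
For readers who want the surrounding context the diff omits, here is a minimal sketch of how app.py could look after this commit. Only the pad_token_id handling, model.eval(), the sampling arguments, and the chunked streaming loop come from the diff; the prompt construction, the max_new_tokens wiring, and the gr.ChatInterface hookup are illustrative assumptions, not the Space's actual code.

import spaces
import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer (as in the Space)
tokenizer = AutoTokenizer.from_pretrained("TheBloke/Chronoboros-33B-GPTQ")
model = AutoModelForCausalLM.from_pretrained("TheBloke/Chronoboros-33B-GPTQ", device_map="auto")

# Set a valid pad_token_id: many causal LMs ship without one, and generate()
# then warns or misbehaves when it needs a padding token.
model.generation_config.pad_token_id = tokenizer.eos_token_id
model.eval()  # Ensure the model is in evaluation mode

@spaces.GPU
def respond(message, history: list[tuple[str, str]], system_message, max_tokens, temperature, top_p):
    # Assumed prompt format; the real Space may build the prompt differently.
    prompt = system_message + "\n"
    for user_turn, assistant_turn in history:
        prompt += f"USER: {user_turn}\nASSISTANT: {assistant_turn}\n"
    prompt += f"USER: {message}\nASSISTANT:"

    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(model.device)
    with torch.no_grad():
        output_ids = model.generate(
            input_ids,
            max_new_tokens=max_tokens,
            temperature=temperature,
            top_p=top_p,
            do_sample=True,
            pad_token_id=tokenizer.eos_token_id,  # also pass it here to be safe
        )

    # Extract the new tokens (tokens generated after the prompt)
    new_tokens = output_ids[0][input_ids.shape[1]:]

    # Stream output in chunks (here yielding every 5 tokens)
    chunk_size = 5
    for i in range(0, new_tokens.shape[0], chunk_size):
        yield tokenizer.decode(new_tokens[: i + chunk_size], skip_special_tokens=True)

# Assumed UI wiring; the actual Space may expose different controls.
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(value="You are a helpful assistant.", label="System message"),
        gr.Slider(1, 2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(0.1, 2.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(0.1, 1.0, value=0.95, step=0.05, label="Top-p"),
    ],
)

if __name__ == "__main__":
    demo.launch()

Because respond() yields partial strings, gr.ChatInterface renders each yielded chunk as a progressive update, so decoding every 5 tokens gives a streaming feel even though generate() finishes before the loop starts.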