crystal99 committed on
Commit
f0ffc11
·
verified ·
1 Parent(s): 2395396

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -1
app.py CHANGED
@@ -38,7 +38,8 @@ model.to(device)
38
  def generate_text(prompt):
39
  # Prevent gradient calculation to speed up inference
40
  with torch.no_grad():
41
- inputs = tokenizer(f"<|STARTOFTEXT|> <|USER|> {prompt} <|BOT|>", return_tensors="pt").to(device)
 
42
  outputs = model.generate(inputs['input_ids'], max_length=100, num_return_sequences=1, do_sample=False)
43
  generated_text = tokenizer.decode(outputs[0], skip_special_tokens=False)
44
  result2 = generated_text.split("<|ENDOFTEXT|>")
 
38
  def generate_text(prompt):
39
  # Prevent gradient calculation to speed up inference
40
  with torch.no_grad():
41
+ # inputs = tokenizer(f"<|STARTOFTEXT|> <|USER|> {prompt} <|BOT|>", return_tensors="pt").to(device)
42
+ inputs = tokenizer(f"{prompt}", return_tensors="pt").to(device)
43
  outputs = model.generate(inputs['input_ids'], max_length=100, num_return_sequences=1, do_sample=False)
44
  generated_text = tokenizer.decode(outputs[0], skip_special_tokens=False)
45
  result2 = generated_text.split("<|ENDOFTEXT|>")