zmbfeng committed
Commit ce249e6 · verified · 1 Parent(s): bebb895

Update app.py

Files changed (1)
app.py +1 -1
app.py CHANGED
@@ -44,7 +44,7 @@ def create_response_original(input_str,
     input_ids = tokenizer.encode(input_str + tokenizer.eos_token, return_tensors="pt")
     #output_ids = fine_tuned_model.generate(input_ids,do_sample=True, max_length=100, temperature=0.2, top_p=0.9, repetition_penalty=1.5,num_return_sequences=6)
     output_ids = fine_tuned_model.generate(input_ids,do_sample=True, max_length=100, temperature=temperature, top_p=top_p, repetition_penalty=repetition_penalty,num_return_sequences=num_return_sequences)
-    outputs = []]
+    outputs = []
     for output_id in output_ids:
         output = tokenizer.decode(output_id, skip_special_tokens=True)
         outputs.append(output)
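
For context, a minimal, self-contained sketch of the function this hunk patches. The checkpoint name ("microsoft/DialoGPT-small") is a stand-in assumption; the Space loads its own fine_tuned_model, which is not part of this diff. The default argument values mirror the commented-out call in the hunk, and pad_token_id is set only to silence the usual GPT-2-style padding warning; both are assumptions, not code from the repo.

# Hypothetical sketch -- checkpoint name, default values, and pad_token_id
# are assumptions; only the function body mirrors the patched code.
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-small")
fine_tuned_model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-small")

def create_response_original(input_str, temperature=0.2, top_p=0.9,
                             repetition_penalty=1.5, num_return_sequences=6):
    # Encode the prompt, terminated with EOS as in the diff.
    input_ids = tokenizer.encode(input_str + tokenizer.eos_token,
                                 return_tensors="pt")
    output_ids = fine_tuned_model.generate(
        input_ids,
        do_sample=True,
        max_length=100,
        temperature=temperature,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        num_return_sequences=num_return_sequences,
        pad_token_id=tokenizer.eos_token_id,  # assumption: suppresses the pad warning
    )
    # The commit fixes a stray "]" here ("outputs = []]" was a SyntaxError).
    outputs = []
    for output_id in output_ids:
        outputs.append(tokenizer.decode(output_id, skip_special_tokens=True))
    return outputs

print(create_response_original("Hello, how are you?"))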