zmbfeng committed
Commit 621db44 · verified · 1 Parent(s): a3a369c

Update app.py

Files changed (1):
  1. app.py +3 -3
app.py CHANGED
@@ -52,7 +52,7 @@ def create_response_untethered_paraphrased(input_str,
     attention_mask = encoded["attention_mask"]
     #output_ids = fine_tuned_model.generate(input_ids,do_sample=True, max_length=100, temperature=0.2, top_p=0.9, repetition_penalty=1.5,num_return_sequences=6)
     #output_ids = untethered_paraphrased_model.generate(input_ids,do_sample=do_sample, max_length=100, temperature=temperature, top_p=top_p, top_k=top_k, repetition_penalty=repetition_penalty,num_return_sequences=num_return_sequences, num_beams = num_beams)
-    output_ids = untethered_paraphrased_model.generate(input_ids,pad_token_id=tokenizer.eos_token_id,do_sample=True,attention_mask=attention_mask, max_length=100, temperature=temperature, top_p=top_p, top_k=top_k, repetition_penalty=repetition_penalty,num_return_sequences=num_return_sequences)
+    output_ids = untethered_paraphrased_model.generate(input_ids,pad_token_id=tokenizer.eos_token_id,do_sample=True,attention_mask=attention_mask, max_length=100, temperature=temperature, top_p=top_p, repetition_penalty=repetition_penalty,num_return_sequences=num_return_sequences)
     outputs = ""
     for output_id in output_ids:
         output = tokenizer.decode(output_id, skip_special_tokens=True)
@@ -102,7 +102,7 @@ def create_response_untethered(input_str,


     #output_ids = fine_tuned_model.generate(input_ids,do_sample=True, max_length=100, temperature=0.2, top_p=0.9, repetition_penalty=1.5,num_return_sequences=6)
-    output_ids = untethered_model.generate(input_ids,pad_token_id=tokenizer.eos_token_id,do_sample=do_sample, attention_mask=attention_mask, max_length=100, temperature=temperature, top_p=top_p, top_k=top_k, repetition_penalty=repetition_penalty,num_return_sequences=num_return_sequences, num_beams = num_beams)
+    output_ids = untethered_model.generate(input_ids,pad_token_id=tokenizer.eos_token_id,do_sample=do_sample, attention_mask=attention_mask, max_length=100, temperature=temperature, top_p=top_p, repetition_penalty=repetition_penalty,num_return_sequences=num_return_sequences)
     outputs = ""
     for output_id in output_ids:
         output = tokenizer.decode(output_id, skip_special_tokens=True)
@@ -148,7 +148,7 @@ def create_response_original(input_str,
     input_ids = encoded["input_ids"]
     attention_mask = encoded["attention_mask"]
     #output_ids = fine_tuned_model.generate(input_ids,do_sample=True, max_length=100, temperature=0.2, top_p=0.9, repetition_penalty=1.5,num_return_sequences=6)
-    output_ids = original_model.generate(input_ids,pad_token_id=tokenizer.eos_token_id,do_sample=do_sample,attention_mask=attention_mask, max_length=100, temperature=temperature, top_p=top_p, top_k=top_k, repetition_penalty=repetition_penalty,num_return_sequences=num_return_sequences, num_beams = num_beams)
+    output_ids = original_model.generate(input_ids,pad_token_id=tokenizer.eos_token_id,do_sample=do_sample,attention_mask=attention_mask, max_length=100, temperature=temperature, top_p=top_p, repetition_penalty=repetition_penalty,num_return_sequences=num_return_sequences)
     outputs = ""
     for output_id in output_ids:
         output = tokenizer.decode(output_id, skip_special_tokens=True)
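All three hunks make the same change: top_k (and, in the two calls that passed it, num_beams) is removed from generate(), so decoding relies on temperature/top-p sampling plus the generation config's remaining defaults. A minimal runnable sketch of the resulting call is below, assuming a stock gpt2 checkpoint and concrete sampling values in place of the app's models and function parameters (neither appears in this diff).

# Hypothetical stand-in for the generate() call as it looks after this
# commit. "gpt2" replaces the app's untethered_* / original_model
# checkpoints, and the literal sampling values replace the function
# parameters (temperature, top_p, ...); both are assumptions.
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

encoded = tokenizer("Hello, how are you?", return_tensors="pt")
output_ids = model.generate(
    encoded["input_ids"],
    attention_mask=encoded["attention_mask"],  # avoids the missing-mask warning
    pad_token_id=tokenizer.eos_token_id,       # GPT-2 has no pad token; reuse EOS
    do_sample=True,                            # multinomial sampling, not beam search
    max_length=100,
    temperature=0.7,
    top_p=0.9,                                 # explicit top_k no longer passed
    repetition_penalty=1.5,
    num_return_sequences=3,
)
for output_id in output_ids:
    print(tokenizer.decode(output_id, skip_special_tokens=True))

With do_sample=True and the default num_beams=1, num_return_sequences simply draws that many independent samples, which is why dropping num_beams is consistent here. Note that omitting top_k does not disable top-k filtering outright; generation falls back to the model's generation-config default (50 unless the checkpoint overrides it).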