zmbfeng committed (verified)
Commit a3a369c · Parent(s): a16ea65

Update app.py

Files changed (1):
  1. app.py +11 -5
app.py CHANGED
@@ -46,10 +46,13 @@ def create_response_untethered_paraphrased(input_str,
     # num_beams=num_beams,
     # num_return_sequences=num_return_sequences)[0])
 
-    input_ids = tokenizer.encode(input_str + tokenizer.eos_token, return_tensors="pt")
+    #input_ids = tokenizer.encode(input_str + tokenizer.eos_token, return_tensors="pt")
+    encoded = tokenizer.encode_plus(input_str + tokenizer.eos_token, return_tensors="pt")
+    input_ids = encoded["input_ids"]
+    attention_mask = encoded["attention_mask"]
     #output_ids = fine_tuned_model.generate(input_ids,do_sample=True, max_length=100, temperature=0.2, top_p=0.9, repetition_penalty=1.5,num_return_sequences=6)
     #output_ids = untethered_paraphrased_model.generate(input_ids,do_sample=do_sample, max_length=100, temperature=temperature, top_p=top_p, top_k=top_k, repetition_penalty=repetition_penalty,num_return_sequences=num_return_sequences, num_beams = num_beams)
-    output_ids = untethered_paraphrased_model.generate(input_ids,do_sample=True, max_length=100, temperature=temperature, top_p=top_p, top_k=top_k, repetition_penalty=repetition_penalty,num_return_sequences=num_return_sequences)
+    output_ids = untethered_paraphrased_model.generate(input_ids,pad_token_id=tokenizer.eos_token_id,do_sample=True,attention_mask=attention_mask, max_length=100, temperature=temperature, top_p=top_p, top_k=top_k, repetition_penalty=repetition_penalty,num_return_sequences=num_return_sequences)
     outputs = ""
     for output_id in output_ids:
         output = tokenizer.decode(output_id, skip_special_tokens=True)
@@ -99,7 +102,7 @@ def create_response_untethered(input_str,
 
 
     #output_ids = fine_tuned_model.generate(input_ids,do_sample=True, max_length=100, temperature=0.2, top_p=0.9, repetition_penalty=1.5,num_return_sequences=6)
-    output_ids = untethered_model.generate(input_ids,do_sample=do_sample, attention_mask=attention_mask, max_length=100, temperature=temperature, top_p=top_p, top_k=top_k, repetition_penalty=repetition_penalty,num_return_sequences=num_return_sequences, num_beams = num_beams)
+    output_ids = untethered_model.generate(input_ids,pad_token_id=tokenizer.eos_token_id,do_sample=do_sample, attention_mask=attention_mask, max_length=100, temperature=temperature, top_p=top_p, top_k=top_k, repetition_penalty=repetition_penalty,num_return_sequences=num_return_sequences, num_beams = num_beams)
     outputs = ""
     for output_id in output_ids:
         output = tokenizer.decode(output_id, skip_special_tokens=True)
@@ -140,9 +143,12 @@ def create_response_original(input_str,
     # num_beams=num_beams,
     # num_return_sequences=num_return_sequences)[0])
 
-    input_ids = tokenizer.encode(input_str + tokenizer.eos_token, return_tensors="pt")
+    #input_ids = tokenizer.encode(input_str + tokenizer.eos_token, return_tensors="pt")
+    encoded = tokenizer.encode_plus(input_str + tokenizer.eos_token, return_tensors="pt")
+    input_ids = encoded["input_ids"]
+    attention_mask = encoded["attention_mask"]
     #output_ids = fine_tuned_model.generate(input_ids,do_sample=True, max_length=100, temperature=0.2, top_p=0.9, repetition_penalty=1.5,num_return_sequences=6)
-    output_ids = original_model.generate(input_ids,do_sample=do_sample, max_length=100, temperature=temperature, top_p=top_p, top_k=top_k, repetition_penalty=repetition_penalty,num_return_sequences=num_return_sequences, num_beams = num_beams)
+    output_ids = original_model.generate(input_ids,pad_token_id=tokenizer.eos_token_id,do_sample=do_sample,attention_mask=attention_mask, max_length=100, temperature=temperature, top_p=top_p, top_k=top_k, repetition_penalty=repetition_penalty,num_return_sequences=num_return_sequences, num_beams = num_beams)
     outputs = ""
     for output_id in output_ids:
         output = tokenizer.decode(output_id, skip_special_tokens=True)
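
In short, the commit applies the same pattern to the create_response_* functions: switch from tokenizer.encode to tokenizer.encode_plus so an attention_mask is available alongside input_ids, then pass that mask plus pad_token_id=tokenizer.eos_token_id to generate(). This is the usual way to silence the Transformers warning about a missing attention mask and pad token on GPT-2-family models, which define no pad token of their own. A minimal standalone sketch of the pattern, assuming a DialoGPT checkpoint and sampling values for illustration (app.py loads its own models and receives these parameters as function arguments):

from transformers import AutoModelForCausalLM, AutoTokenizer

# Illustrative checkpoint; app.py loads its own fine-tuned models.
tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-small")
model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-small")

input_str = "Hello, how are you?"

# encode_plus returns both input_ids and attention_mask; plain encode()
# returns only the ids, which is what triggered the warning.
encoded = tokenizer.encode_plus(input_str + tokenizer.eos_token, return_tensors="pt")

output_ids = model.generate(
    encoded["input_ids"],
    attention_mask=encoded["attention_mask"],
    # GPT-2-family tokenizers define no pad token; reusing the EOS id is
    # the standard workaround so generate() can pad batched outputs.
    pad_token_id=tokenizer.eos_token_id,
    do_sample=True,
    max_length=100,
    temperature=0.7,  # illustrative values; app.py passes these in
    top_p=0.9,
    top_k=50,
    repetition_penalty=1.5,
    num_return_sequences=3,
)

for output_id in output_ids:
    print(tokenizer.decode(output_id, skip_special_tokens=True))

With do_sample=True and num_beams left at its default of 1, generate() draws num_return_sequences independent samples, so beam search is not required for multiple candidates.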