paraphrase
app.py
CHANGED
@@ -60,6 +60,31 @@ def create_response_question_generation(input_str, max_length=64):
 
     return question_generation_tokenizer.decode(output[0])
 
+def create_response_paraphrase(input_str, max_length,num_return_sequences):
+    text = "paraphrase: " + input_str + " </s>"
+
+    encoding = paraphrase_tokenizer.encode_plus(text, pad_to_max_length=True, return_tensors="pt")
+    input_ids, attention_masks = encoding["input_ids"], encoding["attention_mask"]
+
+    outputs = paraphrase_model.generate(
+        input_ids=input_ids, attention_mask=attention_masks,
+        # max_length=256,
+        max_length=max_length,
+        do_sample=True,
+        top_k=120,
+        top_p=0.95,
+        early_stopping=True,
+        num_return_sequences=num_return_sequences,
+        repetition_penalty=1.5
+
+    )
+    result_output_str=""
+    for output in outputs:
+        line = paraphrase_tokenizer.decode(output, skip_special_tokens=True, clean_up_tokenization_spaces=True)
+        result_output_str=result_output_str+line+"<br/>"
+        # results.append(line)
+    # return results
+    return result_output_str
 
 def create_response(input_str,
                     temperature,
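
Note on the hunk above: create_response_paraphrase relies on paraphrase_tokenizer and paraphrase_model, which are defined elsewhere in app.py and are not part of this diff. The sketch below shows one way they could be initialized with Hugging Face Transformers; the checkpoint name is an assumption for illustration, not something this commit specifies. Also, pad_to_max_length=True is deprecated in recent Transformers releases in favor of padding="max_length".

# Sketch only (not part of the commit): a possible setup for the globals the
# new function uses, plus an example call. The checkpoint name is assumed.
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

PARAPHRASE_CHECKPOINT = "Vamsi/T5_Paraphrase_Paws"  # assumed T5-style paraphrase model

paraphrase_tokenizer = AutoTokenizer.from_pretrained(PARAPHRASE_CHECKPOINT)
paraphrase_model = AutoModelForSeq2SeqLM.from_pretrained(PARAPHRASE_CHECKPOINT)

# Example call of the function added in this hunk; the result is a single HTML
# string with the paraphrases separated by <br/> tags.
html = create_response_paraphrase(
    "The quick brown fox jumps over the lazy dog.",
    max_length=64,
    num_return_sequences=2,
)
print(html)
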
@@ -108,21 +133,19 @@ interface_original = gr.Interface(fn=create_response_question_generation,
 
 
 
-interface_untethered_model = gr.Interface(fn=
-                    title="
-                    description="
+interface_untethered_model = gr.Interface(fn=create_response_paraphrase,
+                    title="Paraphrase",
+                    description="Paraphrase sentences",
                     #examples=examples,
                     inputs=[
-                        gr.Textbox(label="input text here", lines=3),
+                        gr.Textbox(label="input text here", lines=3, value="It is truly a great cosmic paradox that one of the best teachers in all of life turns out to be death. No person or situation could ever teach you as much as death has to teach you. "),
+
                         gr.Number(
-                            label="
-
-                            " makes the output more deterministic and focused",
-                            value=default_temperature),
+                            label="max length",
+                            value=512),
                         gr.Number(
-                            label="
-
-                        gr.Textbox(label="model", lines=3, value="untethered_model",visible=False)
+                            label="num of responses",
+                            value=2)
                     ],
                     outputs="html"
 )
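
Note on the second hunk: it only redefines interface_untethered_model to call the new paraphrase function; the hunk header shows that interface_original (question generation) already exists, and how the two interfaces are actually mounted is outside this diff. Below is a minimal sketch, assuming Gradio's TabbedInterface, of one way both could be served from the Space; the tab labels are illustrative.

# Sketch only (not part of the commit): serving both interfaces together.
import gradio as gr

demo = gr.TabbedInterface(
    [interface_original, interface_untethered_model],
    ["Question generation", "Paraphrase"],
)

if __name__ == "__main__":
    demo.launch()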