solitarycodefinement committed
Commit b70e6b4 · 1 Parent(s): 11a1ad9

christmas models

Files changed (1)
  1. app.py +7 -37
app.py CHANGED
@@ -157,10 +157,9 @@ class ImageEditor(object):
 
     def get_target_latent(self, source_latent, alter, generators):
         np_source_latent = source_latent.squeeze(0).cpu().detach().numpy()
-        edit = interface_gan_map[alter]
-        if not edit:
+        if alter == "None":
             return [source_latent.squeeze(0), ] * max((len(generators) - 1), 1)
-
+        edit = interface_gan_map[alter]
         projected_code_np = project_code_by_edit_name(np_source_latent, edit[0], edit[1])
         return torch.from_numpy(projected_code_np).float().to(self.device)
 
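This hunk fixes an ordering bug: the old code indexed `interface_gan_map` *before* checking the result, so an `alter` value of `"None"` with no map entry raises `KeyError` before `if not edit` can bail out. The new code tests for `"None"` first and only then touches the map. A minimal sketch of the difference; the map contents below are illustrative assumptions, not taken from app.py:

```python
# Hypothetical stand-in for interface_gan_map; the real (direction, strength)
# pairs in app.py are not shown in this diff.
interface_gan_map = {
    "Smiling": ("smile", 2.0),
    "Old": ("age", 4.0),
}

alter = "None"

# Old ordering: the lookup itself raises KeyError("None"),
# so `if not edit` never gets the chance to short-circuit.
try:
    edit = interface_gan_map[alter]
except KeyError as err:
    print(f"old ordering crashes: KeyError({err})")

# New ordering: check for "None" before touching the map.
if alter == "None":
    print("no edit requested; return the source latent unchanged")
else:
    direction, strength = interface_gan_map[alter]
    print(f"project along {direction!r} with strength {strength}")
```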
@@ -168,16 +167,10 @@ class ImageEditor(object):
     def edit_image(self, input, output_styles, edit_choices):
         return self.predict(input, output_styles, edit_choices=edit_choices)
 
-    @_pack_edits
-    def edit_video(self, input, output_styles, loop_styles, edit_choices):
-        return self.predict(input, output_styles, generate_video=True, loop_styles=loop_styles,
-                            edit_choices=edit_choices)
-
     def predict(
         self,
         input,  # Input image path
         output_styles,  # Style checkbox options.
-        generate_video=False,  # Generate a video instead of an output image
         loop_styles=False,  # Loop back to the initial style
         edit_choices=None,  # Optional dictionary with edit choice arguments
     ):
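With the keyword dropped from `predict`, any call site still passing `generate_video` now fails loudly instead of being silently ignored. A quick reproduction against a simplified stand-in signature (not the real method):

```python
# Simplified stand-in for ImageEditor.predict after this commit.
def predict(input, output_styles, loop_styles=False, edit_choices=None):
    return []

predict("face.jpg", ["style_a"])  # fine
try:
    predict("face.jpg", ["style_a"], generate_video=True)  # stale caller
except TypeError as err:
    print(err)  # predict() got an unexpected keyword argument 'generate_video'
```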
@@ -205,20 +198,6 @@ class ImageEditor(object):
 
         return output_paths
 
-    def generate_vid(self, generators, source_latent, target_latents, out_dir):
-
-        fps = 24
-
-        with tempfile.TemporaryDirectory() as dirpath:
-            generate_frames(source_latent, target_latents, generators, dirpath)
-            video_from_interpolations(fps, dirpath)
-
-            gen_path = os.path.join(dirpath, "out.mp4")
-            out_path = os.path.join(out_dir, "out.mp4")
-
-            shutil.copy2(gen_path, out_path)
-
-        return out_path
 
     def run_alignment(self, image_path):
         aligned_image = align_face(filepath=image_path, predictor=self.shape_predictor)
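One detail of the deleted `generate_vid` worth keeping in mind if it is ever restored: the rendered file survives only because `shutil.copy2` runs *inside* the `tempfile.TemporaryDirectory` block, before the scratch directory is wiped. A self-contained sketch of that pattern, with the app's `generate_frames` and `video_from_interpolations` calls stubbed out:

```python
import os
import shutil
import tempfile

def render_video(out_dir: str) -> str:
    """Render into a scratch directory, then copy the result out before
    the directory is deleted (mirrors the removed generate_vid)."""
    with tempfile.TemporaryDirectory() as dirpath:
        # Stub: the real code rendered frames and encoded dirpath/out.mp4 here.
        gen_path = os.path.join(dirpath, "out.mp4")
        with open(gen_path, "wb") as f:
            f.write(b"")  # placeholder bytes

        out_path = os.path.join(out_dir, "out.mp4")
        shutil.copy2(gen_path, out_path)  # must happen inside the `with`
    # dirpath no longer exists here; out_path does.
    return out_path

print(render_video("."))
```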
@@ -243,7 +222,7 @@ with blocks:
     )
     gr.Markdown(
         "<h4 style='font-size: 110%;margin-top:.5em'>Usage</h4><div>Upload an image of your face, pick your desired output styles, and apply StyleGAN-based editing.</div>"
-        "<div>Choose the edit image tab to create static images in all chosen styles. Choose the video tab in order to interpolate between all chosen styles</div><div>(To make it easier on the servers, we've limited video length. If you add too many styles (we recommend no more than 3!), they'll pass in the blink of an eye! 🤗)</div>"
+        "<div>Choose the edit image tab to create static images in all chosen styles.</div>"
     )
     with gr.Row():
         with gr.Column():
@@ -252,23 +231,14 @@
         with gr.Column():
             style_choice = gr.inputs.CheckboxGroup(choices=editor.get_style_list(), type="value",
                                                    label="Choose your styles!")
+            alter = gr.Dropdown(
+                choices=["None", "Masculine", "Feminine", "Smiling", "Frowning", "Young", "Old", "Short Hair",
+                         "Long Hair"], value="None")
+            img_button = gr.Button("Edit Image")
 
     with gr.Row():
-        with gr.Column():
-            img_button = gr.Button("Edit Image")
         img_output = gr.Gallery(label="Output Images")
 
-        with gr.Column():
-            gr.Markdown(
-                "Move the sliders to make the chosen attribute stronger (e.g. the person older) or leave at 0 to disable editing.")
-            gr.Markdown(
-                "If multiple options are provided, they will be used randomly between images (or sequentially for a video), <u>not</u> together.")
-            gr.Markdown(
-                "Please note that some directions may be entangled. For example, hair length adjustments are likely to also modify the perceived gender.")
-
-        alter = gr.Dropdown(
-            choices=["None", "Masculine", "Feminine", "Smiling", "Frowning", "Young", "Old", "Short Hair",
-                     "Long Hair"], value="None")
 
     img_button.click(fn=editor.edit_image, inputs=[alter, input_img, style_choice], outputs=img_output)
 
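For orientation, here is roughly how the widgets wire together after this commit. `editor` and `input_img` are defined outside this diff, so the stub callback, placeholder style list, and `gr.Image` definition below are assumptions (and the modern `gr.CheckboxGroup` stands in for the deprecated `gr.inputs` alias the app uses):

```python
import gradio as gr

def fake_edit_image(alter, input_path, styles):
    # Stub for editor.edit_image; the real method returns edited images.
    return []

with gr.Blocks() as blocks:
    with gr.Row():
        with gr.Column():
            input_img = gr.Image(type="filepath", label="Input image")  # assumed definition
        with gr.Column():
            style_choice = gr.CheckboxGroup(choices=["style_a", "style_b"],  # placeholder styles
                                            label="Choose your styles!")
            alter = gr.Dropdown(choices=["None", "Masculine", "Feminine", "Smiling", "Frowning",
                                         "Young", "Old", "Short Hair", "Long Hair"], value="None")
            img_button = gr.Button("Edit Image")

    with gr.Row():
        img_output = gr.Gallery(label="Output Images")

    # Same wiring and input order as the click() call in the diff.
    img_button.click(fn=fake_edit_image, inputs=[alter, input_img, style_choice],
                     outputs=img_output)

blocks.launch()
```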