amildravid4292 committed on
Commit
9274044
·
verified ·
1 Parent(s): 8fb9cf8

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -5
app.py CHANGED
@@ -137,7 +137,7 @@ def sample_then_run(net):
137
  cfg = 3.0
138
  steps = 25
139
  image = inference(net, prompt, negative_prompt, cfg, steps, seed)
140
- return net, image
141
 
142
  @torch.no_grad()
143
  @spaces.GPU()
@@ -182,9 +182,9 @@ def inference(net, prompt, negative_prompt, guidance_scale, ddim_steps, seed):
182
  for i,t in enumerate(tqdm.tqdm(noise_scheduler.timesteps)):
183
  latent_model_input = torch.cat([latents] * 2)
184
  latent_model_input = noise_scheduler.scale_model_input(latent_model_input, timestep=t)
185
-
186
- with network:
187
- noise_pred = unet(latent_model_input, t, encoder_hidden_states=text_embeddings, timestep_cond= None).sample
188
 
189
  #guidance
190
  noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
@@ -511,7 +511,7 @@ with gr.Blocks(css="style.css") as demo:
511
  # outputs = [input_image, file_output])
512
 
513
 
514
- sample.click(fn=sample_then_run,inputs = [net], outputs=[net, input_image])
515
 
516
  submit.click(
517
  fn=edit_inference, inputs=[net, prompt, negative_prompt, cfg, steps, seed, injection_step, a1, a2, a3, a4], outputs=[net, gallery]
 
137
  cfg = 3.0
138
  steps = 25
139
  image = inference(net, prompt, negative_prompt, cfg, steps, seed)
140
+ return net,net,image
141
 
142
  @torch.no_grad()
143
  @spaces.GPU()
 
182
  for i,t in enumerate(tqdm.tqdm(noise_scheduler.timesteps)):
183
  latent_model_input = torch.cat([latents] * 2)
184
  latent_model_input = noise_scheduler.scale_model_input(latent_model_input, timestep=t)
185
+ with torch.no_grad():
186
+ with network:
187
+ noise_pred = unet(latent_model_input, t, encoder_hidden_states=text_embeddings, timestep_cond= None).sample
188
 
189
  #guidance
190
  noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
 
511
  # outputs = [input_image, file_output])
512
 
513
 
514
+ sample.click(fn=sample_then_run,inputs = [net], outputs=[net, file_output, input_image])
515
 
516
  submit.click(
517
  fn=edit_inference, inputs=[net, prompt, negative_prompt, cfg, steps, seed, injection_step, a1, a2, a3, a4], outputs=[net, gallery]