Ahsen Khaliq committed on
Commit
a528f1f
·
1 Parent(s): 4eb2fb0

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -0
app.py CHANGED
@@ -198,6 +198,8 @@ print('Using device:', device)
198
  model = load_vqgan_model(args.vqgan_config, args.vqgan_checkpoint).to(device)
199
  perceptor = clip.load(args.clip_model, jit=False)[0].eval().requires_grad_(False).to(device)
200
  def inference(text, seed, step_size, max_iterations, width, height, init_image, init_weight, target_images, cutn, cut_pow):
 
 
201
  all_frames = []
202
  size=[width, height]
203
  texts = text
 
198
  model = load_vqgan_model(args.vqgan_config, args.vqgan_checkpoint).to(device)
199
  perceptor = clip.load(args.clip_model, jit=False)[0].eval().requires_grad_(False).to(device)
200
  def inference(text, seed, step_size, max_iterations, width, height, init_image, init_weight, target_images, cutn, cut_pow):
201
+ torch.cuda.empty_cache()
202
+ torch.cuda.memory_summary(device=None, abbreviated=False)
203
  all_frames = []
204
  size=[width, height]
205
  texts = text