Chaerin5 committed on
Commit bcfe92f · 1 Parent(s): b159308

enable zerogpu

Files changed (2):
  1. app.py   +9 -54
  2. vqvae.py +1 -0
app.py CHANGED
@@ -256,60 +256,15 @@ hands = mp_hands.Hands(
     min_detection_confidence=0.1,
 )
 
-# @spaces.GPU(duration=60)
-# def make_ref_cond(
-#     image
-# ):
-#     print("ready to run autoencoder")
-#     # print(f"image.device: {image.device}, type(image): {type(image)}")
-#     os.environ["CUDA_VISIBLE_DEVICES"] = "0"
-#     torch.cuda.set_device(0)
-#     image = image.to("cuda")
-#     latent = opts.latent_scaling_factor * autoencoder.encode(image[None, ...]).sample()
-#     return image[None, ...], latent
-
-# @spaces.GPU(duration=60)
-# def make_ref_cond(image):
-#     def initialize_and_process(image):
-#         os.environ["CUDA_VISIBLE_DEVICES"] = "0"
-#         torch.cuda.set_device(0)
-#         print("Initializing autoencoder in worker process")
-#         image = image.to("cuda")
-#         latent = opts.latent_scaling_factor * autoencoder.encode(image[None, ...]).sample()
-#         return image[None, ...], latent
-
-#     from multiprocessing import Process, Queue
-#     queue = Queue()
-
-#     def worker(image, queue):
-#         result = initialize_and_process(image)
-#         queue.put(result)
-
-#     process = Process(target=worker, args=(image, queue))
-#     process.start()
-#     process.join()
-
-#     return queue.get()
-
-@spaces.GPU(duration=60)
-def make_ref_cond(image):
-    result = []
-
-    def initialize_and_process(image):
-        os.environ["CUDA_VISIBLE_DEVICES"] = "0"
-        torch.cuda.set_device(0)
-
-        print("Initializing autoencoder in worker thread")
-        image = image.to("cuda")
-        latent = opts.latent_scaling_factor * autoencoder.encode(image[None, ...]).sample()
-        result.append((image[None, ...], latent))
-
-    from threading import Thread
-    thread = Thread(target=initialize_and_process, args=(image,))
-    thread.start()
-    thread.join()
-
-    return result[0]
+def make_ref_cond(
+    image
+):
+    print("ready to run autoencoder")
+    # print(f"image.device: {image.device}, type(image): {type(image)}")
+    image = image.to("cuda")
+    latent = opts.latent_scaling_factor * autoencoder.encode(image[None, ...]).sample()
+    return image[None, ...], latent
+
 
 def get_ref_anno(ref):
     print("inside get_ref_anno")
vqvae.py CHANGED
@@ -49,6 +49,7 @@ class Autoencoder(nn.Module):
         # embedding space
         self.post_quant_conv = nn.Conv2d(emb_channels, z_channels, 1)
 
+    @spaces.GPU
     def encode(self, img: torch.Tensor) -> "GaussianDistribution":
         """
         ### Encode images to latent representation
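
For reference, a self-contained sketch of decorating a method the way this hunk does, assuming import spaces already exists at the top of vqvae.py (the import is not visible in this hunk); TinyAutoencoder is a hypothetical stand-in for the real Autoencoder:

import spaces
import torch
import torch.nn as nn

class TinyAutoencoder(nn.Module):       # hypothetical stand-in for vqvae.Autoencoder
    def __init__(self):
        super().__init__()
        self.conv = nn.Conv2d(3, 4, 1)  # toy layer in place of the real encoder

    @spaces.GPU                         # GPU is requested only while encode() runs
    def encode(self, img: torch.Tensor) -> torch.Tensor:
        self.to("cuda")                 # assumption: weights are moved on first GPU call
        return self.conv(img.to("cuda"))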