Chaerin5 committed on
Commit
b159308
·
1 Parent(s): 55fa8f8

enable zerogpu

Browse files
Files changed (1) hide show
  1. app.py +33 -13
app.py CHANGED
@@ -268,28 +268,48 @@ hands = mp_hands.Hands(
268
  # latent = opts.latent_scaling_factor * autoencoder.encode(image[None, ...]).sample()
269
  # return image[None, ...], latent
270
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
271
  @spaces.GPU(duration=60)
272
  def make_ref_cond(image):
 
 
273
  def initialize_and_process(image):
274
  os.environ["CUDA_VISIBLE_DEVICES"] = "0"
275
  torch.cuda.set_device(0)
276
- print("Initializing autoencoder in worker process")
 
277
  image = image.to("cuda")
278
  latent = opts.latent_scaling_factor * autoencoder.encode(image[None, ...]).sample()
279
- return image[None, ...], latent
280
-
281
- from multiprocessing import Process, Queue
282
- queue = Queue()
283
-
284
- def worker(image, queue):
285
- result = initialize_and_process(image)
286
- queue.put(result)
287
 
288
- process = Process(target=worker, args=(image, queue))
289
- process.start()
290
- process.join()
 
291
 
292
- return queue.get()
293
 
294
  def get_ref_anno(ref):
295
  print("inside get_ref_anno")
 
268
  # latent = opts.latent_scaling_factor * autoencoder.encode(image[None, ...]).sample()
269
  # return image[None, ...], latent
270
 
271
+ # @spaces.GPU(duration=60)
272
+ # def make_ref_cond(image):
273
+ # def initialize_and_process(image):
274
+ # os.environ["CUDA_VISIBLE_DEVICES"] = "0"
275
+ # torch.cuda.set_device(0)
276
+ # print("Initializing autoencoder in worker process")
277
+ # image = image.to("cuda")
278
+ # latent = opts.latent_scaling_factor * autoencoder.encode(image[None, ...]).sample()
279
+ # return image[None, ...], latent
280
+
281
+ # from multiprocessing import Process, Queue
282
+ # queue = Queue()
283
+
284
+ # def worker(image, queue):
285
+ # result = initialize_and_process(image)
286
+ # queue.put(result)
287
+
288
+ # process = Process(target=worker, args=(image, queue))
289
+ # process.start()
290
+ # process.join()
291
+
292
+ # return queue.get()
293
+
294
  @spaces.GPU(duration=60)
295
  def make_ref_cond(image):
296
+ result = []
297
+
298
  def initialize_and_process(image):
299
  os.environ["CUDA_VISIBLE_DEVICES"] = "0"
300
  torch.cuda.set_device(0)
301
+
302
+ print("Initializing autoencoder in worker thread")
303
  image = image.to("cuda")
304
  latent = opts.latent_scaling_factor * autoencoder.encode(image[None, ...]).sample()
305
+ result.append((image[None, ...], latent))
 
 
 
 
 
 
 
306
 
307
+ from threading import Thread
308
+ thread = Thread(target=initialize_and_process, args=(image,))
309
+ thread.start()
310
+ thread.join()
311
 
312
+ return result[0]
313
 
314
  def get_ref_anno(ref):
315
  print("inside get_ref_anno")