YiftachEde committed
Commit 818fb4f · 1 Parent(s): fd20b88
Files changed (2):
  1. app.py +2 -0
  2. zero123plus/pipeline.py +5 -0
app.py CHANGED
@@ -13,6 +13,7 @@ from shap_e.diffusion.sample import sample_latents
 from shap_e.diffusion.gaussian_diffusion import diffusion_from_config
 from shap_e.models.download import load_model, load_config
 from shap_e.util.notebooks import create_pan_cameras, decode_latent_images, create_custom_cameras
+import spaces
 
 from src.utils.train_util import instantiate_from_config
 from src.utils.camera_util import (
@@ -193,6 +194,7 @@ class ShapERenderer:
         self.diffusion = diffusion_from_config(load_config('diffusion'))
         print("Shap-E models loaded!")
 
+    @spaces.GPU(duration=60)
     def generate_views(self, prompt, guidance_scale=15.0, num_steps=64):
         # Generate latents using the text-to-3D model
         batch_size = 1
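
Both files follow the same Hugging Face ZeroGPU pattern: import spaces at module level, then put @spaces.GPU(duration=60) on each entry point that needs CUDA, so the Space is granted a GPU for up to 60 seconds per call. A minimal sketch of the pattern, assuming a Space running on ZeroGPU hardware (the model and function here are illustrative placeholders, not code from this repo):

# Minimal sketch of the ZeroGPU pattern this commit applies.
# Assumes a Hugging Face Space on ZeroGPU hardware; `model` and
# `run` are stand-ins, not names from this repository.
import spaces
import torch

model = torch.nn.Linear(8, 8)  # loaded on CPU at process startup

@spaces.GPU(duration=60)  # request a GPU for at most 60 s per call
def run(x: torch.Tensor) -> torch.Tensor:
    # CUDA is only guaranteed to be available inside the decorated call.
    device = torch.device("cuda")
    return model.to(device)(x.to(device)).cpu()

Outside the decorated function the process runs on CPU only, which is why the decorator goes on generate_views rather than on model loading.
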
zero123plus/pipeline.py CHANGED
@@ -31,6 +31,7 @@ from diffusers.models.attention_processor import (
     AttnProcessor2_0,
 )
 from diffusers.utils.import_utils import is_xformers_available
+import spaces
 
 
 def extract_into_tensor(a, t, x_shape):
@@ -416,6 +417,7 @@ class Zero123PlusPipeline(diffusers.StableDiffusionPipeline):
         image = self.vae.encode(image).latent_dist.sample()
         return image
 
+    @spaces.GPU(duration=60)
     @torch.no_grad()
     def edit_latents(
         self,
@@ -530,6 +532,7 @@ class Zero123PlusPipeline(diffusers.StableDiffusionPipeline):
         latents = scale_latents(latents)
         return latents
 
+    @spaces.GPU(duration=60)
     @torch.no_grad()
     def sdedit(
         self,
@@ -619,6 +622,7 @@ class Zero123PlusPipeline(diffusers.StableDiffusionPipeline):
 
         return ImagePipelineOutput(images=image)
 
+    @spaces.GPU(duration=60)
     @torch.no_grad()
     def refine(
         self,
@@ -1030,6 +1034,7 @@ class Zero123PlusPipeline(diffusers.StableDiffusionPipeline):
             images=image, nsfw_content_detected=has_nsfw_concept
         )
 
+    @spaces.GPU(duration=60)
    @torch.no_grad()
    def __call__(
        self,
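
One detail worth noting in the pipeline.py hunks: @spaces.GPU(duration=60) is stacked above @torch.no_grad(), so the GPU is acquired first and the wrapped inference then runs with autograd disabled. A minimal sketch of that stacking on an instance method, as the commit places it (the class name and method body below are placeholders):

# Minimal sketch of the decorator stacking used in the pipeline.py hunks.
# `PipelineSketch` and its body are illustrative, not code from this repo.
import spaces
import torch

class PipelineSketch:
    @spaces.GPU(duration=60)  # outermost: allocate the GPU for this call
    @torch.no_grad()          # inner: run the body without autograd
    def __call__(self, latents: torch.Tensor) -> torch.Tensor:
        return (latents.to("cuda") * 0.5).cpu()

Keeping @spaces.GPU outermost means the 60-second GPU window covers the entire no_grad inference body, matching the ordering used on edit_latents, sdedit, refine, and __call__ above.
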