emilios committed
Commit 548adb2 · verified · Parent(s): d149eaf

Update app.py


cuda -> cpu
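
In practice the change keeps every tensor on the CPU: the SDXL pipeline is loaded without .to("cuda"), the safety-checker inputs stay on the host, and the UNet weights downloaded from the Hub are read with device="cpu". A minimal sketch of the resulting load path, assuming the base, repo and checkpoint values defined earlier in app.py (they are not part of this diff):

import torch
from diffusers import StableDiffusionXLPipeline
from huggingface_hub import hf_hub_download
from safetensors.torch import load_file

# Assumed identifiers; the real values live higher up in app.py and are not shown in this diff.
base = "stabilityai/stable-diffusion-xl-base-1.0"
repo = "ByteDance/SDXL-Lightning"
checkpoint = "sdxl_lightning_4step_unet.safetensors"

# Load the pipeline without moving it to CUDA; diffusers keeps the weights on the CPU by default.
# float16 mirrors the diff, but many CPU ops lack half-precision kernels, so float32 is often
# the safer dtype for CPU-only inference.
pipe = StableDiffusionXLPipeline.from_pretrained(base, torch_dtype=torch.float16, variant="fp16")

# Read the downloaded UNet weights straight onto the CPU, as the commit does.
pipe.unet.load_state_dict(load_file(hf_hub_download(repo, checkpoint), device="cpu"))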

Files changed (1)
  1. app.py +9 -6
app.py CHANGED
@@ -20,10 +20,10 @@ checkpoints = {
 }
 loaded = None
 
-
+pipe = StableDiffusionXLPipeline.from_pretrained(base, torch_dtype=torch.float16, variant="fp16")
 # Ensure model and scheduler are initialized in GPU-enabled function
-if torch.cuda.is_available():
-    pipe = StableDiffusionXLPipeline.from_pretrained(base, torch_dtype=torch.float16, variant="fp16").to("cuda")
+#if torch.cuda.is_available():
+#    pipe = StableDiffusionXLPipeline.from_pretrained(base, torch_dtype=torch.float16, variant="fp16").to("cuda")
 
 if SAFETY_CHECKER:
     from safety_checker import StableDiffusionSafetyChecker
@@ -39,10 +39,12 @@ if SAFETY_CHECKER:
     def check_nsfw_images(
         images: list[Image.Image],
     ) -> tuple[list[Image.Image], list[bool]]:
-        safety_checker_input = feature_extractor(images, return_tensors="pt").to("cuda")
+        #safety_checker_input = feature_extractor(images, return_tensors="pt").to("cuda")
+        safety_checker_input = feature_extractor(images, return_tensors="pt")
         has_nsfw_concepts = safety_checker(
             images=[images],
-            clip_input=safety_checker_input.pixel_values.to("cuda")
+            #clip_input=safety_checker_input.pixel_values.to("cuda")
+            clip_input=safety_checker_input.pixel_values
         )
 
         return images, has_nsfw_concepts
@@ -58,7 +60,8 @@ def generate_image(prompt, ckpt):
 
     if loaded != num_inference_steps:
         pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config, timestep_spacing="trailing", prediction_type="sample" if num_inference_steps==1 else "epsilon")
-        pipe.unet.load_state_dict(load_file(hf_hub_download(repo, checkpoint), device="cuda"))
+        #pipe.unet.load_state_dict(load_file(hf_hub_download(repo, checkpoint), device="cuda"))
+        pipe.unet.load_state_dict(load_file(hf_hub_download(repo, checkpoint), device="cpu"))
         loaded = num_inference_steps
 
     results = pipe(prompt, num_inference_steps=num_inference_steps, guidance_scale=0)
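
If the Space should still use a GPU when one is available, a common alternative to hard-coding "cpu" is to pick the device and dtype once and reuse them everywhere. This is a generic PyTorch fallback pattern, not what this commit does; base is the same assumed checkpoint id as in the sketch above:

import torch
from diffusers import StableDiffusionXLPipeline

# Fall back to CPU + float32 when CUDA is absent (half-precision kernels are incomplete on CPU).
device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.float16 if device == "cuda" else torch.float32

base = "stabilityai/stable-diffusion-xl-base-1.0"  # assumed value, not shown in the diff
pipe = StableDiffusionXLPipeline.from_pretrained(base, torch_dtype=dtype).to(device)

With this pattern the safety-checker inputs would also follow the pipeline, e.g. feature_extractor(images, return_tensors="pt").to(device), instead of the commented-out hard-coded .to("cuda") calls.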