craftgamesnetwork committed
Commit 064cc42 · verified · 1 parent: b35eefe

Update app.py

Files changed (1):
  1. app.py +1 -5
app.py CHANGED
@@ -12,7 +12,7 @@ import spaces
 import torch
 from PIL import Image
 from io import BytesIO
-from diffusers import AutoencoderKL, DiffusionPipeline, AutoPipelineForImage2Image, EulerAncestralDiscreteScheduler
+from diffusers import AutoencoderKL, DiffusionPipeline, AutoPipelineForImage2Image
 
 DESCRIPTION = "# Run any LoRA or SD Model"
 if not torch.cuda.is_available():
@@ -61,24 +61,20 @@ def generate(
     if not use_img2img:
         pipe = DiffusionPipeline.from_pretrained(model, torch_dtype=torch.float16)
         pipe.enable_model_cpu_offload()
-        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
 
     if use_vae:
         vae = AutoencoderKL.from_pretrained(vaecall, torch_dtype=torch.float16)
         pipe = DiffusionPipeline.from_pretrained(model, vae=vae, torch_dtype=torch.float16)
         pipe.enable_model_cpu_offload()
-        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
 
     if use_img2img:
         pipe = AutoPipelineForImage2Image.from_pretrained(model, torch_dtype=torch.float16)
         pipe.enable_model_cpu_offload()
-        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
 
     if use_vae:
         vae = AutoencoderKL.from_pretrained(vaecall, torch_dtype=torch.float16)
         pipe = AutoPipelineForImage2Image.from_pretrained(model, vae=vae, torch_dtype=torch.float16)
         pipe.enable_model_cpu_offload()
-        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
 
     response = requests.get(url)
     init_image = Image.open(BytesIO(response.content)).convert("RGB")
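The removed lines were the usual diffusers pattern for overriding a pipeline's scheduler. For reference, a minimal sketch of that pattern (the checkpoint ID below is an illustrative placeholder, not the Space's model):

import torch
from diffusers import DiffusionPipeline, EulerAncestralDiscreteScheduler

# Illustrative checkpoint; any Stable Diffusion model works the same way.
pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
)
pipe.enable_model_cpu_offload()  # requires accelerate

# This is the line the commit deletes: rebuild the scheduler from the pipeline's
# existing scheduler config so Euler Ancestral replaces the checkpoint's default.
pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)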
 
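After this commit, generate() no longer forces Euler Ancestral and simply keeps the checkpoint's own scheduler, only varying the pipeline class and the optional VAE. Roughly equivalent logic, as a self-contained sketch (the model/VAE IDs and the two flags are placeholders, not values from the Space):

import torch
from diffusers import AutoencoderKL, AutoPipelineForImage2Image, DiffusionPipeline

model = "stabilityai/stable-diffusion-xl-base-1.0"   # example checkpoint
vaecall = "madebyollin/sdxl-vae-fp16-fix"            # example VAE repo
use_img2img = False
use_vae = True

if not use_img2img:
    pipe = DiffusionPipeline.from_pretrained(model, torch_dtype=torch.float16)
else:
    pipe = AutoPipelineForImage2Image.from_pretrained(model, torch_dtype=torch.float16)

if use_vae:
    # Reload the pipeline with a custom VAE, mirroring the use_vae branches in generate().
    vae = AutoencoderKL.from_pretrained(vaecall, torch_dtype=torch.float16)
    cls = AutoPipelineForImage2Image if use_img2img else DiffusionPipeline
    pipe = cls.from_pretrained(model, vae=vae, torch_dtype=torch.float16)

# Offload submodules to CPU between forward passes to reduce peak VRAM usage
# (requires accelerate); the pipeline now runs with the checkpoint's default scheduler.
pipe.enable_model_cpu_offload()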