craftgamesnetwork committed on
Commit
89494cf
·
verified ·
1 Parent(s): 561e535

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -5
app.py CHANGED
@@ -12,7 +12,7 @@ import spaces
12
  import torch
13
  from PIL import Image
14
  from io import BytesIO
15
- from diffusers import AutoencoderKL, DiffusionPipeline, AutoPipelineForImage2Image, DPMSolverMultistepScheduler
16
 
17
  DESCRIPTION = "# Run any LoRA or SD Model"
18
  if not torch.cuda.is_available():
@@ -61,24 +61,20 @@ def generate(
61
  if not use_img2img:
62
  pipe = DiffusionPipeline.from_pretrained(model, torch_dtype=torch.float16)
63
  pipe.enable_model_cpu_offload()
64
- pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
65
 
66
  if use_vae:
67
  vae = AutoencoderKL.from_pretrained(vaecall, torch_dtype=torch.float16)
68
  pipe = DiffusionPipeline.from_pretrained(model, vae=vae, torch_dtype=torch.float16)
69
  pipe.enable_model_cpu_offload()
70
- pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
71
 
72
  if use_img2img:
73
  pipe = AutoPipelineForImage2Image.from_pretrained(model, torch_dtype=torch.float16)
74
  pipe.enable_model_cpu_offload()
75
- pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
76
 
77
  if use_vae:
78
  vae = AutoencoderKL.from_pretrained(vaecall, torch_dtype=torch.float16)
79
  pipe = AutoPipelineForImage2Image.from_pretrained(model, vae=vae, torch_dtype=torch.float16)
80
  pipe.enable_model_cpu_offload()
81
- pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
82
 
83
  response = requests.get(url)
84
  init_image = Image.open(BytesIO(response.content)).convert("RGB")
@@ -101,6 +97,7 @@ def generate(
101
  negative_prompt_2 = None # type: ignore
102
 
103
  if not use_img2img:
 
104
  return pipe(
105
  prompt=prompt,
106
  negative_prompt=negative_prompt,
@@ -111,9 +108,11 @@ def generate(
111
  guidance_scale=guidance_scale_base,
112
  num_inference_steps=num_inference_steps_base,
113
  generator=generator,
 
114
  output_type="pil",
115
  ).images[0]
116
  else:
 
117
  images = pipe(
118
  prompt=prompt,
119
  image=init_image,
@@ -126,6 +125,7 @@ def generate(
126
  guidance_scale=guidance_scale_base,
127
  num_inference_steps=num_inference_steps_base,
128
  generator=generator,
 
129
  output_type="pil",
130
  ).images[0]
131
  return images
 
12
  import torch
13
  from PIL import Image
14
  from io import BytesIO
15
+ from diffusers import AutoencoderKL, DiffusionPipeline, AutoPipelineForImage2Image, DPMSolverMultistepScheduler, StableDiffusionKDiffusionPipeline
16
 
17
  DESCRIPTION = "# Run any LoRA or SD Model"
18
  if not torch.cuda.is_available():
 
61
  if not use_img2img:
62
  pipe = DiffusionPipeline.from_pretrained(model, torch_dtype=torch.float16)
63
  pipe.enable_model_cpu_offload()
 
64
 
65
  if use_vae:
66
  vae = AutoencoderKL.from_pretrained(vaecall, torch_dtype=torch.float16)
67
  pipe = DiffusionPipeline.from_pretrained(model, vae=vae, torch_dtype=torch.float16)
68
  pipe.enable_model_cpu_offload()
 
69
 
70
  if use_img2img:
71
  pipe = AutoPipelineForImage2Image.from_pretrained(model, torch_dtype=torch.float16)
72
  pipe.enable_model_cpu_offload()
 
73
 
74
  if use_vae:
75
  vae = AutoencoderKL.from_pretrained(vaecall, torch_dtype=torch.float16)
76
  pipe = AutoPipelineForImage2Image.from_pretrained(model, vae=vae, torch_dtype=torch.float16)
77
  pipe.enable_model_cpu_offload()
 
78
 
79
  response = requests.get(url)
80
  init_image = Image.open(BytesIO(response.content)).convert("RGB")
 
97
  negative_prompt_2 = None # type: ignore
98
 
99
  if not use_img2img:
100
+ pipe.set_scheduler('sample_dpmpp_2m')
101
  return pipe(
102
  prompt=prompt,
103
  negative_prompt=negative_prompt,
 
108
  guidance_scale=guidance_scale_base,
109
  num_inference_steps=num_inference_steps_base,
110
  generator=generator,
111
+ use_karras_sigmas=True,
112
  output_type="pil",
113
  ).images[0]
114
  else:
115
+ pipe.set_scheduler('sample_dpmpp_2m')
116
  images = pipe(
117
  prompt=prompt,
118
  image=init_image,
 
125
  guidance_scale=guidance_scale_base,
126
  num_inference_steps=num_inference_steps_base,
127
  generator=generator,
128
+ use_karras_sigmas=True,
129
  output_type="pil",
130
  ).images[0]
131
  return images