craftgamesnetwork committed
Commit 9a212e2 · verified · 1 parent: 89494cf

Update app.py

Files changed (1): app.py (+1, -5)
app.py CHANGED
@@ -12,7 +12,7 @@ import spaces
 import torch
 from PIL import Image
 from io import BytesIO
-from diffusers import AutoencoderKL, DiffusionPipeline, AutoPipelineForImage2Image, DPMSolverMultistepScheduler, StableDiffusionKDiffusionPipeline
+from diffusers import AutoencoderKL, DiffusionPipeline, AutoPipelineForImage2Image, DPMSolverMultistepScheduler
 
 DESCRIPTION = "# Run any LoRA or SD Model"
 if not torch.cuda.is_available():
@@ -97,7 +97,6 @@ def generate(
     negative_prompt_2 = None  # type: ignore
 
     if not use_img2img:
-        pipe.set_scheduler('sample_dpmpp_2m')
         return pipe(
             prompt=prompt,
             negative_prompt=negative_prompt,
@@ -108,11 +107,9 @@
             guidance_scale=guidance_scale_base,
             num_inference_steps=num_inference_steps_base,
             generator=generator,
-            use_karras_sigmas=True,
             output_type="pil",
         ).images[0]
     else:
-        pipe.set_scheduler('sample_dpmpp_2m')
         images = pipe(
             prompt=prompt,
             image=init_image,
@@ -125,7 +122,6 @@
             guidance_scale=guidance_scale_base,
             num_inference_steps=num_inference_steps_base,
             generator=generator,
-            use_karras_sigmas=True,
             output_type="pil",
         ).images[0]
     return images
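
For context on why these lines go away: `set_scheduler('sample_dpmpp_2m')` and a per-call `use_karras_sigmas=True` belong to the k-diffusion-style `StableDiffusionKDiffusionPipeline`, whose import this commit also drops; the standard `DiffusionPipeline` call signature does not accept `use_karras_sigmas`. If DPM++ 2M sampling with Karras sigmas is still wanted, the usual diffusers approach is to configure it on the scheduler object once, at load time. The sketch below is not part of the commit; the model id and call parameters are placeholder assumptions, only the scheduler wiring reflects the diffusers API.

```python
# Minimal sketch (not from this commit): DPM++ 2M with Karras sigmas on a
# standard diffusers pipeline, replacing the removed k-diffusion-style calls.
import torch
from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler

pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",  # placeholder model id
    torch_dtype=torch.float16,
)

# use_karras_sigmas is a scheduler config option, not a pipeline __call__ kwarg,
# which is why the per-call use_karras_sigmas=True arguments were removed above.
pipe.scheduler = DPMSolverMultistepScheduler.from_config(
    pipe.scheduler.config,
    use_karras_sigmas=True,
)

if torch.cuda.is_available():
    pipe = pipe.to("cuda")

image = pipe(
    prompt="a photo of an astronaut riding a horse",  # example prompt
    num_inference_steps=25,
    guidance_scale=7.0,
    output_type="pil",
).images[0]
```

Configured this way, every call to `pipe(...)` in `generate()` uses the same sampler, so no scheduler call is needed inside the img2img and text2img branches.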