Profakerr committed
Commit d6818b9 · verified
1 Parent(s): 47525ab

Update app.py

Files changed (1)
  1. app.py +5 -7
app.py CHANGED
@@ -1,16 +1,14 @@
-from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
+from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler, AutoencoderKL
 import torch
 import gradio as gr
 import spaces
 
 
 lora_path = "OedoSoldier/detail-tweaker-lora"
+vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse").to("cuda")
 
 @spaces.GPU
 def generate_image(prompt, negative_prompt, num_inference_steps=30, guidance_scale=7.0,model="Real6.0"):
-    """
-    Generate an image using Stable Diffusion based on the input prompt
-    """
 
     if model == "Real5.0":
         model_id = "SG161222/Realistic_Vision_V5.0_noVAE"
@@ -22,7 +20,7 @@ def generate_image(prompt, negative_prompt, num_inference_steps=30, guidance_sca
         model_id = "SG161222/Realistic_Vision_V6.0_B1_noVAE"
 
 
-    pipe = DiffusionPipeline.from_pretrained(model_id).to("cuda")
+    pipe = DiffusionPipeline.from_pretrained(model_id, vae=vae).to("cuda")
 
     if model == "Real6.0":
         pipe.safety_checker = lambda images, **kwargs: (images, [False] * len(images))
@@ -43,8 +41,8 @@ def generate_image(prompt, negative_prompt, num_inference_steps=30, guidance_sca
         cross_attention_kwargs = {"scale":1},
         num_inference_steps = num_inference_steps,
         guidance_scale = guidance_scale,
-        width = 960,
-        height = 960
+        width = 800,
+        height = 800
     ).images[0]
 
     return image
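
In short, the commit pairs the noVAE Realistic Vision checkpoints with the stabilityai/sd-vae-ft-mse autoencoder (loaded once at module level and injected into the pipeline via vae=) and lowers the output resolution from 960×960 to 800×800. The snippet below is a minimal, self-contained sketch of that setup for the V6.0 checkpoint, assuming a CUDA device is available; the prompt strings are placeholders, and the LoRA and scheduler handling elsewhere in app.py is omitted because this diff does not touch it.

    from diffusers import DiffusionPipeline, AutoencoderKL

    # Load the MSE-finetuned VAE once and reuse it across requests,
    # mirroring the module-level `vae = ...` line added in this commit.
    vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse").to("cuda")

    # Passing vae= supplies the external VAE in place of one bundled
    # with the "noVAE" checkpoint.
    pipe = DiffusionPipeline.from_pretrained(
        "SG161222/Realistic_Vision_V6.0_B1_noVAE", vae=vae
    ).to("cuda")

    image = pipe(
        prompt="placeholder prompt",              # placeholder, not from the Space
        negative_prompt="placeholder negative",   # placeholder, not from the Space
        num_inference_steps=30,
        guidance_scale=7.0,
        width=800,                                # resolution after this commit
        height=800,
    ).images[0]
    image.save("out.png")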