gokaygokay committed
Commit e8864dd • 1 Parent(s): 69d6988

Update app.py

Files changed (1)
  1. app.py +12 -17
app.py CHANGED
@@ -12,15 +12,8 @@ import numpy as np
 from diffusers.models.attention_processor import AttnProcessor2_0
 import gradio as gr
 
-# Constants
-SD15_WEIGHTS = "weights"
-CONTROLNET_CACHE = "controlnet-cache"
-SCHEDULERS = {
-    "DDIM": DDIMScheduler,
-    "DPMSolverMultistep": DPMSolverMultistepScheduler,
-    "K_EULER_ANCESTRAL": EulerAncestralDiscreteScheduler,
-    "K_EULER": EulerDiscreteScheduler,
-}
+USE_TORCH_COMPILE = 0
+ENABLE_CPU_OFFLOAD = 0
 
 # Function to download files
 def download_file(url, folder_path, filename):
@@ -42,12 +35,7 @@ def download_file(url, folder_path, filename):
 
 # Download necessary models and files
 
-@spaces.GPU
-def gradio_process_image(input_image, resolution, num_inference_steps, strength, hdr, guidance_scale):
-    prompt = "masterpiece, best quality, highres"
-    negative_prompt = "low quality, normal quality, ugly, blurry, blur, lowres, bad anatomy, bad hands, cropped, worst quality, verybadimagenegative_v1.3, JuggernautNegative-neg"
-    result = process_image(input_image, prompt, negative_prompt, resolution, num_inference_steps, guidance_scale, strength, hdr)
-    return result
+
 
 # MODEL
 download_file(
@@ -147,8 +135,6 @@ pipe.load_lora_weights("models/Lora/more_details.safetensors")
 pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
 
 # Move the pipeline to the device and enable memory efficient attention
-pipe = pipe.to(device)
-pipe.unet.set_attn_processor(AttnProcessor2_0())
 
 # Enable FreeU
 pipe.enable_freeu(s1=0.9, s2=0.2, b1=1.3, b2=1.4)
@@ -250,6 +236,15 @@ def process_image(input_image, prompt, negative_prompt, resolution=2048, num_inf
 
     return result
 
+@spaces.GPU
+def gradio_process_image(input_image, resolution, num_inference_steps, strength, hdr, guidance_scale):
+    pipe = pipe.to(device)
+    pipe.unet.set_attn_processor(AttnProcessor2_0())
+    prompt = "masterpiece, best quality, highres"
+    negative_prompt = "low quality, normal quality, ugly, blurry, blur, lowres, bad anatomy, bad hands, cropped, worst quality, verybadimagenegative_v1.3, JuggernautNegative-neg"
+    result = process_image(input_image, prompt, negative_prompt, resolution, num_inference_steps, guidance_scale, strength, hdr)
+    return result
+
 # Simple options
 simple_options = [
     gr.Image(type="pil", label="Input Image"),
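
For reference, a minimal sketch of how the relocated gradio_process_image handler could be wired into the Gradio UI on a ZeroGPU Space. This is illustrative and not part of the commit: process_image, pipe, device, and simple_options are assumed to be defined at module level in app.py, and the global declaration and gr.Interface wiring are additions made here so the sketch reads end to end.

import spaces
import gradio as gr
from diffusers.models.attention_processor import AttnProcessor2_0

@spaces.GPU
def gradio_process_image(input_image, resolution, num_inference_steps, strength, hdr, guidance_scale):
    # Added in this sketch: without "global pipe", the assignment below would make
    # pipe a local name and fail when it is read on the right-hand side.
    global pipe
    pipe = pipe.to(device)                            # move pipeline to the GPU allocated by @spaces.GPU
    pipe.unet.set_attn_processor(AttnProcessor2_0())  # enable memory-efficient attention
    prompt = "masterpiece, best quality, highres"
    negative_prompt = "low quality, normal quality, ugly, blurry, blur, lowres"  # abridged from app.py
    return process_image(input_image, prompt, negative_prompt, resolution,
                         num_inference_steps, guidance_scale, strength, hdr)

# Hypothetical wiring: pass the simple_options inputs defined in app.py to the handler.
demo = gr.Interface(
    fn=gradio_process_image,
    inputs=simple_options,
    outputs=gr.Image(type="pil", label="Output Image"),
)
demo.launch()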