gokaygokay committed • Commit e4c639b • Parent: a7665b5

Update app.py

app.py CHANGED
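In short: the commit threads a user-selectable `resolution` value through the whole pipeline. `resize_and_upscale`, `prepare_image`, and `gradio_process_image` each gain a `resolution` parameter (replacing the old `target_size`), a `Resolution` slider is added under Advanced Options, and the `run_button.click` wiring and the cached examples are updated to pass it.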
```diff
@@ -135,17 +135,14 @@ lazy_realesrgan_x2 = LazyRealESRGAN(device, scale=2)
 lazy_realesrgan_x4 = LazyRealESRGAN(device, scale=4)
 
 @timer_func
-def resize_and_upscale(input_image, target_size):
+def resize_and_upscale(input_image, resolution):
+    scale = 2 if resolution <= 2048 else 4
     input_image = input_image.convert("RGB")
     W, H = input_image.size
-
-    scale = 2 if target_size <= 2048 else 4
-
-    k = float(target_size) / min(H, W)
+    k = float(resolution) / min(H, W)
     H = int(round(H * k / 64.0)) * 64
     W = int(round(W * k / 64.0)) * 64
     img = input_image.resize((W, H), resample=Image.LANCZOS)
-
     if scale == 2:
         img = lazy_realesrgan_x2.predict(img)
     else:
```
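The resizing math above brings the image's shorter side to the requested `resolution` and snaps both dimensions to multiples of 64 before the Real-ESRGAN pass (the x2 model for resolutions up to 2048, x4 above). A minimal standalone sketch of just that computation; the helper name and sample dimensions are illustrative, not part of the app:

```python
def target_dims(w, h, resolution):
    # Scale factor that brings the shorter side to `resolution`.
    k = float(resolution) / min(h, w)
    # Round each side to the nearest multiple of 64, as in the diff.
    W = int(round(w * k / 64.0)) * 64
    H = int(round(h * k / 64.0)) * 64
    # Which Real-ESRGAN model the app would then apply.
    scale = 2 if resolution <= 2048 else 4
    return W, H, scale

print(target_dims(1920, 1080, 512))   # (896, 512, 2)
print(target_dims(1920, 1080, 2048))  # (3648, 2048, 2)
```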
```diff
@@ -169,18 +166,18 @@ def create_hdr_effect(original_image, hdr):
 lazy_pipe = LazyLoadPipeline()
 lazy_pipe.load()
 
-def prepare_image(input_image, target_size, hdr):
-    condition_image = resize_and_upscale(input_image, target_size)
+def prepare_image(input_image, resolution, hdr):
+    condition_image = resize_and_upscale(input_image, resolution)
     condition_image = create_hdr_effect(condition_image, hdr)
     return condition_image
 
 @spaces.GPU
 @timer_func
-def gradio_process_image(input_image,
+def gradio_process_image(input_image, resolution, num_inference_steps, strength, hdr, guidance_scale):
     print("Starting image processing...")
     torch.cuda.empty_cache()
 
-    condition_image = prepare_image(input_image, target_size, hdr)
+    condition_image = prepare_image(input_image, resolution, hdr)
 
     prompt = "masterpiece, best quality, highres"
     negative_prompt = "low quality, normal quality, ugly, blurry, blur, lowres, bad anatomy, bad hands, cropped, worst quality, verybadimagenegative_v1.3, JuggernautNegative-neg"
```
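Gradio binds `inputs` to handler parameters positionally, so the component list wired up in the next hunk has to stay in the same order as this new six-parameter signature. A toy sketch of that convention, with hypothetical component and handler names:

```python
import gradio as gr

def handler(image, resolution, steps):
    # Arguments arrive in the order the inputs list declares them.
    return f"resolution={resolution}, steps={steps}"

with gr.Blocks() as demo:
    image = gr.Image()
    resolution = gr.Slider(minimum=256, maximum=2048, value=512, step=256, label="Resolution")
    steps = gr.Slider(minimum=1, maximum=50, value=20, step=1, label="Steps")
    out = gr.Textbox()
    run = gr.Button("Run")
    # This list must match handler(image, resolution, steps) positionally.
    run.click(fn=handler, inputs=[image, resolution, steps], outputs=out)
```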
```diff
@@ -225,24 +222,24 @@ with gr.Blocks() as demo:
         with gr.Column():
             output_slider = ImageSlider(label="Before / After", type="numpy")
     with gr.Accordion("Advanced Options", open=False):
-
+        resolution = gr.Slider(minimum=256, maximum=2048, value=512, step=256, label="Resolution")
         num_inference_steps = gr.Slider(minimum=1, maximum=50, value=20, step=1, label="Number of Inference Steps")
         strength = gr.Slider(minimum=0, maximum=1, value=0.4, step=0.01, label="Strength")
         hdr = gr.Slider(minimum=0, maximum=1, value=0, step=0.1, label="HDR Effect")
         guidance_scale = gr.Slider(minimum=0, maximum=20, value=3, step=0.5, label="Guidance Scale")
 
     run_button.click(fn=gradio_process_image,
-                     inputs=[input_image,
+                     inputs=[input_image, resolution, num_inference_steps, strength, hdr, guidance_scale],
                      outputs=output_slider)
 
     # Add examples with all required inputs
     gr.Examples(
         examples=[
-            ["image1.jpg",
-            ["image2.png",
-            ["image3.png",
+            ["image1.jpg", 512, 20, 0.4, 0, 3],
+            ["image2.png", 512, 20, 0.4, 0, 3],
+            ["image3.png", 512, 20, 0.4, 0, 3],
         ],
-        inputs=[input_image,
+        inputs=[input_image, resolution, num_inference_steps, strength, hdr, guidance_scale],
         outputs=output_slider,
         fn=gradio_process_image,
         cache_examples=True,
```
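With `cache_examples=True`, Gradio runs `fn` over every example row when the app starts, so each row must supply one value per input component, in `inputs`-list order; that is why every example now carries six values. Continuing the toy sketch from above, inside the same `gr.Blocks()` context:

```python
gr.Examples(
    examples=[
        # One value per input component, in inputs-list order.
        ["image1.jpg", 512, 20],
    ],
    inputs=[image, resolution, steps],
    outputs=out,
    fn=handler,
    cache_examples=True,  # precomputes outputs; the example files must exist
)
```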