linkdom committed · Commit 4512358 · Parent(s): ba8d1a5

modify the clip function, maybe just clip is the best
app.py CHANGED
@@ -2,29 +2,35 @@ import gradio as gr
 import numpy as np
 import time
 
+def normalize(image):
+    # different methods to map image values to [0, 1]
+    # scale to [0, 1] using min-max normalization
+    # image = (image - np.min(image, keepdims=True)) / (np.max(image, keepdims=True) - np.min(image, keepdims=True))
+
+    # standardize to zero mean and unit variance, then scale to [0, 1]
+    # image = (image - np.mean(image, keepdims=True)) / (np.std(image, keepdims=True) + 1e-8) # Avoid division by zero
+    # image = (image + 1) / 2 # Scale to [0, 1]
+
+    # just clip to [0, 1]
+    return np.clip(image, 0, 1)
+
 # Renamed for clarity and consistency
 def fake_diffusion_denoise(image, steps):
-    if image is None:
-        yield np.zeros((100, 100, 3), dtype=np.uint8)
-        return
     original_image = image.astype(np.float32) / 255.0
     # Add initial noise
     noisy_start_image = original_image + np.random.normal(0, 0.7, original_image.shape)
-    noisy_start_image =
+    noisy_start_image = normalize(noisy_start_image)
 
     for i in range(steps):
         time.sleep(0.2)
         # Simulate denoising: gradually revert to the original image (linear progress)
         progress = (i + 1) / steps
         denoised_step = (1 - progress) * noisy_start_image + progress * original_image
-        denoised_step =
+        denoised_step = normalize(denoised_step)
         yield (denoised_step * 255).astype(np.uint8)
     yield (original_image * 255).astype(np.uint8) # Ensure final image is clean
 
 def real_diffusion_add_noise(image, steps):
-    if image is None:
-        yield np.zeros((100, 100, 3), dtype=np.uint8)
-        return
     base_image = image.astype(np.float32) / 255.0
     max_noise_std = 0.8 # Maximum noise level to reach
 
@@ -34,22 +40,19 @@ def real_diffusion_add_noise(image, steps):
         current_noise_std = max_noise_std * ((i + 1) / steps)
         noise = np.random.normal(0, current_noise_std, base_image.shape)
         noisy_step = base_image + noise
-        noisy_step =
+        noisy_step = normalize(noisy_step)
         yield (noisy_step * 255).astype(np.uint8)
     # Yield the most noisy version as the final step
     final_noise = np.random.normal(0, max_noise_std, base_image.shape)
-    final_noisy_image =
+    final_noisy_image = normalize(base_image + final_noise)
     yield (final_noisy_image * 255).astype(np.uint8)
 
 
 def flow_matching_denoise(image, steps):
-    if image is None:
-        yield np.zeros((100, 100, 3), dtype=np.uint8)
-        return
     original_image = image.astype(np.float32) / 255.0
     # Start with a significantly noisy image
     very_noisy_image = original_image + np.random.normal(0, 1.0, original_image.shape) # High initial noise
-    very_noisy_image =
+    very_noisy_image = normalize(very_noisy_image)
 
     for i in range(steps):
         time.sleep(0.2)
@@ -60,16 +63,13 @@ def flow_matching_denoise(image, steps):
         flow_progress = 1 / (1 + np.exp(-sigmoid_input))
 
         denoised_step = (1 - flow_progress) * very_noisy_image + flow_progress * original_image
-        denoised_step =
+        denoised_step = normalize(denoised_step)
         yield (denoised_step * 255).astype(np.uint8)
     yield (original_image * 255).astype(np.uint8) # Ensure final image is clean
 
 # Main processing function that routes to different methods
 def process_image_selected_method(method_selection, input_image, num_steps):
     if input_image is None:
-        # This case should ideally be handled by Gradio if a default image URL is provided
-        # or prevented by making the image input mandatory.
-        # Yielding a placeholder if it somehow becomes None during processing.
         yield np.zeros((200, 200, 3), dtype=np.uint8)
         return
 
@@ -80,23 +80,29 @@ def process_image_selected_method(method_selection, input_image, num_steps):
     elif method_selection == "Flow Matching (Denoise)":
         yield from flow_matching_denoise(input_image, num_steps)
     else:
-        # Default behavior: return the original image as is, or an error image
        yield input_image
 
 
 method_choices = ["Fake Diffusion (Denoise)", "Real Diffusion (Add Noise)", "Flow Matching (Denoise)"]
 
-
-
-
-
-
-gr.
-
-
-
-
-)
+with gr.Blocks() as demo:
+    gr.Markdown("# Diffusion Processing Demo")
+    gr.Markdown("Select a method: 'Fake Diffusion (Denoise)' and 'Flow Matching (Denoise)' will denoise an image. 'Real Diffusion (Add Noise)' will progressively add noise to the image. Adjust steps for granularity.")
+
+    with gr.Row():
+        method_selection = gr.Dropdown(choices=method_choices, label="Select Method", value="Fake Diffusion (Denoise)")
+        num_steps = gr.Slider(minimum=1, maximum=30, value=10, step=1, label="Processing Steps")
+
+    with gr.Row():
+        input_image = gr.Image(type="numpy", label="Input Image", value="https://gradio-builds.s3.amazonaws.com/diffusion_image/cute_dog.jpg")
+        output_image = gr.Image(type="numpy", label="Processed Image")
+
+    process_button = gr.Button("Process Image")
+    process_button.click(
+        fn=process_image_selected_method,
+        inputs=[method_selection, input_image, num_steps],
+        outputs=output_image
+    )
 
 # define queue - required for generators
 demo.queue()
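
The commented-out alternatives kept inside `normalize` behave quite differently on a noisy frame: min-max rescaling ties the output range to the per-frame noise extremes, standardization can still leave values outside [0, 1], and a plain clip leaves already-valid pixels untouched. The sketch below compares the three mappings on a synthetic noisy array; the helper names and the test frame are illustrative and not part of app.py.

```python
import numpy as np

# Illustrative comparison of the three [0, 1] mappings discussed in normalize().
# The helper names and the synthetic frame are hypothetical; app.py keeps only the clip.
def minmax_01(x):
    # Rescale so the darkest pixel maps to 0 and the brightest to 1; the output
    # contrast then depends on the per-frame noise extremes, so frames can flicker.
    return (x - x.min()) / (x.max() - x.min() + 1e-8)

def standardize_01(x):
    # Zero mean / unit variance, then shift into [0, 1]; values beyond roughly one
    # standard deviation still land outside [0, 1] and would need a clip anyway.
    z = (x - x.mean()) / (x.std() + 1e-8)
    return (z + 1) / 2

def clip_01(x):
    # What the commit settles on: keep valid pixels untouched, saturate the rest.
    return np.clip(x, 0, 1)

rng = np.random.default_rng(0)
frame = rng.uniform(0, 1, (4, 4)) + rng.normal(0, 0.7, (4, 4))  # noisy pseudo-image
for name, fn in [("min-max", minmax_01), ("standardize", standardize_01), ("clip", clip_01)]:
    out = fn(frame)
    print(f"{name:12s} min={out.min():.3f} max={out.max():.3f} mean={out.mean():.3f}")
```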
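flow_matching_denoise blends with a sigmoid-shaped progress instead of the linear `(i + 1) / steps` used in fake_diffusion_denoise. The expression that builds `sigmoid_input` lies outside the visible hunks, so the schedule below is only an assumed, representative choice that shows how a sigmoid progress differs from a linear one.

```python
import numpy as np

# Assumed sigmoid schedule: the real sigmoid_input expression in app.py is on
# lines not shown in this diff, so this mapping is only representative.
def sigmoid_progress(i, steps, scale=10.0):
    # Map the step index into a symmetric range so progress starts near 0,
    # ends near 1, and moves fastest through the middle steps.
    sigmoid_input = scale * ((i + 1) / steps - 0.5)
    return 1 / (1 + np.exp(-sigmoid_input))

steps = 10
linear = [(i + 1) / steps for i in range(steps)]
sigmoid = [sigmoid_progress(i, steps) for i in range(steps)]
print("linear :", [f"{p:.2f}" for p in linear])
print("sigmoid:", [f"{p:.2f}" for p in sigmoid])
```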
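Every processing function is a generator, and the click handler streams each yielded frame to `output_image`; that is why the file's own comment notes the queue is required for generators and why it ends with `demo.queue()`. A small sanity check that exercises the pipeline without the UI is sketched below, assuming it runs in the same module as the functions above; the random dummy image is illustrative, the Space itself loads a sample photo instead.

```python
import numpy as np

# Quick sanity check of the streaming pipeline outside the Gradio UI.
# Assumes process_image_selected_method from app.py is defined in this session.
dummy = np.random.default_rng(0).integers(0, 256, size=(64, 64, 3), dtype=np.uint8)

for idx, frame in enumerate(process_image_selected_method("Real Diffusion (Add Noise)", dummy, 3)):
    # Each yielded frame is a uint8 array ready for gr.Image; normalize() kept the
    # float values in [0, 1] before the * 255 cast, so nothing wraps around.
    print(f"frame {idx}: shape={frame.shape}, dtype={frame.dtype}, mean={frame.mean():.1f}")
```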