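# Gradio demo: visualize step-by-step noising and denoising of an image
# by blending it with Gaussian noise over a configurable number of steps.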
import gradio as gr
import numpy as np
import time
def normalize(image):
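    """Map image values back into [0, 1] by clipping; alternative mappings are kept below for reference."""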
    # different methods to map image values to [0, 1]
    # scale to [0, 1] using min-max normalization
    # image = (image - np.min(image, keepdims=True)) / (np.max(image, keepdims=True) - np.min(image, keepdims=True))
    # standardize to zero mean and unit variance, then scale to [0, 1]
    # image = (image - np.mean(image, keepdims=True)) / (np.std(image, keepdims=True) + 1e-8)  # Avoid division by zero
    # image = (image + 1) / 2  # Scale to [0, 1]
    # just clip to [0, 1]
    return np.clip(image, 0, 1)
# Renamed for clarity and consistency
def fake_diffusion_denoise(image, steps):
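    """Add heavy Gaussian noise once, then linearly blend back toward the original image, yielding each intermediate frame."""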
    original_image = image.astype(np.float32) / 255.0
    # Add initial noise
    noisy_start_image = original_image + np.random.normal(0, 0.7, original_image.shape)
    noisy_start_image = normalize(noisy_start_image)
    for i in range(steps):
        time.sleep(0.2)
        # Simulate denoising: gradually revert to the original image (linear progress)
        progress = (i + 1) / steps
        denoised_step = (1 - progress) * noisy_start_image + progress * original_image
        denoised_step = normalize(denoised_step)
        yield (denoised_step * 255).astype(np.uint8)
    yield (original_image * 255).astype(np.uint8)  # Ensure final image is clean
def real_diffusion_add_noise(image, steps):
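    """Blend the original image with progressively stronger Gaussian noise, yielding each noisier frame."""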
    base_image = image.astype(np.float32) / 255.0
    max_noise_std = 0.8  # Maximum noise level to reach
    for i in range(steps):
        time.sleep(0.2)
        # Increase noise progressively
        current_noise_std = max_noise_std * ((i + 1) / steps)
        noise = np.random.normal(0, current_noise_std, base_image.shape)
        noisy_step = base_image + noise
        noisy_step = normalize(noisy_step)
        yield (noisy_step * 255).astype(np.uint8)
    # Yield the most noisy version as the final step
    final_noise = np.random.normal(0, max_noise_std, base_image.shape)
    final_noisy_image = normalize(base_image + final_noise)
    yield (final_noisy_image * 255).astype(np.uint8)
def flow_matching_denoise(image, steps):
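    """Start from a very noisy image and blend back toward the original along a sigmoid schedule, yielding each frame."""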
    original_image = image.astype(np.float32) / 255.0
    # Start with a significantly noisy image
    very_noisy_image = original_image + np.random.normal(0, 1.0, original_image.shape)  # High initial noise
    very_noisy_image = normalize(very_noisy_image)
    for i in range(steps):
        time.sleep(0.2)
        # Non-linear progress using a sigmoid-like curve for smoother transition
        p_norm = (i + 1) / steps  # Normalized progress 0 to 1
        # Transform p_norm to a range like -5 to 5 for sigmoid
        sigmoid_input = 10 * (p_norm - 0.5)
        flow_progress = 1 / (1 + np.exp(-sigmoid_input))
        denoised_step = (1 - flow_progress) * very_noisy_image + flow_progress * original_image
        denoised_step = normalize(denoised_step)
        yield (denoised_step * 255).astype(np.uint8)
    yield (original_image * 255).astype(np.uint8)  # Ensure final image is clean
# Main processing function that routes to different methods
def process_image_selected_method(method_selection, input_image, num_steps):
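    """Dispatch to the generator for the selected method; fall back to yielding the unmodified input."""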
    if input_image is None:
        yield np.zeros((200, 200, 3), dtype=np.uint8)
        return
    if method_selection == "Fake Diffusion (Denoise)":
        yield from fake_diffusion_denoise(input_image, num_steps)
    elif method_selection == "Real Diffusion (Add Noise)":
        yield from real_diffusion_add_noise(input_image, num_steps)
    elif method_selection == "Flow Matching (Denoise)":
        yield from flow_matching_denoise(input_image, num_steps)
    else:
        yield input_image
method_choices = ["Fake Diffusion (Denoise)", "Real Diffusion (Add Noise)", "Flow Matching (Denoise)"]
with gr.Blocks() as demo:
gr.Markdown("# Diffusion Processing Demo")
gr.Markdown("Select a method: 'Fake Diffusion (Denoise)' and 'Flow Matching (Denoise)' will denoise an image. 'Real Diffusion (Add Noise)' will progressively add noise to the image. Adjust steps for granularity.")
with gr.Row():
method_selection = gr.Dropdown(choices=method_choices, label="Select Method", value="Fake Diffusion (Denoise)")
num_steps = gr.Slider(minimum=1, maximum=30, value=10, step=1, label="Processing Steps")
with gr.Row():
input_image = gr.Image(type="numpy", label="Input Image", value="https://gradio-builds.s3.amazonaws.com/diffusion_image/cute_dog.jpg")
output_image = gr.Image(type="numpy", label="Processed Image")
process_button = gr.Button("Process Image")
process_button.click(
fn=process_image_selected_method,
inputs=[method_selection, input_image, num_steps],
outputs=output_image
)
# Enable queuing; required for generator (streaming) outputs
demo.queue()
demo.launch()