import gradio as gr
import cv2
import numpy as np
from PIL import Image
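
# Exposure-fusion demo: upload several differently exposed shots of the same
# scene, optionally align and crop them first, then blend them with OpenCV's
# Mertens exposure fusion and show the result in a Gradio interface.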

def exposure_fusion(image_paths):
    try:
        # Open images from file paths, force 3-channel RGB (handles RGBA/grayscale inputs),
        # and convert to OpenCV's BGR channel order
        images_cv = [cv2.cvtColor(np.array(Image.open(path).convert("RGB")), cv2.COLOR_RGB2BGR) for path in image_paths]
        
        # Align images using AlignMTB (median threshold bitmaps); process() writes the
        # shifted frames into the destination list, so give it per-image copies
        align_mtb = cv2.createAlignMTB()
        aligned_images = [img.copy() for img in images_cv]
        align_mtb.process(images_cv, aligned_images)
        
        # Merge images using exposure fusion (Mertens)
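        # (Mertens fusion weights each pixel by contrast, saturation, and
        # well-exposedness, so no exposure times or camera response curve are needed)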
        merge_mertens = cv2.createMergeMertens()
        fused = merge_mertens.process(aligned_images)
        
        # MergeMertens returns float32 roughly in [0, 1] (it can overshoot slightly),
        # so scale, clip, convert to uint8, and switch back to RGB for display
        fused = np.clip(fused * 255, 0, 255).astype('uint8')
        fused = cv2.cvtColor(fused, cv2.COLOR_BGR2RGB)
        return fused
    except Exception as e:
        # Surface the failure in the Gradio UI instead of returning a string to an image output
        raise gr.Error(f"Exposure fusion failed: {e}")

def stabilize_crop_and_exposure_fusion(image_paths):
    try:
        # Open images from file paths, force 3-channel RGB (handles RGBA/grayscale inputs),
        # and convert to OpenCV's BGR channel order
        images_cv = [cv2.cvtColor(np.array(Image.open(path).convert("RGB")), cv2.COLOR_RGB2BGR) for path in image_paths]

        # Align images using AlignMTB (median threshold bitmaps); process() writes the
        # shifted frames into the destination list, so give it per-image copies
        align_mtb = cv2.createAlignMTB()
        aligned_images = [img.copy() for img in images_cv]
        align_mtb.process(images_cv, aligned_images)

        # Determine valid regions in each image (to remove black borders)
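        # (AlignMTB shifts each frame to line it up and fills the vacated edges
        # with black, so the usable area of each frame may shrink)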
        bounding_rects = []
        for img in aligned_images:
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
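            # Treat near-black pixels (intensity <= 10) as border rather than scene content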
            _, mask = cv2.threshold(gray, 10, 255, cv2.THRESH_BINARY)
            coords = cv2.findNonZero(mask)
            if coords is not None:
                x, y, w, h = cv2.boundingRect(coords)
                bounding_rects.append((x, y, w, h))
            else:
                bounding_rects.append((0, 0, img.shape[1], img.shape[0]))

        # Compute the common intersection rectangle
        if not bounding_rects:
            raise gr.Error("No valid images provided.")
        x_min, y_min, w, h = bounding_rects[0]
        x_max = x_min + w
        y_max = y_min + h
        for (x, y, w, h) in bounding_rects[1:]:
            x_min = max(x_min, x)
            y_min = max(y_min, y)
            x_max = min(x_max, x + w)
            y_max = min(y_max, y + h)
        if x_max <= x_min or y_max <= y_min:
            raise gr.Error("Images do not overlap enough for cropping.")

        # Crop each aligned image to the intersection region
        cropped_images = [img[y_min:y_max, x_min:x_max] for img in aligned_images]

        # Merge the cropped images using exposure fusion (Mertens)
        merge_mertens = cv2.createMergeMertens()
        fused = merge_mertens.process(cropped_images)
        # MergeMertens returns float32 roughly in [0, 1]; scale, clip, and convert back to RGB
        fused = np.clip(fused * 255, 0, 255).astype('uint8')
        fused = cv2.cvtColor(fused, cv2.COLOR_BGR2RGB)
        return fused
    except gr.Error:
        # Re-raise user-facing errors (e.g. missing overlap) unchanged
        raise
    except Exception as e:
        raise gr.Error(f"Stabilized exposure fusion failed: {e}")

def process_images(image_paths, advanced):
    if not image_paths:
        return None
    if advanced:
        return stabilize_crop_and_exposure_fusion(image_paths)
    else:
        return exposure_fusion(image_paths)

# Gradio Interface: Upload multiple images and choose the processing method.
inputs = [
    gr.File(type="filepath", label="Upload Images", file_count="multiple", file_types=["image"]),
    gr.Checkbox(label="Advanced: Stabilize & Crop Before Fusion", value=False)
]

iface = gr.Interface(
    fn=process_images,
    inputs=inputs,
    outputs="image",
    title="Exposure Fusion with Stabilization",
    description=(
        "Upload multiple images with varying exposures. "
        "If 'Advanced: Stabilize & Crop Before Fusion' is selected, "
        "the app aligns the images, crops out extra borders, then fuses them."
    ),
)

if __name__ == "__main__":
    iface.launch()