import gradio as gr
import cv2
import numpy as np
# Function to compare two images and highlight differences
def compare_images(mockup, ui_screenshot, check_text, check_color, check_spacing):
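    """Compare a design mockup with a UI screenshot.

    Returns the mockup with differing regions circled in red, plus a text
    report for the checks the user enabled.
    """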
    # Convert PIL images to numpy arrays (PIL arrays are in RGB channel order)
    mockup_array = np.array(mockup.convert("RGB"))
    ui_screenshot_array = np.array(ui_screenshot.convert("RGB"))

    # Resize both images to the same dimensions so they can be compared pixel by pixel
    if mockup_array.shape != ui_screenshot_array.shape:
        height = max(mockup_array.shape[0], ui_screenshot_array.shape[0])
        width = max(mockup_array.shape[1], ui_screenshot_array.shape[1])
        mockup_array = cv2.resize(mockup_array, (width, height))
        ui_screenshot_array = cv2.resize(ui_screenshot_array, (width, height))

    # Convert images to grayscale
    mockup_gray = cv2.cvtColor(mockup_array, cv2.COLOR_RGB2GRAY)
    ui_screenshot_gray = cv2.cvtColor(ui_screenshot_array, cv2.COLOR_RGB2GRAY)

    # Compute the absolute per-pixel difference between the two images
    difference = cv2.absdiff(mockup_gray, ui_screenshot_gray)

    # Threshold the difference image to get a binary mask of changed regions
    _, thresh = cv2.threshold(difference, 30, 255, cv2.THRESH_BINARY)

    # Find contours of the differing regions
    contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    # Draw on a copy of the mockup so the original array stays untouched
    highlighted_image = mockup_array.copy()

    # Draw red circles around the differences (the array is RGB, so red is (255, 0, 0))
    for contour in contours:
        if cv2.contourArea(contour) > 100:  # Filter out small differences
            x, y, w, h = cv2.boundingRect(contour)
            center = (x + w // 2, y + h // 2)
            radius = max(w, h) // 2 + 10
            cv2.circle(highlighted_image, center, radius, (255, 0, 0), 2)

    # The array is already in RGB order, so no colour-space conversion is needed for display
    highlighted_image_rgb = highlighted_image

    # Generate a report based on the selected options
    report = "Comparison Report:\n"
    if check_text:
        report += "Text Differences: This feature requires advanced text recognition.\n"
    if check_color:
        report += "Color Differences: This feature requires color analysis.\n"
    if check_spacing:
        report += "Spacing Differences: This feature requires layout analysis.\n"

    return highlighted_image_rgb, report
# Create the Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("## Welcome to the UI Difference Spotter!")
    gr.Markdown("Please upload the design mockup and the developed UI screenshot you want to compare.")

    with gr.Row():
        mockup_upload = gr.Image(label="Upload Design Mockup", type="pil")
        ui_screenshot_upload = gr.Image(label="Upload Developed UI Screenshot", type="pil")

    with gr.Row():
        check_text = gr.Checkbox(label="Check Text Differences", value=True)
        check_color = gr.Checkbox(label="Check Color Differences", value=True)
        check_spacing = gr.Checkbox(label="Check Spacing Differences", value=True)

    with gr.Row():
        compare_button = gr.Button("Compare Images")

    with gr.Row():
        highlighted_image = gr.Image(label="Highlighted Differences")
        report_output = gr.Textbox(label="Comparison Details", interactive=False)

    # Define the event listener for the compare button
    compare_button.click(
        fn=compare_images,
        inputs=[mockup_upload, ui_screenshot_upload, check_text, check_color, check_spacing],
        outputs=[highlighted_image, report_output],
    )
# Launch the Gradio app
if __name__ == "__main__":
    demo.launch(show_error=True)