# Gradio Interface for Optical Illusion Predictor

import gradio as gr
import numpy as np
import pandas as pd
import joblib
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from PIL import Image
import io
import os

# Define the image folder path
image_folder = "Optical Illusion Images"

# Define the enriched folder path
enriched_folder = 'Optical Illusion Enriched Data'
master_df = pd.read_csv(f'{enriched_folder}/combined_engineered_data.csv')

# Define the trained models folder path
trained_models_folder = 'Optical Illusion - Trained Models'

# Constants for image dimensions
DISPLAY_WIDTH = 1920
DISPLAY_HEIGHT = 1080

# Image descriptions for better user understanding
IMAGE_DESCRIPTIONS = {
    'duck-rabbit': 'A classic ambiguous figure that can be seen as either a duck or a rabbit',
    'face-vase': 'The famous Rubin\'s vase - you might see two faces in profile or a vase',
    'young-old': 'This image can appear as either a young woman or an old woman',
    'princess-oldMan': 'Can be perceived as either a princess or an old man',
    'lily-woman': 'This ambiguous image shows either a lily flower or a woman',
    'tiger-monkey': 'You might see either a tiger or a monkey in this image'
}

# Load all saved models at startup
def load_all_models():
    """Load all saved models into memory"""
    models = {}
    for image_name in master_df['image_type'].unique():
        try:
            model_path = f'{trained_models_folder}/{image_name}_models.pkl'
            models[image_name] = joblib.load(model_path)
            print(f"βœ“ Loaded model for {image_name}")
        except Exception as e:
            print(f"βœ— Could not load model for {image_name}: {e}")
    return models
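
# Each '{image_name}_models.pkl' bundle is expected to carry the keys accessed
# elsewhere in this script: 'rf_model', 'lr_model', 'label_classes',
# 'class_distribution', 'total_samples', 'cv_accuracy_rf', 'cv_accuracy_lr',
# plus the region centroids 'centroid_left_x'/'centroid_left_y' and
# 'centroid_right_x'/'centroid_right_y'.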

# Load models
all_models = load_all_models()

# Function to load and resize images
def load_illusion_images(image_folder):
    """Load optical illusion images from a folder and resize to 1920x1080"""
    images = {}
    for image_name in all_models.keys():
        image_path = f'{image_folder}/{image_name}.png'
        if os.path.exists(image_path):
            # Load and resize image to 1920x1080
            img = Image.open(image_path)
            img_resized = img.resize((DISPLAY_WIDTH, DISPLAY_HEIGHT), Image.Resampling.LANCZOS)
            images[image_name] = img_resized
            print(f"βœ“ Loaded and resized image for {image_name}")
        else:
            print(f"βœ— Image not found for {image_name} at {image_path}")
    return images

# Load images
illusion_images = load_illusion_images(image_folder)

# Create placeholder image with correct dimensions
def create_placeholder_image(image_name):
    """Create a placeholder image with the correct dimensions"""
    fig, ax = plt.subplots(figsize=(19.2, 10.8), dpi=100)
    
    # Handle None or empty image_name
    if image_name is None:
        display_text = 'πŸ–ΌοΈ NO IMAGE SELECTED\n\nπŸ‘† Select an image from the dropdown above'
    else:
        display_text = f'πŸ–ΌοΈ {image_name.upper()}\n\nπŸ‘† Click where you first look\n\n⚠️ (Image not found)'
    
    ax.text(0.5, 0.5, display_text,
           transform=ax.transAxes, ha='center', va='center',
           fontsize=28, fontweight='bold', color='#666666')
    ax.set_xlim(0, DISPLAY_WIDTH)
    ax.set_ylim(0, DISPLAY_HEIGHT)
    ax.axis('off')
    ax.set_facecolor('#f8f9fa')  # Light gray background for placeholder

    buf = io.BytesIO()
    plt.savefig(buf, format='png', dpi=100, bbox_inches='tight', pad_inches=0)
    buf.seek(0)
    plt.close()

    img = Image.open(buf)
    # Ensure it's exactly 1920x1080
    img_resized = img.resize((DISPLAY_WIDTH, DISPLAY_HEIGHT), Image.Resampling.LANCZOS)
    return img_resized

def process_click(image_name, model_type, evt: gr.SelectData):
    """Process click on image and return prediction"""

    if evt is None:
        return "❗ Please click on the image where you first looked!", None, None
    
    if image_name is None:
        return "❗ Please select an image first!", None, None

    # Get click coordinates (Gradio provides them in image coordinates)
    click_x_img, click_y_img = evt.index

    # Convert to normalized coordinates
    # Gradio coordinates: (0,0) is top-left
    # Our coordinates: (0,0) is center
    # x range: -960 to 960, y range: -540 to 540
    click_x_norm = click_x_img - (DISPLAY_WIDTH / 2)    # Convert to -960 to 960
    click_y_norm = (DISPLAY_HEIGHT / 2) - click_y_img   # INVERTED: Convert to -540 to 540
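    # Worked example: the top-left pixel (0, 0) maps to (-960.0, 540.0);
    # the exact centre (960, 540) maps to (0.0, 0.0).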

    # Get model data
    if image_name not in all_models:
        return f"❗ No model found for {image_name}", None, None

    model_data = all_models[image_name]

    # Calculate features
    centroid_left = np.array([model_data['centroid_left_x'], model_data['centroid_left_y']])
    centroid_right = np.array([model_data['centroid_right_x'], model_data['centroid_right_y']])
    fixation = np.array([click_x_norm, click_y_norm])

    dist_left = np.linalg.norm(fixation - centroid_left)
    dist_right = np.linalg.norm(fixation - centroid_right)
    bias = dist_right - dist_left
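    # Sign convention: bias > 0 means the fixation is closer to the LEFT
    # centroid (dist_right > dist_left), matching the 'bias_to_left' name.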

    # Make prediction
    X = pd.DataFrame([[dist_left, dist_right, bias]], 
                    columns=['dist_to_left', 'dist_to_right', 'bias_to_left'])
    model = model_data[f'{model_type}_model']
    prediction = model.predict(X)[0]
    probability = model.predict_proba(X)[0]
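    # predict_proba columns follow model.classes_; indexing by the raw
    # prediction below assumes labels were integer-encoded 0..n-1 during
    # training (e.g. via LabelEncoder); this is assumed, not verified here.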

    # Decode prediction
    predicted_class = model_data['label_classes'][prediction]
    confidence = probability[prediction]

    # Create confidence level description
    if confidence >= 0.8:
        confidence_level = "Very High 🟒"
    elif confidence >= 0.65:
        confidence_level = "High 🟑"
    elif confidence >= 0.5:
        confidence_level = "Moderate 🟠"
    else:
        confidence_level = "Low πŸ”΄"

    # Create detailed message
    message = f"""
    <div style="background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); padding: 1.5rem; border-radius: 10px; color: white; margin: 0.5rem 0;">
    <h2 style="color: white; margin-top: 0;">πŸ” Prediction Results</h2>

    <p><strong>πŸ‘† Click Location:</strong> ({click_x_img}, {click_y_img}) pixels from top-left<br>
    <strong>🎯 Normalized Position:</strong> ({click_x_norm:.1f}, {click_y_norm:.1f}) from center</p>
    
    <hr style="border-color: rgba(255,255,255,0.3);">
    
    <p><strong>πŸ“ Distance to Left Region:</strong> {dist_left:.1f} pixels<br>
    <strong>πŸ“ Distance to Right Region:</strong> {dist_right:.1f} pixels<br>
    <strong>βš–οΈ Bias Score:</strong> {bias:.1f}</p>
    
    <hr style="border-color: rgba(255,255,255,0.3);">

    <h3 style="color: white;">🧠 Prediction: You likely see the {predicted_class.upper()} interpretation</h3>
    <h3 style="color: white;">πŸ“Š Confidence: {confidence:.1%} ({confidence_level})</h3>
    """

    # Create visualization
    viz = create_visualization(image_name, click_x_norm, click_y_norm,
                             predicted_class, confidence, model_type)

    # Get example interpretations
    interpretations = {
        'duck-rabbit': {'left': 'Duck πŸ¦†', 'right': 'Rabbit 🐰'},
        'face-vase': {'left': 'Two Faces πŸ‘₯', 'right': 'Vase 🏺'},
        'young-old': {'left': 'Young Woman πŸ‘©', 'right': 'Old Woman πŸ‘΅'},
        'princess-oldMan': {'left': 'Princess πŸ‘Έ', 'right': 'Old Man πŸ‘΄'},
        'lily-woman': {'left': 'Lily 🌸', 'right': 'Woman πŸ‘©'},
        'tiger-monkey': {'left': 'Tiger πŸ…', 'right': 'Monkey πŸ’'}
    }

    if image_name in interpretations:
        specific = interpretations[image_name][predicted_class]
        message += f"<p><strong>🎨 What you see:</strong> {specific}</p>"
    
    message += "</div>"

    return message, viz, create_stats_table(image_name, model_type)

def create_visualization(image_name, click_x, click_y, prediction, confidence, model_type='rf'):
    """Create a visualization showing the click point, centroids, and prediction"""

    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 6), facecolor='#f8f9fa')

    # Get model data
    model_data = all_models[image_name]
    centroid_left = np.array([model_data['centroid_left_x'], model_data['centroid_left_y']])
    centroid_right = np.array([model_data['centroid_right_x'], model_data['centroid_right_y']])

    # Left plot: Decision boundary with proper axis range
    resolution = 100
    x_range = np.linspace(-960, 960, resolution)  # Full x range
    y_range = np.linspace(-540, 540, resolution)  # Full y range
    xx, yy = np.meshgrid(x_range, y_range)

    # Calculate features for the whole grid at once (vectorized)
    points = np.c_[xx.ravel(), yy.ravel()]
    dist_left = np.linalg.norm(points - centroid_left, axis=1)
    dist_right = np.linalg.norm(points - centroid_right, axis=1)
    bias = dist_right - dist_left

    X = pd.DataFrame({'dist_to_left': dist_left,
                      'dist_to_right': dist_right,
                      'bias_to_left': bias})
    model = model_data[f'{model_type}_model']
    Z = model.predict(X)
    Z = Z.reshape(xx.shape)

    # Plot decision boundary
    colors = ListedColormap(['#a8d5ff', '#ffb3b3'])  # Softer blue and red
    ax1.contourf(xx, yy, Z, alpha=0.7, cmap=colors)

    # Plot centroids
    ax1.scatter(centroid_left[0], centroid_left[1],
               c='blue', marker='*', s=500, edgecolors='black', label='Left centroid')
    ax1.scatter(centroid_right[0], centroid_right[1],
               c='red', marker='*', s=500, edgecolors='black', label='Right centroid')

    # Plot user's click
    ax1.scatter(click_x, click_y, c='green', marker='X', s=300,
               edgecolors='black', linewidth=2, label='Your fixation', zorder=10)

    # Draw lines to centroids
    ax1.plot([click_x, centroid_left[0]], [click_y, centroid_left[1]],
            'b--', alpha=0.5, linewidth=2)
    ax1.plot([click_x, centroid_right[0]], [click_y, centroid_right[1]],
            'r--', alpha=0.5, linewidth=2)

    ax1.set_xlabel('X (pixels from center)')
    ax1.set_ylabel('Y (pixels from center)')
    ax1.set_title(f'Decision Space - {model_type.upper()} Model')
    ax1.grid(True, alpha=0.3)
    ax1.legend(loc='upper right', framealpha=0.9)
    ax1.set_xlim(-960, 960)  # Full width range
    ax1.set_ylim(-540, 540)  # Full height range
    ax1.set_aspect('equal')
    ax1.set_facecolor('#f8f9fa')  # Light background

    # Right plot: Statistics
    image_df = master_df[master_df['image_type'] == image_name]

    # Create bar chart of choices
    choice_counts = image_df['choice'].value_counts()
    bars = ax2.bar(choice_counts.index, choice_counts.values,
           color=['#4b86db' if x == 'left' else '#db4b4b' for x in choice_counts.index])
    
    # Add values on top of bars
    for bar in bars:
        height = bar.get_height()
        ax2.text(bar.get_x() + bar.get_width()/2., height + 0.5,
                f'{height:.0f}',
                ha='center', va='bottom', fontsize=10)

    # Add prediction annotation
    ax2.text(0.5, 0.95, f'Your Predicted Choice: {prediction.upper()}',
            transform=ax2.transAxes, ha='center', va='top',
            fontsize=16, fontweight='bold',
            bbox=dict(boxstyle='round,pad=0.5', facecolor='#c2f0c2' if prediction == 'left' else '#f0c2c2', 
                     alpha=0.9, edgecolor='gray'))

    ax2.text(0.5, 0.85, f'Confidence: {confidence:.1%}',
            transform=ax2.transAxes, ha='center', va='top', fontsize=14)

    ax2.set_xlabel('Interpretation')
    ax2.set_ylabel('Number of Participants')
    ax2.set_title(f'Overall Distribution for {image_name}')

    # Add model accuracy info
    ax2.text(0.5, 0.05, f'Model CV Accuracy: {model_data[f"cv_accuracy_{model_type}"]:.1%}',
            transform=ax2.transAxes, ha='center', va='bottom', fontsize=12,
            style='italic', alpha=0.7)
    
    ax2.set_facecolor('#f8f9fa')  # Light background
    
    plt.tight_layout()

    # Convert plot to image
    buf = io.BytesIO()
    plt.savefig(buf, format='png', dpi=100, bbox_inches='tight')
    buf.seek(0)
    plt.close()

    return Image.open(buf)

def create_stats_table(image_name, model_type):
    """Create a statistics table for the selected image"""
    model_data = all_models[image_name]
    image_df = master_df[master_df['image_type'] == image_name]

    stats = {
        'Metric': ['πŸ‘₯ Total Participants', '⬅️ Left Choices', '➑️ Right Choices',
                   f'🎯 {model_type.upper()} Accuracy', 'βš–οΈ Class Balance', 'πŸ“Š Majority Choice'],
        'Value': [
            len(image_df),
            model_data['class_distribution'].get('left', 0),
            model_data['class_distribution'].get('right', 0),
            f"{model_data[f'cv_accuracy_{model_type}']:.1%}",
            f"{min(model_data['class_distribution'].values()) / len(image_df):.1%}",
            f"{image_df['choice'].mode()[0].title()} ({image_df['choice'].value_counts().max()}/{len(image_df)})"
        ]
    }

    return pd.DataFrame(stats)

# Custom CSS for better styling
css = """
.gradio-container {
    font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;
}

.main-header {
    text-align: center;
    margin-bottom: 2rem;
    padding: 1.5rem;
    background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
    border-radius: 15px;
    color: white;
    box-shadow: 0 4px 15px rgba(0,0,0,0.1);
}

.instruction-box {
    background: linear-gradient(135deg, #f093fb 0%, #f5576c 100%);
    padding: 1rem;
    border-radius: 10px;
    color: white;
    margin: 1rem 0;
}

.stats-highlight {
    background-color: #f8f9fa;
    border-left: 4px solid #007bff;
    padding: 1rem;
    margin: 0.5rem 0;
}
"""

# Create Gradio Interface
with gr.Blocks(title="🧠 Optical Illusion First Fixation Predictor", 
               theme=gr.themes.Soft(), css=css) as demo:
    
    gr.HTML("""
    <div class="main-header">
        <h1>🧠 Optical Illusion First Fixation Predictor</h1>
        <h3>Can we predict what you see based on where you look?</h3>
        <p>This AI-powered tool analyzes your first fixation point to predict which interpretation of an ambiguous image you'll perceive!</p>
    </div>
    """)

    with gr.Row():
        with gr.Column(scale=2):
            # Image selection with description
            available_images = list(all_models.keys()) if all_models else []
            default_image = available_images[0] if available_images else None
            
            image_choice = gr.Dropdown(
                choices=available_images,
                value=default_image,
                label="πŸ–ΌοΈ Select Optical Illusion",
                info="Choose which ambiguous image to analyze"
            )
            
            # Display image description
            image_description = gr.Markdown(
                value=IMAGE_DESCRIPTIONS.get(default_image, "Select an image to see its description."),
                label="πŸ“– Image Description"
            )

            # Model selection with enhanced info
            model_type = gr.Radio(
                choices=[("Random Forest (Recommended)", "rf"), ("Logistic Regression", "lr")],
                value="rf",
                label="πŸ” Prediction Model",
                info="Random Forest typically provides better accuracy for this task",
                container=True
            )

            # Display image with better styling
            image_display = gr.Image(
                label="πŸ‘† Click where your eyes first landed on the image",
                interactive=True,
                type="pil",
                height=540,  # Reduced for better mobile compatibility
                width=960,
                elem_classes="main-image"
            )

        with gr.Column(scale=1):
            # Results section with enhanced styling
            prediction_output = gr.Markdown(
                label="🧠 Prediction Results",
                value="""<div style="background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); padding: 1rem; border-radius: 10px; color: white;">
                <strong>πŸ‘† Click on the image to get your prediction!</strong><br><br>
                The AI will analyze where you looked first and predict what you're likely to see.
                </div>""",
                elem_classes="stats-highlight"
            )
            stats_table = gr.DataFrame(label="πŸ“Š Image Statistics")

    # Visualization output with better layout
    with gr.Row():
        visualization_output = gr.Image(
            label="πŸ“ˆ Analysis Visualization",
            type="pil"
        )

    # Enhanced information sections
    with gr.Accordion("ℹ️ How It Works", open=False):
        gr.Markdown("""
        ### πŸ€– The Science Behind the Prediction
        
        **🎯 Feature Extraction:**
        - We calculate the distance from your click point to the centroid of each interpretation region
        - A "bias score" measures which region you're closer to
        
        **🧠 Machine Learning Models:**
        - **Random Forest:** Uses multiple decision trees for robust predictions
        - **Logistic Regression:** A linear approach that's fast and interpretable
        
        **πŸ“Š Training Process:**
        - Trained on eye-tracking data from multiple participants
        - Uses Leave-One-Participant-Out Cross-Validation for unbiased evaluation
        - Ensures the model generalizes to new users
        
        **🎨 Coordinate System:**
        - Center of image = (0, 0)
        - X-axis: -960 to +960 pixels (left to right)
        - Y-axis: -540 to +540 pixels (bottom to top)
        """)

    with gr.Accordion("πŸ“Š Model Performance", open=False):
        if all_models:
            summary_data = []
            for img_name, model_data in all_models.items():
                summary_data.append({
                    'Image': img_name.replace('-', ' ').title(),
                    'RF Accuracy': f"{model_data['cv_accuracy_rf']:.1%}",
                    'LR Accuracy': f"{model_data['cv_accuracy_lr']:.1%}",
                    'Participants': model_data['total_samples'],
                    'Best Model': 'RF' if model_data['cv_accuracy_rf'] > model_data['cv_accuracy_lr'] else 'LR'
                })

            gr.DataFrame(
                value=pd.DataFrame(summary_data),
                label="Cross-Validation Performance Summary"
            )

    # Function to update image and description
    def update_image_and_description(image_name):
        # Handle None case
        if image_name is None:
            empty_stats = pd.DataFrame({
                'Metric': ['Select an image to see statistics'],
                'Value': ['']
            })
            return (create_placeholder_image(None), 
                   "Select an image to see its description.",
                   """<div style="background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); padding: 1rem; border-radius: 10px; color: white;">
                   <strong>πŸ‘† Please select an image first!</strong>
                   </div>""", 
                   empty_stats)
        
        # Update description
        description = IMAGE_DESCRIPTIONS.get(image_name, "Description not available.")
        
        # Use real images if available, otherwise use placeholder
        if image_name in illusion_images:
            # Reuse the shared stats builder (RF accuracy shown by default)
            return (illusion_images[image_name],
                   f"**{description}**",
                   """<div style="background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); padding: 1rem; border-radius: 10px; color: white;">
                   <strong>πŸ‘† Click on the image to get your prediction!</strong><br><br>
                   The AI will analyze where you looked first and predict what you're likely to see.
                   </div>""",
                   create_stats_table(image_name, 'rf'))
        else:
            empty_stats = pd.DataFrame({
                'Metric': ['Image not found'],
                'Value': ['']
            })
            return (create_placeholder_image(image_name), 
                   f"**{description}**", 
                   """<div style="background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); padding: 1rem; border-radius: 10px; color: white;">
                   <strong>⚠️ Image file not found!</strong>
                   </div>""", 
                   empty_stats)

    # Connect events
    image_choice.change(
        fn=update_image_and_description,
        inputs=[image_choice],
        outputs=[image_display, image_description, prediction_output, stats_table]
    )

    # Handle click event
    image_display.select(
        fn=process_click,
        inputs=[image_choice, model_type],
        outputs=[prediction_output, visualization_output, stats_table]
    )

    # Load initial image
    demo.load(
        fn=update_image_and_description,
        inputs=[image_choice],
        outputs=[image_display, image_description, prediction_output, stats_table]
    )

    # Enhanced examples section
    if available_images:
        gr.Markdown("## πŸ“Œ Quick Examples")
        with gr.Row():
            example_list = []
            for img in ["duck-rabbit", "face-vase", "young-old", "tiger-monkey"]:
                if img in available_images:
                    example_list.append([img, "rf"])
            
            if example_list:
                gr.Examples(
                    examples=example_list,
                    inputs=[image_choice, model_type],
                    label="Try these popular illusions"
                )
                
    # Enhanced footer
    gr.HTML("""
    <div style="text-align: center; margin-top: 2rem; padding: 1.5rem; background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); border-radius: 15px; color: white;">
        <h4>πŸ”¬ WID2003 Cognitive Science Group Assignment - OCC 2 Group 2</h4>
        <p><strong>Universiti Malaya</strong> | 2025</p>
        <p style="font-size: 0.9em; opacity: 0.8;">Vote for Us!</p>
    </div>
    """)

# Debug info
print(f"\nImage folder: {image_folder}")
print(f"Images loaded: {list(illusion_images.keys())}")
print(f"Models loaded: {list(all_models.keys())}")
print(f"Image dimensions: {DISPLAY_WIDTH}x{DISPLAY_HEIGHT}")

# Launch the app
if __name__ == "__main__":
    demo.launch(
        # share=True,
        # debug=True
    )
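
# Offline sanity check (a minimal sketch, assuming the bundles loaded above
# exist and include 'duck-rabbit'; the fixation point is hypothetical):
#
#   bundle = all_models['duck-rabbit']
#   cl = np.array([bundle['centroid_left_x'], bundle['centroid_left_y']])
#   cr = np.array([bundle['centroid_right_x'], bundle['centroid_right_y']])
#   fix = np.array([-300.0, 50.0])            # centred coords, y-up
#   dl, dr = np.linalg.norm(fix - cl), np.linalg.norm(fix - cr)
#   X = pd.DataFrame([[dl, dr, dr - dl]],
#                    columns=['dist_to_left', 'dist_to_right', 'bias_to_left'])
#   print(bundle['label_classes'][bundle['rf_model'].predict(X)[0]])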