import gradio as gr
from keras.models import load_model
from PIL import Image, ImageOps
import numpy as np
import time

np.set_printoptions(suppress=True)


class AIVisionSystem:
    def __init__(self, model_path="keras_model.h5", labels_path="labels.txt"):
        try:
            # Load the model
            self.model = load_model(model_path, compile=False)
            # Load the labels
            with open(labels_path, "r", encoding="utf-8") as f:
                self.class_names = f.readlines()
            print(self.class_names)
            self.model_loaded = True
        except Exception as e:
            print(f"❌ Model loading failed: {e}")
            self.model_loaded = False
            self.class_names = []

    def preprocess_image(self, image):
        if image is None:
            return None
        image = ImageOps.fit(image.convert("RGB"), (224, 224), Image.Resampling.LANCZOS)
        # Teachable Machine Keras exports expect float32 input scaled to [-1, 1]
        image_array = np.asarray(image).astype(np.float32)
        normalized = (image_array / 127.5) - 1
        return np.expand_dims(normalized, axis=0)

    def predict(self, image):
        if not self.model_loaded:
            # Demo fallback: random scores normalized to sum to 1
            if not self.class_names:
                return None
            fake_predictions = np.random.rand(len(self.class_names))
            return fake_predictions / fake_predictions.sum()

        processed_image = self.preprocess_image(image)
        if processed_image is None:
            return None
        prediction = self.model.predict(processed_image, verbose=0)
        print(prediction)
        return prediction[0]

    def analyze_image(self, image):
        if image is None:
            return {
                "status": "❌ No image detected",
                "prediction": "",
                "confidence": 0,
                "all_predictions": {},
                "processing_time": 0
            }

        # Start timing
        start_time = time.time()

        # Perform prediction
        predictions = self.predict(image)
        if predictions is None:
            return {
                "status": "❌ Identification failed",
                "prediction": "",
                "confidence": 0,
                "all_predictions": {},
                "processing_time": 0
            }

        # Calculate processing time
        processing_time = time.time() - start_time

        # Find the prediction with the highest confidence
        max_index = np.argmax(predictions)
        max_confidence = predictions[max_index]
        predicted_class = self.class_names[max_index].strip()

        # Strip the leading index from labels like "0 Plastic"
        if len(predicted_class.split(" ", 1)) > 1:
            class_name = predicted_class.split(" ", 1)[1]
        else:
            class_name = predicted_class

        # Prepare all prediction results
        all_predictions = {}
        for class_line, confidence in zip(self.class_names, predictions):
            clean_name = class_line.strip()
            if len(clean_name.split(" ", 1)) > 1:
                clean_name = clean_name.split(" ", 1)[1]
            all_predictions[clean_name] = float(confidence)
            print(f"{clean_name}: {confidence}")

        return {
            "status": "✅ Analysis complete",
            "prediction": class_name,
            "confidence": float(max_confidence),
            "all_predictions": all_predictions,
            "processing_time": processing_time
        }


def process_image(image):
    result = client.analyze_image(image)

    # Format the result display
    if result["confidence"] > 0:
        status_text = f"""
🔍 **AI Analysis Report**

**Status**: {result["status"]}
**Prediction**: `{result["prediction"]}`
**Confidence**: `{result["confidence"]:.2%}`
**Processing Time**: `{result["processing_time"]:.3f}s`

---

**📊 Detailed Analysis Results:**
"""

        # Add all prediction results, sorted by confidence
        sorted_predictions = sorted(result["all_predictions"].items(),
                                    key=lambda x: x[1], reverse=True)
        for class_name, confidence in sorted_predictions:
            bar_length = int(confidence * 20)  # 20-character-wide progress bar
            bar = "█" * bar_length + "░" * (20 - bar_length)
            status_text += f"\n`{class_name}`: {bar} `{confidence:.1%}`"

        # Prepare Gradio label format
        gradio_labels = dict(result["all_predictions"])
    else:
        status_text = result["status"]
        gradio_labels = {}

    return status_text, gradio_labels


# Custom CSS styles
custom_css = """
/* Main body background */
.gradio-container {
    background: linear-gradient(135deg, #0c0c0c 0%, #1a1a2e 50%, #16213e 100%) !important;
    color: #ffffff !important;
    font-family: 'IBM Plex Mono', monospace !important;
}

.gradio-container hr {
    margin: 0 !important;
    border-color: #8000ff !important;
}

/* Title style */
.main-header {
    text-align: center;
    background: linear-gradient(45deg, #00f5ff, #0080ff, #8000ff);
    -webkit-background-clip: text;
    -webkit-text-fill-color: transparent;
    background-clip: text;
    font-size: 3em !important;
    font-weight: bold !important;
    text-shadow: 0 0 30px rgba(0, 245, 255, 0.5);
    margin: 20px 0 !important;
    animation: glow 2s ease-in-out infinite alternate;
}

@keyframes glow {
    from { filter: drop-shadow(0 0 20px #00f5ff); }
    to { filter: drop-shadow(0 0 30px #8000ff); }
}

/* Subtitle */
.sub-header {
    text-align: center;
    color: #00f5ff !important;
    font-size: 1.2em !important;
    margin-bottom: 30px !important;
    opacity: 0.8;
}

/* Input area */
.input-section {
    background: rgba(0, 245, 255, 0.1) !important;
    border: 2px solid rgba(0, 245, 255, 0.3) !important;
    border-radius: 15px !important;
    padding: 20px !important;
    box-shadow: 0 0 25px rgba(0, 245, 255, 0.2) !important;
}

/* Output area */
.output-section {
    background: rgba(128, 0, 255, 0.1) !important;
    border: 2px solid rgba(128, 0, 255, 0.3) !important;
    border-radius: 15px !important;
    padding: 20px !important;
    box-shadow: 0 0 25px rgba(128, 0, 255, 0.2) !important;
}

/* Button style */
.gr-button {
    background: linear-gradient(45deg, #00f5ff, #8000ff) !important;
    border: none !important;
    color: white !important;
    font-weight: bold !important;
    border-radius: 25px !important;
    box-shadow: 0 4px 15px rgba(0, 245, 255, 0.3) !important;
    transition: all 0.3s ease !important;
}

.gr-button:hover {
    transform: translateY(-2px) !important;
    box-shadow: 0 6px 20px rgba(128, 0, 255, 0.4) !important;
}

/* Progress bar and labels */
.gr-label {
    color: #00f5ff !important;
    font-weight: bold !important;
}

/* Input box and text area */
.gr-textbox, .gr-markdown {
    background: rgba(0, 0, 0, 0.5) !important;
    border: 1px solid rgba(0, 245, 255, 0.3) !important;
    color: #ffffff !important;
    border-radius: 10px !important;
}

/* Image preview */
.gr-image {
    border: 2px solid rgba(0, 245, 255, 0.3) !important;
    border-radius: 15px !important;
    box-shadow: 0 0 20px rgba(0, 245, 255, 0.2) !important;
}

/* Label display */
.gr-label-list {
    background: rgba(0, 0, 0, 0.7) !important;
    border-radius: 10px !important;
    padding: 15px !important;
}

/* Flashing animation */
.processing {
    animation: pulse 1.5s ease-in-out infinite;
}

@keyframes pulse {
    0% { opacity: 1; }
    50% { opacity: 0.5; }
    100% { opacity: 1; }
}

/* Sci-fi style background pattern */
body::before {
    content: "";
    position: fixed;
    top: 0;
    left: 0;
    width: 100%;
    height: 100%;
    background-image:
        radial-gradient(circle at 25% 25%, rgba(0, 245, 255, 0.1) 0%, transparent 25%),
        radial-gradient(circle at 75% 75%, rgba(128, 0, 255, 0.1) 0%, transparent 25%);
    pointer-events: none;
    z-index: -1;
}
"""

MODEL_PATH = "keras_model.h5"
LABELS_PATH = "labels.txt"

# Initialize the AI system
client = AIVisionSystem(
    model_path=MODEL_PATH,
    labels_path=LABELS_PATH
)

# Create Gradio interface
with gr.Blocks(css=custom_css,
               title="AI Smart Recycling Station: Next-Generation Waste Classification System",
               theme=gr.themes.Soft(),
               js="""
               function refresh() {
                   const url = new URL(window.location);
                   if (url.searchParams.get('__theme') !== 'dark') {
                       url.searchParams.set('__theme', 'dark');
                       window.location.href = url.href;
                   }
               }
               """) as app:

    # Title area
    gr.HTML("""
        <h1 class="main-header">🤖 AI Smart Recycling Station: Next-Generation Waste Classification System</h1>
        <p class="sub-header">⚡ Designed by 李冠勳、陳品杉、楊恩婕、王竣毅 ⚡</p>
        <p class="sub-header">🔬 Plastic • Metal • Paper • Glass 🔬</p>
""") with gr.Row(): # Left side - Input area with gr.Column(scale=1): gr.HTML('
📡 INPUT INTERFACE
') with gr.Group(elem_classes="input-section"): image_input = gr.Image( label="Image Input Portal", sources=["upload", "webcam", "clipboard"], type="pil", height=300 ) analyze_btn = gr.Button( "🚀 INITIATE AI ANALYSIS", variant="primary", size="lg" ) # Right side - Output area with gr.Column(scale=1): gr.HTML('
📊 ANALYSIS RESULTS
') with gr.Group(elem_classes="output-section"): # Text results result_text = gr.Markdown( label="📋 Detailed Analysis Report", value="🔮 **Awaiting input...** \n\nPlease upload an image to start AI analysis", height=200 ) # Label distribution chart result_labels = gr.Label( label="🎯 Confidence Distribution", num_top_classes=5 ) gr.HTML('
💡 Quick Start Guide
') gr.HTML("""
        <div class="sub-header">
            1️⃣ Click the image area above to upload an image<br>
            2️⃣ Or use the webcam for live capture<br>
            3️⃣ Or paste an image directly from the clipboard<br>
            4️⃣ Click "INITIATE AI ANALYSIS" to start the analysis<br>
            5️⃣ View the real-time analysis results on the right!
        </div>
    """)

    # Set up event handling
    analyze_btn.click(
        fn=process_image,
        inputs=[image_input],
        outputs=[result_text, result_labels]
    )

    # Automatic analysis (when the image changes)
    image_input.change(
        fn=process_image,
        inputs=[image_input],
        outputs=[result_text, result_labels]
    )

app.launch(
    share=False,  # Set to True to generate a public link
    debug=False,
    show_error=True,
    show_api=False
)