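"""Streamlit demo app for the Hammad712/closed_eye_detection model.

The app downloads a Keras .h5 classifier from the Hugging Face Hub and predicts
whether the eyes in an uploaded image (or an image fetched from a URL) are open
or closed.

To run locally (assuming this file is saved as app.py):
    streamlit run app.py
"""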
import streamlit as st
import numpy as np
import cv2
from huggingface_hub import hf_hub_download
from tensorflow.keras.models import load_model
from io import BytesIO
from PIL import Image
import requests

# Download the model from Hugging Face and load it once. Caching keeps Streamlit
# from re-downloading and re-loading the weights on every rerun of the script.
# show_spinner=False so no element is rendered before st.set_page_config below.
@st.cache_resource(show_spinner=False)
def get_model():
    repo_id = "Hammad712/closed_eye_detection"
    filename = "Closed_Eye_Detection_98.h5"
    model_path = hf_hub_download(repo_id=repo_id, filename=filename)
    return load_model(model_path)

model = get_model()

# Set image dimensions
img_height, img_width = 150, 150
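# NOTE: 150x150 is assumed to match the input resolution the model was trained on;
# if the .h5 file expects a different shape, adjust these values accordingly.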

# Custom CSS
def set_css(style):
    st.markdown(f"<style>{style}</style>", unsafe_allow_html=True)

combined_css = """
    .main, .sidebar .sidebar-content { background-color: #1c1c1c; color: #f0f2f6; }
    .block-container { padding: 1rem 2rem; background-color: #333; border-radius: 10px; box-shadow: 0px 4px 10px rgba(0, 0, 0, 0.5); }
    .stButton>button, .stDownloadButton>button { background: linear-gradient(135deg, #ff7e5f, #feb47b); color: white; border: none; padding: 10px 24px; text-align: center; text-decoration: none; display: inline-block; font-size: 16px; margin: 4px 2px; cursor: pointer; border-radius: 5px; }
    .stSpinner { color: #4CAF50; }
    .title {
        font-size: 3rem;
        font-weight: bold;
        display: flex;
        align-items: center;
        justify-content: center;
    }
    .colorful-text {
        background: -webkit-linear-gradient(135deg, #ff7e5f, #feb47b);
        -webkit-background-clip: text;
        -webkit-text-fill-color: transparent;
    }
    .black-white-text {
        color: black;
    }
    .small-input .stTextInput>div>input {
        height: 2rem;
        font-size: 0.9rem;
    }
    .small-file-uploader .stFileUploader>div>div {
        height: 2rem;
        font-size: 0.9rem;
    }
    .custom-text {
        font-size: 1.2rem;
        color: #feb47b;
        text-align: center;
        margin-top: -20px;
        margin-bottom: 20px;
    }
"""

# Streamlit application
st.set_page_config(layout="wide")

st.markdown(f"<style>{combined_css}</style>", unsafe_allow_html=True)

st.markdown('<div class="title"><span class="colorful-text">Eye</span> <span class="black-white-text">Detection Model</span></div>', unsafe_allow_html=True)
st.markdown('<div class="custom-text">Upload an image or provide a URL to predict whether the eyes are open or closed.</div>', unsafe_allow_html=True)

# Input for image URL or path
with st.expander("Input Options", expanded=True):
    url = st.text_input("Enter image URL", "")
    uploaded_file = st.file_uploader("Or upload an image", type=["jpg", "jpeg", "png"])

def load_image_from_url(url):
    # Fetch the image with a timeout and fail fast on HTTP errors.
    response = requests.get(url, timeout=10)
    response.raise_for_status()
    img = Image.open(BytesIO(response.content)).convert('RGB')
    return np.array(img)

# If both a URL and an uploaded file are provided, the uploaded file takes precedence.
if uploaded_file is not None or url:
    if uploaded_file is not None:
        # Decode the uploaded bytes into a BGR image array (OpenCV convention)
        file_bytes = np.asarray(bytearray(uploaded_file.read()), dtype=np.uint8)
        image = cv2.imdecode(file_bytes, cv2.IMREAD_COLOR)
    elif url:
        # Read the image from URL
        image = load_image_from_url(url)
        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)

    # Resize and preprocess the image
    resized_image = cv2.resize(image, (img_height, img_width))
    input_image = resized_image.reshape((1, img_height, img_width, 3)) / 255.0
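    # NOTE: both input paths above yield a BGR array (OpenCV convention). This script
    # assumes the model accepts that channel order; if it was trained on RGB inputs,
    # convert with cv2.cvtColor(image, cv2.COLOR_BGR2RGB) before resizing.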

    # Perform inference
    predictions = model.predict(input_image)
    prediction = predictions[0][0]

    def get_label(prediction):
        return "Open Eye" if prediction >= 0.5 else "Closed Eye"

    label = get_label(prediction)

    # Display the image and prediction
    st.image(image, channels="BGR", caption='Uploaded Image' if uploaded_file is not None else 'Image from URL')
    st.markdown(f"### Prediction: {prediction:.2f}, Label: {label}")

    # Offer the displayed image for download (re-encoded as JPEG, so the saved file
    # may differ byte-for-byte from the originally uploaded one)
    img_byte_arr = BytesIO()
    img = Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
    img.save(img_byte_arr, format='JPEG')
    img_byte_arr = img_byte_arr.getvalue()

    st.download_button(
        label="Download Image",
        data=img_byte_arr,
        file_name="processed_image.jpg",
        mime="image/jpeg"
    )