File size: 2,903 Bytes
1d782a2
9643386
 
 
313728a
 
 
93e8de0
313728a
 
 
9643386
313728a
9643386
313728a
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9643386
313728a
 
9643386
313728a
 
 
 
 
 
 
 
9643386
 
 
 
313728a
 
 
 
 
 
 
 
9643386
 
313728a
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9643386
313728a
 
 
 
 
 
 
 
9643386
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
from functools import lru_cache

import joblib
import numpy as np
import pandas as pd
import cv2
from skimage.color import rgb2hsv
from skimage.measure import shannon_entropy
from scipy.ndimage import generic_filter

def extract_features(image_path):
    """Extract color, entropy, brightness, and local gray-level features.

    Parameters
    ----------
    image_path : str
        Path to an image file readable by OpenCV.

    Returns
    -------
    dict
        Feature dictionary with keys:
        ``meanr``/``meang``/``meanb`` — per-channel RGB means;
        ``HHR`` — high-hue ratio, fraction of pixels with HSV hue > 0.95;
        ``Ent`` — Shannon entropy of the grayscale image;
        ``B`` — mean grayscale brightness;
        ``g1``..``g5`` — means of 3x3 sliding-window gray-level statistics.

    Raises
    ------
    FileNotFoundError
        If the image cannot be read from ``image_path``.
    """
    image = cv2.imread(image_path)
    if image is None:
        # cv2.imread signals failure by returning None rather than raising,
        # which previously caused an opaque cv2.error inside cvtColor.
        raise FileNotFoundError(f"Could not read image: {image_path}")
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

    # Per-channel means of the RGB image.
    meanr = np.mean(image[:, :, 0])  # Red channel
    meang = np.mean(image[:, :, 1])  # Green channel
    meanb = np.mean(image[:, :, 2])  # Blue channel

    # High-hue ratio: fraction of pixels whose HSV hue exceeds 0.95.
    hsv_image = rgb2hsv(image)
    hue = hsv_image[:, :, 0]
    high_hue_pixels = np.sum(hue > 0.95)
    total_pixels = hue.size
    HHR = high_hue_pixels / total_pixels

    # Grayscale view used by the entropy, brightness, and window features.
    gray_image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)

    # Shannon entropy of the gray-level distribution.
    Ent = shannon_entropy(gray_image)

    # Mean brightness.
    B = np.mean(gray_image)

    # 3x3 sliding-window statistics. generic_filter passes the flattened
    # 3x3 neighborhood, so window[4] is the center pixel.
    # NOTE(review): gray_image is uint8 and generic_filter preserves the
    # input dtype, so g3 (center - neighborhood mean) wraps around when
    # negative. Left unchanged because the pre-trained model was
    # presumably fitted on features computed exactly this way — confirm
    # against the training pipeline before "fixing".
    def g1_filter(window):
        # Center minus neighborhood minimum.
        return window[4] - np.min(window)

    def g2_filter(window):
        # Neighborhood maximum minus center.
        return np.max(window) - window[4]

    def g3_filter(window):
        # Center minus neighborhood mean (subject to uint8 wraparound).
        return window[4] - np.mean(window)

    def g4_filter(window):
        # Neighborhood standard deviation.
        return np.std(window)

    def g5_filter(window):
        # Center pixel value itself.
        return window[4]

    # Apply each filter over the whole image and average the responses.
    g1 = generic_filter(gray_image, g1_filter, size=3).mean()
    g2 = generic_filter(gray_image, g2_filter, size=3).mean()
    g3 = generic_filter(gray_image, g3_filter, size=3).mean()
    g4 = generic_filter(gray_image, g4_filter, size=3).mean()
    g5 = generic_filter(gray_image, g5_filter, size=3).mean()

    return {
        "meanr": meanr,
        "meang": meang,
        "meanb": meanb,
        "HHR": HHR,
        "Ent": Ent,
        "B": B,
        "g1": g1,
        "g2": g2,
        "g3": g3,
        "g4": g4,
        "g5": g5,
    }

@lru_cache(maxsize=1)
def _load_artifacts():
    """Load the serialized SVR model and MinMaxScaler once per process.

    Previously every prediction re-read the .pkl files from disk; caching
    the pair makes repeated predictions cheap. The unused label_encoder
    load was removed (it was never referenced).
    """
    svr_model = joblib.load('svr_model.pkl')   # SVR model
    scaler = joblib.load('minmax_scaler.pkl')  # MinMaxScaler
    return svr_model, scaler


def predict_hemoglobin(age, gender, image_path):
    """Predict a hemoglobin value from an image plus age and gender.

    Parameters
    ----------
    age : numeric
        Subject age, appended to the image features as-is.
    gender : str
        Encoded as 1 for 'male' (case-insensitive), 0 for anything else.
    image_path : str
        Path to the image passed to :func:`extract_features`.

    Returns
    -------
    float
        The SVR model's hemoglobin prediction.
    """
    # Image-derived features plus the two demographic features.
    features = extract_features(image_path)
    features['age'] = age
    features['gender'] = 1 if gender.lower() == 'male' else 0

    # Single-row DataFrame so the scaler/model see a 2-D input.
    # NOTE(review): column order must match the order used when the
    # scaler and model were fitted — confirm against the training code.
    features_df = pd.DataFrame([features])

    svr_model, scaler = _load_artifacts()

    # Scale all columns (age/gender included) exactly as at training time.
    features_df_scaled = scaler.transform(features_df)

    return svr_model.predict(features_df_scaled)[0]