KameliaZaman committed
Commit dfd5176 · verified · 1 Parent(s): 10ef39f

Upload butterfly_classification_with_cnn.py

Files changed (1)
  1. butterfly_classification_with_cnn.py +202 -0
butterfly_classification_with_cnn.py ADDED
@@ -0,0 +1,202 @@
# -*- coding: utf-8 -*-
"""Butterfly classification with CNN.ipynb

Automatically generated by Colab.

Original file is located at
    https://colab.research.google.com/drive/18Jo5pBel2xJCse_nNq61zkDnPN_zzg_u

# Import Libraries and Load Data
"""

## Remove Warnings ##
import warnings
warnings.filterwarnings("ignore")

## Data ##
import numpy as np
import pandas as pd
import os

## Visualization ##
import matplotlib.pyplot as plt
import plotly.express as px
import seaborn as sns
import plotly.graph_objects as go

## Image ##
import cv2
from tensorflow.keras.preprocessing.image import ImageDataGenerator

## Tensorflow ##
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Input, Dense, Conv2D, Dropout, Flatten, Activation, MaxPooling2D, GlobalAveragePooling2D
from tensorflow.keras.optimizers import Adam, RMSprop
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.callbacks import ReduceLROnPlateau, EarlyStopping, ModelCheckpoint, LearningRateScheduler
from tensorflow.keras.applications import ResNet50V2

df = pd.read_csv('C:/Users/kamel/Documents/Image Classification/butterfly-dataset/butterflies and moths.csv')
IMAGE_DIR = 'C:/Users/kamel/Documents/Image Classification/butterfly-dataset'
df['filepaths'] = IMAGE_DIR + '/' + df['filepaths']
df.head()

train_df = df.loc[df['data set'] == 'train']
val_df = df.loc[df['data set'] == 'valid']
test_df = df.loc[df['data set'] == 'test']
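# Optional check (not part of the original notebook): confirm how many rows the
# 'data set' column assigns to each split before building the generators.
print('train/valid/test rows:', train_df.shape[0], val_df.shape[0], test_df.shape[0])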
"""# Exploratory Data Analysis"""

label_counts = df['labels'].value_counts()[:10]

fig = px.bar(x=label_counts.index,
             y=label_counts.values,
             color=label_counts.values,
             text=label_counts.values,
             color_continuous_scale='Blues')

fig.update_layout(
    title_text='Labels Distribution',
    template='plotly_white',
    xaxis=dict(
        title='Label',
    ),
    yaxis=dict(
        title='Count',
    )
)

fig.update_traces(marker_line_color='black',
                  marker_line_width=1.5,
                  opacity=0.8)

fig.show()
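# Optional static alternative (an addition, not part of the original notebook):
# seaborn and matplotlib are imported above but unused; this reproduces the same
# top-10 label counts as a plain bar chart for environments without Plotly rendering.
plt.figure(figsize=(10, 4))
sns.barplot(x=label_counts.index, y=label_counts.values, color='steelblue')
plt.xticks(rotation=45, ha='right')
plt.title('Labels Distribution (top 10)')
plt.tight_layout()
plt.show()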
"""# Generate Image using ImageDataGenerator"""

# Only the training data is augmented; validation images are just rescaled
train_gen = ImageDataGenerator(horizontal_flip=True, vertical_flip=True, rescale=1/255.)
val_gen = ImageDataGenerator(rescale=1/255.)

train_dir = 'C:/Users/kamel/Documents/Image Classification/butterfly-dataset/train'
val_dir = 'C:/Users/kamel/Documents/Image Classification/butterfly-dataset/valid'

BATCH_SIZE = 16
SEED = 56
IMAGE_SIZE = (224, 224)  # must match the 224x224 input tensor defined in the model below

train_flow_gen = train_gen.flow_from_directory(directory=train_dir,
                                               class_mode='sparse',
                                               batch_size=BATCH_SIZE,
                                               target_size=IMAGE_SIZE,
                                               seed=SEED)

val_flow_gen = val_gen.flow_from_directory(directory=val_dir,
                                           class_mode='sparse',
                                           batch_size=BATCH_SIZE,
                                           target_size=IMAGE_SIZE,
                                           seed=SEED)
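# Optional sanity check (not part of the original notebook): flow_from_directory
# derives sparse labels from the alphabetically sorted class subdirectories, and
# class_indices exposes that mapping. The hard-coded class_names list in the
# deployment section below has to agree with it.
index_to_class = {v: k for k, v in train_flow_gen.class_indices.items()}
print('Number of classes found:', len(index_to_class))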
"""# Create Model"""

verbose = False

# ResNet50V2 backbone pretrained on ImageNet, with a new classification head
input_tensor = Input(shape=(224, 224, 3))

base_model = ResNet50V2(input_tensor=input_tensor, include_top=False, weights='imagenet')

bm_output = base_model.output

x = GlobalAveragePooling2D()(bm_output)
x = Dense(1024, activation='relu')(x)
x = Dropout(rate=0.5)(x)
output = Dense(100, activation='softmax')(x)  # 100 butterfly/moth classes
model = Model(inputs=input_tensor, outputs=output)
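# Optional variant (not what this script does): a common transfer-learning recipe
# freezes the pretrained backbone first so only the new head is trained, then
# unfreezes it for fine-tuning. Left commented out to keep the original behaviour.
# base_model.trainable = False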
if verbose:
    model.summary()

"""# ResNet Modelling"""

model.compile(optimizer=Adam(learning_rate=0.001), loss='sparse_categorical_crossentropy', metrics=['accuracy'])

rlr_cb = ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=3, mode='min', verbose=0)
early_cb = EarlyStopping(monitor='val_loss', patience=5, mode='min', verbose=0)
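# Optional (unused in the original run): ModelCheckpoint is imported above but never
# wired in. A sketch of how best-weights saving could be added; the checkpoint path
# is an assumption, and the callback would also need to be appended to the callbacks
# list passed to model.fit below.
ckpt_cb = ModelCheckpoint('C:/Users/kamel/Documents/Image Classification/resnet_best.h5',
                          monitor='val_loss', save_best_only=True, mode='min', verbose=0)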
model.fit(train_flow_gen, epochs=5,
          steps_per_epoch=int(np.ceil(train_df.shape[0]/BATCH_SIZE)),
          validation_data=val_flow_gen,
          validation_steps=int(np.ceil(val_df.shape[0]/BATCH_SIZE)),
          callbacks=[rlr_cb, early_cb])

test_dir = 'C:/Users/kamel/Documents/Image Classification/butterfly-dataset/test'
test_gen = ImageDataGenerator(rescale=1/255.)
test_flow_gen = test_gen.flow_from_directory(directory=test_dir,
                                             class_mode='sparse',
                                             batch_size=BATCH_SIZE,
                                             target_size=IMAGE_SIZE,
                                             seed=SEED)

print('ResNet Test Data Accuracy: {0}'.format(model.evaluate(test_flow_gen)[1:][0]))
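# Optional (not part of the original script): a second pass over the test set for
# per-image predictions. flow_from_directory shuffles by default, so a generator
# with shuffle=False is needed for predictions to line up with .classes.
ordered_test_gen = test_gen.flow_from_directory(directory=test_dir,
                                                class_mode='sparse',
                                                batch_size=BATCH_SIZE,
                                                target_size=IMAGE_SIZE,
                                                shuffle=False)
test_preds = np.argmax(model.predict(ordered_test_gen), axis=1)
print('Manual test accuracy check: {0:.4f}'.format(np.mean(test_preds == ordered_test_gen.classes)))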
# Save the current weights manually
model.save('C:/Users/kamel/Documents/Image Classification/model_checkpoint_manual_effnet.h5')

"""# Deployment"""

import gradio as gr
import tensorflow as tf
from tensorflow.keras.models import load_model
import numpy as np
import cv2

# Load the trained model
model_path = 'C:/Users/kamel/Documents/Image Classification/model_checkpoint_manual_effnet.h5'
model = load_model(model_path)

# NOTE: the model head has 100 output units, but only the first 10 class names
# are listed here, so predictions outside this subset cannot be mapped to a name.
class_names = ['ADONIS', 'AFRICAN GIANT SWALLOWTAIL', 'AMERICAN SNOOT', 'AN 88', 'APPOLLO', 'ARCIGERA FLOWER MOTH', 'ATALA', 'ATLAS MOTH', 'BANDED ORANGE HELICONIAN', 'BANDED PEACOCK']

# Define a function to preprocess the input image
def preprocess_image(img):
    # Check if img is a file path or an image object
    if isinstance(img, str):
        # Load and preprocess the image
        img = cv2.imread(img)
        img = cv2.resize(img, (224, 224))
        img = img / 255.0  # Normalize pixel values
        img = np.expand_dims(img, axis=0)  # Add batch dimension
    elif isinstance(img, np.ndarray):
        # If img is already an image array, resize it
        img = cv2.resize(img, (224, 224))
        img = img / 255.0  # Normalize pixel values
        img = np.expand_dims(img, axis=0)  # Add batch dimension
    else:
        raise ValueError("Unsupported input type. Please provide a file path or a NumPy array.")

    return img

# Define the classification function
def classify_image(img):
    # Preprocess the image
    img = preprocess_image(img)

    # Make predictions
    predictions = model.predict(img)

    # Get the predicted class label
    predicted_class = np.argmax(predictions)

    # Get the predicted class name
    predicted_class_name = class_names[predicted_class]

    return f"Predicted Class: {predicted_class_name}"
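# Optional variant (not part of the original app): returning a {label: probability}
# dict is the format Gradio's "label" output component accepts, which would show
# top-class confidences instead of a single string. Only the 10 listed class names
# can be scored here, for the reason noted above.
def classify_image_with_scores(img):
    probs = model.predict(preprocess_image(img))[0]
    return {name: float(probs[i]) for i, name in enumerate(class_names)}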
# Create a Gradio interface
iface = gr.Interface(fn=classify_image,
                     inputs="image",
                     outputs="text",
                     live=True)

# Launch the Gradio app
iface.launch()