Spaces: Runtime error

HelenGuohx committed on
Commit · ea4f3a2
1 Parent(s): 0892298

add model
Browse files
- .gitattributes +1 -0
- app.py +99 -3
- examples/cats.jpg +0 -0
- examples/happy_cat.jpeg +0 -0
- examples/happy_dog.jpeg +0 -0
- inference.py +102 -0
- my_model_weights.h5 +3 -0
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+.h5 filter=lfs diff=lfs merge=lfs -text
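Git LFS filter lines like the one added above keep large binaries out of the regular Git history; the ~117 MB my_model_weights.h5 added later in this commit is stored in the repository as a small LFS pointer. Note that .gitattributes patterns use gitignore-style globbing, so a rule meant to cover every HDF5 file is normally written with a leading asterisk; as committed, ".h5" only matches a file literally named ".h5". The usual form would be:

*.h5 filter=lfs diff=lfs merge=lfs -text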
app.py
CHANGED
@@ -1,7 +1,103 @@
+#!/usr/bin/env python
+# coding: utf-8
+
+# In[1]:
+
+
+#export
+import tensorflow as tf
+from tensorflow import keras
+from tensorflow.keras.models import Sequential
+from tensorflow.keras.optimizers import Adam, Adamax
+from tensorflow.keras.metrics import categorical_crossentropy
+from tensorflow.keras.preprocessing.image import ImageDataGenerator
+from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Activation, Dropout, BatchNormalization
+from tensorflow.keras import regularizers
+from keras.callbacks import EarlyStopping, LearningRateScheduler
+import numpy as np
+from tensorflow.keras.preprocessing import image
+from tensorflow.keras.applications.efficientnet import preprocess_input
 import gradio as gr
-
-    return "Hello " + name + "!!"
-
+
+# In[2]:
+
+
+# Create Model Structure
+class_labels = ['Angry', 'Other', 'Sad', 'Happy']
+
+
+img_size = (224, 224)
+channels = 3
+img_shape = (img_size[0], img_size[1], channels)
+class_count = len(class_labels) # to define number of classes in dense layer
+
+# create pre-trained model (you can built on pretrained model such as : efficientnet, VGG , Resnet )
+# we will use efficientnetb3 from EfficientNet family.
+base_model = tf.keras.applications.efficientnet.EfficientNetB5(include_top= False, weights= "imagenet", input_shape= img_shape, pooling= 'max')
+base_model.trainable = False
+
+model = Sequential([
+    base_model,
+    BatchNormalization(axis= -1, momentum= 0.99, epsilon= 0.001),
+    Dense(256, activation='relu'),
+    Dense(128, kernel_regularizer= regularizers.l2(l= 0.016), activity_regularizer= regularizers.l1(0.006),
+          bias_regularizer= regularizers.l1(0.006), activation= 'relu'),
+    Dropout(rate= 0.45, seed= 123),
+    Dense(class_count, activation= 'softmax')
+])
+
+model.trainable = False
+
+# model.compile(Adamax(learning_rate= 0.001), loss= 'categorical_crossentropy', metrics= ['accuracy'])
+
+model.summary()
+
+# In[6]:
+
+
+from tensorflow.keras.preprocessing import image
+from tensorflow.keras.applications.efficientnet import preprocess_input
+import matplotlib.pyplot as plt
+
+class_labels = ['Angry', 'Other', 'Sad', 'Happy']
+model.load_weights('my_model_weights.h5')
+
+
+def predict_and_display(img_array):
+
+    # img = image.load_img(image_path, target_size=(224, 224))
+    # img_array = image.img_to_array(img)
+    # print(imimg_arrayg)
+    img_array = np.expand_dims(img_array, axis=0)
+    img_array = preprocess_input(img_array)
+
+    prediction = model.predict(img_array)
+    predicted_class_index = np.argmax(prediction)
+
+    # class_indices = train_gen.class_indices
+    # class_labels = list(class_indices.keys())
+    predicted_class_label = class_labels[predicted_class_index]
+
+    # plt.imshow(img)
+    # plt.axis('off')
+    # if predicted_class_label == 'Other':
+    #     plt.title(f"The pet is normal")
+    # else:
+    #     plt.title(f"The Pet is {predicted_class_label}")
+    # plt.show()
+    return f"This pet is {predicted_class_label}"
+
+
+# In[11]:
+
+# In[ ]:
+
+iface = gr.Interface(fn=predict_and_display,
+                     inputs=gr.Image(shape=(224, 224)),
+                     outputs="label",
+                     title="Pet Emotion Detection",
+                     description="Fine tune EfficientNet on Pet's emotion datasets",
+                     examples=[["examples/happy_cat.jpeg"], ["examples/happy_dog.jpeg"], ["examples/cats.jpg"]]
+                     )
 iface.launch()
+
+# %%
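A note on the "Runtime error" shown at the top of the page: gr.Image(shape=(224, 224)) is Gradio 3.x syntax; in Gradio 4.x the shape argument was removed, so building this interface would fail with a TypeError. The sketch below is a hypothetical adaptation, not part of the commit; it assumes the model and class_labels defined in app.py above are already in scope and resizes the input inside the prediction function instead.

# Hypothetical Gradio 4.x adaptation (assumes `model` and `class_labels` from app.py above).
import numpy as np
import tensorflow as tf
import gradio as gr
from tensorflow.keras.applications.efficientnet import preprocess_input

def predict_resized(img_array):
    # Resize whatever image the browser sends to the 224x224 input the model expects.
    img_array = tf.image.resize(img_array, (224, 224)).numpy()
    img_array = preprocess_input(np.expand_dims(img_array, axis=0))
    prediction = model.predict(img_array)
    return f"This pet is {class_labels[int(np.argmax(prediction))]}"

iface = gr.Interface(fn=predict_resized,
                     inputs=gr.Image(),  # no shape= argument in Gradio 4.x
                     outputs="label",
                     title="Pet Emotion Detection")
iface.launch()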
examples/cats.jpg
ADDED
examples/happy_cat.jpeg
ADDED
examples/happy_dog.jpeg
ADDED
inference.py
ADDED
@@ -0,0 +1,102 @@
+#!/usr/bin/env python
+# coding: utf-8
+
+# In[1]:
+
+
+#export
+import tensorflow as tf
+from tensorflow import keras
+from tensorflow.keras.models import Sequential
+from tensorflow.keras.optimizers import Adam, Adamax
+from tensorflow.keras.metrics import categorical_crossentropy
+from tensorflow.keras.preprocessing.image import ImageDataGenerator
+from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Activation, Dropout, BatchNormalization
+from tensorflow.keras import regularizers
+from keras.callbacks import EarlyStopping, LearningRateScheduler
+import numpy as np
+from tensorflow.keras.preprocessing import image
+from tensorflow.keras.applications.efficientnet import preprocess_input
+
+# In[2]:
+
+
+# Create Model Structure
+class_labels = ['Angry', 'Other', 'Sad', 'Happy']
+
+
+img_size = (224, 224)
+channels = 3
+img_shape = (img_size[0], img_size[1], channels)
+class_count = len(class_labels) # to define number of classes in dense layer
+
+# create pre-trained model (you can built on pretrained model such as : efficientnet, VGG , Resnet )
+# we will use efficientnetb3 from EfficientNet family.
+base_model = tf.keras.applications.efficientnet.EfficientNetB5(include_top= False, weights= "imagenet", input_shape= img_shape, pooling= 'max')
+base_model.trainable = False
+
+model = Sequential([
+    base_model,
+    BatchNormalization(axis= -1, momentum= 0.99, epsilon= 0.001),
+    Dense(256, activation='relu'),
+    Dense(128, kernel_regularizer= regularizers.l2(l= 0.016), activity_regularizer= regularizers.l1(0.006),
+          bias_regularizer= regularizers.l1(0.006), activation= 'relu'),
+    Dropout(rate= 0.45, seed= 123),
+    Dense(class_count, activation= 'softmax')
+])
+
+model.trainable = False
+
+# model.compile(Adamax(learning_rate= 0.001), loss= 'categorical_crossentropy', metrics= ['accuracy'])
+
+model.summary()
+
+# In[6]:
+
+
+from tensorflow.keras.preprocessing import image
+from tensorflow.keras.applications.efficientnet import preprocess_input
+import matplotlib.pyplot as plt
+
+class_labels = ['Angry', 'Other', 'Sad', 'Happy']
+
+
+def predict_and_display(image_path, model):
+
+    img = image.load_img(image_path, target_size=(224, 224))
+    img_array = image.img_to_array(img)
+    img_array = np.expand_dims(img_array, axis=0)
+    img_array = preprocess_input(img_array)
+
+    prediction = model.predict(img_array)
+    predicted_class_index = np.argmax(prediction)
+
+    # class_indices = train_gen.class_indices
+    # class_labels = list(class_indices.keys())
+    predicted_class_label = class_labels[predicted_class_index]
+
+    plt.imshow(img)
+    plt.axis('off')
+    if predicted_class_label == 'Other':
+        plt.title(f"The pet is normal")
+    else:
+        plt.title(f"The Pet is {predicted_class_label}")
+    plt.show()
+
+model.load_weights('my_model_weights.h5')
+
+
+# Replace 'path_to_test_image' with the path to the image you want to test
+image_path_to_test = 'pets-facial-expression-dataset/Angry/02.jpg'
+predict_and_display(image_path_to_test, model)
+
+# In[11]:
+
+
+image_path_to_test = 'IMG_0243.jpg'
+predict_and_display(image_path_to_test, model)
+
+# In[ ]:
+
+
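Both app.py and inference.py only load my_model_weights.h5; the training run that produced it is not part of this commit. Below is a rough sketch of how such a weights file could be produced for the model defined above; the dataset directory, generator settings, and epoch count are assumptions, not the author's actual training setup.

# Hypothetical training sketch (not in this commit); assumes `model` and `img_size` from above.
from tensorflow.keras.optimizers import Adamax
from tensorflow.keras.preprocessing.image import ImageDataGenerator

train_gen = ImageDataGenerator().flow_from_directory(
    'pets-facial-expression-dataset',  # directory name guessed from inference.py's test path
    target_size=img_size, class_mode='categorical')

model.trainable = True  # unfreeze before fine-tuning
model.compile(Adamax(learning_rate=0.001), loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(train_gen, epochs=10)  # epoch count is arbitrary here
model.save_weights('my_model_weights.h5')  # the file loaded by both scripts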
my_model_weights.h5
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:db40006daa10fd8afd6717e3adac116fd1bcfa7de1010b62c3f54e9b41484113
+size 116948920
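Because the weights file is tracked by Git LFS, the repository stores only this three-line pointer; the real file is fetched separately at checkout. A small hedged check (not part of the commit) that the file was actually materialized, using the size and oid recorded above:

# Verify my_model_weights.h5 matches the LFS pointer (a stray pointer file is only ~130 bytes).
import hashlib, os

path = 'my_model_weights.h5'
print(os.path.getsize(path))  # expected: 116948920
with open(path, 'rb') as f:
    print(hashlib.sha256(f.read()).hexdigest())
    # expected: db40006daa10fd8afd6717e3adac116fd1bcfa7de1010b62c3f54e9b41484113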