File size: 8,785 Bytes
b156f43
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
import streamlit as st
import pandas as pd
import numpy as np
import os, zipfile, shutil
from utils.image_classification import classifier
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay,accuracy_score,classification_report
import matplotlib.pyplot as plt
import splitfolders
from io import StringIO
import sys
# import tensorflow as tf
# from tensorflow.keras.preprocessing.image import ImageDataGenerator
# from tensorflow.keras.applications import MobileNetV2
# from tensorflow.keras.layers import Input, GlobalAveragePooling2D, Dense
# from tensorflow.keras.models import Model
# from sklearn.metrics import classification_report, confusion_matrix
# import matplotlib.pyplot as plt
# from io import BytesIO
# import numpy as np
# from PIL import Image
# Project-wide image-classification helper; `c` is reused by the
# data-loading, training and prediction steps further down this script.
c=classifier()

def delete_directory(directory_path):
    """Recursively delete *directory_path*, logging the outcome to stdout.

    Args:
        directory_path: Path of the directory tree to remove.

    Returns:
        True when the tree was removed, False when deletion failed
        (e.g. the path does not exist or is not writable).
    """
    try:
        shutil.rmtree(directory_path)
    except OSError as e:
        # shutil.rmtree raises OSError subclasses (FileNotFoundError,
        # PermissionError, ...); report and keep the app running.
        print(f"Error deleting directory '{directory_path}': {e}")
        return False
    else:
        print(f"Directory '{directory_path}' successfully deleted.")
        return True

# ---------------------------------------------------------------------------
# Page header and sidebar help
# ---------------------------------------------------------------------------
# BUG FIX: the page previously rendered two different titles back-to-back
# ("Image Classification Model Training" then "Custom Image Classification
# Trainer"); keep a single title.
st.title('Image Classification Model Training')
st.image('elements/image_banner.jpeg')

# Show the expected dataset layout so users can prepare their ZIP correctly.
st.sidebar.subheader('Folder Format')
st.sidebar.code('''custom_dataset/
├── train/
│   ├── class1/
│   ├── class2/
│   ├── ...
├── val/
│   ├── class1/
│   ├── class2/
│   ├── ...
├── test/
│   ├── class1/
│   ├── class2/
│   ├── ...
''')

# Upload image dataset
st.header("1. Upload Image Dataset")
uploaded_file = st.file_uploader("Choose a ZIP file containing your image dataset", type=["zip"])
dataset_path = 'datasets'   # final location of the train/val/test split
split_path = 'split_dir'    # temporary location for not-yet-split uploads
# Typo fix in the user-facing label ("splited" -> "split").
agree = st.checkbox('Select if your data is not split in the given format')

# ---------------------------------------------------------------------------
# Dataset extraction
# ---------------------------------------------------------------------------
if uploaded_file:
    if st.button('Extract Data'):
        with st.spinner("Extracting dataset..."):
            # Pre-split uploads extract straight into `dataset_path`;
            # unsplit uploads land in `split_path` first and are
            # ratio-split into `dataset_path` afterwards.
            # (The two original branches were duplicates apart from this.)
            extract_dir = dataset_path if not agree else split_path
            os.makedirs(extract_dir, exist_ok=True)
            # Extract the uploaded ZIP in place.
            with zipfile.ZipFile(uploaded_file, 'r') as zip_ref:
                zip_ref.extractall(extract_dir)
            if agree:
                # 80/10/10 train/val/test split with a fixed seed for
                # reproducibility, then drop the intermediate unsplit copy.
                splitfolders.ratio(extract_dir,seed=1337, output=dataset_path, ratio=(0.8, 0.1, 0.1))
                delete_directory(extract_dir)
            st.success("Dataset extraction complete!")

# ---------------------------------------------------------------------------
# Model and hyper-parameter selection
# ---------------------------------------------------------------------------
st.header("2. Select Model and Options")

# Lightweight torchvision backbones suitable for quick transfer learning.
AVAILABLE_MODELS = [
    'EfficientNet_B0',
    'EfficientNet_B1',
    'MnasNet0_5',
    'MnasNet0_75',
    'MnasNet1_0',
    'MobileNet_v2',
    'MobileNet_v3_small',
    'MobileNet_v3_large',
    'RegNet_y_400mf',
    'ShuffleNet_v2_x0_5',
    'ShuffleNet_v2_x1_0',
    'ShuffleNet_v2_x1_5',
    'SqueezeNet 1_0',
    'SqueezeNet 1_1',
]
model_name = st.selectbox("Select a pre-trained model:", AVAILABLE_MODELS)
epochs = st.slider("Number of Epochs", min_value=1, max_value=50, value=10)
batch_size = st.slider("Batch Size", min_value=1, max_value=32, value=8)



# if st.button('jjjj'):
#     if uploaded_file==True:
#         st.text('fine')
#     else:
#         st.success('Please Upload Data First')

# Training and Evaluation
# if uploaded_file:
st.header("3. Choose Model and Train")
if st.button("Train Model"):
    if uploaded_file:
        with st.spinner("Processing Data..."):
            # Build dataloaders over the extracted train/val/test folders.
            c.data_loader(dataset_path,batch_size)
        with st.spinner("Training model..."):
            model = c.train_model(model_name,epochs)
            st.success("Training complete!")
        st.header('4. Evaluation')
        with st.spinner("Evaluating model..."):
            # Run the held-out test split through the freshly trained model.
            y_true, y_pred, y_pred_prob=c.pytorch_predict(model)
            col1, col2 = st.columns([3,2])

            col1.subheader("Classification Report")
            report_dict = classification_report(y_true, y_pred, target_names=c.class_names,output_dict=True)
            accuracy = report_dict['accuracy']
            # 'accuracy' is a scalar entry; drop it so the remaining
            # per-class dict converts into a rectangular DataFrame.
            del report_dict['accuracy']
            df = pd.DataFrame(report_dict).transpose()
            # BUG FIX: round(accuracy, 1) collapsed e.g. 0.87 to 0.9;
            # show a two-decimal percentage instead.
            col1.text(f'Accuracy of the Model: {accuracy:.2%}')
            col1.dataframe(df)

            col2.subheader("Confusion Matrix")
            cm = confusion_matrix(y_true, y_pred)
            disp = ConfusionMatrixDisplay(cm, display_labels=c.class_names).plot(cmap='Blues')
            # Save the display's own figure (not whatever matplotlib figure
            # happens to be "current") and close it so repeated Streamlit
            # reruns don't leak figures.
            disp.figure_.savefig('elements/confusion_matrix.png')
            plt.close(disp.figure_)
            col2.image('elements/confusion_matrix.png')

            # Offer the serialized weights written by train_model for download.
            # Assumes train_model saved them to 'model.pt' — TODO confirm
            # against utils.image_classification.classifier.
            with open('model.pt', "rb") as model_file:
                st.download_button(
                    label="Download Model",
                    data=model_file,
                    file_name=f"model_{model_name}.pt",
                    key="model_download",
                )
        st.balloons()
    else:
        st.warning("Please Upload Data First")


    # st.header("4. Evaluate Model")
    # if st.button("Evaluate Model"):
    #     with st.spinner("Evaluating model..."):
    #         # You should write code here to load the trained model, evaluate its performance, and display metrics.

    #         # For example:
    #         # trained_model = tf.keras.models.load_model("custom_classification_model.h5")
    #         # test_data, test_labels = load_test_data(dataset_path)
    #         # predictions = trained_model.predict(test_data)
    #         # report = classification_report(np.argmax(test_labels, axis=1), np.argmax(predictions, axis=1))
    #         # confusion = confusion_matrix(np.argmax(test_labels, axis=1), np.argmax(predictions, axis=1))
    #         y_true, y_pred, y_pred_prob=c.pytorch_predict()
    #         st.text(y_true, y_pred)
    #         st.text("Classification Report:")
    #         # st.text(report)
    #         df = pd.DataFrame(np.random.randn(2, 2), columns=("col %d" % i for i in range(2)))

    #         st.table(df)

    #         st.text("Confusion Matrix:")
    #         # st.write(confusion)
    #         df = pd.DataFrame(np.random.randn(2, 2), columns=("col %d" % i for i in range(2)))

    #         st.table(df)

# Helper functions for dataset extraction, model building, and training can be defined separately.