mainakhf committed on
Commit
b156f43
·
1 Parent(s): 00d9f2a

Upload 2 files

pages/🎈_object_detection.py ADDED
@@ -0,0 +1,104 @@
+ import streamlit as st
+ import pandas as pd
+ import numpy as np
+
+
+ st.title('Object Detection (YOLO) Model Training')
+ st.image('elements/object_banner.png')
+
+ st.image('elements/commingsoon.gif', use_column_width=True)
+
+ # st.sidebar.subheader('Folder Format')
+ # st.sidebar.subheader(':red[(Data should be in yolo format)]')
+
+ # st.sidebar.code('''custom_dataset/
+ # ├── train/
+ # │   ├── image/
+ # │   ├── label/
+ # │   ├── ...
+ # ├── val/
+ # │   ├── image/
+ # │   ├── label/
+ # │   ├── ...
+ # ''')
+
+ # from io import StringIO
+ # import sys
+
+ # # Redirect stdout to capture print statements
+ # captured_output = StringIO()
+ # sys.stdout = captured_output
+
+ # from print import my_function
+
+ # # Run the function to capture the output
+ # if st.button('print'):
+ #     my_function()
+
+ # # Reset stdout to its original state
+ # sys.stdout = sys.__stdout__
+
+ # # Display captured output in Streamlit
+ # st.sidebar.code("Printed output:")
+ # st.sidebar.code(captured_output.getvalue())
+
+ # st.header("1. Upload Image Dataset")
+ # uploaded_file = st.file_uploader("Choose a ZIP file containing your image dataset", type=["zip"])
+ # dataset_path = True
+
+ # # Sidebar to select model and other options
+ # st.header("2. Select Model and Options")
+ # model_name = st.selectbox("Select a pre-trained model:", ["MobileNetV2", "ResNet50", "InceptionV3"])
+ # epochs = st.slider("Number of Epochs", min_value=1, max_value=50, value=10)
+ # batch_size = st.slider("Batch Size", min_value=1, max_value=32, value=8)
+
+
+ # if uploaded_file:
+ #     with st.spinner("Extracting dataset..."):
+ #         # You should write code here to extract and prepare the dataset.
+
+ #         # For example:
+ #         # dataset_path = extract_dataset(uploaded_file)
+ #         dataset_path = 'jjjj'
+
+ #         st.success("Dataset extraction complete!")
+
+ # # Training and Evaluation
+ # if dataset_path:
+ #     st.header("3. Choose Model and Train")
+ #     if st.button("Train Model"):
+ #         with st.spinner("Training model..."):
+ #             # You should write code here to load the dataset, build the selected model, train it, and save the model.
+
+ #             # For example:
+ #             # model = build_model(model_name)
+ #             # train_model(model, dataset_path, epochs, batch_size)
+ #             # model.save("custom_classification_model.h5")
+
+ #             st.success("Training complete!")
+
+ #     st.header("4. Evaluate Model")
+ #     if st.button("Evaluate Model"):
+ #         with st.spinner("Evaluating model..."):
+ #             # You should write code here to load the trained model, evaluate its performance, and display metrics.
+
+ #             # For example:
+ #             # trained_model = tf.keras.models.load_model("custom_classification_model.h5")
+ #             # test_data, test_labels = load_test_data(dataset_path)
+ #             # predictions = trained_model.predict(test_data)
+ #             # report = classification_report(np.argmax(test_labels, axis=1), np.argmax(predictions, axis=1))
+ #             # confusion = confusion_matrix(np.argmax(test_labels, axis=1), np.argmax(predictions, axis=1))
+
+ #             st.text("Classification Report:")
+ #             # st.text(report)
+ #             df = pd.DataFrame(np.random.randn(2, 2), columns=("col %d" % i for i in range(2)))
+
+ #             st.table(df)
+
+ #             st.text("Confusion Matrix:")
+ #             # st.write(confusion)
+ #             df = pd.DataFrame(np.random.randn(2, 2), columns=("col %d" % i for i in range(2)))
+
+ #             st.table(df)
+
+ # # Helper functions for dataset extraction, model building, and training can be defined separately.
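
The commented-out experiment above captures print output by reassigning sys.stdout, which is fragile: if an exception fires before the reset line, stdout stays redirected, and `from print import my_function` would fail anyway since print is a builtin, not a module. A minimal sketch of the same idea using contextlib.redirect_stdout, which restores stdout automatically; my_function here is a hypothetical placeholder, not something this commit defines:

import contextlib
import io

import streamlit as st

def my_function():
    # Hypothetical stand-in for any function whose print() output we want to surface.
    print("hello from my_function")

if st.button('print'):
    buffer = io.StringIO()
    # Route print() output into the buffer; stdout is restored when the block exits.
    with contextlib.redirect_stdout(buffer):
        my_function()
    st.sidebar.code("Printed output:")
    st.sidebar.code(buffer.getvalue())
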
pages/🐢🐱_image_classification.py ADDED
@@ -0,0 +1,209 @@
+ import streamlit as st
+ import pandas as pd
+ import numpy as np
+ import os, zipfile, shutil
+ from utils.image_classification import classifier
+ from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay, accuracy_score, classification_report
+ import matplotlib.pyplot as plt
+ import splitfolders
+ from io import StringIO
+ import sys
+ # import tensorflow as tf
+ # from tensorflow.keras.preprocessing.image import ImageDataGenerator
+ # from tensorflow.keras.applications import MobileNetV2
+ # from tensorflow.keras.layers import Input, GlobalAveragePooling2D, Dense
+ # from tensorflow.keras.models import Model
+ # from sklearn.metrics import classification_report, confusion_matrix
+ # import matplotlib.pyplot as plt
+ # from io import BytesIO
+ # import numpy as np
+ # from PIL import Image
+ c = classifier()
+
+ def delete_directory(directory_path):
+     try:
+         shutil.rmtree(directory_path)
+         print(f"Directory '{directory_path}' successfully deleted.")
+     except Exception as e:
+         print(f"Error deleting directory '{directory_path}': {e}")
+
+ # Example usage:
+ # st.subheader('This is a subheader with a divider', divider='blue')
+
+ st.title('Image Classification Model Training')
+ st.image('elements/image_banner.jpeg')
+
+ # Page title
+ st.title("Custom Image Classification Trainer")
+
+ st.sidebar.subheader('Folder Format')
+ st.sidebar.code('''custom_dataset/
+ ├── train/
+ │   ├── class1/
+ │   ├── class2/
+ │   ├── ...
+ ├── val/
+ │   ├── class1/
+ │   ├── class2/
+ │   ├── ...
+ ├── test/
+ │   ├── class1/
+ │   ├── class2/
+ │   ├── ...
+ ''')
+
+ # Upload image dataset
+ st.header("1. Upload Image Dataset")
+ uploaded_file = st.file_uploader("Choose a ZIP file containing your image dataset", type=["zip"])
+ dataset_path = 'datasets'
+ split_path = 'split_dir'
+ agree = st.checkbox('Select if your data is not split in the given format')
+
+ if uploaded_file:
+     if st.button('Extract Data'):
+         if not agree:
+             with st.spinner("Extracting dataset..."):
+                 # Data is already split: extract straight into the dataset directory.
+                 file_name = uploaded_file.name
+                 # st.write(f"Uploaded file name: {file_name.split('.')[0]}")
+                 extract_dir = dataset_path
+                 os.makedirs(extract_dir, exist_ok=True)
+                 # Extract the zip file
+                 with zipfile.ZipFile(uploaded_file, 'r') as zip_ref:
+                     zip_ref.extractall(extract_dir)
+                 st.success("Dataset extraction complete!")
+         else:
+             with st.spinner("Extracting dataset..."):
+                 # Unsplit data: extract to a staging directory, then split 80/10/10 below.
+                 file_name = uploaded_file.name
+                 # st.write(f"Uploaded file name: {file_name.split('.')[0]}")
+                 extract_dir = split_path
+                 os.makedirs(extract_dir, exist_ok=True)
+                 # Extract the zip file
+                 with zipfile.ZipFile(uploaded_file, 'r') as zip_ref:
+                     zip_ref.extractall(extract_dir)
+                 splitfolders.ratio(extract_dir, seed=1337, output=dataset_path, ratio=(0.8, 0.1, 0.1))
+                 directory_to_delete = extract_dir
+                 delete_directory(directory_to_delete)
+                 st.success("Dataset extraction complete!")
+
+ # Sidebar to select model and other options
+ st.header("2. Select Model and Options")
+ model_name = st.selectbox("Select a pre-trained model:", ['EfficientNet_B0', 'EfficientNet_B1', 'MnasNet0_5', 'MnasNet0_75', 'MnasNet1_0', 'MobileNet_v2',
+                           'MobileNet_v3_small', 'MobileNet_v3_large', 'RegNet_y_400mf', 'ShuffleNet_v2_x0_5', 'ShuffleNet_v2_x1_0', 'ShuffleNet_v2_x1_5',
+                           'SqueezeNet 1_0', 'SqueezeNet 1_1'])
+ epochs = st.slider("Number of Epochs", min_value=1, max_value=50, value=10)
+ batch_size = st.slider("Batch Size", min_value=1, max_value=32, value=8)
+
+
+
+ # if st.button('jjjj'):
+ #     if uploaded_file==True:
+ #         st.text('fine')
+ #     else:
+ #         st.success('Please Upload Data First')
+
+ # Training and Evaluation
+ # if uploaded_file:
+ st.header("3. Choose Model and Train")
+ if st.button("Train Model"):
+     if uploaded_file:
+         with st.spinner("Processing Data..."):
+             c.data_loader(dataset_path, batch_size)
+         with st.spinner("Training model..."):
+             # captured_output = StringIO()
+             # sys.stdout = captured_output
+             model = c.train_model(model_name, epochs)
+             # sys.stdout = sys.__stdout__
+
+             # # Display captured output in Streamlit
+             # st.sidebar.code("Printed output:")
+             # st.sidebar.code(captured_output.getvalue())
+             # Training happens inside classifier.train_model, which is expected to write model.pt for the download button below.
+
+             # For example:
+             # model = build_model(model_name)
+             # train_model(model, dataset_path, epochs, batch_size)
+             # model.save("custom_classification_model.h5")
+
+         st.success("Training complete!")
+         st.header('4. Evaluation')
+         with st.spinner("Evaluating model..."):
+             # Evaluate the trained model on the test split and display the metrics below.
+
+             # For example:
+             # trained_model = tf.keras.models.load_model("custom_classification_model.h5")
+             # test_data, test_labels = load_test_data(dataset_path)
+             # predictions = trained_model.predict(test_data)
+             # report = classification_report(np.argmax(test_labels, axis=1), np.argmax(predictions, axis=1))
+             # confusion = confusion_matrix(np.argmax(test_labels, axis=1), np.argmax(predictions, axis=1))
+             y_true, y_pred, y_pred_prob = c.pytorch_predict(model)
+             # st.text([y_true, y_pred])
+             col1, col2 = st.columns([3, 2])
+
+             col1.subheader("Classification Report")
+             # col1.text(classification_report(y_true, y_pred))
+             report_dict = classification_report(y_true, y_pred, target_names=c.class_names, output_dict=True)
+             accuracy = report_dict['accuracy']
+             del report_dict['accuracy']
+             df = pd.DataFrame(report_dict).transpose()
+             col1.text(f'Accuracy of the Model: {round(accuracy, 1)}')
+             col1.dataframe(df)
+
+
+             col2.subheader("Confusion Matrix")
+             cm = confusion_matrix(y_true, y_pred)
+             fig = ConfusionMatrixDisplay(cm, display_labels=c.class_names).plot(cmap='Blues')
+             plt.savefig('elements/confusion_matrix.png')
+             # fig_array = np.array(fig.figure_.canvas.renderer.buffer_rgba())
+             col2.image('elements/confusion_matrix.png')
+             # st.text("Classification Report:")
+             # # st.text(report)
+             # df = pd.DataFrame(np.random.randn(2, 2), columns=("col %d" % i for i in range(2)))
+
+             # st.table(df)
+
+             # st.text("Confusion Matrix:")
+             # # st.write(confusion)
+             # df = pd.DataFrame(np.random.randn(2, 2), columns=("col %d" % i for i in range(2)))
+
+             # st.table(df)
+         with open('model.pt', "rb") as model_file:
+             st.download_button(
+                 label="Download Model",
+                 data=model_file,
+                 file_name=f"model_{model_name}.pt",
+                 key="model_download",
+             )
+         st.balloons()
+     else:
+         st.warning("Please Upload Data First")
+
+
+ # st.header("4. Evaluate Model")
+ # if st.button("Evaluate Model"):
+ #     with st.spinner("Evaluating model..."):
+ #         # You should write code here to load the trained model, evaluate its performance, and display metrics.
+
+ #         # For example:
+ #         # trained_model = tf.keras.models.load_model("custom_classification_model.h5")
+ #         # test_data, test_labels = load_test_data(dataset_path)
+ #         # predictions = trained_model.predict(test_data)
+ #         # report = classification_report(np.argmax(test_labels, axis=1), np.argmax(predictions, axis=1))
+ #         # confusion = confusion_matrix(np.argmax(test_labels, axis=1), np.argmax(predictions, axis=1))
+ #         y_true, y_pred, y_pred_prob = c.pytorch_predict()
+ #         st.text(y_true, y_pred)
+ #         st.text("Classification Report:")
+ #         # st.text(report)
+ #         df = pd.DataFrame(np.random.randn(2, 2), columns=("col %d" % i for i in range(2)))
+
+ #         st.table(df)
+
+ #         st.text("Confusion Matrix:")
+ #         # st.write(confusion)
+ #         df = pd.DataFrame(np.random.randn(2, 2), columns=("col %d" % i for i in range(2)))
+
+ #         st.table(df)
+
+ # Helper functions for dataset extraction, model building, and training can be defined separately.
+
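
The download button above serves a model.pt written during training. utils/image_classification.py is not part of this commit, so how that file is saved is an assumption. A minimal sketch of loading the downloaded weights for inference, assuming torch.save(model) pickled the full module (if only a state_dict was saved, the architecture would have to be rebuilt first); the 224x224 ImageNet preprocessing and the file names are likewise guesses, since the real transform depends on classifier().data_loader:

import torch
from PIL import Image
from torchvision import transforms

# Assumption: model.pt holds a full pickled module saved via torch.save(model),
# not a bare state_dict (utils/image_classification.py is not in this commit).
model = torch.load("model_MobileNet_v2.pt", map_location="cpu")
model.eval()

# Assumed preprocessing: a standard 224x224 ImageNet pipeline; adjust to match
# whatever classifier().data_loader used during training.
preprocess = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225]),
])

# example.jpg is a placeholder input image; the model is assumed to return logits.
image = preprocess(Image.open("example.jpg").convert("RGB")).unsqueeze(0)
with torch.no_grad():
    predicted_class = model(image).argmax(dim=1).item()
print(predicted_class)
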