LenixC committed on
Commit fa42857 · 1 Parent(s): d09ace4

Built Gradio app for the example.

Files changed (1)
1. app.py +230 -0
app.py ADDED
@@ -0,0 +1,230 @@
# Gradio Implementation: Lenix Carter
# License: BSD 3-Clause or CC-0

import warnings

import gradio as gr
import matplotlib
import matplotlib.pyplot as plt

from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import MinMaxScaler
from sklearn import datasets
from sklearn.exceptions import ConvergenceWarning

# use a non-interactive backend so figures can be rendered off the main thread
matplotlib.use("agg")

# different learning rate schedules and momentum parameters
params = [
    {
        "solver": "sgd",
        "learning_rate": "constant",
        "momentum": 0,
        "learning_rate_init": 0.2,
    },
    {
        "solver": "sgd",
        "learning_rate": "constant",
        "momentum": 0.9,
        "nesterovs_momentum": False,
        "learning_rate_init": 0.2,
    },
    {
        "solver": "sgd",
        "learning_rate": "constant",
        "momentum": 0.9,
        "nesterovs_momentum": True,
        "learning_rate_init": 0.2,
    },
    {
        "solver": "sgd",
        "learning_rate": "invscaling",
        "momentum": 0,
        "learning_rate_init": 0.2,
    },
    {
        "solver": "sgd",
        "learning_rate": "invscaling",
        "momentum": 0.9,
        "nesterovs_momentum": True,
        "learning_rate_init": 0.2,
    },
    {
        "solver": "sgd",
        "learning_rate": "invscaling",
        "momentum": 0.9,
        "nesterovs_momentum": False,
        "learning_rate_init": 0.2,
    },
    {"solver": "adam", "learning_rate_init": 0.01},
]
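
# human-readable labels and matplotlib line styles, index-aligned with `params`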
labels = [
    "constant learning-rate",
    "constant with momentum",
    "constant with Nesterov's momentum",
    "inv-scaling learning-rate",
    "inv-scaling with momentum",
    "inv-scaling with Nesterov's momentum",
    "adam",
]

plot_args = [
    {"c": "red", "linestyle": "-"},
    {"c": "green", "linestyle": "-"},
    {"c": "blue", "linestyle": "-"},
    {"c": "red", "linestyle": "--"},
    {"c": "green", "linestyle": "--"},
    {"c": "blue", "linestyle": "--"},
    {"c": "black", "linestyle": "-"},
]

# load / generate some toy datasets
iris = datasets.load_iris()
X_digits, y_digits = datasets.load_digits(return_X_y=True)
data_sets = [
    (iris.data, iris.target),
    (X_digits, y_digits),
    datasets.make_circles(noise=0.2, factor=0.5, random_state=1),
    datasets.make_moons(noise=0.3, random_state=0),
]

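# Gradio callback: fold the chosen hyperparameters into `params`, then fit and
# plot every selected strategy on the chosen dataset. `dataset` and `models`
# arrive as integer indices because the components below use type="index".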
def run_mlp(dataset, models, clr_lr,
            cwm_lr, cwm_mom,
            nest_lr, nest_mom,
            inv_lr,
            iwm_lr, iwm_mom,
            invN_lr, invN_mom,
            adam_lr):
    plt.clf()
    new_params = [
        {"learning_rate_init": clr_lr},
        {"learning_rate_init": cwm_lr,
         "momentum": cwm_mom},
        {"learning_rate_init": nest_lr,
         "momentum": nest_mom},
        {"learning_rate_init": inv_lr},
        {"learning_rate_init": iwm_lr,
         "momentum": iwm_mom},
        {"learning_rate_init": invN_lr,
         "momentum": invN_mom},
        {"learning_rate_init": adam_lr},
    ]
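    # fold the slider values into the module-level `params`; note this mutates
    # shared global state, which is acceptable for a single-user demo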
    for (param, new_param) in zip(params, new_params):
        param.update(new_param)

    name = ["Iris", "Digits", "Circles", "Moons"]
    return plot_on_dataset(*data_sets[dataset], models, name[dataset])
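
# Fit one MLPClassifier per selected strategy on (X, y) and draw each
# training-loss curve on a shared matplotlib figure.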
def plot_on_dataset(X, y, models, name):
    # for each dataset, plot learning for each learning strategy
    print("\nlearning on dataset %s" % name)

    X = MinMaxScaler().fit_transform(X)
    mlps = []
    if name == "Digits":
        # digits is larger but converges fairly quickly
        max_iter = 15
    else:
        max_iter = 400

    for model in models:
        label = labels[model]
        param = params[model]
        print("training: %s" % label)
        mlp = MLPClassifier(random_state=0, max_iter=max_iter, **param)

        # some parameter combinations will not converge as can be seen on the
        # plots so they are ignored here
        with warnings.catch_warnings():
            warnings.filterwarnings(
                "ignore", category=ConvergenceWarning, module="sklearn"
            )
            mlp.fit(X, y)

        mlps.append(mlp)
        print("Training set score: %f" % mlp.score(X, y))
        print("Training set loss: %f" % mlp.loss_)

        plt.plot(mlp.loss_curve_, label=label, **plot_args[model])

    plt.legend(loc="upper right")

    return plt
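
# Build the UI: one tab selects the dataset and strategies, another tab holds
# per-strategy accordions for tuning learning rate and momentum.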
title = "Compare Stochastic learning strategies for MLPClassifier"
with gr.Blocks() as demo:
    gr.Markdown(f"# {title}")
    gr.Markdown("""
    This example demonstrates different stochastic learning strategies for MLPClassifier. You can also tweak the parameters of each learning strategy.

    This is based on the example [here](https://scikit-learn.org/stable/auto_examples/neural_networks/plot_mlp_training_curves.html#sphx-glr-auto-examples-neural-networks-plot-mlp-training-curves-py).
    """)
    with gr.Tabs():
        with gr.TabItem("Model and Data Selection"):
            with gr.Row():
                dataset = gr.Dropdown(["Iris", "Digits", "Circles", "Moons"],
                                      value="Iris",
                                      label="Dataset",
                                      type="index")
                models = gr.CheckboxGroup(["Constant Learning-Rate",
                                           "Constant with Momentum",
                                           "Constant with Nesterov's Momentum",
                                           "Inverse Scaling Learning-Rate",
                                           "Inverse Scaling with Momentum",
                                           "Inverse Scaling with Nesterov's Momentum",
                                           "Adam"],
                                          label="Stochastic Learning Strategy",
                                          type="index")
        with gr.TabItem("Model Tuning"):
            with gr.Accordion("Constant Learning-Rate", open=False):
                clr_lr = gr.Slider(0.01, 1.00, 0.2, label="Learning Rate")
            with gr.Accordion("Constant with Momentum", open=False):
                cwm_lr = gr.Slider(0.01, 1.00, 0.2, label="Learning Rate")
                cwm_mom = gr.Slider(0.01, 1.00, 0.9, label="Momentum")
            with gr.Accordion("Constant with Nesterov's Momentum", open=False):
                nest_lr = gr.Slider(0.01, 1.00, 0.2, label="Learning Rate")
                nest_mom = gr.Slider(0.01, 1.00, 0.9, label="Momentum")
            with gr.Accordion("Inverse Scaling Learning-Rate", open=False):
                inv_lr = gr.Slider(0.01, 1.00, 0.2, label="Learning Rate")
            with gr.Accordion("Inverse Scaling with Momentum", open=False):
                iwm_lr = gr.Slider(0.01, 1.00, 0.2, label="Learning Rate")
                iwm_mom = gr.Slider(0.01, 1.00, 0.9, label="Momentum")
            with gr.Accordion("Inverse Scaling with Nesterov's Momentum", open=False):
                invN_lr = gr.Slider(0.01, 1.00, 0.2, label="Learning Rate")
                invN_mom = gr.Slider(0.01, 1.00, 0.9, label="Momentum")
            with gr.Accordion("Adam", open=False):
                adam_lr = gr.Slider(0.001, 1.00, 0.01, label="Learning Rate")
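
    # Wire the Run button to the training callback; the returned figure is
    # rendered in the Plot component.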
    btn = gr.Button("Run")
    stoch_graph = gr.Plot(label="Stochastic Learning Strategies")
    btn.click(
        fn=run_mlp,
        inputs=[dataset, models,
                clr_lr,
                cwm_lr,
                cwm_mom,
                nest_lr,
                nest_mom,
                inv_lr,
                iwm_lr,
                iwm_mom,
                invN_lr,
                invN_mom,
                adam_lr],
        outputs=[stoch_graph]
    )

if __name__ == '__main__':
    demo.launch()