Update the model
Browse files
train.py
CHANGED
@@ -28,7 +28,7 @@ model.add(Dropout(0.5)) # dropout makes the task harder by removing some information
|
|
28 |
model.add(Dense(512, activation="relu"))
|
29 |
model.add(Dense(512, activation="relu"))
|
30 |
model.add(Dense(256, activation="relu"))
|
31 |
-
model.add(Dense(dset_size, activation="
|
32 |
|
33 |
X = [] # we're loading the training data into input X
|
34 |
y = [] # and output y
|
@@ -43,11 +43,10 @@ for key in dset:
|
|
43 |
X = np.array(X) # normal lists are way slower than numpy arrays (remember, a list and an array are not the same thing, an array is far more limited)
|
44 |
y = np.array(y) # that's why keras supports only numpy arrays ^
|
45 |
|
46 |
-
model.compile(optimizer=Adam(), loss="
|
47 |
-
# TODO: change the loss
|
48 |
|
49 |
model.fit(X, y, epochs=10, batch_size=8) # training the model; epochs is how many times it reads through the data, batch_size is an optimization to train on multiple messages at the same time. Loss and accuracy are opposite measures: loss is how far the output is from the correct one (from 1 down to 0), and accuracy is how often the model gets the answer right (from 0 up to 1).
|
50 |
-
#
|
51 |
|
52 |
model.summary() # just for you to see info about the model, useful because you can check the parameter count
|
53 |
|
|
|
28 |
model.add(Dense(512, activation="relu"))
|
29 |
model.add(Dense(512, activation="relu"))
|
30 |
model.add(Dense(256, activation="relu"))
|
31 |
+
model.add(Dense(dset_size, activation="softmax")) # softmax is made for output; if the output should have only 1 neuron active, that means only one positive number is allowed and the others are zeros
|
32 |
|
33 |
X = [] # we're loading the training data into input X
|
34 |
y = [] # and output y
|
|
|
43 |
X = np.array(X) # normal lists are way slower than numpy arrays (remember, a list and an array are not the same thing, an array is far more limited)
|
44 |
y = np.array(y) # that's why keras supports only numpy arrays ^
|
45 |
|
46 |
+
model.compile(optimizer=Adam(), loss="categorical_crossentropy", metrics=["accuracy",]) # settings for the training, loss means the way to calculate loss - categorical crossentropy
|
|
|
47 |
|
48 |
model.fit(X, y, epochs=10, batch_size=8) # training the model; epochs is how many times it reads through the data, batch_size is an optimization to train on multiple messages at the same time. Loss and accuracy are opposite measures: loss is how far the output is from the correct one (from 1 down to 0), and accuracy is how often the model gets the answer right (from 0 up to 1).
|
49 |
+
# Add , workers=4, use_multiprocessing=True) if you don't have a GPU
|
50 |
|
51 |
model.summary() # just for you to see info about the model, useful because you can check the parameter count
|
52 |
|