Update app.py
app.py
CHANGED
@@ -10,7 +10,14 @@ import requests
 import asyncio
 from io import BytesIO
 import base64
-import
+import tensorflow as tf
+from tensorflow import keras
+from tensorflow.keras import layers, callbacks
+from tensorflow.keras.utils import to_categorical
+from keras.models import Sequential
+from keras.layers import Dense
+import mimetypes
+import tensorflow
 import matplotlib.pyplot as plt
 from sklearn.model_selection import train_test_split, GridSearchCV, cross_val_score, learning_curve
 from sklearn.linear_model import LinearRegression, LogisticRegression
@@ -1423,10 +1430,22 @@ elif app_mode == "Neural Network Studio":
     # Neural Network Configuration
     st.subheader("⚙️ Neural Network Configuration")
     with st.expander("Configure Neural Network", expanded=True):
-
-
-
-
+        col1, col2 = st.columns(2)
+        with col1:
+            hidden_layers = st.slider("Number of Hidden Layers", 1, 5, 2)
+            neurons_per_layer = st.slider("Neurons per Layer", 10, 200, 50)
+            activation = st.selectbox("Activation Function",
+                                      ["relu", "tanh", "sigmoid", "selu", "swish"])
+            dropout_rate = st.slider("Dropout Rate", 0.0, 0.5, 0.2)
+            initializer = st.selectbox("Weight Initializer",
+                                       ["glorot_uniform", "he_normal", "lecun_uniform"])
+
+        with col2:
+            learning_rate = st.slider("Learning Rate", 0.0001, 0.1, 0.001, format="%.4f")
+            optimizer_choice = st.selectbox("Optimizer",
+                                            ["Adam", "Nadam", "RMSprop", "SGD"])
+            batch_norm = st.checkbox("Batch Normalization", value=True)
+            regularization = st.checkbox("L2 Regularization")
        epochs = st.slider("Epochs", 10, 200, 50)
        batch_size = st.slider("Batch Size", 16, 128, 32)

@@ -1467,23 +1486,72 @@ elif app_mode == "Neural Network Studio":
            X_train_processed = preprocessor.fit_transform(X_train)
            X_test_processed = preprocessor.transform(X_test)

-            #
+            # Build neural network with advanced features
            model = keras.Sequential()
            model.add(layers.Input(shape=(X_train_processed.shape[1],)))
+
            for _ in range(hidden_layers):
-
-
-
+                # Create configurable layers
+                layer_config = {
+                    'units': neurons_per_layer,
+                    'activation': activation,
+                    'kernel_initializer': initializer
+                }
+
+                if regularization:
+                    layer_config['kernel_regularizer'] = keras.regularizers.l2(0.01)
+
+                model.add(layers.Dense(**layer_config))
+
+                if batch_norm:
+                    model.add(layers.BatchNormalization())
+
+                if dropout_rate > 0:
+                    model.add(layers.Dropout(dropout_rate))
+
+            # Output layer
+            output_activation = 'linear' if problem_type == "Regression" else 'softmax'
+            output_units = 1 if problem_type == "Regression" else len(np.unique(y_train))
+            model.add(layers.Dense(output_units, activation=output_activation))
+
+            # Configure optimizer
+            optimizers = {
+                "Adam": keras.optimizers.Adam(learning_rate=learning_rate),
+                "Nadam": keras.optimizers.Nadam(learning_rate=learning_rate),
+                "RMSprop": keras.optimizers.RMSprop(learning_rate=learning_rate),
+                "SGD": keras.optimizers.SGD(learning_rate=learning_rate, momentum=0.9)
+            }
+
+            optimizer = optimizers[optimizer_choice]

            # Compile the model
-            optimizer = keras.optimizers.Adam(learning_rate=learning_rate)
            model.compile(optimizer=optimizer,
                          loss='mse' if problem_type == "Regression" else 'sparse_categorical_crossentropy',
                          metrics=['mae'] if problem_type == "Regression" else ['accuracy'])

-            #
-
-
+            # Add callbacks section
+            with st.expander("Advanced Training Options"):
+                early_stopping = st.checkbox("Early Stopping", value=True)
+                reduce_lr = st.checkbox("Reduce Learning Rate on Plateau")
+                patience = st.slider("Patience Epochs", 5, 20, 10) if early_stopping else 0
+
+            callbacks_list = []
+            if early_stopping:
+                callbacks_list.append(
+                    callbacks.EarlyStopping(patience=patience, restore_best_weights=True))
+            if reduce_lr:
+                callbacks_list.append(
+                    callbacks.ReduceLROnPlateau(factor=0.2, patience=patience//2))
+
+            # Train the model with callbacks
+            history = model.fit(
+                X_train_processed, y_train,
+                epochs=epochs,
+                batch_size=batch_size,
+                validation_split=0.2,
+                callbacks=callbacks_list,
+                verbose=0
+            )

            # Store model and preprocessor
            st.session_state.model = Pipeline(steps=[('preprocessor', preprocessor), ('model', model)])
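For orientation, with the expander defaults above (2 hidden layers, 50 neurons, relu, glorot_uniform, batch normalization on, dropout 0.2, no L2), the layer loop builds a network equivalent to this standalone sketch; n_features and the 3-class output are illustrative placeholders, not values from the commit:

    from tensorflow import keras
    from tensorflow.keras import layers

    n_features = 20   # stands in for X_train_processed.shape[1]
    model = keras.Sequential([
        layers.Input(shape=(n_features,)),
        layers.Dense(50, activation='relu', kernel_initializer='glorot_uniform'),
        layers.BatchNormalization(),
        layers.Dropout(0.2),
        layers.Dense(50, activation='relu', kernel_initializer='glorot_uniform'),
        layers.BatchNormalization(),
        layers.Dropout(0.2),
        layers.Dense(3, activation='softmax'),   # 1 unit with 'linear' for regression
    ])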
@@ -1497,6 +1565,11 @@ elif app_mode == "Neural Network Studio":

            # Model Evaluation
            y_pred = model.predict(X_test_processed)
+
+            # Post-processing for classification
+            if problem_type == "Classification":
+                y_pred = np.argmax(y_pred, axis=1)  # Convert probabilities to class labels
+
            if problem_type == "Regression":
                mse = mean_squared_error(y_test, y_pred)
                rmse = np.sqrt(mse)
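The added argmax step collapses each row of softmax probabilities into a single class label, which is what classification_report expects; a minimal standalone illustration:

    import numpy as np

    proba = np.array([[0.1, 0.7, 0.2],
                      [0.8, 0.1, 0.1]])   # two samples, three classes
    labels = np.argmax(proba, axis=1)     # array([1, 0])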
@@ -1518,14 +1591,32 @@ elif app_mode == "Neural Network Studio":
                st.write("Classification Report:")
                st.text(classification_report(y_test, y_pred))

-            # Visualization
+            # Visualization with multiple metrics
            st.subheader("📊 Training History")
-            fig,
-
-
-
-
-
+            fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 4))
+
+            # Plot loss
+            ax1.plot(history.history['loss'], label='Train Loss')
+            ax1.plot(history.history['val_loss'], label='Validation Loss')
+            ax1.set_title('Loss Evolution')
+            ax1.set_xlabel('Epoch')
+            ax1.set_ylabel('Loss')
+            ax1.legend()
+
+            # Plot accuracy/metric
+            if problem_type == "Classification":
+                ax2.plot(history.history['accuracy'], label='Train Accuracy')
+                ax2.plot(history.history['val_accuracy'], label='Validation Accuracy')
+                ax2.set_title('Accuracy Evolution')
+                ax2.set_ylabel('Accuracy')
+            else:
+                ax2.plot(history.history['mae'], label='Train MAE')
+                ax2.plot(history.history['val_mae'], label='Validation MAE')
+                ax2.set_title('MAE Evolution')
+                ax2.set_ylabel('MAE')
+
+            ax2.set_xlabel('Epoch')
+            ax2.legend()
            st.pyplot(fig)

            st.success("Neural network trained successfully!")
@@ -1538,7 +1629,8 @@ elif app_mode == "Neural Network Studio":
    model_filename = st.text_input("Enter Model Filename (without extension)", "neural_network")
    if st.button("Save Model"):
        try:
-
-            st.
+            # Save the entire Keras model including architecture and weights
+            st.session_state.model.named_steps['model'].save(f"{model_filename}.h5")  # Saves as an HDF5 file
+            st.success(f"Model saved as {model_filename}.h5")
        except Exception as e:
            st.error(f"Error saving model: {e}")
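A model saved this way can be restored in a later session with keras.models.load_model; the sketch below assumes the fitted preprocessor was also persisted (e.g. via joblib), which the commit does not do, and the filenames are illustrative:

    import joblib
    from tensorflow import keras
    from sklearn.pipeline import Pipeline

    restored = keras.models.load_model("neural_network.h5")   # rebuilds architecture + weights
    preprocessor = joblib.load("preprocessor.joblib")         # assumed saved separately
    pipeline = Pipeline(steps=[('preprocessor', preprocessor), ('model', restored)])
    # pipeline.predict(new_raw_features) then applies preprocessing + network in one call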