import numpy as np
import pandas as pd
import statsmodels.formula.api as smf
import statsmodels.api as sm
import plotly.graph_objects as go
from scipy.optimize import minimize
import plotly.express as px
from scipy.stats import t, f
import gradio as gr
import io
import zipfile
import tempfile
from datetime import datetime
import docx
from docx.shared import Inches, Pt
from docx.enum.text import WD_PARAGRAPH_ALIGNMENT
from matplotlib.colors import to_hex
import os

# --- Data definition in global scope ---
data_dict = {
    'Tratamiento': ['T1', 'T2', 'T3', 'T4', 'T5', 'T6', 'T7', 'T8', 'T9', 'T10',
                    'T11', 'T12', 'T13', 'T14', 'T15'] * 3,
    'Tiempo_fermentacion_h': [16] * 15 + [23] * 15 + [40] * 15,
    'pH': [6.02, 5.39, 6.27, 4.82, 6.25, 4.87, 4.76, 4.68, 4.64, 6.35, 4.67, 6.43, 4.58, 4.60, 6.96,
           5.17, 5.95, 6.90, 5.50, 5.08, 4.95, 5.41, 5.52, 4.98, 7.10, 5.36, 6.91, 5.21, 4.66, 7.10,
           5.42, 5.60, 7.36, 5.36, 4.66, 4.93, 5.18, 5.26, 4.92, 7.28, 5.26, 6.84, 5.19, 4.58, 7.07],
    'Abs_600nm': [1.576, 1.474, 1.293, 1.446, 1.537, 1.415, 1.481, 1.419, 1.321, 1.224, 1.459, 0.345, 1.279, 1.181, 0.662,
                  1.760, 1.690, 1.485, 1.658, 1.728, 1.594, 1.673, 1.607, 1.531, 1.424, 1.595, 0.344, 1.477, 1.257, 0.660,
                  1.932, 1.780, 1.689, 1.876, 1.885, 1.824, 1.913, 1.810, 1.852, 1.694, 1.831, 0.347, 1.752, 1.367, 0.656],
    'Glucosa_g_L': [5, 10, 0, 5, 10, 5, 10, 5, 10, 0, 5, 0, 5, 5, 0] * 3,
    'Proteina_Pescado_g_L': [1.4, 1.4, 3.2, 3.2, 3.2, 3.2, 3.2, 5, 5, 5, 5, 0, 5, 5, 0] * 3,
    'Sulfato_Manganeso_g_L': [0.75, 0.5, 0.75, 0.5, 0.75, 0.5, 0.75, 0.5, 0.25, 0.75, 0.5, 0.25, 0.5, 0.25, 0.5] * 3,
}
data = pd.DataFrame(data_dict)
# --- End of data definition in global scope ---


class RSM_BoxBehnken:
    """Response-surface (Box-Behnken style) analysis around a quadratic OLS model.

    Holds the experimental data, the full and simplified fitted models, the
    optimization result, and all generated Plotly figures.

    NOTE(review): the models are fitted on the data's natural-unit columns,
    while ``optimize`` and parts of ``plot_rsm_individual`` treat values as
    coded in [-1, 1]. That coded/natural mix is preserved as-is here — confirm
    against the original analysis before relying on the optimum.
    """

    def __init__(self, data, x1_name, x2_name, x3_name, y_name,
                 x1_levels, x2_levels, x3_levels):
        """Store a copy of the design data, factor/response names and factor levels."""
        self.data = data.copy()
        self.model = None               # full quadratic model (statsmodels result)
        self.model_simplified = None    # reduced model (statsmodels result)
        self.optimized_results = None   # scipy OptimizeResult
        self.optimal_levels = None      # optimum in coded units
        self.all_figures = []           # generated Plotly figures
        self.x1_name = x1_name
        self.x2_name = x2_name
        self.x3_name = x3_name
        self.y_name = y_name
        # Original (natural-unit) levels of each factor.
        self.x1_levels = x1_levels
        self.x2_levels = x2_levels
        self.x3_levels = x3_levels

    def get_levels(self, variable_name):
        """Return the natural levels for the given factor name.

        Raises ValueError for an unknown name.
        """
        if variable_name == self.x1_name:
            return self.x1_levels
        elif variable_name == self.x2_name:
            return self.x2_levels
        elif variable_name == self.x3_name:
            return self.x3_levels
        else:
            raise ValueError(f"Variable desconocida: {variable_name}")

    def fit_model(self):
        """Fit the full second-order model (linear + quadratic + two-way interactions).

        Returns (fitted model, Pareto chart figure).
        """
        formula = f'{self.y_name} ~ {self.x1_name} + {self.x2_name} + {self.x3_name} + ' \
                  f'I({self.x1_name}**2) + I({self.x2_name}**2) + I({self.x3_name}**2) + ' \
                  f'{self.x1_name}:{self.x2_name} + {self.x1_name}:{self.x3_name} + {self.x2_name}:{self.x3_name}'
        self.model = smf.ols(formula, data=self.data).fit()
        print("Modelo Completo:")
        print(self.model.summary())
        return self.model, self.pareto_chart(self.model, "Pareto - Modelo Completo")

    def fit_simplified_model(self):
        """Fit the reduced model (drops x3 linear term and all interactions).

        Returns (fitted model, Pareto chart figure).
        """
        formula = f'{self.y_name} ~ {self.x1_name} + {self.x2_name} + ' \
                  f'I({self.x1_name}**2) + I({self.x2_name}**2) + I({self.x3_name}**2)'
        self.model_simplified = smf.ols(formula, data=self.data).fit()
        print("\nModelo Simplificado:")
        print(self.model_simplified.summary())
        return self.model_simplified, self.pareto_chart(self.model_simplified, "Pareto - Modelo Simplificado")

    def optimize(self, method='Nelder-Mead'):
        """Maximize the simplified model's prediction over coded factor space [-1, 1]^3.

        Returns a DataFrame with optimum levels in natural and coded units,
        rounded to 3 decimals, or None if the simplified model is not fitted.
        """
        if self.model_simplified is None:
            print("Error: Ajusta el modelo simplificado primero.")
            return

        def objective_function(x):
            # Negate the prediction because scipy minimizes.
            return -self.model_simplified.predict(pd.DataFrame({
                self.x1_name: [x[0]],
                self.x2_name: [x[1]],
                self.x3_name: [x[2]],
            })).values[0]

        bounds = [(-1, 1), (-1, 1), (-1, 1)]
        x0 = [0, 0, 0]
        self.optimized_results = minimize(objective_function, x0, method=method, bounds=bounds)
        self.optimal_levels = self.optimized_results.x

        # Translate the coded optimum back to natural units.
        optimal_levels_natural = [
            self.coded_to_natural(self.optimal_levels[0], self.x1_name),
            self.coded_to_natural(self.optimal_levels[1], self.x2_name),
            self.coded_to_natural(self.optimal_levels[2], self.x3_name),
        ]
        optimization_table = pd.DataFrame({
            'Variable': [self.x1_name, self.x2_name, self.x3_name],
            'Nivel Óptimo (Natural)': optimal_levels_natural,
            'Nivel Óptimo (Codificado)': self.optimal_levels,
        })
        return optimization_table.round(3)

    def plot_rsm_individual(self, fixed_variable, fixed_level):
        """Build one 3D response-surface figure with one factor held fixed.

        ``fixed_level`` is given in natural units. Returns a Plotly figure or
        None if the simplified model is not fitted.
        """
        if self.model_simplified is None:
            print("Error: Ajusta el modelo simplificado primero.")
            return None

        # The two factors that vary on the surface axes.
        varying_variables = [var for var in [self.x1_name, self.x2_name, self.x3_name]
                             if var != fixed_variable]

        x_natural_levels = self.get_levels(varying_variables[0])
        y_natural_levels = self.get_levels(varying_variables[1])

        # Natural-unit grid over the varying factors.
        x_range_natural = np.linspace(x_natural_levels[0], x_natural_levels[-1], 100)
        y_range_natural = np.linspace(y_natural_levels[0], y_natural_levels[-1], 100)
        x_grid_natural, y_grid_natural = np.meshgrid(x_range_natural, y_range_natural)

        # Coded grid fed to the model for prediction.
        x_grid_coded = self.natural_to_coded(x_grid_natural, varying_variables[0])
        y_grid_coded = self.natural_to_coded(y_grid_natural, varying_variables[1])

        fixed_level_coded = self.natural_to_coded(fixed_level, fixed_variable)

        prediction_data = pd.DataFrame({
            varying_variables[0]: x_grid_coded.flatten(),
            varying_variables[1]: y_grid_coded.flatten(),
        })
        prediction_data[fixed_variable] = fixed_level_coded

        z_pred = self.model_simplified.predict(prediction_data).values.reshape(x_grid_coded.shape)

        # Select experiments matching the fixed level (compared in coded units).
        # NOTE(review): self.data holds natural-unit values, so this isclose /
        # isin([-1, 0, 1]) filter may select nothing — confirm intent.
        subset_data = self.data[np.isclose(self.data[fixed_variable], fixed_level_coded)]
        valid_levels = [-1, 0, 1]
        experiments_data = subset_data[
            subset_data[varying_variables[0]].isin(valid_levels) &
            subset_data[varying_variables[1]].isin(valid_levels)
        ]

        experiments_x_natural = experiments_data[varying_variables[0]].apply(
            lambda x: self.coded_to_natural(x, varying_variables[0]))
        experiments_y_natural = experiments_data[varying_variables[1]].apply(
            lambda x: self.coded_to_natural(x, varying_variables[1]))

        # Semi-transparent response surface with natural units on the axes.
        fig = go.Figure(data=[go.Surface(z=z_pred, x=x_grid_natural, y=y_grid_natural,
                                         colorscale='Viridis', opacity=0.7, showscale=True)])

        # --- Overlay a wireframe grid on the surface ---
        for i in range(x_grid_natural.shape[0]):  # lines along x
            fig.add_trace(go.Scatter3d(
                x=x_grid_natural[i, :], y=y_grid_natural[i, :], z=z_pred[i, :],
                mode='lines', line=dict(color='gray', width=2),
                showlegend=False, hoverinfo='skip'
            ))
        for j in range(x_grid_natural.shape[1]):  # lines along y
            fig.add_trace(go.Scatter3d(
                x=x_grid_natural[:, j], y=y_grid_natural[:, j], z=z_pred[:, j],
                mode='lines', line=dict(color='gray', width=2),
                showlegend=False, hoverinfo='skip'
            ))
        # --- End wireframe overlay ---

        # Experimental points with per-point colors and value labels.
        colors = px.colors.qualitative.Safe
        point_labels = [f"{row[self.y_name]:.3f}" for _, row in experiments_data.iterrows()]
        fig.add_trace(go.Scatter3d(
            x=experiments_x_natural,
            y=experiments_y_natural,
            z=experiments_data[self.y_name].round(3),
            mode='markers+text',
            marker=dict(size=4, color=colors[:len(experiments_x_natural)]),
            text=point_labels,
            textposition='top center',
            name='Experimentos'
        ))

        fig.update_layout(
            scene=dict(
                xaxis_title=f"{varying_variables[0]} ({self.get_units(varying_variables[0])})",
                yaxis_title=f"{varying_variables[1]} ({self.get_units(varying_variables[1])})",
                zaxis_title=self.y_name,
            ),
            title=f"{self.y_name} vs {varying_variables[0]} y {varying_variables[1]}\n{fixed_variable} fijo en {fixed_level:.3f} ({self.get_units(fixed_variable)}) (Modelo Simplificado)",
            height=800,
            width=1000,
            showlegend=True
        )
        return fig

    def get_units(self, variable_name):
        """Return the display units for a variable name ('' when unknown)."""
        units = {
            'Glucosa_g_L': 'g/L',
            'Proteina_Pescado_g_L': 'g/L',
            'Sulfato_Manganeso_g_L': 'g/L',
            'Abs_600nm': '',  # absorbance is dimensionless
        }
        return units.get(variable_name, '')

    def generate_all_plots(self):
        """Generate one RSM figure per (fixed factor, observed level) pair.

        Levels are the unique values actually present in the data (not the
        canonical Box-Behnken design levels). Figures accumulate in
        ``self.all_figures``.
        """
        if self.model_simplified is None:
            print("Error: Ajusta el modelo simplificado primero.")
            return

        self.all_figures = []  # reset the figure list
        levels_to_plot_natural = {
            self.x1_name: sorted(set(self.data[self.x1_name])),
            self.x2_name: sorted(set(self.data[self.x2_name])),
            self.x3_name: sorted(set(self.data[self.x3_name])),
        }
        for fixed_variable in [self.x1_name, self.x2_name, self.x3_name]:
            for level in levels_to_plot_natural[fixed_variable]:
                fig = self.plot_rsm_individual(fixed_variable, level)
                if fig is not None:
                    self.all_figures.append(fig)

    def coded_to_natural(self, coded_value, variable_name):
        """Map a coded value in [-1, 1] to natural units (linear interpolation)."""
        levels = self.get_levels(variable_name)
        return levels[0] + (coded_value + 1) * (levels[-1] - levels[0]) / 2

    def natural_to_coded(self, natural_value, variable_name):
        """Map a natural-unit value to its coded equivalent in [-1, 1]."""
        levels = self.get_levels(variable_name)
        return -1 + 2 * (natural_value - levels[0]) / (levels[-1] - levels[0])

    def pareto_chart(self, model, title):
        """Horizontal Pareto chart of |t| effects with the significance cutoff line."""
        tvalues = model.tvalues[1:]  # drop the Intercept
        abs_tvalues = np.abs(tvalues)
        sorted_idx = np.argsort(abs_tvalues)[::-1]
        # .iloc: positional indexing (plain [] on a labeled Series is deprecated).
        sorted_tvalues = abs_tvalues.iloc[sorted_idx]
        sorted_names = tvalues.index[sorted_idx]

        # Two-sided critical t at alpha = 0.05 with the model's residual df.
        alpha = 0.05
        dof = model.df_resid
        t_critical = t.ppf(1 - alpha / 2, dof)

        fig = px.bar(
            x=sorted_tvalues.round(3),
            y=sorted_names,
            orientation='h',
            labels={'x': 'Efecto Estandarizado', 'y': 'Término'},
            title=title
        )
        fig.update_yaxes(autorange="reversed")
        fig.add_vline(x=t_critical, line_dash="dot",
                      annotation_text=f"t crítico = {t_critical:.3f}",
                      annotation_position="bottom right")
        return fig

    def get_simplified_equation(self):
        """Return the simplified model's equation as a text string (or None)."""
        if self.model_simplified is None:
            print("Error: Ajusta el modelo simplificado primero.")
            return None

        coefficients = self.model_simplified.params
        equation = f"{self.y_name} = {coefficients['Intercept']:.3f}"
        for term, coef in coefficients.items():
            if term != 'Intercept':
                if term == f'{self.x1_name}':
                    equation += f" + {coef:.3f}*{self.x1_name}"
                elif term == f'{self.x2_name}':
                    equation += f" + {coef:.3f}*{self.x2_name}"
                elif term == f'{self.x3_name}':
                    equation += f" + {coef:.3f}*{self.x3_name}"
                elif term == f'I({self.x1_name} ** 2)':
                    equation += f" + {coef:.3f}*{self.x1_name}^2"
                elif term == f'I({self.x2_name} ** 2)':
                    equation += f" + {coef:.3f}*{self.x2_name}^2"
                elif term == f'I({self.x3_name} ** 2)':
                    equation += f" + {coef:.3f}*{self.x3_name}^2"
        return equation

    def generate_prediction_table(self):
        """Add 'Predicho'/'Residual' columns to the data and return them with the response."""
        if self.model_simplified is None:
            print("Error: Ajusta el modelo simplificado primero.")
            return None
        self.data['Predicho'] = self.model_simplified.predict(self.data)
        self.data['Residual'] = self.data[self.y_name] - self.data['Predicho']
        return self.data[[self.y_name, 'Predicho', 'Residual']].round(3)

    def calculate_contribution_percentage(self):
        """Percent contribution of each model term to total (terms + residual) SS."""
        if self.model_simplified is None:
            print("Error: Ajusta el modelo simplificado primero.")
            return None

        anova_table = sm.stats.anova_lm(self.model_simplified, typ=2)
        ss_total = anova_table['sum_sq'].sum()

        contribution_table = pd.DataFrame({
            'Factor': [], 'Suma de Cuadrados': [], '% Contribución': []
        })
        for index, row in anova_table.iterrows():
            if index != 'Residual':
                # Pretty-print I(x ** 2) terms as x^2.
                factor_name = index
                if factor_name == f'I({self.x1_name} ** 2)':
                    factor_name = f'{self.x1_name}^2'
                elif factor_name == f'I({self.x2_name} ** 2)':
                    factor_name = f'{self.x2_name}^2'
                elif factor_name == f'I({self.x3_name} ** 2)':
                    factor_name = f'{self.x3_name}^2'
                ss_factor = row['sum_sq']
                contribution_percentage = (ss_factor / ss_total) * 100
                contribution_table = pd.concat([contribution_table, pd.DataFrame({
                    'Factor': [factor_name],
                    'Suma de Cuadrados': [ss_factor],
                    '% Contribución': [contribution_percentage],
                })], ignore_index=True)
        return contribution_table.round(3)

    def calculate_detailed_anova(self):
        """Detailed ANOVA table splitting the residual into lack-of-fit and pure error."""
        if self.model_simplified is None:
            print("Error: Ajusta el modelo simplificado primero.")
            return None

        # 1. Reduced model with only first-order and quadratic terms.
        formula_reduced = f'{self.y_name} ~ {self.x1_name} + {self.x2_name} + {self.x3_name} + ' \
                          f'I({self.x1_name}**2) + I({self.x2_name}**2) + I({self.x3_name}**2)'
        model_reduced = smf.ols(formula_reduced, data=self.data).fit()

        # 2. ANOVA of the reduced model (source of the regression SS).
        anova_reduced = sm.stats.anova_lm(model_reduced, typ=2)

        # 3-4. Total SS and df.
        ss_total = np.sum((self.data[self.y_name] - self.data[self.y_name].mean()) ** 2)
        df_total = len(self.data) - 1

        # 5-6. Regression SS (everything except 'Residual') and df.
        ss_regression = anova_reduced['sum_sq'][:-1].sum()
        df_regression = len(anova_reduced) - 1

        # 7. Residual SS/df from the simplified model.
        ss_residual = self.model_simplified.ssr
        df_residual = self.model_simplified.df_resid

        # 8. Pure-error SS from replicated factor combinations:
        #    SS_pe = sum over groups of (n_g - 1) * var_g (within-group squared
        #    deviations). The previous version multiplied the summed variances
        #    by the number of groups, which overstates SS_pe.
        group_cols = [self.x1_name, self.x2_name, self.x3_name]
        replicas = self.data[self.data.duplicated(subset=group_cols, keep=False)]
        if not replicas.empty:
            grouped = replicas.groupby(group_cols)[self.y_name]
            ss_pure_error = (grouped.var(ddof=1) * (grouped.count() - 1)).sum()
            df_pure_error = len(replicas) - replicas.groupby(group_cols).ngroups
        else:
            ss_pure_error = np.nan
            df_pure_error = np.nan

        # 9. Lack-of-fit = residual minus pure error.
        ss_lack_of_fit = ss_residual - ss_pure_error if not np.isnan(ss_pure_error) else np.nan
        df_lack_of_fit = df_residual - df_pure_error if not np.isnan(df_pure_error) else np.nan

        # 10. Mean squares (guard against missing/zero df).
        ms_regression = ss_regression / df_regression
        ms_residual = ss_residual / df_residual
        ms_lack_of_fit = np.nan
        if not np.isnan(df_lack_of_fit) and df_lack_of_fit != 0:
            ms_lack_of_fit = ss_lack_of_fit / df_lack_of_fit
        ms_pure_error = ss_pure_error / df_pure_error if not np.isnan(df_pure_error) else np.nan

        # 11. F statistic and p-value for lack of fit.
        f_lack_of_fit = (ms_lack_of_fit / ms_pure_error
                         if not np.isnan(ms_lack_of_fit) and not np.isnan(ms_pure_error) and ms_pure_error != 0
                         else np.nan)
        p_lack_of_fit = (1 - f.cdf(f_lack_of_fit, df_lack_of_fit, df_pure_error)
                         if not np.isnan(f_lack_of_fit) and not np.isnan(df_lack_of_fit) and not np.isnan(df_pure_error)
                         else np.nan)

        # 12. Assemble the table.
        detailed_anova_table = pd.DataFrame({
            'Fuente de Variación': ['Regresión', 'Residual', 'Falta de Ajuste', 'Error Puro', 'Total'],
            'Suma de Cuadrados': [ss_regression, ss_residual, ss_lack_of_fit, ss_pure_error, ss_total],
            'Grados de Libertad': [df_regression, df_residual, df_lack_of_fit, df_pure_error, df_total],
            'Cuadrado Medio': [ms_regression, ms_residual, ms_lack_of_fit, ms_pure_error, np.nan],
            'F': [np.nan, np.nan, f_lack_of_fit, np.nan, np.nan],
            'Valor p': [np.nan, np.nan, p_lack_of_fit, np.nan, np.nan],
        })

        # Curvature row: combined SS of the three quadratic terms, df = 3.
        ss_curvature = (anova_reduced['sum_sq'][f'I({self.x1_name} ** 2)'] +
                        anova_reduced['sum_sq'][f'I({self.x2_name} ** 2)'] +
                        anova_reduced['sum_sq'][f'I({self.x3_name} ** 2)'])
        df_curvature = 3
        detailed_anova_table.loc[len(detailed_anova_table)] = [
            'Curvatura', ss_curvature, df_curvature, ss_curvature / df_curvature, np.nan, np.nan
        ]
        # Reorder so curvature follows regression, then renumber the index.
        detailed_anova_table = detailed_anova_table.reindex([0, 5, 1, 2, 3, 4])
        detailed_anova_table = detailed_anova_table.reset_index(drop=True)
        return detailed_anova_table.round(3)

    def get_all_tables(self):
        """Return all result tables keyed by their Excel sheet name."""
        prediction_table = self.generate_prediction_table()
        contribution_table = self.calculate_contribution_percentage()
        detailed_anova_table = self.calculate_detailed_anova()
        return {
            'Predicciones': prediction_table,
            '% Contribución': contribution_table,
            'ANOVA Detallada': detailed_anova_table,
        }

    def save_figures_to_zip(self):
        """Write every stored figure as a PNG into a temp ZIP; return its path (or None)."""
        if not self.all_figures:
            return None
        zip_buffer = io.BytesIO()
        with zipfile.ZipFile(zip_buffer, 'w') as zip_file:
            for idx, fig in enumerate(self.all_figures, start=1):
                img_bytes = fig.to_image(format="png")
                zip_file.writestr(f'Grafico_{idx}.png', img_bytes)
        zip_buffer.seek(0)
        with tempfile.NamedTemporaryFile(delete=False, suffix=".zip") as temp_file:
            temp_file.write(zip_buffer.read())
            temp_path = temp_file.name
        return temp_path

    def save_fig_to_bytes(self, fig):
        """Render one Plotly figure to PNG bytes."""
        return fig.to_image(format="png")

    def save_all_figures_png(self):
        """Write each stored figure to its own temp PNG file; return the paths."""
        png_paths = []
        for idx, fig in enumerate(self.all_figures, start=1):
            img_bytes = fig.to_image(format="png")
            with tempfile.NamedTemporaryFile(delete=False, suffix=".png") as temp_file:
                temp_file.write(img_bytes)
                temp_path = temp_file.name
            png_paths.append(temp_path)
        return png_paths

    def save_tables_to_excel(self):
        """Write all result tables to a multi-sheet temp Excel file; return its path."""
        tables = self.get_all_tables()
        excel_buffer = io.BytesIO()
        with pd.ExcelWriter(excel_buffer, engine='xlsxwriter') as writer:
            for sheet_name, table in tables.items():
                table.to_excel(writer, sheet_name=sheet_name, index=False)
        excel_buffer.seek(0)
        excel_bytes = excel_buffer.read()
        with tempfile.NamedTemporaryFile(delete=False, suffix=".xlsx") as temp_file:
            temp_file.write(excel_bytes)
            temp_path = temp_file.name
        return temp_path

    def export_tables_to_word(self, tables_dict):
        """Export the given tables to a Word document; return its temp path (or None)."""
        if not tables_dict:
            return None
        doc = docx.Document()

        # Base font for the document.
        style = doc.styles['Normal']
        font = style.font
        font.name = 'Times New Roman'
        font.size = Pt(12)

        titulo = doc.add_heading('Informe de Optimización de Producción de Absorbancia', 0)
        titulo.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
        doc.add_paragraph(
            f"Fecha: {datetime.now().strftime('%d/%m/%Y %H:%M')}"
        ).alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
        doc.add_paragraph('\n')

        for sheet_name, table in tables_dict.items():
            doc.add_heading(sheet_name, level=1)
            if table.empty:
                doc.add_paragraph("No hay datos disponibles para esta tabla.")
                continue
            table_doc = doc.add_table(rows=1, cols=len(table.columns))
            table_doc.style = 'Light List Accent 1'
            hdr_cells = table_doc.rows[0].cells
            for idx, col_name in enumerate(table.columns):
                hdr_cells[idx].text = col_name
            for _, row in table.iterrows():
                row_cells = table_doc.add_row().cells
                for idx, item in enumerate(row):
                    row_cells[idx].text = str(item)
            doc.add_paragraph('\n')  # spacing between tables

        with tempfile.NamedTemporaryFile(delete=False, suffix=".docx") as tmp:
            doc.save(tmp.name)
            tmp_path = tmp.name
        return tmp_path


# --- Gradio interface helpers ---

def load_data(data_str):
    """Build the global RSM_BoxBehnken instance from the hard-coded data.

    ``data_str`` is accepted for interface compatibility but ignored.
    Returns (rounded data, row-visibility update) for the Gradio outputs.
    """
    try:
        global rsm, data
        x1_name = "Glucosa_g_L"
        x2_name = "Proteina_Pescado_g_L"
        x3_name = "Sulfato_Manganeso_g_L"
        y_name = "Abs_600nm"
        # Factor levels are the unique values observed in the data.
        x1_levels = sorted(set(data[x1_name]))
        x2_levels = sorted(set(data[x2_name]))
        x3_levels = sorted(set(data[x3_name]))
        rsm = RSM_BoxBehnken(data, x1_name, x2_name, x3_name, y_name,
                             x1_levels, x2_levels, x3_levels)
        return data.round(3), gr.update(visible=True)
    except Exception as e:
        error_message = f"Error al cargar los datos: {str(e)}"
        print(error_message)
        return None, gr.update(visible=False)


def fit_and_optimize_model():
    """Fit both models, optimize, build all tables/figures and export files.

    Returns the 11 values wired to the Gradio outputs (Nones if data not loaded).
    """
    if 'rsm' not in globals():
        return [None] * 11  # must match the number of outputs

    model_completo, pareto_completo = rsm.fit_model()
    model_simplificado, pareto_simplificado = rsm.fit_simplified_model()
    optimization_table = rsm.optimize()
    equation = rsm.get_simplified_equation()
    prediction_table = rsm.generate_prediction_table()
    contribution_table = rsm.calculate_contribution_percentage()
    anova_table = rsm.calculate_detailed_anova()
    rsm.generate_all_plots()  # populates rsm.all_figures

    # Markdown-friendly formatting of the equation.
    equation_formatted = equation.replace(" + ", "\n+ ").replace(" ** ", "^").replace("*", " × ")
    equation_formatted = f"### Ecuación del Modelo Simplificado:\n{equation_formatted}"

    excel_path = rsm.save_tables_to_excel()
    zip_path = rsm.save_figures_to_zip()

    return (
        model_completo.summary().as_html(),
        pareto_completo,
        model_simplificado.summary().as_html(),
        pareto_simplificado,
        equation_formatted,
        optimization_table,
        prediction_table,
        contribution_table,
        anova_table,
        zip_path,
        excel_path,
    )


def show_plot(current_index, all_figures):
    """Return the figure at ``current_index`` plus its caption and index."""
    if not all_figures:
        return None, "No hay gráficos disponibles.", current_index
    selected_fig = all_figures[current_index]
    plot_info_text = f"Gráfico {current_index + 1} de {len(all_figures)}"
    return selected_fig, plot_info_text, current_index


def navigate_plot(direction, current_index, all_figures):
    """Step left/right through the figure list (wrapping) and return the new state."""
    if not all_figures:
        return None, "No hay gráficos disponibles.", current_index
    if direction == 'left':
        new_index = (current_index - 1) % len(all_figures)
    elif direction == 'right':
        new_index = (current_index + 1) % len(all_figures)
    else:
        new_index = current_index
    selected_fig = all_figures[new_index]
    plot_info_text = f"Gráfico {new_index + 1} de {len(all_figures)}"
    # BUGFIX: return the advanced index (was current_index, so navigation
    # never moved past the first step).
    return selected_fig, plot_info_text, new_index


def download_current_plot(all_figures, current_index):
    """Save the currently shown figure to a temp PNG and return its path."""
    if not all_figures:
        return None
    fig = all_figures[current_index]
    img_bytes = rsm.save_fig_to_bytes(fig)
    filename = f"Grafico_RSM_{current_index + 1}.png"
    with tempfile.NamedTemporaryFile(delete=False, suffix=".png") as temp_file:
        temp_file.write(img_bytes)
        temp_path = temp_file.name
    return temp_path  # only the path; Gradio handles the download


def download_all_plots_zip():
    """Return the path of a ZIP with every figure (or None if nothing to export)."""
    if 'rsm' not in globals():
        return None
    zip_path = rsm.save_figures_to_zip()
    if zip_path:
        filename = f"Graficos_RSM_{datetime.now().strftime('%Y%m%d_%H%M%S')}.zip"
        # Gradio cannot rename downloads directly, so we just return the path.
        return zip_path
    return None


def download_all_tables_excel():
    """Return the path of a multi-sheet Excel file with every table (or None)."""
    if 'rsm' not in globals():
        return None
    excel_path = rsm.save_tables_to_excel()
    if excel_path:
        filename = f"Tablas_RSM_{datetime.now().strftime('%Y%m%d_%H%M%S')}.xlsx"
        # Gradio cannot rename downloads directly, so we just return the path.
        return excel_path
    return None


def exportar_word(rsm_instance, tables_dict):
    """Export the tables to Word and return the file path if it exists."""
    word_path = rsm_instance.export_tables_to_word(tables_dict)
    if word_path and os.path.exists(word_path):
        return word_path
    return None


# --- Gradio interface ---

def create_gradio_interface():
    """Assemble and return the Gradio Blocks app."""
    with gr.Blocks() as demo:
        gr.Markdown("# Optimización de la Absorbancia usando RSM")

        with gr.Row():
            with gr.Column():
                gr.Markdown("## Configuración del Análisis")
                # Input is display-only: the data is hard-coded at module level.
                data_input = gr.Textbox(
                    label="Datos del Experimento (formato CSV - Ignored, Data is Hardcoded)",
                    lines=5, interactive=False,
                    value="Data is pre-loaded, ignore input.")
                load_button = gr.Button("Cargar Datos")
            with gr.Column():
                gr.Markdown("## Datos Cargados")
                data_output = gr.Dataframe(label="Tabla de Datos", interactive=False)

        # Analysis section appears only after data is loaded.
        with gr.Row(visible=False) as analysis_row:
            with gr.Column():
                fit_button = gr.Button("Ajustar Modelo y Optimizar")
                gr.Markdown("**Modelo Completo**")
                model_completo_output = gr.HTML()
                pareto_completo_output = gr.Plot()
                gr.Markdown("**Modelo Simplificado**")
                model_simplificado_output = gr.HTML()
                pareto_simplificado_output = gr.Plot()
                gr.Markdown("**Ecuación del Modelo Simplificado**")
                equation_output = gr.HTML()
                optimization_table_output = gr.Dataframe(label="Tabla de Optimización", interactive=False)
                prediction_table_output = gr.Dataframe(label="Tabla de Predicciones", interactive=False)
                contribution_table_output = gr.Dataframe(label="Tabla de % de Contribución", interactive=False)
                anova_table_output = gr.Dataframe(label="Tabla ANOVA Detallada", interactive=False)
                gr.Markdown("## Descargar Todas las Tablas")
                download_excel_button = gr.DownloadButton("Descargar Tablas en Excel")
                download_word_button = gr.DownloadButton("Descargar Tablas en Word")
            with gr.Column():
                gr.Markdown("## Generar Gráficos de Superficie de Respuesta")
                fixed_variable_input = gr.Dropdown(
                    label="Variable Fija",
                    choices=["Glucosa_g_L", "Proteina_Pescado_g_L", "Sulfato_Manganeso_g_L"],
                    value="Glucosa_g_L")
                fixed_level_input = gr.Slider(
                    label="Nivel de Variable Fija (Natural Units)",
                    minimum=min(data['Glucosa_g_L']),
                    maximum=max(data['Glucosa_g_L']),
                    step=0.1, value=5.0)
                plot_button = gr.Button("Generar Gráficos")
                with gr.Row():
                    left_button = gr.Button("<")
                    right_button = gr.Button(">")
                rsm_plot_output = gr.Plot()
                plot_info = gr.Textbox(label="Información del Gráfico",
                                       value="Gráfico 1 de 9", interactive=False)
                with gr.Row():
                    download_plot_button = gr.DownloadButton("Descargar Gráfico Actual (PNG)")
                    download_all_plots_button = gr.DownloadButton("Descargar Todos los Gráficos (ZIP)")
                current_index_state = gr.State(0)   # index of the shown figure
                all_figures_state = gr.State([])    # cached figure list

        # Load data (the textbox content is ignored).
        load_button.click(
            load_data,
            inputs=[data_input],
            outputs=[data_output, analysis_row]
        )

        # Fit models and optimize.
        fit_button.click(
            fit_and_optimize_model,
            inputs=[],
            outputs=[
                model_completo_output,
                pareto_completo_output,
                model_simplificado_output,
                pareto_simplificado_output,
                equation_output,
                optimization_table_output,
                prediction_table_output,
                contribution_table_output,
                anova_table_output,
                download_all_plots_button,  # receives the ZIP file path
                download_excel_button,      # receives the Excel file path
            ]
        )

        # Generate and show the requested surface plot.
        plot_button.click(
            lambda fixed_var, fixed_lvl: (
                rsm.plot_rsm_individual(fixed_var, fixed_lvl),
                f"Gráfico 1 de {len(rsm.all_figures)}" if rsm.all_figures else "No hay gráficos disponibles.",
                0,
                rsm.all_figures  # refresh the cached figure list
            ),
            inputs=[fixed_variable_input, fixed_level_input],
            outputs=[rsm_plot_output, plot_info, current_index_state, all_figures_state]
        )

        # Plot navigation.
        left_button.click(
            lambda current_index, all_figures: navigate_plot('left', current_index, all_figures),
            inputs=[current_index_state, all_figures_state],
            outputs=[rsm_plot_output, plot_info, current_index_state]
        )
        right_button.click(
            lambda current_index, all_figures: navigate_plot('right', current_index, all_figures),
            inputs=[current_index_state, all_figures_state],
            outputs=[rsm_plot_output, plot_info, current_index_state]
        )

        # Downloads.
        download_plot_button.click(
            download_current_plot,
            inputs=[all_figures_state, current_index_state],
            outputs=download_plot_button
        )
        download_all_plots_button.click(
            download_all_plots_zip,
            inputs=[],
            outputs=download_all_plots_button
        )
        download_excel_button.click(
            fn=lambda: download_all_tables_excel(),
            inputs=[],
            outputs=download_excel_button
        )
        download_word_button.click(
            fn=lambda: exportar_word(rsm, rsm.get_all_tables()),
            inputs=[],
            outputs=download_word_button
        )

        gr.Markdown("## Instrucciones:")
        gr.Markdown("""
1. Click 'Cargar Datos' para usar los datos precargados.
2. Click 'Ajustar Modelo y Optimizar'.
3. Select 'Variable Fija' and 'Nivel de Variable Fija'.
4. Click 'Generar Gráficos'.
5. Navigate plots with '<' and '>'.
6. Download plots and tables as needed.
""")
    return demo


# --- Entry point ---

def main():
    """Launch the Gradio app with a public share link."""
    interface = create_gradio_interface()
    interface.launch(share=True)


if __name__ == "__main__":
    main()