Upload 2 files
Browse files

- interface.py +115 -108
- models.py +192 -149

interface.py
CHANGED
Before (lines removed in this commit are marked with -; several removed lines were truncated in the page capture and are shown as far as they were recovered):

@@ -2,36 +2,30 @@
 import numpy as np
 import pandas as pd
 import matplotlib
-matplotlib.use('Agg')
 import matplotlib.pyplot as plt
 from PIL import Image
 import io
 import json
-import traceback

-from
-# from decorators import gpu_decorator # No es necesario con Modal

-# Variables globales que serán "inyectadas"
 USE_MODAL_FOR_LLM_ANALYSIS = False
 generate_analysis_from_modal = None

-def create_error_image(message="Error", width=600, height=400):
-    img = Image.new('RGB', (width, height), color = (255, 200, 200))
-    # No podemos dibujar texto fácilmente sin Pillow-SIMD o dependencias de dibujo complejas.
-    # Una imagen simple es suficiente para indicar un error.
-    # from PIL import ImageDraw
-    # d = ImageDraw.Draw(img)
-    # d.text((10,10), message, fill=(0,0,0)) # Esto requeriría una fuente
     print(f"Generando imagen de error: {message}")
     return img

 def parse_bounds_str(bounds_str_input, num_params):
     bounds_str = str(bounds_str_input).strip()
     if not bounds_str:
-        print(f"Cadena de límites vacía para {num_params} params. Usando (-inf, inf).")
         return [-np.inf] * num_params, [np.inf] * num_params
     try:
         bounds_str = bounds_str.lower().replace('inf', 'np.inf').replace('none', 'None')

@@ -51,53 +45,37 @@ def parse_bounds_str(bounds_str_input, num_params):
             low = -np.inf if (item[0] is None or (isinstance(item[0], float) and np.isnan(item[0]))) else float(item[0])
             high = np.inf if (item[1] is None or (isinstance(item[1], float) and np.isnan(item[1]))) else float(item[1])
             lower_bounds.append(low); upper_bounds.append(high)
         return lower_bounds, upper_bounds
     except Exception as e:
-        print(f"
         return [-np.inf] * num_params, [np.inf] * num_params

 def call_llm_analysis_service(prompt: str) -> str:
-    # ... (sin cambios respecto a la versión anterior completa)
     if USE_MODAL_FOR_LLM_ANALYSIS and generate_analysis_from_modal:
-        print("interface.py:
-        try:
-        except Exception as e_modal_call:
-            print(f"Error llamando a la función Modal LLM: {e_modal_call}")
-            traceback.print_exc()
-            return f"Error al contactar el servicio de análisis IA (Modal): {e_modal_call}"
     else:
-        print("interface.py: Usando
-        # Implementación de fallback local (como en la respuesta anterior)
         try:
             from config import MODEL_PATH, MAX_LENGTH, DEVICE
             from transformers import AutoTokenizer, AutoModelForCausalLM
-            import torch
-            print(f"Fallback: Cargando modelo {MODEL_PATH} localmente en {DEVICE}...")
             tokenizer_local = AutoTokenizer.from_pretrained(MODEL_PATH)
             model_local = AutoModelForCausalLM.from_pretrained(MODEL_PATH).to(DEVICE)
             model_context_window = getattr(model_local.config, 'max_position_embeddings', getattr(model_local.config, 'sliding_window', 4096))
             max_prompt_len = model_context_window - MAX_LENGTH - 50
             if max_prompt_len <= 0 : max_prompt_len = model_context_window // 2
             inputs = tokenizer_local(prompt, return_tensors="pt", truncation=True, max_length=max_prompt_len).to(DEVICE)
             with torch.no_grad():
-                outputs = model_local.generate(
-                    **inputs, max_new_tokens=MAX_LENGTH,
-                    eos_token_id=tokenizer_local.eos_token_id,
-                    pad_token_id=tokenizer_local.pad_token_id if tokenizer_local.pad_token_id else tokenizer_local.eos_token_id,
-                    do_sample=True, temperature=0.6, top_p=0.9
-                )
             input_len = inputs.input_ids.shape[1]
             analysis = tokenizer_local.decode(outputs[0][input_len:], skip_special_tokens=True)
             return analysis.strip()
-        except Exception as
-            print(f"Error en el fallback LLM local: {e_local_llm}")
-            traceback.print_exc()
-            return f"Análisis (fallback local): Error al cargar/ejecutar modelo LLM local: {e_local_llm}."


 def process_and_plot(

@@ -118,136 +96,165 @@ def process_and_plot(
     substrate_eq_count_ui,
     product_eq_count_ui
 ):
-    error_img = create_error_image("Error en procesamiento")
-    error_analysis_text = "No se pudo generar el análisis debido a un error."

     try:
         if file_obj is None:
             return error_img, "Error: Por favor, sube un archivo Excel."

         try:
             df = pd.read_excel(file_obj.name)
         except Exception as e:
             return error_img, f"Error al leer el archivo Excel: {e}\n{traceback.format_exc()}"

         expected_cols = ['Tiempo', 'Biomasa', 'Sustrato', 'Producto']
-        for col in expected_cols

         time_data = df['Tiempo'].values
         biomass_data_exp = df['Biomasa'].values
         substrate_data_exp = df['Sustrato'].values
         product_data_exp = df['Producto'].values

-        # Asegurar que los contadores sean enteros válidos
         try:
             active_biomass_eqs = int(float(biomass_eq_count_ui))
             active_substrate_eqs = int(float(substrate_eq_count_ui))
             active_product_eqs = int(float(product_eq_count_ui))
-        except (TypeError, ValueError):
-            return error_img, "Error: Número de ecuaciones inválido

-        all_eq_inputs = {
-            'biomass': (
-                [biomass_bound1_ui, biomass_bound2_ui, biomass_bound3_ui][:active_biomass_eqs],
-                biomass_data_exp
-            ),
-            'substrate': (
-                [substrate_eq1_ui, substrate_eq2_ui, substrate_eq3_ui][:active_substrate_eqs],
-                [substrate_param1_ui, substrate_param2_ui, substrate_param3_ui][:active_substrate_eqs],
-                [substrate_bound1_ui, substrate_bound2_ui, substrate_bound3_ui][:active_substrate_eqs],
-                substrate_data_exp
-            ),
-            'product': (
-                [product_eq1_ui, product_eq2_ui, product_eq3_ui][:active_product_eqs],
-                [product_param1_ui, product_param2_ui, product_param3_ui][:active_product_eqs],
-                [product_bound1_ui, product_bound2_ui, product_bound3_ui][:active_product_eqs],
-                product_data_exp
-            )
         }

         model_handler = BioprocessModel()
         fitted_results_for_plot = {'biomass': [], 'substrate': [], 'product': []}
         results_for_llm_prompt = {'biomass': [], 'substrate': [], 'product': []}

         for model_type, (eq_list, param_str_list, bound_str_list, exp_data) in all_eq_inputs.items():
             if not (isinstance(exp_data, np.ndarray) and exp_data.size > 0 and np.any(np.isfinite(exp_data))):
-                print(f"Datos experimentales para {model_type} no válidos o vacíos, saltando ajuste.")
                 continue

             for i in range(len(eq_list)):
                 eq_str, param_s, bound_s = eq_list[i], param_str_list[i], bound_str_list[i]
-                if not eq_str or not param_s:

                 try:
                     model_handler.set_model(model_type, eq_str, param_s)
                     num_p = len(model_handler.models[model_type]['params'])
                     l_b, u_b = parse_bounds_str(bound_s, num_p)
-                    current_biomass_p = biomass_params_for_s_p if model_type in ['substrate', 'product'] else None

                     r2_val = model_handler.r2.get(model_type, float('nan'))
                     rmse_val = model_handler.rmse.get(model_type, float('nan'))

                     fitted_results_for_plot[model_type].append({'equation': eq_str, 'y_pred': y_pred, 'params': current_params, 'R2': r2_val})
                     results_for_llm_prompt[model_type].append({'equation': eq_str, 'params_fitted': current_params, 'R2': r2_val, 'RMSE': rmse_val})

-                    if model_type == 'biomass' and

         # Generar gráfico
         fig, axs = plt.subplots(3, 1, figsize=(10, 18), sharex=True)
-            axs[2]: (product_data_exp, 'Producto', fitted_results_for_plot['producto'])
-        }
         for ax, data_actual, ylabel, plot_results in plot_config_map.items():
             if isinstance(data_actual, np.ndarray) and data_actual.size > 0 and np.any(np.isfinite(data_actual)):
                 ax.plot(time_data, data_actual, 'o', label=f'Datos {ylabel}', markersize=5, alpha=0.7)
-            else:
-                ax.text(0.5, 0.5, f"No hay datos para {ylabel}", transform=ax.transAxes, ha='center', va='center')
             for idx, res_detail in enumerate(plot_results):
             if show_params_ui and plot_results:
-                param_display_texts = [f"Modelo {idx+1}:\n" + "\n".join([f"  {k}: {v:.4g}" for k,v in
-                ax.text(0.02,

-        plt.tight_layout(rect=[0,
-        buf = io.BytesIO(); plt.savefig(buf,
         image_pil = Image.open(buf); plt.close(fig)

         # Construir prompt y llamar a LLM
         prompt_details = json.dumps(results_for_llm_prompt, indent=2, ensure_ascii=False)
-        prompt_instructions = "\n\nPor favor, proporciona un análisis detallado
         full_prompt = prompt_intro + prompt_details + prompt_instructions
         analysis_text_llm = call_llm_analysis_service(full_prompt)

         return image_pil, analysis_text_llm

     except Exception as general_e:
         error_trace = traceback.format_exc()
-        error_message_full = f"Error
         print(error_message_full)
-        return create_error_image(f"Error: {general_e}"), error_message_full
After (lines added in this commit are marked with +):

 import numpy as np
 import pandas as pd
 import matplotlib
+matplotlib.use('Agg')
 import matplotlib.pyplot as plt
 from PIL import Image
 import io
 import json
+import traceback

+from models import BioprocessModel  # De TU models.py
+# from decorators import gpu_decorator # No relevante para Modal

 USE_MODAL_FOR_LLM_ANALYSIS = False
 generate_analysis_from_modal = None

+def create_error_image(message="Error en procesamiento", width=600, height=400):
+    # ... (sin cambios) ...
+    img = Image.new('RGB', (width, height), color = (255, 200, 200))
     print(f"Generando imagen de error: {message}")
     return img

 def parse_bounds_str(bounds_str_input, num_params):
+    # ... (sin cambios, pero asegúrate que es la versión robusta de respuestas anteriores) ...
     bounds_str = str(bounds_str_input).strip()
     if not bounds_str:
+        print(f"DEBUG (parse_bounds_str): Cadena de límites vacía para {num_params} params. Usando (-inf, inf).")
         return [-np.inf] * num_params, [np.inf] * num_params
     try:
         bounds_str = bounds_str.lower().replace('inf', 'np.inf').replace('none', 'None')

@@ -51,53 +45,37 @@
             low = -np.inf if (item[0] is None or (isinstance(item[0], float) and np.isnan(item[0]))) else float(item[0])
             high = np.inf if (item[1] is None or (isinstance(item[1], float) and np.isnan(item[1]))) else float(item[1])
             lower_bounds.append(low); upper_bounds.append(high)
+        print(f"DEBUG (parse_bounds_str): Límites parseados: L={lower_bounds}, U={upper_bounds}")
         return lower_bounds, upper_bounds
     except Exception as e:
+        print(f"ERROR (parse_bounds_str): Parseando '{bounds_str_input}': {e}. Usando por defecto (-inf, inf).")
         return [-np.inf] * num_params, [np.inf] * num_params

+
 def call_llm_analysis_service(prompt: str) -> str:
+    # ... (sin cambios) ...
     if USE_MODAL_FOR_LLM_ANALYSIS and generate_analysis_from_modal:
+        print("DEBUG (interface.py): Llamando a Modal LLM...")
+        try: return generate_analysis_from_modal(prompt)
+        except Exception as e: print(f"ERROR (interface.py): Modal LLM: {e}"); traceback.print_exc(); return f"Error servicio IA: {e}"
     else:
+        print("DEBUG (interface.py): Usando LLM local (fallback)...")
         try:
             from config import MODEL_PATH, MAX_LENGTH, DEVICE
             from transformers import AutoTokenizer, AutoModelForCausalLM
+            import torch
             tokenizer_local = AutoTokenizer.from_pretrained(MODEL_PATH)
             model_local = AutoModelForCausalLM.from_pretrained(MODEL_PATH).to(DEVICE)
             model_context_window = getattr(model_local.config, 'max_position_embeddings', getattr(model_local.config, 'sliding_window', 4096))
             max_prompt_len = model_context_window - MAX_LENGTH - 50
             if max_prompt_len <= 0 : max_prompt_len = model_context_window // 2
             inputs = tokenizer_local(prompt, return_tensors="pt", truncation=True, max_length=max_prompt_len).to(DEVICE)
             with torch.no_grad():
+                outputs = model_local.generate(**inputs, max_new_tokens=MAX_LENGTH, eos_token_id=tokenizer_local.eos_token_id, pad_token_id=tokenizer_local.pad_token_id if tokenizer_local.pad_token_id else tokenizer_local.eos_token_id, do_sample=True, temperature=0.6, top_p=0.9)
             input_len = inputs.input_ids.shape[1]
             analysis = tokenizer_local.decode(outputs[0][input_len:], skip_special_tokens=True)
             return analysis.strip()
+        except Exception as e: print(f"ERROR (interface.py): Fallback LLM: {e}"); traceback.print_exc(); return f"Error LLM local: {e}."


 def process_and_plot(

@@ -118,136 +96,165 @@
     substrate_eq_count_ui,
     product_eq_count_ui
 ):
+    print("\nDEBUG (interface.py): process_and_plot INICIADO.")
+    error_img = create_error_image("Error inicial en procesamiento")
+    error_analysis_text = "No se pudo generar el análisis debido a un error de inicialización."

     try:
         if file_obj is None:
+            print("ERROR (interface.py): No se subió archivo.")
             return error_img, "Error: Por favor, sube un archivo Excel."
+        print(f"DEBUG (interface.py): Archivo recibido: {file_obj.name}")

         try:
             df = pd.read_excel(file_obj.name)
+            print(f"DEBUG (interface.py): Excel leído. Columnas: {df.columns.tolist()}")
         except Exception as e:
             return error_img, f"Error al leer el archivo Excel: {e}\n{traceback.format_exc()}"

         expected_cols = ['Tiempo', 'Biomasa', 'Sustrato', 'Producto']
+        missing_cols = [col for col in expected_cols if col not in df.columns]
+        if missing_cols:
+            return error_img, f"Error: Faltan columnas en Excel: {', '.join(missing_cols)}."

         time_data = df['Tiempo'].values
         biomass_data_exp = df['Biomasa'].values
         substrate_data_exp = df['Sustrato'].values
         product_data_exp = df['Producto'].values
+        print(f"DEBUG (interface.py): Datos extraídos. Longitud de tiempo: {len(time_data)}")

         try:
             active_biomass_eqs = int(float(biomass_eq_count_ui))
             active_substrate_eqs = int(float(substrate_eq_count_ui))
             active_product_eqs = int(float(product_eq_count_ui))
+        except (TypeError, ValueError) as e_count:
+            return error_img, f"Error: Número de ecuaciones inválido: {e_count}"
+        print(f"DEBUG (interface.py): Counts: Bio={active_biomass_eqs}, Sub={active_substrate_eqs}, Prod={active_product_eqs}")
+
+        all_eq_inputs = {  # ... (sin cambios) ...
+            'biomass': ([biomass_eq1_ui, biomass_eq2_ui, biomass_eq3_ui][:active_biomass_eqs], [biomass_param1_ui, biomass_param2_ui, biomass_param3_ui][:active_biomass_eqs], [biomass_bound1_ui, biomass_bound2_ui, biomass_bound3_ui][:active_biomass_eqs], biomass_data_exp),
+            'substrate': ([substrate_eq1_ui, substrate_eq2_ui, substrate_eq3_ui][:active_substrate_eqs], [substrate_param1_ui, substrate_param2_ui, substrate_param3_ui][:active_substrate_eqs], [substrate_bound1_ui, substrate_bound2_ui, substrate_bound3_ui][:active_substrate_eqs], substrate_data_exp),
+            'product': ([product_eq1_ui, product_eq2_ui, product_eq3_ui][:active_product_eqs], [product_param1_ui, product_param2_ui, product_param3_ui][:active_product_eqs], [product_bound1_ui, product_bound2_ui, product_bound3_ui][:active_product_eqs], product_data_exp)
         }

         model_handler = BioprocessModel()
         fitted_results_for_plot = {'biomass': [], 'substrate': [], 'product': []}
         results_for_llm_prompt = {'biomass': [], 'substrate': [], 'product': []}
+        biomass_params_for_s_p_dict = None  # Debe ser un dict para models.py

         for model_type, (eq_list, param_str_list, bound_str_list, exp_data) in all_eq_inputs.items():
             if not (isinstance(exp_data, np.ndarray) and exp_data.size > 0 and np.any(np.isfinite(exp_data))):
+                print(f"INFO (interface.py): Datos experimentales para {model_type} no válidos o vacíos, saltando ajuste.")
+                results_for_llm_prompt[model_type].append({'equation': 'N/A - Sin datos válidos', 'params_fitted': {}, 'R2': np.nan, 'RMSE': np.nan})
                 continue

             for i in range(len(eq_list)):
                 eq_str, param_s, bound_s = eq_list[i], param_str_list[i], bound_str_list[i]
+                if not eq_str or not param_s:
+                    print(f"INFO (interface.py): Ecuación o parámetros vacíos para {model_type} #{i+1}, saltando.")
+                    results_for_llm_prompt[model_type].append({'equation': eq_str if eq_str else 'Ecuación Vacía', 'params_fitted': {}, 'R2': np.nan, 'RMSE': np.nan, 'error': 'Ecuación o parámetros vacíos'})
+                    continue
+
+                print(f"\nDEBUG (interface.py): Procesando {model_type} #{i+1}: Eq='{eq_str}', Params='{param_s}'")

                 try:
                     model_handler.set_model(model_type, eq_str, param_s)
                     num_p = len(model_handler.models[model_type]['params'])
                     l_b, u_b = parse_bounds_str(bound_s, num_p)

+                    # Pasar biomass_params_fitted si es sustrato o producto
+                    current_biomass_params_for_fit = biomass_params_for_s_p_dict if model_type in ['substrate', 'product'] else None

+                    print(f"DEBUG (interface.py): Llamando a fit_model para {model_type} #{i+1}")
+                    y_pred, popt_values = model_handler.fit_model(model_type, time_data, exp_data, bounds=(l_b, u_b), biomass_params_fitted=current_biomass_params_for_fit)
+                    print(f"DEBUG (interface.py): fit_model regresó para {model_type} #{i+1}. y_pred (primeros 5): {y_pred[:5] if y_pred is not None else 'None'}")
+
+                    if y_pred is None or popt_values is None:  # Chequear si fit_model indicó fallo
+                        print(f"ERROR (interface.py): Ajuste falló (y_pred o popt es None) para {model_type} #{i+1}.")
+                        results_for_llm_prompt[model_type].append({'equation': eq_str, 'params_fitted': {}, 'R2': np.nan, 'RMSE': np.nan, 'error': 'Fallo en curve_fit'})
+                        continue  # Saltar al siguiente modelo/ecuación
+
+                    current_params = model_handler.params.get(model_type, {})
                     r2_val = model_handler.r2.get(model_type, float('nan'))
                     rmse_val = model_handler.rmse.get(model_type, float('nan'))

                     fitted_results_for_plot[model_type].append({'equation': eq_str, 'y_pred': y_pred, 'params': current_params, 'R2': r2_val})
                     results_for_llm_prompt[model_type].append({'equation': eq_str, 'params_fitted': current_params, 'R2': r2_val, 'RMSE': rmse_val})

+                    if model_type == 'biomass' and biomass_params_for_s_p_dict is None and current_params:
+                        biomass_params_for_s_p_dict = current_params  # Guardar como dict
+                        print(f"DEBUG (interface.py): Parámetros de Biomasa (para S/P) guardados: {biomass_params_for_s_p_dict}")

+                except Exception as e_fit_loop:
+                    error_msg = f"Error en bucle de ajuste para {model_type} #{i+1} ('{eq_str}'): {e_fit_loop}\n{traceback.format_exc()}"
+                    print(error_msg)
+                    results_for_llm_prompt[model_type].append({'equation': eq_str, 'params_fitted': {}, 'R2': np.nan, 'RMSE': np.nan, 'error': str(e_fit_loop)})
+                    # Considerar si continuar con otros modelos o retornar error general
+                    # Por ahora, continuamos para intentar ajustar otros modelos si es posible

         # Generar gráfico
+        print("DEBUG (interface.py): Generando gráfico...")
         fig, axs = plt.subplots(3, 1, figsize=(10, 18), sharex=True)
+        # ... (resto de la lógica de graficación, como en tu versión completa, sin cambios significativos) ...
+        plot_config_map = {axs[0]: (biomass_data_exp, 'Biomasa', fitted_results_for_plot['biomass']), axs[1]: (substrate_data_exp, 'Sustrato', fitted_results_for_plot['substrate']), axs[2]: (product_data_exp, 'Producto', fitted_results_for_plot['product'])}
+        any_plot_successful = False
         for ax, (data_actual, ylabel, plot_results) in plot_config_map.items():
             if isinstance(data_actual, np.ndarray) and data_actual.size > 0 and np.any(np.isfinite(data_actual)):
                 ax.plot(time_data, data_actual, 'o', label=f'Datos {ylabel}', markersize=5, alpha=0.7)
+            else: ax.text(0.5, 0.5, f"No hay datos para {ylabel}", transform=ax.transAxes, ha='center', va='center')
             for idx, res_detail in enumerate(plot_results):
+                if res_detail.get('y_pred') is not None and np.any(np.isfinite(res_detail['y_pred'])):  # Solo plotear si y_pred es válido
+                    label = f'Modelo {idx+1} (R²:{res_detail.get("R2", float("nan")):.3f})'
+                    ax.plot(time_data, res_detail['y_pred'], '-', label=label, linewidth=2)
+                    any_plot_successful = True
+            ax.set_xlabel('Tiempo'); ax.set_ylabel(ylabel); ax.grid(True, linestyle=':', alpha=0.7)
+            if show_legend_ui: ax.legend(loc=legend_position_ui, fontsize='small')
             if show_params_ui and plot_results:
+                param_display_texts = [f"Modelo {idx+1}:\n" + "\n".join([f"  {k}: {v:.4g}" for k, v in res_d.get('params', {}).items()]) for idx, res_d in enumerate(plot_results) if res_d.get('params')]
+                if param_display_texts: ax.text(0.02, 0.98 if not ('upper' in legend_position_ui) else 0.02, "\n---\n".join(param_display_texts), transform=ax.transAxes, fontsize=7, verticalalignment='top' if not ('upper' in legend_position_ui) else 'bottom', bbox=dict(boxstyle='round,pad=0.3', fc='lightyellow', alpha=0.8))

+        if not any_plot_successful:  # Si ningún modelo se ajustó o y_pred fue NaN
+            print("WARN (interface.py): Ningún modelo produjo un gráfico válido.")
+            # Podrías dibujar algo en la figura para indicar esto
+            axs[0].text(0.5, 0.5, "Ningún modelo se pudo ajustar o graficar.", transform=axs[0].transAxes, ha='center', va='center', fontsize=12, color='red')

+        plt.tight_layout(rect=[0, 0, 1, 0.96]); fig.suptitle("Resultados del Ajuste de Modelos Cinéticos", fontsize=16)
+        buf = io.BytesIO(); plt.savefig(buf, format='png', dpi=150); buf.seek(0)
         image_pil = Image.open(buf); plt.close(fig)
+        print("DEBUG (interface.py): Gráfico generado.")

         # Construir prompt y llamar a LLM
+        # ... (sin cambios en la lógica del prompt) ...
+        prompt_intro = "Eres un experto en modelado cinético de bioprocesos. Analiza los siguientes resultados del ajuste de modelos a datos experimentales:\n\n"
         prompt_details = json.dumps(results_for_llm_prompt, indent=2, ensure_ascii=False)
+        prompt_instructions = """\n\nPor favor, proporciona un análisis detallado y crítico en español, estructurado de la siguiente manera:
+1. **Resumen General:** Una breve descripción del experimento y qué se intentó modelar.
+2. **Análisis por Componente (Biomasa, Sustrato, Producto):**
+   a. Para cada ecuación probada:
+      i. Calidad del Ajuste: Evalúa el R² (cercano a 1 es ideal) y el RMSE (más bajo es mejor). Comenta si el ajuste es bueno, regular o pobre.
+      ii. Interpretación de Parámetros: Explica brevemente qué representan los parámetros ajustados y si sus valores parecen razonables en un contexto de bioproceso (ej. tasas positivas, concentraciones no negativas).
+      iii. Ecuación Específica: Menciona la ecuación usada.
+      iv. Errores: Si hubo un error durante el ajuste para esta ecuación específica, menciónalo.
+   b. Comparación (si se probó más de una ecuación para un componente): ¿Cuál ecuación proporcionó el mejor ajuste y por qué?
+3. **Problemas y Limitaciones:**
+   a. ¿Hay problemas evidentes (ej. R² muy bajo, parámetros físicamente no realistas, sobreajuste si se puede inferir, etc.)?
+   b. ¿Qué limitaciones podrían tener los modelos o el proceso de ajuste?
+4. **Sugerencias y Próximos Pasos:**
+   a. ¿Cómo se podría mejorar el modelado (ej. probar otras ecuaciones, transformar datos, revisar calidad de datos experimentales)?
+   b. ¿Qué experimentos adicionales podrían realizarse para validar o refinar los modelos?
+5. **Conclusión Final:** Un veredicto general conciso sobre el éxito del modelado y la utilidad de los resultados obtenidos.
+
+Utiliza un lenguaje claro y accesible, pero manteniendo el rigor técnico. El análisis debe ser útil para alguien que busca entender la cinética de su bioproceso."""
         full_prompt = prompt_intro + prompt_details + prompt_instructions
+        print("DEBUG (interface.py): Prompt para LLM generado. Llamando al servicio LLM...")
         analysis_text_llm = call_llm_analysis_service(full_prompt)
+        print("DEBUG (interface.py): Análisis LLM recibido.")

         return image_pil, analysis_text_llm

     except Exception as general_e:
         error_trace = traceback.format_exc()
+        error_message_full = f"Error GENERAL INESPERADO en process_and_plot: {general_e}\n{error_trace}"
         print(error_message_full)
+        return create_error_image(f"Error General: {general_e}"), error_message_full
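
Note (not part of the commit): a minimal sketch of the Excel layout that process_and_plot expects, based on the expected_cols check above. The sample values and the file name are illustrative assumptions.

import pandas as pd

# Required columns checked by process_and_plot: 'Tiempo', 'Biomasa', 'Sustrato', 'Producto'.
# The numbers below are hypothetical example data.
df = pd.DataFrame({
    'Tiempo':   [0, 2, 4, 6, 8],
    'Biomasa':  [0.10, 0.35, 1.10, 1.90, 2.30],
    'Sustrato': [10.0, 9.2, 7.1, 4.0, 2.5],
    'Producto': [0.00, 0.15, 0.80, 1.70, 2.40],
})
df.to_excel("datos_ejemplo.xlsx", index=False)  # file that would then be uploaded through the UI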
models.py
CHANGED

Before (lines removed in this commit are marked with -; several removed lines were truncated in the page capture and are shown as far as they were recovered):

@@ -3,193 +3,236 @@ import numpy as np
 from scipy.optimize import curve_fit
 from sympy import symbols, sympify, lambdify
 import warnings
-from sklearn.metrics import mean_squared_error

 class BioprocessModel:
     def __init__(self):
-        self.params = {}
-        self.models = {}
-        self.r2 = {}
-        self.rmse = {}

     def set_model(self, model_type, equation_str, param_str):
-        """
-        :param model_type: 'biomass', 'substrate', o 'product'
-        :param equation_str: La ecuación como una cadena de texto (ej. "Xm * (1 - exp(-um * t))")
-                             Si la ecuación es para sustrato o producto y depende de la biomasa X(t),
-                             se debe usar 'X_val' en la ecuación para representar el valor de X(t).
-        :param param_str: Cadena de parámetros separados por comas (ej. "Xm, um")
-        """
-        equation_str = equation_str.strip()
-        # Si el usuario escribe "Y = ...", tomar solo la parte derecha
-        if '=' in equation_str:
-            equation_str = equation_str.split('=', 1)[1].strip()

         self.models[model_type] = {
-            'equation_str':
             'params': params_list
         }

     def fit_model(self, model_type, time, data, bounds, biomass_params_fitted=None):
-        """
-        Ajusta el modelo configurado a los datos.
-        :param model_type: 'biomass', 'substrate', o 'product'
-        :param time: Array de datos de tiempo
-        :param data: Array de datos observados
-        :param bounds: Tupla (lower_bounds, upper_bounds) para los parámetros
-        :param biomass_params_fitted: Dict de parámetros de biomasa ajustados (necesario para sustrato/producto)
-        :return: (y_pred, popt) - Datos predichos y parámetros optimizados
-        """
         if model_type not in self.models:
             raise ValueError(f"Modelo para '{model_type}' no configurado. Llama a set_model primero.")

         model_config = self.models[model_type]
         equation_expr = model_config['sympy_expr']
-        current_param_names = model_config['params']
-        current_param_syms = model_config['param_symbols']
         t_sym = model_config['time_symbol']

-            subs_dict = {sym: val for sym, val in zip(current_param_syms, current_p_values)}

             if model_type in ['substrate', 'product'] and X_val_sym in equation_expr.free_symbols:
-                if biomass_params_fitted is None or 'biomass' not in self.models:

-                # Calcular X(t) usando el modelo de biomasa ajustado y los t_data actuales
                 biomass_model_config = self.models['biomass']
                 biomass_expr = biomass_model_config['sympy_expr']
                 biomass_p_syms = biomass_model_config['param_symbols']

-                biomass_subs_dict = {sym: biomass_params_fitted[name] for sym, name in zip(biomass_p_syms, biomass_model_config['params'])}

-                # Si X_val es un array (porque t_data es un array), no podemos sustituirlo directamente
-                # en la expresión simbólica para crear una única función lambdify que tome `t`.
-                # En su lugar, evaluamos la expresión punto por punto después de sustituir parámetros.
-                # Esto es menos eficiente que un lambdify completo, pero más flexible.
-                # Evaluación numérica punto por punto
-                # No necesitamos lambdify si X_val es un array y se evalúa punto por punto
-                # La función `fit_model_wrapper` ya está iterando sobre los `t_data` (implícitamente a través de numpy)
-                # Entonces, si X_val es un array, la expresión `final_expr.subs(subs_dict)` debería funcionar si
-                # las operaciones son compatibles con numpy arrays (sympy suele hacerlo).
                 #

-                # Si X_val es un array, tomar el valor correspondiente a ti
-                    point_subs[X_val_sym] = point_subs[X_val_sym][i]
-                y_calculated[i] = equation_expr.subs(point_subs).evalf()
-            return y_calculated

-        else: # Modelo de Biomasa o modelo S/P que no usa X_val (raro)
-            func = lambdify(args_for_lambdify, equation_expr, 'numpy')
-            return func(t_data, *current_p_values)

-        p0 = np.ones(len(current_param_names)) # Estimaciones iniciales
-        lower_bounds, upper_bounds = bounds

         lower_bounds = np.array(lower_bounds if len(lower_bounds) == len(p0) else [-np.inf] * len(p0))
         upper_bounds = np.array(upper_bounds if len(upper_bounds) == len(p0) else [np.inf] * len(p0))

         with warnings.catch_warnings():
             warnings.simplefilter("ignore", RuntimeWarning)
-            warnings.simplefilter("ignore", UserWarning)

         self.params[model_type] = dict(zip(current_param_names, popt))
After (lines added in this commit are marked with +):

 from scipy.optimize import curve_fit
 from sympy import symbols, sympify, lambdify
 import warnings
+from sklearn.metrics import mean_squared_error

 class BioprocessModel:
     def __init__(self):
+        self.params = {}
+        self.models = {}
+        self.r2 = {}
+        self.rmse = {}
+        print("DEBUG (models.py): BioprocessModel instanciado.")

     def set_model(self, model_type, equation_str, param_str):
+        print(f"\nDEBUG (models.py): set_model llamado para tipo='{model_type}'")
+        print(f"  Equation str (raw): '{equation_str}'")
+        print(f"  Param str (raw): '{param_str}'")

+        equation_str_cleaned = str(equation_str).strip()
+        if '=' in equation_str_cleaned:
+            equation_str_cleaned = equation_str_cleaned.split('=', 1)[1].strip()

+        if not equation_str_cleaned:
+            print(f"ERROR (models.py): Ecuación vacía para {model_type}.")
+            raise ValueError(f"La cadena de la ecuación para '{model_type}' no puede estar vacía.")
+        if not param_str:
+            print(f"ERROR (models.py): Cadena de parámetros vacía para {model_type}.")
+            raise ValueError(f"La cadena de parámetros para '{model_type}' no puede estar vacía.")
+
+        params_list = [param.strip() for param in param_str.split(',')]
+        if not all(params_list):  # Chequear si algún nombre de parámetro es vacío
+            print(f"ERROR (models.py): Algún nombre de parámetro está vacío en '{param_str}' para {model_type}.")
+            raise ValueError(f"Los nombres de los parámetros no pueden ser vacíos para '{model_type}'.")
+
+        print(f"  Equation (cleaned): '{equation_str_cleaned}'")
+        print(f"  Params list: {params_list}")
+
         self.models[model_type] = {
+            'equation_str': equation_str_cleaned,
             'params': params_list
         }

+        try:
+            # Símbolos para el tiempo y los parámetros del modelo actual
+            t_sym = symbols('t')
+            # Asegurar que los parámetros sean símbolos válidos
+            current_param_syms = []
+            for p_name in params_list:
+                if not p_name.isidentifier():  # Chequeo básico de validez del nombre del símbolo
+                    raise ValueError(f"Nombre de parámetro '{p_name}' no es un identificador Python válido para sympy.")
+                current_param_syms.append(symbols(p_name))
+
+            # Símbolo para X(t) si es necesario (solo para modelos S y P)
+            X_val_sym = symbols('X_val')
+
+            # Crear la expresión simbólica
+            sympy_expr = sympify(equation_str_cleaned)
+            print(f"  Sympy expression: {sympy_expr}")
+            print(f"  Free symbols in expr: {sympy_expr.free_symbols}")
+
+            # Guardar la expresión y los símbolos para uso en fit_model
+            self.models[model_type]['sympy_expr'] = sympy_expr
+            self.models[model_type]['param_symbols'] = tuple(current_param_syms)  # Usar tupla
+            self.models[model_type]['time_symbol'] = t_sym
+            self.models[model_type]['X_val_symbol'] = X_val_sym  # Guardar por si se usa
+            print(f"  Modelo '{model_type}' configurado exitosamente.")
+
+        except Exception as e:
+            print(f"ERROR (models.py): Fallo al procesar con sympy para '{model_type}': {e}")
+            raise ValueError(f"Error en la ecuación o parámetros para '{model_type}': {e}")
+

     def fit_model(self, model_type, time, data, bounds, biomass_params_fitted=None):
+        print(f"\nDEBUG (models.py): fit_model llamado para tipo='{model_type}'")
         if model_type not in self.models:
+            print(f"ERROR (models.py): Modelo para '{model_type}' no configurado.")
             raise ValueError(f"Modelo para '{model_type}' no configurado. Llama a set_model primero.")

         model_config = self.models[model_type]
         equation_expr = model_config['sympy_expr']
+        current_param_names = model_config['params']  # Lista de strings
+        current_param_syms = model_config['param_symbols']  # Tupla de símbolos sympy
         t_sym = model_config['time_symbol']
+        X_val_sym = model_config['X_val_symbol']
+
+        print(f"  Ajustando con ecuación: {equation_expr}")
+        print(f"  Parámetros a ajustar: {current_param_names}")
+        print(f"  Datos de tiempo (primeros 5): {time[:5]}")
+        print(f"  Datos experimentales (primeros 5): {data[:5]}")
+        print(f"  Límites: {bounds}")
+        if biomass_params_fitted:
+            print(f"  Parámetros de biomasa ajustados (para S/P): {biomass_params_fitted}")

+        # Función que será pasada a curve_fit
+        def fit_model_wrapper(t_data_wrapper, *current_p_values_wrapper):
+            # `t_data_wrapper` es un array numpy de tiempos.
+            # `current_p_values_wrapper` es una tupla de los valores actuales de los parámetros.

+            # Diccionario de sustitución para los parámetros del modelo actual
+            subs_dict_wrapper = {sym: val for sym, val in zip(current_param_syms, current_p_values_wrapper)}
+
+            # Preparar la expresión para lambdify: solo con `t` y los parámetros del modelo actual
+            lambdify_args_wrapper = [t_sym] + list(current_param_syms)
+            expr_to_lambdify = equation_expr
+
+            # Manejar dependencia de X_val para sustrato y producto
             if model_type in ['substrate', 'product'] and X_val_sym in equation_expr.free_symbols:
+                if biomass_params_fitted is None or 'biomass' not in self.models or 'sympy_expr' not in self.models['biomass']:
+                    print("ERROR (models.py fit_model_wrapper): Falta config/params de biomasa para modelo S/P dependiente.")
+                    # Devolver algo que cause error en curve_fit o un array de NaNs de tamaño correcto
+                    return np.full_like(t_data_wrapper, np.nan)

                 biomass_model_config = self.models['biomass']
                 biomass_expr = biomass_model_config['sympy_expr']
                 biomass_p_syms = biomass_model_config['param_symbols']
+                biomass_t_sym = biomass_model_config['time_symbol']

+                biomass_subs_for_calc = {sym: biomass_params_fitted[name] for sym, name in zip(biomass_p_syms, biomass_model_config['params'])}

+                # Calcular X(t) para cada tiempo en t_data_wrapper
+                # Esto DEBE resultar en un array numpy
+                try:
+                    # Lambdify la expresión de biomasa una vez si es posible
+                    if 'biomass_func_lambdified' not in biomass_model_config:
+                        biomass_model_config['biomass_func_lambdified'] = lambdify(
+                            [biomass_t_sym] + list(biomass_p_syms),
+                            biomass_expr,
+                            'numpy'
+                        )
+                    # Obtener los valores de los parámetros de biomasa en el orden correcto
+                    biomass_p_values_for_calc = [biomass_params_fitted[p_name] for p_name in biomass_model_config['params']]
+                    X_t_values_wrapper = biomass_model_config['biomass_func_lambdified'](t_data_wrapper, *biomass_p_values_for_calc)
+
+                except Exception as e_biomass_calc:
+                    print(f"ERROR (models.py fit_model_wrapper): Calculando X(t) para S/P: {e_biomass_calc}")
+                    return np.full_like(t_data_wrapper, np.nan)
+
+                # Ahora, X_val_sym necesita ser reemplazado por X_t_values_wrapper en expr_to_lambdify
+                # Esto es complicado porque X_t_values_wrapper es un array, no un escalar simbólico.
+                # La forma más segura es sustituirlo en la expresión antes de lambdify, si sympy lo permite,
+                # o pasarlo como un argumento extra a una función lambdify que lo espere.
                 #
+                # Alternativa: si lambdify no maneja bien un array como X_val, evaluar punto por punto.
+                # Por ahora, intentaremos pasar X_val como un argumento adicional a lambdify.
+                if X_val_sym not in current_param_syms:  # Asegurar que no es ya un parámetro del modelo S/P
+                    lambdify_args_wrapper.append(X_val_sym)

+                # Crear la función compilada para el modelo actual (S o P)
+                # Esta función ahora tomará t, params_actuales..., y X_val_array
+                func_compiled = lambdify(lambdify_args_wrapper, expr_to_lambdify, 'numpy')

+                # Llamar a la función compilada
+                try:
+                    # Pasar X_t_values_wrapper como el último argumento si X_val_sym fue añadido
+                    call_args = [t_data_wrapper] + list(current_p_values_wrapper)
+                    if X_val_sym in lambdify_args_wrapper[-1:]:  # Si X_val_sym es el último argumento esperado
+                        call_args.append(X_t_values_wrapper)
+                    return func_compiled(*call_args)
+                except Exception as e_sp_eval:
+                    print(f"ERROR (models.py fit_model_wrapper): Evaluando S/P con X_val: {e_sp_eval}")
+                    return np.full_like(t_data_wrapper, np.nan)

+            else:  # Es el modelo de biomasa o un modelo S/P que no usa X_val
+                func_compiled = lambdify(lambdify_args_wrapper, expr_to_lambdify, 'numpy')
+                try:
+                    return func_compiled(t_data_wrapper, *current_p_values_wrapper)
+                except Exception as e_bio_eval:
+                    print(f"ERROR (models.py fit_model_wrapper): Evaluando biomasa: {e_bio_eval}")
+                    return np.full_like(t_data_wrapper, np.nan)
+
+
+        p0 = np.ones(len(current_param_names))
+        lower_bounds, upper_bounds = bounds
         lower_bounds = np.array(lower_bounds if len(lower_bounds) == len(p0) else [-np.inf] * len(p0))
         upper_bounds = np.array(upper_bounds if len(upper_bounds) == len(p0) else [np.inf] * len(p0))

+        print(f"  Estimaciones iniciales p0: {p0}")
+        print(f"  Límites para curve_fit: L={lower_bounds}, U={upper_bounds}")
+
+        popt, pcov = None, None  # Inicializar
         with warnings.catch_warnings():
             warnings.simplefilter("ignore", RuntimeWarning)
+            warnings.simplefilter("ignore", UserWarning)
+            try:
+                popt, pcov = curve_fit(fit_model_wrapper, time, data, p0=p0, bounds=(lower_bounds, upper_bounds), maxfev=50000, method='trf')  # 'trf' es bueno con límites
+                print(f"  curve_fit completado. Parámetros optimizados (popt): {popt}")
+            except RuntimeError as e_curvefit:  # A menudo "Optimal parameters not found"
+                print(f"ERROR (models.py): curve_fit falló para {model_type} con RuntimeError: {e_curvefit}")
+                self.params[model_type] = {p: np.nan for p in current_param_names}
+                self.r2[model_type] = np.nan
+                self.rmse[model_type] = np.nan
+                return np.full_like(data, np.nan), None  # Devolver NaNs y None para popt
+            except ValueError as e_val_curvefit:  # A menudo por límites o datos incompatibles
+                print(f"ERROR (models.py): curve_fit falló para {model_type} con ValueError: {e_val_curvefit}")
+                self.params[model_type] = {p: np.nan for p in current_param_names}
+                self.r2[model_type] = np.nan
+                self.rmse[model_type] = np.nan
+                return np.full_like(data, np.nan), None
+            except Exception as e_gen_curvefit:  # Cualquier otro error
+                print(f"ERROR (models.py): curve_fit falló inesperadamente para {model_type}: {e_gen_curvefit}")
+                self.params[model_type] = {p: np.nan for p in current_param_names}
+                self.r2[model_type] = np.nan
+                self.rmse[model_type] = np.nan
+                return np.full_like(data, np.nan), None
+
+
+        if popt is None:  # Si curve_fit falló y ya manejamos el error
+            return np.full_like(data, np.nan), None

         self.params[model_type] = dict(zip(current_param_names, popt))

+        # Re-calcular y_pred con los parámetros optimizados
+        try:
+            y_pred = fit_model_wrapper(time, *popt)
+            if np.any(np.isnan(y_pred)):  # Si la evaluación con popt da NaN
+                print(f"ADVERTENCIA (models.py): y_pred contiene NaNs después del ajuste para {model_type}.")
+                self.r2[model_type] = np.nan
+                self.rmse[model_type] = np.nan
+                # No sobrescribir self.params[model_type] aquí si popt fue encontrado
+            else:
+                ss_res = np.sum((data - y_pred) ** 2)
+                ss_tot = np.sum((data - np.mean(data)) ** 2)
+                if ss_tot == 0:
+                    self.r2[model_type] = 1.0 if ss_res < 1e-9 else 0.0
+                else:
+                    self.r2[model_type] = 1 - (ss_res / ss_tot)
+                self.rmse[model_type] = np.sqrt(mean_squared_error(data, y_pred))
+        except Exception as e_ypred:
+            print(f"ERROR (models.py): Calculando y_pred final para {model_type}: {e_ypred}")
+            y_pred = np.full_like(data, np.nan)  # Devolver NaNs si la predicción final falla
+            self.r2[model_type] = np.nan
+            self.rmse[model_type] = np.nan

+        print(f"  Ajuste para {model_type} completado. R2: {self.r2.get(model_type)}, RMSE: {self.rmse.get(model_type)}")
+        return y_pred, popt
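
Note (not part of the commit): a minimal usage sketch of the BioprocessModel API defined above, with synthetic data. The saturating-exponential biomass equation and its parameters "Xm, um" are taken from the example in the old docstring, and the product equation proportional to X_val is an illustrative assumption; the class accepts any sympy-parsable expression in 't', the declared parameters, and optionally 'X_val'.

import numpy as np
from models import BioprocessModel

# Synthetic data (hypothetical values, for illustration only).
rng = np.random.default_rng(0)
t = np.linspace(0, 10, 25)
X_obs = 3.0 * (1 - np.exp(-0.5 * t)) + rng.normal(0, 0.05, t.size)
P_obs = 0.4 * X_obs + rng.normal(0, 0.02, t.size)

handler = BioprocessModel()

# Biomass: the equation uses only 't' and the declared parameters "Xm, um".
handler.set_model('biomass', "Xm * (1 - exp(-um * t))", "Xm, um")
X_pred, popt_X = handler.fit_model('biomass', t, X_obs, bounds=([0, 0], [np.inf, np.inf]))

# Product: 'X_val' stands for the fitted biomass X(t), so the fitted biomass
# parameters must be passed via biomass_params_fitted.
handler.set_model('product', "Ypx * X_val", "Ypx")
P_pred, popt_P = handler.fit_model('product', t, P_obs, bounds=([0], [np.inf]),
                                   biomass_params_fitted=handler.params['biomass'])

print(handler.params, handler.r2, handler.rmse)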