Update app.py
app.py CHANGED
@@ -4,93 +4,73 @@ import torch
 import os
 import gc
 
-# Variables globales para el modelo y tokenizador
 model = None
 tokenizer = None
 
-# Cargar el prompt desde el archivo
 def get_system_prompt():
     with open("prompt.txt", "r", encoding="utf-8") as f:
         return f.read().strip()
 
-# Función para cargar el modelo (se ejecutará solo cuando sea necesario)
 def load_model_if_needed():
     global model, tokenizer
-
+
     if model is None:
-        print("Cargando modelo Mistral-7B-Instruct-v0.1...")
-
-        model_name = "mistralai/Mistral-7B-Instruct-v0.1"
-
-        tokenizer = AutoTokenizer.from_pretrained(model_name)
-
-        # Configuración para CPU con optimizaciones de memoria
+        print("🔁 Cargando modelo Falcon-7B-Instruct...")
+
+        model_name = "tiiuae/falcon-7b-instruct"
+
+        tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
         model = AutoModelForCausalLM.from_pretrained(
             model_name,
-            torch_dtype=torch.float32,
-            low_cpu_mem_usage=True
+            torch_dtype=torch.float32,
+            trust_remote_code=True,
+            low_cpu_mem_usage=True
         )
-
-        # Mover el modelo a CPU explícitamente
+
         model = model.to("cpu")
-
-        # Forzar recolección de basura para liberar memoria
+
         gc.collect()
         torch.cuda.empty_cache() if torch.cuda.is_available() else None
-
-        print("Modelo cargado correctamente en CPU!")
 
-
+        print("✅ Modelo Falcon-7B cargado en CPU")
+
 def generate_response(user_message):
     try:
-        # Cargar el modelo si aún no está cargado
         load_model_if_needed()
-
+
         if not user_message.strip():
             return "Por favor, escribe una pregunta para que pueda ayudarte."
-
-        # Obtener el prompt del sistema
+
         system_prompt = get_system_prompt()
-
-        # Formato de prompt para Mistral-7B-Instruct-v0.1
-        prompt = f"""<s>[INST] {system_prompt}
 
-{user_message} [/INST]"""
-
-        # Tokenizar el prompt
+        prompt = f"{system_prompt}\n\nUsuario: {user_message}\nBITER:"
+
         inputs = tokenizer(prompt, return_tensors="pt")
-
-        # Configuración de generación optimizada para CPU
+
         generation_config = {
-            "max_new_tokens":
+            "max_new_tokens": 400,
             "temperature": 0.7,
             "top_p": 0.9,
             "do_sample": True,
             "pad_token_id": tokenizer.eos_token_id,
             "num_return_sequences": 1
         }
-
-        # Generar respuesta
+
         with torch.no_grad():
             outputs = model.generate(**inputs, **generation_config)
-
-        # Decodificar la respuesta completa
-        full_response = tokenizer.decode(outputs[0], skip_special_tokens=True)
-
-        # Extraer solo la respuesta del asistente (después del prompt)
-        assistant_response = full_response.replace(prompt.replace("<s>", "").replace("</s>", ""), "").strip()
-
-        # Forzar recolección de basura para liberar memoria
+
+        response = tokenizer.decode(outputs[0], skip_special_tokens=True)
+        answer = response.replace(prompt, "").strip()
+
         gc.collect()
         torch.cuda.empty_cache() if torch.cuda.is_available() else None
-
-        return assistant_response
-
+
+        return answer
+
     except Exception as e:
         print(f"Error: {str(e)}")
-        return f"Lo siento, ha ocurrido un error: {str(e)}"
+        return f"❌ Lo siento, ha ocurrido un error: {str(e)}"
 
-# Crear la interfaz de Gradio
 demo = gr.Interface(
     fn=generate_response,
     inputs=gr.Textbox(
@@ -108,7 +88,5 @@ demo = gr.Interface(
     allow_flagging="never"
 )
 
-# Lanzar la aplicación con configuración para ahorrar memoria
 if __name__ == "__main__":
-    # Configurar menos workers para ahorrar memoria
     demo.queue(max_size=1).launch(share=False, debug=False)
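Note on the new answer-extraction step: answer = response.replace(prompt, "").strip() assumes that decoding the generated sequence reproduces the prompt string exactly, which a tokenizer round-trip does not always guarantee. A minimal sketch of a more defensive alternative, not part of this commit, assuming the same Falcon-7B setup (the prompt text below is a hypothetical stand-in for the real contents of prompt.txt), is to decode only the tokens that come after the prompt:

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "tiiuae/falcon-7b-instruct"
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.float32,
    trust_remote_code=True,
    low_cpu_mem_usage=True
).to("cpu")

# Hypothetical prompt; the app reads the real system prompt from prompt.txt.
prompt = "Eres BITER, un mentor de negocios.\n\nUsuario: Hola\nBITER:"
inputs = tokenizer(prompt, return_tensors="pt")

with torch.no_grad():
    outputs = model.generate(
        **inputs,
        max_new_tokens=400,
        temperature=0.7,
        top_p=0.9,
        do_sample=True,
        pad_token_id=tokenizer.eos_token_id
    )

# For decoder-only models, generate() returns the prompt tokens followed by
# the continuation, so slicing at the prompt length isolates the answer.
prompt_len = inputs["input_ids"].shape[-1]
answer = tokenizer.decode(outputs[0][prompt_len:], skip_special_tokens=True).strip()
print(answer)

Because the slice is taken on token ids rather than on decoded text, the extraction stays exact even when skip_special_tokens or whitespace normalization changes how the prompt portion decodes.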