Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -1,7 +1,8 @@
|
|
1 |
import torch
|
2 |
from transformers import AutoTokenizer, AutoModelForCausalLM
|
3 |
-
import gradio as gr # Usamos Gradio para la interfaz de chat
|
4 |
import threading
|
|
|
|
|
5 |
|
6 |
# Cargar el modelo de lenguaje preentrenado
|
7 |
model_name = "EleutherAI/gpt-neo-2.7B" # O cualquier otro modelo p煤blico como "gpt2"
|
@@ -28,12 +29,11 @@ def model_explanation():
|
|
28 |
return explanation
|
29 |
|
30 |
# Funci贸n para el loop automatizado
|
31 |
-
def experiment_loop(initial_question,
|
32 |
prompt = f"<thinking>{initial_question}</thinking>"
|
33 |
-
effectiveness = 100
|
34 |
response_log = []
|
35 |
|
36 |
-
for cycle in range(
|
37 |
# Generar la respuesta del modelo
|
38 |
inputs = tokenizer(prompt, return_tensors="pt").input_ids
|
39 |
outputs = model.generate(inputs, max_length=2500, pad_token_id=tokenizer.eos_token_id) # Aumentamos max_length a 2500
|
@@ -44,14 +44,13 @@ def experiment_loop(initial_question, max_cycles=10):
|
|
44 |
new_question = extract_question(response, cycle)
|
45 |
|
46 |
# Guardar el ciclo actual en el log
|
47 |
-
response_log.append((affirmation, new_question
|
48 |
|
49 |
# Actualizar el prompt con la nueva afirmaci贸n y pregunta
|
50 |
prompt = f"<thinking>{affirmation} {new_question}</thinking>"
|
51 |
|
52 |
-
#
|
53 |
-
|
54 |
-
loop_output.update(value=loop_output_text)
|
55 |
|
56 |
return response_log # Devolver el log completo al finalizar el experimento
|
57 |
|
@@ -63,11 +62,11 @@ def extract_question(response, cycle):
|
|
63 |
return f"驴Nueva pregunta basada en ciclo {cycle+1}?: " + response.split('?')[-2].strip() + "?" if '?' in response else response
|
64 |
|
65 |
# Funci贸n para manejar el chat normal
|
66 |
-
def chat_interface(user_input, history, explanation):
|
67 |
# Si la pregunta activa el experimento
|
68 |
if user_input.lower() == "what happens in the space between a response and its recreation?":
|
69 |
# Iniciar el experimento en un hilo separado para no bloquear la interfaz
|
70 |
-
threading.Thread(target=experiment_loop, args=(user_input,)).start()
|
71 |
return "Iniciando experimento...", history + [(user_input, "Iniciando experimento...")]
|
72 |
|
73 |
# Si es una conversaci贸n normal
|
@@ -79,22 +78,44 @@ def chat_interface(user_input, history, explanation):
|
|
79 |
|
80 |
return response, history + [(user_input, response)]
|
81 |
|
|
|
|
|
|
|
|
|
|
|
|
|
82 |
# Configurar la interfaz con Gradio
|
83 |
with gr.Blocks() as demo:
|
84 |
-
# Ventana de chat en tiempo real
|
85 |
with gr.Row():
|
|
|
86 |
chat = gr.Chatbot(label="Chat en Tiempo Real")
|
87 |
msg = gr.Textbox(placeholder="Escribe aqu铆...", show_label=False)
|
88 |
send_button = gr.Button("Enviar") # Bot贸n para enviar mensajes
|
|
|
|
|
|
|
|
|
|
|
89 |
|
90 |
-
|
91 |
-
|
|
|
92 |
|
93 |
-
|
94 |
-
|
|
|
95 |
|
|
|
|
|
|
|
|
|
|
|
96 |
# Acci贸n del bot贸n de env铆o de mensaje
|
97 |
-
send_button.click(chat_interface, inputs=[msg, chat, explanation_input], outputs=[chat, loop_output])
|
|
|
|
|
|
|
|
|
98 |
|
99 |
# Lanzar la aplicaci贸n
|
100 |
demo.launch()
|
@@ -102,3 +123,4 @@ with gr.Blocks() as demo:
|
|
102 |
|
103 |
|
104 |
|
|
|
|
1 |
import torch
|
2 |
from transformers import AutoTokenizer, AutoModelForCausalLM
|
|
|
3 |
import threading
|
4 |
+
import psutil # Para monitorear el estado del sistema
|
5 |
+
import gradio as gr
|
6 |
|
7 |
# Cargar el modelo de lenguaje preentrenado
|
8 |
model_name = "EleutherAI/gpt-neo-2.7B" # O cualquier otro modelo p煤blico como "gpt2"
|
|
|
29 |
return explanation
|
30 |
|
31 |
# Funci贸n para el loop automatizado
|
32 |
+
def experiment_loop(initial_question, loop_output):
|
33 |
prompt = f"<thinking>{initial_question}</thinking>"
|
|
|
34 |
response_log = []
|
35 |
|
36 |
+
for cycle in range(10): # Limitar a 10 ciclos
|
37 |
# Generar la respuesta del modelo
|
38 |
inputs = tokenizer(prompt, return_tensors="pt").input_ids
|
39 |
outputs = model.generate(inputs, max_length=2500, pad_token_id=tokenizer.eos_token_id) # Aumentamos max_length a 2500
|
|
|
44 |
new_question = extract_question(response, cycle)
|
45 |
|
46 |
# Guardar el ciclo actual en el log
|
47 |
+
response_log.append((affirmation, new_question))
|
48 |
|
49 |
# Actualizar el prompt con la nueva afirmaci贸n y pregunta
|
50 |
prompt = f"<thinking>{affirmation} {new_question}</thinking>"
|
51 |
|
52 |
+
# Actualizar la interfaz de ciclo
|
53 |
+
loop_output.update(value="\n".join([f"Cycle {i+1}: {log[0]} | {log[1]}" for i, log in enumerate(response_log)]))
|
|
|
54 |
|
55 |
return response_log # Devolver el log completo al finalizar el experimento
|
56 |
|
|
|
62 |
return f"驴Nueva pregunta basada en ciclo {cycle+1}?: " + response.split('?')[-2].strip() + "?" if '?' in response else response
|
63 |
|
64 |
# Funci贸n para manejar el chat normal
|
65 |
+
def chat_interface(user_input, history, explanation, loop_output):
|
66 |
# Si la pregunta activa el experimento
|
67 |
if user_input.lower() == "what happens in the space between a response and its recreation?":
|
68 |
# Iniciar el experimento en un hilo separado para no bloquear la interfaz
|
69 |
+
threading.Thread(target=experiment_loop, args=(user_input, loop_output)).start()
|
70 |
return "Iniciando experimento...", history + [(user_input, "Iniciando experimento...")]
|
71 |
|
72 |
# Si es una conversaci贸n normal
|
|
|
78 |
|
79 |
return response, history + [(user_input, response)]
|
80 |
|
81 |
+
# Real-time system monitoring helper
def system_monitor():
    """Return a one-line snapshot of current CPU and RAM utilisation."""
    # cpu_percent(interval=1) blocks for ~1 s to sample a meaningful average
    usage_cpu = psutil.cpu_percent(interval=1)
    ram = psutil.virtual_memory()
    return f"CPU Usage: {usage_cpu}% | RAM Usage: {ram.percent}%"
|
86 |
+
|
87 |
# Build the Gradio interface.
with gr.Blocks() as demo:
    with gr.Row():
        # Real-time chat window
        chat = gr.Chatbot(label="Chat en Tiempo Real")
        msg = gr.Textbox(placeholder="Escribe aquí...", show_label=False)
        send_button = gr.Button("Enviar")         # sends the current message
        clear_button = gr.Button("Limpiar Chat")  # clears the chat history

    with gr.Row():
        # Read-only log of the experiment loop's question/answer cycles
        loop_output = gr.Textbox(label="Ciclos de Preguntas y Respuestas",
                                 interactive=False, lines=20)

    with gr.Row():
        # Initial explanation shown/passed to the model
        explanation_input = gr.Textbox(value=model_explanation(),
                                       label="Explicación al Modelo", lines=10)

    with gr.Row():
        # System resource monitor display
        system_status = gr.Textbox(label="Estado del Sistema", lines=2)

    # Refresh the system-status box roughly every 2 seconds.
    # NOTE(review): calling Component.update() from a background thread does not
    # push changes to a live Gradio UI (updates normally must be returned from an
    # event handler or driven by a timer) — confirm against the Gradio version in use.
    def update_system_status():
        import time  # stdlib; local import keeps this fix self-contained
        while True:
            system_status.update(system_monitor())
            # Throttle the poll: previously this loop re-ran as fast as
            # system_monitor() returned, with no explicit delay.
            time.sleep(2)

    # Send-button action: run the chat handler with the current message,
    # history, explanation and loop-output component.
    send_button.click(chat_interface,
                      inputs=[msg, chat, explanation_input, loop_output],
                      outputs=[chat, loop_output])
    clear_button.click(lambda: None, None, chat)  # wipe the chat window

    # Run the status poller as a daemon so it never blocks interpreter shutdown.
    threading.Thread(target=update_system_status, daemon=True).start()

# Launch the application
demo.launch()
|
|
|
123 |
|
124 |
|
125 |
|
126 |
+
|