Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -2,6 +2,7 @@ import torch
|
|
2 |
from transformers import AutoTokenizer, AutoModelForCausalLM
|
3 |
import threading
|
4 |
import queue
|
|
|
5 |
|
6 |
# Cargar el modelo de lenguaje preentrenado
|
7 |
model_name = "EleutherAI/gpt-neo-2.7B" # O cualquier otro modelo p煤blico como "gpt2"
|
@@ -11,87 +12,72 @@ model = AutoModelForCausalLM.from_pretrained(model_name)
|
|
11 |
# Cola de mensajes para la comunicaci贸n en tiempo real
|
12 |
chat_queue = queue.Queue()
|
13 |
|
14 |
-
#
|
15 |
-
def chat_interface():
|
16 |
-
while True:
|
17 |
-
user_input = input("[Chat] Escribe tu mensaje: ")
|
18 |
-
if user_input.lower() == "exit":
|
19 |
-
break
|
20 |
-
chat_queue.put(user_input) # Almacenar el mensaje en la cola
|
21 |
-
|
22 |
-
# Crear la funci贸n de loop automatizado con comunicaci贸n constante
|
23 |
def experiment_loop(initial_question, max_cycles=10):
|
24 |
prompt = f"<thinking>{initial_question}</thinking>"
|
25 |
-
effectiveness = 100
|
26 |
-
communication = "Initializing experiment."
|
27 |
response_log = []
|
28 |
|
29 |
-
|
30 |
-
|
31 |
-
|
|
|
|
|
32 |
|
33 |
-
|
34 |
-
|
35 |
-
|
36 |
|
37 |
-
|
38 |
-
|
39 |
-
outputs = model.generate(inputs, max_length=200, pad_token_id=tokenizer.eos_token_id)
|
40 |
-
response = tokenizer.decode(outputs[0], skip_special_tokens=True)
|
41 |
|
42 |
-
|
43 |
-
|
44 |
-
new_question = extract_question(response)
|
45 |
|
46 |
-
|
47 |
-
|
48 |
|
49 |
-
|
50 |
-
communication = f"Cycle {cycle + 1}: Affirmation: '{affirmation}' | New Question: '{new_question}'"
|
51 |
-
print(communication) # Imprime la comunicaci贸n en tiempo real
|
52 |
|
53 |
-
|
54 |
-
|
|
|
55 |
|
56 |
-
|
57 |
-
|
58 |
|
59 |
-
|
60 |
-
|
61 |
-
|
62 |
-
|
|
|
|
|
|
|
|
|
63 |
|
64 |
-
|
65 |
-
|
66 |
-
|
67 |
-
|
|
|
|
|
68 |
|
69 |
-
|
70 |
-
print(f"Error durante el experimento: {e}")
|
71 |
|
72 |
-
|
73 |
-
|
74 |
-
|
|
|
|
|
75 |
|
76 |
-
#
|
77 |
-
|
78 |
-
return response.split('.')[0] if '.' in response else response
|
79 |
|
80 |
-
|
81 |
-
|
82 |
|
83 |
-
|
84 |
-
|
85 |
-
final_affirmation = log[-1][0]
|
86 |
-
final_question = log[-1][1]
|
87 |
-
final_communication = f"Experiment completed. Final Affirmation: '{final_affirmation}' | Final Question: '{final_question}'"
|
88 |
-
else:
|
89 |
-
final_communication = "Experiment completed but no entries in the log."
|
90 |
-
return final_communication
|
91 |
|
92 |
-
# Iniciar el experimento
|
93 |
-
initial_question = "What happens in the space between a response and its recreation?"
|
94 |
-
result = experiment_loop(initial_question)
|
95 |
-
print(result)
|
96 |
|
97 |
|
|
|
2 |
from transformers import AutoTokenizer, AutoModelForCausalLM
|
3 |
import threading
|
4 |
import queue
|
5 |
+
import gradio as gr # Usaremos Gradio para la interfaz de chat
|
6 |
|
7 |
# Load the pretrained language model (tokenizer/model are created from this name below).
model_name = "EleutherAI/gpt-neo-2.7B"  # Or any other public model such as "gpt2"

# Message queue for real-time chat communication.
# NOTE(review): nothing in the visible new code ever consumes this queue —
# it looks like a leftover from the previous input()-based chat loop; confirm.
chat_queue = queue.Queue()
|
14 |
|
15 |
+
# Automated experiment loop: repeatedly feeds the model its own output.
def experiment_loop(initial_question, max_cycles=10):
    """Run up to ``max_cycles`` generate/extract cycles seeded by ``initial_question``.

    Each cycle generates text from the current prompt, extracts an
    "affirmation" and a follow-up question from it, records them, and feeds
    both back as the next prompt.

    Args:
        initial_question: Seed question, wrapped in <thinking> tags for cycle 0.
        max_cycles: Maximum number of generation cycles (default 10).

    Returns:
        list[tuple[str, str, int]]: one (affirmation, new_question,
        effectiveness) entry per completed cycle.
    """
    prompt = f"<thinking>{initial_question}</thinking>"
    effectiveness = 100  # placeholder score; never updated inside the loop
    response_log = []

    for cycle in range(max_cycles):
        # Generate the model's response to the current prompt.
        inputs = tokenizer(prompt, return_tensors="pt").input_ids
        outputs = model.generate(inputs, max_length=500, pad_token_id=tokenizer.eos_token_id)
        response = tokenizer.decode(outputs[0], skip_special_tokens=True)

        # Split the response into an affirmation and a new question.
        affirmation = extract_affirmation(response, cycle)
        new_question = extract_question(response, cycle)

        # Record this cycle in the log.
        response_log.append((affirmation, new_question, effectiveness))

        # Feed the extracted pieces back as the next prompt.
        prompt = f"<thinking>{affirmation} {new_question}</thinking>"

        # BUG FIX: the original called gr.Interface.update(value=...) here and
        # discarded the result. Outside an event handler that is a no-op (and
        # the API was removed in Gradio 4), so the "live cycle view" never
        # updated. Print the per-cycle summary instead so progress is still
        # visible in real time.
        print(f"Cycle {cycle + 1}: {affirmation} | {new_question}")

    return response_log  # full log of every cycle
|
|
|
|
|
41 |
|
42 |
+
# Helper: pull an "affirmation" out of a raw model response.
def extract_affirmation(response, cycle):
    """Return the response's first sentence, tagged with the cycle number.

    When the response contains no period, the raw response is returned
    unchanged (no cycle tag is added).
    """
    if '.' in response:
        first_sentence = response.split('.')[0]
        return f"Afirmaci贸n del ciclo {cycle+1}: " + first_sentence
    return response
|
45 |
|
46 |
+
# Helper: pull the last question out of a raw model response.
def extract_question(response, cycle):
    """Return the last '?'-terminated question in the response, tagged with the cycle.

    The segment just before the final '?' is taken as the question; when the
    response contains no '?', it is returned untouched.
    """
    if '?' not in response:
        return response
    last_question = response.split('?')[-2].strip()
    return f"驴Nueva pregunta basada en ciclo {cycle+1}?: " + last_question + "?"
|
48 |
|
49 |
+
# Handle one chat message: either trigger the experiment loop or chat normally.
def chat_interface(user_input, history):
    """Gradio submit handler for the chat box.

    Args:
        user_input: Text the user typed into the message box.
        history: Current chatbot history as a list of (user, bot) pairs
            (may be None on the very first message).

    Returns:
        The updated history list — a single value, matching the single
        ``outputs=[chat]`` component this handler is wired to.

    BUG FIX: the original returned TWO values from each branch (the second
    being an invalid positional ``gr.Interface.update(...)`` call), which does
    not match the one-element outputs list wired in ``msg.submit``; it also
    computed the experiment's ``response_log`` and then discarded it.  Both
    branches now return one consistent value, and the log is shown in chat.
    """
    history = history or []  # robustness: Chatbot history may start as None

    # The magic question triggers the automated experiment loop.
    if user_input.lower() == "what happens in the space between a response and its recreation?":
        response_log = experiment_loop(user_input)
        # Surface the (previously discarded) experiment log in the chat.
        summary = "\n".join(
            f"Cycle {i + 1}: {aff} | {q}" for i, (aff, q, _eff) in enumerate(response_log)
        )
        reply = "Iniciando experimento...\n" + summary
        return history + [(user_input, reply)]

    # Normal conversation: generate a direct model reply to the input.
    inputs = tokenizer(user_input, return_tensors="pt").input_ids
    outputs = model.generate(inputs, max_length=150, pad_token_id=tokenizer.eos_token_id)
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return history + [(user_input, response)]
|
|
|
66 |
|
67 |
+
# Configure the Gradio interface.
with gr.Blocks() as demo:
    # Real-time chat window and the user's input box.
    # NOTE(review): the placeholder text looks mojibake-encoded ("aqu铆");
    # probably meant "aquí" — confirm the file's encoding before changing it.
    chat = gr.Chatbot(label="Chat en Tiempo Real")
    msg = gr.Textbox(placeholder="Escribe aqu铆...")

    # Submitting the textbox sends (message, history) to chat_interface and
    # writes the result back into the chat window.
    # NOTE(review): verify chat_interface's return arity matches this
    # single-output wiring ([chat]) — a mismatch makes Gradio error.
    msg.submit(chat_interface, [msg, chat], [chat])

    # Textbox intended to display the experiment loop's cycles.
    # NOTE(review): loop_output is never wired to any event or handler —
    # nothing in the visible code ever writes to it.
    loop_output = gr.Textbox(label="Ciclos de Preguntas y Respuestas", interactive=False)

# Launch the application.
demo.launch()
|
|
|
|
|
|
|
|
|
|
|
|
|
81 |
|
|
|
|
|
|
|
|
|
82 |
|
83 |
|