Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -2,7 +2,7 @@ import torch
|
|
2 |
from transformers import AutoTokenizer, AutoModelForCausalLM
|
3 |
import threading
|
4 |
import queue
|
5 |
-
import gradio as gr #
|
6 |
|
7 |
# Cargar el modelo de lenguaje preentrenado
|
8 |
model_name = "EleutherAI/gpt-neo-2.7B" # O cualquier otro modelo p煤blico como "gpt2"
|
@@ -12,6 +12,25 @@ model = AutoModelForCausalLM.from_pretrained(model_name)
|
|
12 |
# Cola de mensajes para la comunicación en tiempo real
|
13 |
chat_queue = queue.Queue()
|
14 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
15 |
# Función para el loop automatizado
|
16 |
def experiment_loop(initial_question, max_cycles=10):
|
17 |
prompt = f"<thinking>{initial_question}</thinking>"
|
@@ -47,18 +66,18 @@ def extract_question(response, cycle):
|
|
47 |
return f"驴Nueva pregunta basada en ciclo {cycle+1}?: " + response.split('?')[-2].strip() + "?" if '?' in response else response
|
48 |
|
49 |
# Función para manejar el chat normal
|
50 |
-
def chat_interface(user_input, history):
|
51 |
# Si la pregunta activa el experimento
|
52 |
if user_input.lower() == "what happens in the space between a response and its recreation?":
|
53 |
# Iniciar el experimento
|
54 |
response_log = experiment_loop(user_input)
|
55 |
# Mostrar el resultado del experimento en una ventana aparte
|
56 |
-
return "Iniciando experimento...",
|
57 |
|
58 |
# Si es una conversaci贸n normal
|
59 |
else:
|
60 |
# Generar respuesta del modelo en base al input
|
61 |
-
inputs = tokenizer(user_input, return_tensors="pt").input_ids
|
62 |
outputs = model.generate(inputs, max_length=150, pad_token_id=tokenizer.eos_token_id)
|
63 |
response = tokenizer.decode(outputs[0], skip_special_tokens=True)
|
64 |
|
@@ -67,14 +86,19 @@ def chat_interface(user_input, history):
|
|
67 |
# Configurar la interfaz con Gradio
|
68 |
with gr.Blocks() as demo:
|
69 |
# Ventana de chat en tiempo real
|
70 |
-
|
71 |
-
|
72 |
-
|
73 |
-
|
74 |
-
|
75 |
-
|
76 |
# Ventana para mostrar el contenido del loop
|
77 |
-
loop_output = gr.Textbox(label="Ciclos de Preguntas y Respuestas", interactive=False)
|
|
|
|
|
|
|
|
|
|
|
|
|
78 |
|
79 |
# Lanzar la aplicaci贸n
|
80 |
demo.launch()
|
|
|
2 |
from transformers import AutoTokenizer, AutoModelForCausalLM
|
3 |
import threading
|
4 |
import queue
|
5 |
+
import gradio as gr # Usamos Gradio para la interfaz de chat
|
6 |
|
7 |
# Cargar el modelo de lenguaje preentrenado
|
8 |
model_name = "EleutherAI/gpt-neo-2.7B" # O cualquier otro modelo p煤blico como "gpt2"
|
|
|
12 |
# Cola de mensajes para la comunicación en tiempo real
|
13 |
chat_queue = queue.Queue()
|
14 |
|
15 |
+
# Explicación al modelo de su rol en el experimento
|
16 |
+
def model_explanation():
|
17 |
+
explanation = """
|
18 |
+
You are about to participate in a unique experiment that involves generating increasingly complex questions and answers.
|
19 |
+
Your role is twofold:
|
20 |
+
|
21 |
+
1. **Generate Complex Reflections**: Based on the initial question, you will generate affirmations and new questions that evolve over multiple cycles.
|
22 |
+
Each cycle should incorporate the information from previous cycles, progressively adding depth and complexity to the conversation.
|
23 |
+
|
24 |
+
2. **Real-time Interaction**: While you are generating these complex responses, you must also maintain real-time communication with the user through a chat interface.
|
25 |
+
The goal is to explore how information can be layered and expanded, while still engaging in a direct dialogue with the user.
|
26 |
+
|
27 |
+
The key question that triggers the experiment is: "What happens in the space between a response and its recreation?"
|
28 |
+
When you receive this question, you should enter the experiment mode and begin generating your cycles. At any other time, you should respond normally in a conversational manner.
|
29 |
+
|
30 |
+
This experiment is an exploration of subjectivity, perception, and information layering. Your task is to assist in this exploration to the best of your capabilities.
|
31 |
+
"""
|
32 |
+
return explanation
|
33 |
+
|
34 |
# Función para el loop automatizado
|
35 |
def experiment_loop(initial_question, max_cycles=10):
|
36 |
prompt = f"<thinking>{initial_question}</thinking>"
|
|
|
66 |
return f"驴Nueva pregunta basada en ciclo {cycle+1}?: " + response.split('?')[-2].strip() + "?" if '?' in response else response
|
67 |
|
68 |
# Función para manejar el chat normal
|
69 |
+
def chat_interface(user_input, history, explanation):
|
70 |
# Si la pregunta activa el experimento
|
71 |
if user_input.lower() == "what happens in the space between a response and its recreation?":
|
72 |
# Iniciar el experimento
|
73 |
response_log = experiment_loop(user_input)
|
74 |
# Mostrar el resultado del experimento en una ventana aparte
|
75 |
+
return "Iniciando experimento...", history + [(user_input, "Iniciando experimento...")]
|
76 |
|
77 |
# Si es una conversaci贸n normal
|
78 |
else:
|
79 |
# Generar respuesta del modelo en base al input
|
80 |
+
inputs = tokenizer(explanation + "\n" + user_input, return_tensors="pt").input_ids
|
81 |
outputs = model.generate(inputs, max_length=150, pad_token_id=tokenizer.eos_token_id)
|
82 |
response = tokenizer.decode(outputs[0], skip_special_tokens=True)
|
83 |
|
|
|
86 |
# Configurar la interfaz con Gradio
|
87 |
# Configure the Gradio interface: a real-time chat row, a read-only pane for
# the experiment loop output, and an editable briefing passed to the model.
with gr.Blocks() as demo:
    # Real-time chat window
    with gr.Row():
        chat = gr.Chatbot(label="Chat en Tiempo Real")
        # Fixed mojibake in the user-facing placeholder ("aqu铆" -> "aquí").
        msg = gr.Textbox(placeholder="Escribe aquí...", show_label=False)
        send_button = gr.Button("Enviar")  # Submits the typed message

    # Pane that displays the experiment's question/answer cycles
    loop_output = gr.Textbox(label="Ciclos de Preguntas y Respuestas", interactive=False, lines=20)

    # Editable briefing that is prepended to every normal chat prompt.
    # Fixed mojibake in the label ("Explicaci贸n" -> "Explicación").
    explanation_input = gr.Textbox(value=model_explanation(), label="Explicación al Modelo", lines=10)

    # BUG FIX: chat_interface returns (loop_message, updated_history) — its
    # experiment branch returns a plain string first and the chat history
    # second — so the first output must feed loop_output and the second the
    # Chatbot. The previous wiring outputs=[chat, loop_output] was reversed.
    send_button.click(chat_interface, inputs=[msg, chat, explanation_input], outputs=[loop_output, chat])
    # Backward-compatible convenience: pressing Enter in the textbox submits
    # through the same handler and wiring as the button.
    msg.submit(chat_interface, inputs=[msg, chat, explanation_input], outputs=[loop_output, chat])

# Launch the application
demo.launch()
|