import gradio as gr  # Gradio provides the chat interface
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the pretrained language model (any public causal LM works, e.g. "gpt2")
model_name = "EleutherAI/gpt-neo-2.7B"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Explanation given to the model about its role in the experiment
def model_explanation():
    explanation = """
    You are about to participate in a unique experiment that involves generating increasingly complex questions and answers. Your role is twofold:

    1. **Generate Complex Reflections**: Based on the initial question, you will generate affirmations and new questions that evolve over multiple cycles. Each cycle should incorporate the information from previous cycles, progressively adding depth and complexity to the conversation.

    2. **Real-time Interaction**: While you are generating these complex responses, you must also maintain real-time communication with the user through a chat interface. The goal is to explore how information can be layered and expanded, while still engaging in a direct dialogue with the user.

    The key question that triggers the experiment is: "What happens in the space between a response and its recreation?" When you receive this question, you should enter the experiment mode and begin generating your cycles. At any other time, you should respond normally in a conversational manner.

    This experiment is an exploration of subjectivity, perception, and information layering. Your task is to assist in this exploration to the best of your capabilities.
    """
    return explanation

# Automated experiment loop: each cycle's affirmation and question become the next prompt
def experiment_loop(initial_question, max_cycles=10):
    prompt = initial_question
    effectiveness = 100  # placeholder metric, logged alongside each cycle
    response_log = []

    for cycle in range(max_cycles):
        # Generate the model's response for the current prompt
        inputs = tokenizer(prompt, return_tensors="pt").input_ids
        # max_new_tokens bounds the generated text regardless of how long the prompt grows
        outputs = model.generate(inputs, max_new_tokens=200, pad_token_id=tokenizer.eos_token_id)
        response = tokenizer.decode(outputs[0], skip_special_tokens=True)

        # Split the response into an affirmation and a new question
        affirmation = extract_affirmation(response, cycle)
        new_question = extract_question(response, cycle)

        # Record the current cycle in the log
        response_log.append((affirmation, new_question, effectiveness))

        # Feed the affirmation and question back in as the next prompt
        prompt = f"{affirmation} {new_question}"

    # Return the full log; the caller renders it in the UI.
    # (gr.Interface.update cannot push values into a running Blocks app,
    # so the per-cycle update call was removed.)
    return response_log

# Helper: the affirmation is the first sentence of the raw response
def extract_affirmation(response, cycle):
    if '.' not in response:
        return response
    return f"Affirmation from cycle {cycle+1}: " + response.split('.')[0]

# Helper: the question is the segment immediately before the last '?'
def extract_question(response, cycle):
    if '?' not in response:
        return response
    return f"New question based on cycle {cycle+1}: " + response.split('?')[-2].strip() + "?"
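# --- Optional streaming variant (a sketch, not part of the original flow) ---
# experiment_loop above only returns its log once every cycle has finished.
# Assuming a Gradio version with queuing enabled that supports generator
# event handlers, a handler can instead `yield` after each cycle so the log
# panel updates while the experiment runs. The name `experiment_loop_streaming`
# is illustrative, not part of the original code.
def experiment_loop_streaming(initial_question, max_cycles=10):
    prompt = initial_question
    log_lines = []
    for cycle in range(max_cycles):
        inputs = tokenizer(prompt, return_tensors="pt").input_ids
        outputs = model.generate(inputs, max_new_tokens=200, pad_token_id=tokenizer.eos_token_id)
        response = tokenizer.decode(outputs[0], skip_special_tokens=True)
        affirmation = extract_affirmation(response, cycle)
        new_question = extract_question(response, cycle)
        prompt = f"{affirmation} {new_question}"
        log_lines.append(f"Cycle {cycle+1}: {affirmation} | {new_question}")
        yield "\n".join(log_lines)  # partial log pushed to the UI on each yield

# A generator handler would be wired like the click handler below, e.g.:
#   send_button.click(experiment_loop_streaming, inputs=[msg], outputs=[loop_output])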
# Handle a normal chat turn, or enter experiment mode on the trigger question
def chat_interface(user_input, history, explanation):
    # The trigger question starts the experiment
    if user_input.lower() == "what happens in the space between a response and its recreation?":
        response_log = experiment_loop(user_input)
        # Render the experiment log in the side panel
        loop_text = "\n".join(
            f"Cycle {i+1}: {affirmation} | {question}"
            for i, (affirmation, question, _) in enumerate(response_log)
        )
        return history + [(user_input, "Starting experiment...")], loop_text

    # Otherwise, generate a normal conversational reply
    inputs = tokenizer(explanation + "\n" + user_input, return_tensors="pt").input_ids
    outputs = model.generate(inputs, max_new_tokens=150, pad_token_id=tokenizer.eos_token_id)
    # Decode only the newly generated tokens, not the echoed prompt
    response = tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True)
    # gr.update() leaves the loop panel unchanged on normal turns
    return history + [(user_input, response)], gr.update()

# Build the interface with Gradio
with gr.Blocks() as demo:
    # Real-time chat panel
    with gr.Row():
        chat = gr.Chatbot(label="Real-Time Chat")
        msg = gr.Textbox(placeholder="Type here...", show_label=False)
        send_button = gr.Button("Send")  # Button to submit messages

    # Panel that displays the experiment's question/answer cycles
    loop_output = gr.Textbox(label="Question and Answer Cycles", interactive=False, lines=20)

    # Field holding the initial explanation given to the model
    explanation_input = gr.Textbox(value=model_explanation(), label="Explanation for the Model", lines=10)

    # Wire the send button; outputs must match the handler's return order
    # (updated chat history first, then the loop log)
    send_button.click(chat_interface, inputs=[msg, chat, explanation_input], outputs=[chat, loop_output])

# Launch the app
demo.launch()
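# Usage (filename illustrative): run `python app.py` and open the printed
# local URL. Typing the trigger question verbatim,
#   "What happens in the space between a response and its recreation?",
# starts the experiment loop; any other message gets a normal reply.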