tistabaulopez commited on
Commit
98a7dfa
verified
1 Parent(s): ba39c92

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +75 -0
app.py CHANGED
@@ -67,3 +67,78 @@ def generate_final_output(log):
67
  initial_question = "What happens in the space between a response and its recreation?"
68
  result = experiment_loop(initial_question)
69
  print(result)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
67
  initial_question = "What happens in the space between a response and its recreation?"
68
  result = experiment_loop(initial_question)
69
  print(result)
70
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the pretrained language model.
# NOTE(review): gpt-neo-2.7B is large; "gpt2" (or any other public causal LM)
# works as a drop-in replacement for lighter-weight runs.
model_name = "EleutherAI/gpt-neo-2.7B"  # or any other public model such as "gpt2"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
78
# Automated loop with constant communication back to the operator.
def experiment_loop(initial_question, max_cycles=10):
    """Run an iterative generate/reflect experiment seeded with *initial_question*.

    Each cycle wraps the current prompt in <thinking> tags, generates a model
    response, extracts an affirmation and a follow-up question from it, logs
    the cycle, and re-seeds the prompt. The model can stop the loop early by
    emitting the word "Descanso".

    Parameters:
        initial_question (str): seed question for the first prompt.
        max_cycles (int): maximum number of generate/reflect cycles.

    Returns:
        str: the summary built by generate_final_output, or the last
        communication string if no cycle completed successfully.
    """
    prompt = f"<thinking>{initial_question}</thinking>"
    effectiveness = 100  # Initial effectiveness percentage
    communication = "Initializing experiment."
    response_log = []

    try:
        for cycle in range(max_cycles):
            # Continuous communication with the operator during the loop
            print(f"Cycle {cycle + 1}: Processing...")

            # Pause so the operator can inspect the current state
            input_check = input("Would you like to communicate or check the current state? (yes/no): ")
            if input_check.lower() == "yes":
                print(f"Current state: Effectiveness = {effectiveness}, Communication = {communication}")

            # Generate the model's response
            inputs = tokenizer(prompt, return_tensors="pt").input_ids
            outputs = model.generate(inputs, max_length=200, pad_token_id=tokenizer.eos_token_id)
            response = tokenizer.decode(outputs[0], skip_special_tokens=True)

            # Decompose the response into an affirmation and a new question
            affirmation = extract_affirmation(response)
            new_question = extract_question(response)

            # Example effectiveness ramp, capped at 1000
            effectiveness = min(1000, effectiveness + 10 * cycle)

            # Status line reported to the operator and stored in the log
            communication = f"Cycle {cycle + 1}: Affirmation: '{affirmation}' | New Question: '{new_question}'"

            # Record the current cycle
            response_log.append((affirmation, new_question, effectiveness, communication))

            # Check whether the model decided to stop
            if "Descanso" in response:
                return generate_final_output(response_log)

            # Re-seed the prompt with the new affirmation and question
            prompt = f"<thinking>{affirmation} {new_question}</thinking>"

    except Exception as e:
        print(f"Error durante el experimento: {e}")

    # FIX: if an error aborted the very first cycle, response_log is empty
    # and generate_final_output's log[-1] access would raise IndexError.
    if not response_log:
        return communication

    # Max cycles reached without the model stopping itself
    return generate_final_output(response_log)
128
# Helper functions to extract affirmations, questions, and build final output
def extract_affirmation(response):
    """Return the text preceding the first period of *response* (the whole
    string when it contains no period)."""
    first_sentence, _sep, _rest = response.partition('.')
    return first_sentence
132
def extract_question(response):
    """Return the last complete question in *response*, or "" if there is none.

    Splits on '?' and returns the segment before the final delimiter, with a
    trailing '?' restored. Previously this raised IndexError when the
    response contained no '?' at all; it now returns an empty string so the
    caller's re-seeded prompt stays valid.
    """
    parts = response.split('?')
    if len(parts) < 2:  # no '?' present -> no question to extract
        return ""
    return parts[-2].strip() + "?"
135
def generate_final_output(log):
    """Build the experiment's closing summary from *log*.

    Parameters:
        log: list of (affirmation, question, effectiveness, communication)
             tuples, one per completed cycle.

    Returns:
        str: summary naming the final affirmation and question. Previously
        an empty log raised IndexError on log[-1]; it now returns a
        no-cycles message instead.
    """
    if not log:
        return "Experiment completed. No cycles were logged."
    final_affirmation = log[-1][0]
    final_question = log[-1][1]
    final_communication = f"Experiment completed. Final Affirmation: '{final_affirmation}' | Final Question: '{final_question}'"
    return final_communication
141
# Start the experiment only when run as a script, not on import.
if __name__ == "__main__":
    initial_question = "What happens in the space between a response and its recreation?"
    result = experiment_loop(initial_question)
    print(result)