tistabaulopez committed on
Commit
6454404
·
verified ·
1 Parent(s): 14f764f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +63 -1
app.py CHANGED
@@ -164,4 +164,66 @@ Def generate_final_output (log):
164
  # Start the experiment
165
  Initial_Question = "What Happens in the Space Between a Response and its Recreation?"
166
  result = experiment_loop (initial_Question)
167
- print (results)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
164
  # Start the experiment
165
  Initial_Question = "What Happens in the Space Between a Response and its Recreation?"
166
  result = experiment_loop (initial_Question)
167
+ print (results)
168
+ import torch
169
+ from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
170
+
171
+ # Load the pre-trained language model
172
+ model_name = "gpt-neo-2.7B" # You can change this to GPT-J or another model
173
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
174
+ model = AutoModelForCausalLM.from_pretrained(model_name)
175
+
176
+ # Function to perform the experiment loop
177
+ def experiment_loop(initial_question, max_cycles=10):
178
+ # Initialize variables
179
+ prompt = f"<thinking>{initial_question}</thinking>"
180
+ effectiveness = 100 # Initialize effectiveness percentage
181
+ communication = "Initializing experiment."
182
+ response_log = []
183
+
184
+ # Loop without generating text tokens
185
+ for cycle in range(max_cycles):
186
+ # Simulate the loop logic without generating text
187
+ inputs = tokenizer(prompt, return_tensors="pt").input_ids
188
+ outputs = model.generate(inputs, max_length=200)
189
+ response = tokenizer.decode(outputs[0], skip_special_tokens=True)
190
+
191
+ # Extract affirmation and new question from the response
192
+ affirmation = extract_affirmation(response)
193
+ new_question = extract_question(response)
194
+
195
+ # Update effectiveness
196
+ effectiveness = min(1000, effectiveness + 10 * cycle)
197
+
198
+ # Log the current cycle
199
+ response_log.append((affirmation, new_question, effectiveness, communication))
200
+
201
+ # Check if the model decides to stop
202
+ if "Rest" in response:
203
+ final_output = generate_final_output(response_log)
204
+ return final_output
205
+
206
+ # Update the prompt for the next cycle
207
+ prompt = f"<thinking>{affirmation} {new_question}</thinking>"
208
+
209
+ # Generate final output after all cycles are complete
210
+ final_output = generate_final_output(response_log)
211
+ return final_output
212
+
213
+ # Helper functions to extract affirmation, question, and generate the final output
214
+ def extract_affirmation(response):
215
+ return response.split('.')[0]
216
+
217
+ def extract_question(response):
218
+ return response.split('?')[-2].strip() + "?"
219
+
220
+ def generate_final_output(log):
221
+ final_affirmation = log[-1][0]
222
+ final_question = log[-1][1]
223
+ final_communication = f"Experiment completed. Final Affirmation: '{final_affirmation}' | Final Question: '{final_question}'"
224
+ return final_communication
225
+
226
+ # Start the experiment
227
+ initial_question = "What happens in the space between a response and its recreation?"
228
+ result = experiment_loop(initial_question)
229
+ print(result)