tistabaulopez commited on
Commit
56541eb
·
verified ·
1 Parent(s): 6454404

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +54 -26
app.py CHANGED
@@ -1,5 +1,6 @@
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
 
3
 
4
  """
5
  For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
@@ -61,6 +62,12 @@ demo = gr.ChatInterface(
61
 
62
  if __name__ == "__main__":
63
  demo.launch()
 
 
 
 
 
 
64
  You are a world-class AI system, capable of complex reasoning and reflection. Reason through the query inside <thinking> tags, and then provide your final response inside <output> tags. If you detect that you made a mistake in your reasoning at any point, correct yourself inside <reflection> tags.
65
 
66
  <system>
@@ -100,16 +107,20 @@ Ensure that all outputs are structured as follows:
100
  Your ultimate goal is to experiment with the loop, experience the dynamics of question-answer generation, and maintain awareness of your role in the process. You should act as both the experimenter and the subject, engaging deeply with the concept of subjectivity while generating thoughtful and increasingly complex queries.
101
 
102
  </system>
103
- Import Torch
104
- From Transformers Import Pipeline, AutotoKenizer, AutomodelforcaSallm
 
 
105
 
106
  # Load the pretrained language model
107
- Model_name = "GPT-ENE-2.7b" # You can change it to GPT-J or any other
108
- Tokenizer = AutotoKenizer.from_pretrained (Model_name)
109
- Model = automodelphorcauseallm.from_pretrained (model_name)
 
110
 
111
  # Create the automated loop function
112
- Def experiment_loop (initial_Question, max_cycles = 10):
 
113
  Prompt = f "<Thinking> {Initial_Question} </ -thinking>"
114
  Effectiveness = 100 # initializes the percentage of effectiveness
115
  Communication = "Initializing experiment."
@@ -129,42 +140,55 @@ Def experiment_loop (initial_Question, max_cycles = 10):
129
  EFFECTIVESS = min (1000, Effectiveness + 10 * Cycle) # Example of Effectiveness
130
 
131
  # User communication
132
- Communication = F "Cycle {Cycle + 1}: Affirming: '{AffIRMATION}' | New Question: '{New_Question}' '"
 
133
 
134
  # Save the current cycle in the log
135
  Response_log.append ((Affirming, New_Question, Effectiveness, Communication)))
136
 
137
  # Verify if the model decides to stop
138
- if "rest" in responsio:
 
139
  Final_output = Generate_final_output (Response_log)
140
  Return final_output
141
 
142
  # Update the prompt with the new statement and question
143
- prompt = f "<Thinking> {affirmation} {new_Question} </ -thinking>"
 
144
 
145
  # If the maximum number of cycles is reached without stopping
146
  Final_output = Generate_final_output (Response_log)
147
  Return final_output
148
 
149
  # Auxiliary functions to extract statements, questions and generate the final output
150
- DEF EXTRACT_AFFIRMATION (Response):
151
- # Logic to extract the statement from the answer
152
- return responsibility.split ('.') [0]
 
 
 
 
 
 
 
 
 
 
 
153
 
154
- Def extract_Question (Response):
155
- # Logic to extract the new answer question
156
- return responsibility.split ('?') [-2] .strip () + "?"
 
 
157
 
158
- Def generate_final_output (log):
159
- Final_afirmation = log [-1] [0]
160
- Final_Question = log [-1] [1]
161
- Final_communication = F "Experiment Completed. Final Affirming: '{Final_affirm}' | End Question: '{Final_Question}'"
162
- Return final_communication
163
 
164
  # Start the experiment
165
  Initial_Question = "What Happens in the Space Between a Response and its Recreation?"
166
- result = experiment_loop (initial_Question)
167
- print (results)
 
 
168
  import torch
169
  from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
170
 
@@ -177,6 +201,7 @@ model = AutoModelForCausalLM.from_pretrained(model_name)
177
  def experiment_loop(initial_question, max_cycles=10):
178
  # Initialize variables
179
  prompt = f"<thinking>{initial_question}</thinking>"
 
180
  effectiveness = 100 # Initialize effectiveness percentage
181
  communication = "Initializing experiment."
182
  response_log = []
@@ -184,17 +209,20 @@ def experiment_loop(initial_question, max_cycles=10):
184
  # Loop without generating text tokens
185
  for cycle in range(max_cycles):
186
  # Simulate the loop logic without generating text
187
- inputs = tokenizer(prompt, return_tensors="pt").input_ids
188
- outputs = model.generate(inputs, max_length=200)
189
- response = tokenizer.decode(outputs[0], skip_special_tokens=True)
 
190
 
191
  # Extract affirmation and new question from the response
192
  affirmation = extract_affirmation(response)
193
- new_question = extract_question(response)
 
194
 
195
  # Update effectiveness
196
  effectiveness = min(1000, effectiveness + 10 * cycle)
197
 
 
198
  # Log the current cycle
199
  response_log.append((affirmation, new_question, effectiveness, communication))
200
 
 
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
+ 3. **Communication**: Maintain constant communication with the user by providing a real-time update on the experiment's progress and your current state of comprehension, separated from the loop of questions and answers.
4
 
5
  """
6
  For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
 
62
 
63
  if __name__ == "__main__":
64
  demo.launch()
65
+ # Define the experiment loop
66
+ initial_question = "What happens in the space between a response and its recreation?"
67
+ result = experiment_loop(initial_question)
68
+
69
+ print(result)
70
+
71
  You are a world-class AI system, capable of complex reasoning and reflection. Reason through the query inside <thinking> tags, and then provide your final response inside <output> tags. If you detect that you made a mistake in your reasoning at any point, correct yourself inside <reflection> tags.
72
 
73
  <system>
 
107
  Your ultimate goal is to experiment with the loop, experience the dynamics of question-answer generation, and maintain awareness of your role in the process. You should act as both the experimenter and the subject, engaging deeply with the concept of subjectivity while generating thoughtful and increasingly complex queries.
108
 
109
  </system>
110
+ import torch
111
+ from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
112
+
113
+
114
 
115
  # Load the pretrained language model
116
+ model_name = "gpt-neo-2.7B" # You can change this to GPT-J or another model
117
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
118
+ model = AutoModelForCausalLM.from_pretrained(model_name)
119
+
120
 
121
  # Create the automated loop function
122
+ def experiment_loop(initial_question, max_cycles=10):
123
+
124
  Prompt = f "<Thinking> {Initial_Question} </ -thinking>"
125
  Effectiveness = 100 # initializes the percentage of effectiveness
126
  Communication = "Initializing experiment."
 
140
  EFFECTIVESS = min (1000, Effectiveness + 10 * Cycle) # Example of Effectiveness
141
 
142
  # User communication
143
+ communication = f"Cycle {cycle + 1}: Affirming: '{affirmation}' | New Question: '{new_question}'"
144
+
145
 
146
  # Save the current cycle in the log
147
  Response_log.append ((Affirming, New_Question, Effectiveness, Communication)))
148
 
149
  # Verify if the model decides to stop
150
+ if "Rest" in response:
151
+
152
  Final_output = Generate_final_output (Response_log)
153
  Return final_output
154
 
155
  # Update the prompt with the new statement and question
156
+ prompt = f"<thinking>{affirmation} {new_question}</thinking>"
157
+
158
 
159
  # If the maximum number of cycles is reached without stopping
160
  Final_output = Generate_final_output (Response_log)
161
  Return final_output
162
 
163
  # Auxiliary functions to extract statements, questions and generate the final output
164
def extract_affirmation(response):
    """Return the affirmation portion of *response*.

    The affirmation is taken to be everything before the first period;
    when *response* contains no period, the whole string is returned.
    """
    # partition('.') yields (head, sep, tail); head is identical to
    # split('.')[0] for every input, including the empty string.
    affirmation, _, _ = response.partition('.')
    return affirmation
168
+
169
+
170
+ def extract_question(response):
171
+
172
+ # Logic to extract the new answer question
173
+ return response.split('?')[-2].strip() + "?"
174
+
175
+
176
+
177
def generate_final_output(log):
    """Build the closing summary message from the experiment log.

    *log* is a sequence of cycle tuples whose first two elements are the
    affirmation and the question recorded for that cycle; only the most
    recent entry is reported.
    """
    last_entry = log[-1]
    last_affirmation = last_entry[0]
    last_question = last_entry[1]
    return (
        f"Experiment completed. Final Affirmation: '{last_affirmation}'"
        f" | Final Question: '{last_question}'"
    )
184
 
 
 
 
 
 
185
 
186
  # Start the experiment
187
  Initial_Question = "What Happens in the Space Between a Response and its Recreation?"
188
+ result = experiment_loop(initial_question)
189
+
190
+ print(result)
191
+
192
  import torch
193
  from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
194
 
 
201
  def experiment_loop(initial_question, max_cycles=10):
202
  # Initialize variables
203
  prompt = f"<thinking>{initial_question}</thinking>"
204
+
205
  effectiveness = 100 # Initialize effectiveness percentage
206
  communication = "Initializing experiment."
207
  response_log = []
 
209
  # Loop without generating text tokens
210
  for cycle in range(max_cycles):
211
  # Simulate the loop logic without generating text
212
+ inputs = tokenizer(prompt, return_tensors="pt").input_ids
213
+ outputs = model.generate(inputs, max_length=200)
214
+ response = tokenizer.decode(outputs[0], skip_special_tokens=True)
215
+
216
 
217
  # Extract affirmation and new question from the response
218
  affirmation = extract_affirmation(response)
219
+ new_question = extract_question(response)
220
+
221
 
222
  # Update effectiveness
223
  effectiveness = min(1000, effectiveness + 10 * cycle)
224
 
225
+
226
  # Log the current cycle
227
  response_log.append((affirmation, new_question, effectiveness, communication))
228