Aluode committed
Commit 39b7cf5 · verified · 1 Parent(s): 8eb4748

Upload appV2.py

Files changed (1)
  1. appV2.py +730 -0
appV2.py ADDED
@@ -0,0 +1,730 @@
import numpy as np
import random
import requests
from bs4 import BeautifulSoup
import re
import json
import gradio as gr
import networkx as nx
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import io
import time
from PIL import Image  # Added for image handling
import asyncio
import aiohttp
from tqdm import tqdm  # For progress visualization

# Helper functions for serialization
def convert_ndarray_to_list(obj):
    """
    Recursively convert all ndarray objects in a nested structure to lists.
    """
    if isinstance(obj, dict):
        return {k: convert_ndarray_to_list(v) for k, v in obj.items()}
    elif isinstance(obj, list):
        return [convert_ndarray_to_list(item) for item in obj]
    elif isinstance(obj, np.ndarray):
        return obj.tolist()
    else:
        return obj

def convert_list_to_ndarray(obj):
    """
    Recursively convert all lists in a nested structure back to ndarrays where appropriate.
    """
    if isinstance(obj, dict):
        return {k: convert_list_to_ndarray(v) for k, v in obj.items()}
    elif isinstance(obj, list):
        # Convert lists of numbers back to ndarrays; recurse into anything else
        if obj and all(isinstance(item, (int, float)) for item in obj):
            return np.array(obj)
        return [convert_list_to_ndarray(item) for item in obj]
    else:
        return obj

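# Round-trip sketch (illustrative only; the values below are hypothetical):
# these helpers make nested state JSON-serializable and restore arrays on load.
#   state = {"pos": np.array([1.0, 2.0, 3.0]), "t": 3}
#   text = json.dumps(convert_ndarray_to_list(state))
#   restored = convert_list_to_ndarray(json.loads(text))  # "pos" is an ndarray again
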
class FractalNeuron:
    def __init__(self, word, position):
        """
        Initialize a neuron with a given word and position in the space.
        """
        self.word = word
        self.position = position
        self.connections = {}  # Connections to other neurons {word: neuron}
        self.activation = np.random.uniform(-0.1, 0.1)  # Random initial activation
        self.bias = np.random.uniform(-0.1, 0.1)  # Random bias
        self.gradient = 0.0
        self.weights = {}  # Weights of connections {word: weight}
        self.time_step = 0.01  # Small step size for Euler's method
        self.gradients = {}  # Gradients for each connection

    def activate(self, input_signal):
        """
        Update the neuron's activation based on the input signal.
        """
        # Ensure input_signal is a scalar
        if isinstance(input_signal, np.ndarray):
            input_signal = np.mean(input_signal)

        # Update activation using activation function with bias
        self.activation = np.tanh(input_signal + self.bias)

        # Ensure activation remains a scalar float
        if isinstance(self.activation, np.ndarray):
            self.activation = float(np.mean(self.activation))

        # Debugging
        print(f"Neuron '{self.word}' activation after update: {self.activation}")

    def connect(self, other_neuron, weight):
        """
        Establish a connection to another neuron with a specified weight.
        """
        self.connections[other_neuron.word] = other_neuron
        self.weights[other_neuron.word] = weight

class AdamOptimizer:
    def __init__(self, learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8, weight_decay=0.0001):
        self.lr = learning_rate
        self.beta1 = beta1
        self.beta2 = beta2
        self.epsilon = epsilon
        self.weight_decay = weight_decay
        self.m = {}
        self.v = {}
        self.t = 0

    def update(self, network):
        """
        Update the network's weights using Adam optimization.
        """
        self.t += 1
        for word, neuron in network.neurons.items():
            for connected_word, weight in neuron.weights.items():
                grad = neuron.gradients.get(connected_word, 0.0) + self.weight_decay * weight
                self.m.setdefault(word, {}).setdefault(connected_word, 0.0)
                self.v.setdefault(word, {}).setdefault(connected_word, 0.0)
                # Update biased first moment estimate
                self.m[word][connected_word] = self.beta1 * self.m[word][connected_word] + (1 - self.beta1) * grad
                # Update biased second raw moment estimate
                self.v[word][connected_word] = self.beta2 * self.v[word][connected_word] + (1 - self.beta2) * (grad ** 2)
                # Compute bias-corrected first moment estimate
                m_hat = self.m[word][connected_word] / (1 - self.beta1 ** self.t)
                # Compute bias-corrected second raw moment estimate
                v_hat = self.v[word][connected_word] / (1 - self.beta2 ** self.t)
                # Update weights
                update = self.lr * m_hat / (np.sqrt(v_hat) + self.epsilon)
                neuron.weights[connected_word] += update

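# For reference, the update above is the standard Adam rule (Kingma & Ba, 2015)
# with an L2 weight-decay term folded into the gradient:
#   g_t = grad + weight_decay * w
#   m_t = beta1 * m_{t-1} + (1 - beta1) * g_t
#   v_t = beta2 * v_{t-1} + (1 - beta2) * g_t^2
#   w  += lr * (m_t / (1 - beta1^t)) / (sqrt(v_t / (1 - beta2^t)) + epsilon)
# The "+=" matches this code's sign convention: gradients are stored as
# error-direction terms (error = target - activation), so adding the step
# reduces the squared error.
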
class FractalNeuralNetwork:
    def __init__(self, space_size=10, seed=None):
        """
        Initialize the Fractal Neural Network.
        """
        self.neurons = {}
        self.space_size = space_size
        self.learning_rate = 0.001
        self.beta1 = 0.9
        self.beta2 = 0.999
        self.epsilon = 1e-8
        self.m = {}  # First moment vector (mean) for Adam optimizer
        self.v = {}  # Second moment vector (variance) for Adam optimizer
        self.t = 0  # Timestep for Adam optimizer
        self.rng = np.random.default_rng(seed)
        self.optimizer = AdamOptimizer(learning_rate=self.learning_rate, beta1=self.beta1,
                                       beta2=self.beta2, epsilon=self.epsilon, weight_decay=0.0001)

    def tokenize_text(self, text):
        # Convert to lowercase and split on whitespace
        tokens = text.lower().split()
        # Optional: Remove any remaining punctuation
        tokens = [token.strip('.,!?:;()[]{}') for token in tokens]
        # Remove any empty tokens
        tokens = [token for token in tokens if token]
        return tokens

    def add_word(self, word):
        """
        Add a word as a neuron to the network if it doesn't already exist.
        """
        if word not in self.neurons:
            position = self.rng.random(3) * self.space_size
            self.neurons[word] = FractalNeuron(word, position)
            return f"Added word: '{word}'."
        else:
            return f"Word '{word}' already exists in the network."

    def connect_words(self, word1, word2):
        """
        Connect two words in the network with a randomly initialized weight.
        """
        if word1 not in self.neurons:
            return f"Word '{word1}' does not exist in the network."
        if word2 not in self.neurons:
            return f"Word '{word2}' does not exist in the network."
        weight = self.rng.normal()
        self.neurons[word1].connect(self.neurons[word2], weight)
        # Initialize optimizer moments for the new connection
        self.optimizer.m.setdefault(word1, {}).setdefault(word2, 0.0)
        self.optimizer.v.setdefault(word1, {}).setdefault(word2, 0.0)
        return f"Connected '{word1}' to '{word2}' with weight {weight:.4f}."

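    # Minimal usage sketch (illustrative, not part of the app's flow):
    #   net = FractalNeuralNetwork(space_size=10, seed=0)
    #   net.add_word("hello"); net.add_word("world")
    #   net.connect_words("hello", "world")  # weight drawn from N(0, 1)
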
    async def fetch_wikipedia_content_async(self, session, topic):
        url = f"https://en.wikipedia.org/wiki/{topic.replace(' ', '_')}"
        try:
            async with session.get(url) as response:
                if response.status == 200:
                    html = await response.text()
                    soup = BeautifulSoup(html, 'html.parser')
                    paragraphs = soup.find_all('p')
                    content = ' '.join([p.text for p in paragraphs])
                    return topic, content
                else:
                    print(f"Failed to fetch {topic}: Status {response.status}")
                    return topic, None
        except Exception as e:
            print(f"Exception fetching {topic}: {e}")
            return topic, None

    async def learn_from_wikipedia_async(self, topics, concurrency=5):
        """
        Asynchronously learn from Wikipedia articles with controlled concurrency.
        """
        semaphore = asyncio.Semaphore(concurrency)

        async def fetch_limited(session, topic):
            # Cap the number of simultaneous requests at `concurrency`
            async with semaphore:
                return await self.fetch_wikipedia_content_async(session, topic)

        async with aiohttp.ClientSession() as session:
            tasks = [asyncio.ensure_future(fetch_limited(session, topic)) for topic in topics]
            responses = await asyncio.gather(*tasks)

        results = []
        for topic, content in responses:
            if content:
                tokens = self.tokenize_text(content)
                for token in tokens:
                    self.add_word(token)
                for i in range(len(tokens) - 1):
                    self.connect_words(tokens[i], tokens[i + 1])
                results.append(f"Learned from Wikipedia article: {topic}")
            else:
                results.append(f"Failed to fetch content for: {topic}")
        return "\n".join(results)

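    # Standalone usage sketch (illustrative; the Gradio handler awaits this
    # coroutine directly):
    #   net = FractalNeuralNetwork(seed=0)
    #   print(asyncio.run(net.learn_from_wikipedia_async(["Physics", "Biology"])))
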
    def fetch_training_data(self, num_sequences=100, seq_length=5):
        training_data = []
        for _ in range(num_sequences):
            if not self.neurons:
                break
            start_word = self.rng.choice(list(self.neurons.keys()))
            url = f"https://api.datamuse.com/words?rel_trg={start_word}&max={seq_length*2}"
            try:
                response = requests.get(url)
                response.raise_for_status()
                related_words = response.json()
                if not related_words:
                    continue
                input_sequence = [start_word] + [self.tokenize_text(word['word'])[0] for word in related_words[:seq_length-1]]
                # Datamuse may omit 'score'; treat a missing score as 0
                target_sequence = [min(float(word.get('score', 0)) / 100000, 1.0) for word in related_words[:seq_length]]
                if len(input_sequence) == seq_length and len(target_sequence) == seq_length:
                    training_data.append((input_sequence, target_sequence))
            except requests.RequestException as e:
                print(f"Error fetching data for {start_word}: {e}")
        return training_data

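    # The Datamuse rel_trg endpoint returns JSON of the form
    #   [{"word": "network", "score": 12345}, ...]
    # so each training pair maps a sequence of related words to relatedness
    # scores normalized into [0, 1].
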
    def backpropagate(self, input_sequence, target_sequence, optimizer, dropout_rate=0.2):
        """
        Perform backpropagation to update weights based on the error.
        """
        activations = self.forward_pass(input_sequence, dropout_rate)
        if not activations or not target_sequence:
            return 0.0  # Skip backpropagation for empty sequences

        # Ensure activations and target_sequence have the same shape
        min_length = min(len(activations), len(target_sequence))
        activations = activations[:min_length]
        target_sequence = target_sequence[:min_length]

        # Debugging: Print activations and target_sequence
        print(f"Activations: {activations}")
        print(f"Target Sequence: {target_sequence}")

        try:
            # Ensure both are flat lists of floats
            activations = [float(a) for a in activations]
            target_sequence = [float(t) for t in target_sequence]
            error = np.array(target_sequence, dtype=float) - np.array(activations, dtype=float)
        except (ValueError, TypeError) as e:
            print(f"Error computing error: {e}")
            print(f"Activations: {activations}")
            print(f"Target Sequence: {target_sequence}")
            return 0.0  # Skip this backpropagation step due to data inconsistency

        total_loss = 0.0

        for i, word in enumerate(input_sequence[:min_length]):
            if word in self.neurons:
                neuron = self.neurons[word]
                neuron.gradient = error[i] * (1 - neuron.activation ** 2)
                for connected_word in neuron.connections:
                    connected_neuron = self.neurons[connected_word]
                    gradient = neuron.gradient * connected_neuron.activation
                    neuron.gradients[connected_word] = gradient
        # Update weights using the optimizer (once per sequence, after all
        # gradients have been accumulated)
        optimizer.update(self)
        # Calculate loss
        loss = np.mean(error ** 2)
        return loss

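    # Gradient note: with loss L = mean((target - a)^2) and a = tanh(z), the
    # per-neuron term error * (1 - a^2) is the chain-rule factor dL/dz up to
    # sign; storing it with the error's sign is what makes the "+=" step in
    # AdamOptimizer.update a descent direction.
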
    def forward_pass(self, input_sequence, dropout_rate=0.2):
        """
        Perform a forward pass through the network with the given input sequence.
        """
        activations = []
        for word in input_sequence:
            if word in self.neurons:
                neuron = self.neurons[word]
                # Calculate input_signal as sum of activations * weights
                input_signal = 0.0
                for connected_word in neuron.connections:
                    connected_neuron = self.neurons[connected_word]
                    act = connected_neuron.activation
                    input_signal += act * neuron.weights.get(connected_word, 0)
                neuron.activate(input_signal)
                # Apply dropout (during training)
                if random.random() < dropout_rate:
                    neuron.activation = 0.0
                activations.append(neuron.activation)
            else:
                activations.append(0.0)
        return activations

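    # Note: this is plain (non-inverted) dropout; surviving activations are
    # not rescaled by 1/(1 - dropout_rate), so expected activation magnitudes
    # differ slightly between training and generation (where the rate is 0).
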
    def attention(self, query, keys, values):
        """
        Compute attention weights and context vector.
        """
        attention_weights = np.dot(query, np.array(keys).T)
        # Numerically stable softmax (subtracting the max avoids overflow)
        attention_weights = attention_weights - np.max(attention_weights)
        attention_weights = np.exp(attention_weights) / np.sum(np.exp(attention_weights))
        context = np.dot(attention_weights, values)
        return context, attention_weights

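    # Shape sketch: query is a scalar summary of recent activations, keys are
    # the per-neuron activations (N,), and values the 3-D neuron positions
    # (N, 3):
    #   alpha = softmax(q * k),  context = alpha . V
    # so the returned context is a point in the network's spatial embedding.
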
325
+ def generate_response(self, input_sequence, max_length=20, temperature=0.5):
326
+ """
327
+ Generate a response based on the input sequence.
328
+ """
329
+ response = []
330
+ context = self.forward_pass(input_sequence)
331
+ dropout_rate = 0.0 # No dropout during generation
332
+
333
+ for _ in range(max_length):
334
+ query = np.mean(context) if context else 0.0
335
+ keys = [n.activation for n in self.neurons.values()]
336
+ values = [n.position for n in self.neurons.values()]
337
+
338
+ if not keys or not values:
339
+ break # Prevent errors if there are no neurons
340
+
341
+ attended_context, _ = self.attention(query, keys, values)
342
+
343
+ # Calculate distances and convert to probabilities
344
+ distances = [np.linalg.norm(n.position - attended_context) for n in self.neurons.values()]
345
+ probabilities = np.exp(-np.array(distances) / temperature)
346
+ probabilities /= np.sum(probabilities)
347
+
348
+ # Sample word based on probabilities, avoiding repetition
349
+ try:
350
+ next_word = self.rng.choice(list(self.neurons.keys()), p=probabilities)
351
+ except ValueError as e:
352
+ print(f"Error in sampling next_word: {e}")
353
+ return "Unable to generate a response at this time."
354
+
355
+ if response and next_word == response[-1]:
356
+ continue # Avoid immediate repetition
357
+
358
+ response.append(next_word)
359
+ context = self.forward_pass(response[-3:], dropout_rate=dropout_rate) # Update context with recent words
360
+
361
+ if next_word in ['.', '!', '?']:
362
+ break
363
+
364
+ return ' '.join(response)
365
+
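    # Sampling note: next-word probabilities follow a Boltzmann-style
    # distribution over distance to the attended context,
    #   p(w) ∝ exp(-||pos_w - context|| / T),
    # so a lower temperature T concentrates mass on the nearest neurons.
    # (tokenize_text strips punctuation, so the '.'/'!'/'?' stop condition
    # rarely fires; generation usually runs to max_length.)
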
    def train_with_api_data(self, num_sequences=100, seq_length=5, epochs=10, batch_size=32, learning_rate=0.001, dropout_rate=0.2, weight_decay=0.0001):
        """
        Train the network using data fetched from an API with adjustable parameters.
        """
        self.learning_rate = learning_rate  # Update learning rate
        self.optimizer.lr = learning_rate
        self.optimizer.weight_decay = weight_decay
        training_data = self.fetch_training_data(num_sequences, seq_length)
        if not training_data:
            return "No training data could be fetched. Please ensure the network has words and the API is accessible."
        for epoch in range(epochs):
            total_loss = 0
            valid_sequences = 0
            for i in range(0, len(training_data), batch_size):
                batch = training_data[i:i+batch_size]
                for input_sequence, target_sequence in batch:
                    if len(input_sequence) != len(target_sequence):
                        print(f"Skipping sequence due to length mismatch: {len(input_sequence)} != {len(target_sequence)}")
                        continue
                    loss = self.backpropagate(input_sequence, target_sequence, self.optimizer, dropout_rate)
                    total_loss += loss
                    valid_sequences += 1
            average_loss = total_loss / valid_sequences if valid_sequences else 0
            print(f"Epoch {epoch+1}/{epochs}, Average Loss: {average_loss:.6f}, Valid Sequences: {valid_sequences}")
        return f"Training completed with {valid_sequences} valid sequences for {epochs} epochs"

    async def initialize_with_wikipedia_topics(self, topics):
        """
        Initialize the network with a predefined list of Wikipedia topics.
        """
        results = await self.learn_from_wikipedia_async(topics, concurrency=5)
        return results

    def fetch_wikipedia_content(self, topic):
        """
        Fetch content from a Wikipedia article based on the topic.
        """
        url = f"https://en.wikipedia.org/wiki/{topic.replace(' ', '_')}"
        try:
            response = requests.get(url)
            response.raise_for_status()
            soup = BeautifulSoup(response.content, 'html.parser')
            paragraphs = soup.find_all('p')
            content = ' '.join([p.text for p in paragraphs])
            return content
        except requests.RequestException as e:
            print(f"Error fetching {topic}: {e}")
            return None

    def learn_from_wikipedia(self, topic):
        """
        Learn from a Wikipedia article by tokenizing and adding tokens to the network.
        """
        content = self.fetch_wikipedia_content(topic)
        if content:
            tokens = self.tokenize_text(content)
            for token in tokens:
                self.add_word(token)
            for i in range(len(tokens) - 1):
                self.connect_words(tokens[i], tokens[i + 1])
            return f"Learned from Wikipedia article: {topic}"
        else:
            return f"Failed to fetch content for: {topic}"

    def save_state(self, filename):
        """
        Save the current state of the network to a JSON file.
        """
        state = {
            'neurons': {
                word: {
                    'position': neuron.position.tolist(),
                    'connections': {w: weight for w, weight in neuron.weights.items()}
                }
                for word, neuron in self.neurons.items()
            },
            'space_size': self.space_size,
            'learning_rate': self.learning_rate,
            'optimizer': {
                'm': convert_ndarray_to_list(self.optimizer.m),
                'v': convert_ndarray_to_list(self.optimizer.v),
                't': self.optimizer.t
            },
            'rng_state': convert_ndarray_to_list(self.rng.bit_generator.state)  # Convert ndarrays to lists
        }
        try:
            with open(filename, 'w') as f:
                json.dump(state, f, indent=4)
            return f"State saved to {filename}"
        except Exception as e:
            return f"Failed to save state to {filename}: {e}"

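    # Illustrative excerpt of the saved JSON layout:
    #   {"neurons": {"hello": {"position": [x, y, z],
    #                          "connections": {"world": 0.42}}},
    #    "space_size": 10, "learning_rate": 0.001,
    #    "optimizer": {"m": {...}, "v": {...}, "t": 0},
    #    "rng_state": {...}}
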
    @staticmethod
    def load_state(filename):
        """
        Load the network state from a JSON file.
        """
        try:
            with open(filename, 'r') as f:
                state = json.load(f)
            network = FractalNeuralNetwork(state['space_size'])
            network.learning_rate = state['learning_rate']
            # Restore optimizer state
            network.optimizer.m = convert_list_to_ndarray(state['optimizer']['m'])
            network.optimizer.v = convert_list_to_ndarray(state['optimizer']['v'])
            network.optimizer.t = state['optimizer']['t']
            # First pass: create every neuron so connections can be wired even
            # when the target word appears later in the file
            for word, data in state['neurons'].items():
                network.add_word(word)
                network.neurons[word].position = np.array(data['position'])
            # Second pass: restore connections and their trained weights
            for word, data in state['neurons'].items():
                for connected_word, weight in data['connections'].items():
                    network.connect_words(word, connected_word)
                    network.neurons[word].weights[connected_word] = weight
            # Restore the RNG state last, so that rebuilding the neurons above
            # (which consumes random numbers) does not disturb it
            restored_rng_state = convert_list_to_ndarray(state['rng_state'])
            network.rng.bit_generator.state = restored_rng_state
            return network
        except Exception as e:
            print(f"Failed to load state from {filename}: {e}")
            return None

    def visualize(self):
        """
        Visualize the network structure using a 3D plot.
        Returns a PIL Image compatible with Gradio.
        """
        if not self.neurons:
            return "The network is empty. Add words to visualize."

        G = nx.Graph()
        for word, neuron in self.neurons.items():
            G.add_node(word, pos=neuron.position)
        for word, neuron in self.neurons.items():
            for connected_word in neuron.connections:
                G.add_edge(word, connected_word)

        fig = plt.figure(figsize=(10, 8))
        ax = fig.add_subplot(111, projection='3d')

        pos = nx.get_node_attributes(G, 'pos')

        # Extract positions
        xs = [pos[word][0] for word in G.nodes()]
        ys = [pos[word][1] for word in G.nodes()]
        zs = [pos[word][2] for word in G.nodes()]

        # Draw nodes
        ax.scatter(xs, ys, zs, c='r', s=20)

        # Draw edges
        for edge in G.edges():
            x = [pos[edge[0]][0], pos[edge[1]][0]]
            y = [pos[edge[0]][1], pos[edge[1]][1]]
            z = [pos[edge[0]][2], pos[edge[1]][2]]
            ax.plot(x, y, z, c='gray', alpha=0.5)

        ax.set_xlim(0, self.space_size)
        ax.set_ylim(0, self.space_size)
        ax.set_zlim(0, self.space_size)
        plt.title("Fractal Neural Network Visualization")

        buf = io.BytesIO()
        plt.savefig(buf, format='png')
        plt.close(fig)

        buf.seek(0)
        image = Image.open(buf)
        return image

    def chat(self, input_text, temperature=0.5):
        """
        Handle chat interactions by generating responses based on input text.
        """
        tokens = self.tokenize_text(input_text)
        if not tokens:
            return "I didn't understand that. Please try again."
        response = self.generate_response(tokens, temperature=temperature)
        # Optionally run a small API-based training pass after each exchange.
        # Note: this fetches fresh Datamuse data keyed off existing words; it
        # does not train directly on the input/response pair.
        self.train_with_api_data(
            num_sequences=1,
            seq_length=len(tokens),
            epochs=1,
            batch_size=1,
            learning_rate=self.learning_rate
        )
        return response


def create_gradio_interface():
    """
    Create the Gradio interface for interacting with the Fractal Neural Network.
    """
    network = FractalNeuralNetwork(seed=42)  # Set a seed for reproducibility

    with gr.Blocks() as iface:
        gr.Markdown("# 🧠 Fractal Neural Network Interface")
        gr.Markdown("""
        **⚠️ Warning:** Training the model with extensive data and high epoch counts takes significant time and computational resources. Make sure your system can handle the training process.
        """)

        with gr.Tab("Initialize with Wikipedia Topics"):
            gr.Markdown("### Initialize the Network with Comprehensive Wikipedia Topics")
            gr.Markdown("""
            **Instructions:**
            - Enter a list of Wikipedia topics separated by commas.
            - Example topics are pre-filled to guide you.
            - Click **"Start Initialization"** to begin the process.
            - **Note:** This may take several minutes depending on the number of topics and your internet connection.
            """)

            wiki_input = gr.Textbox(
                label="Wikipedia Topics",
                placeholder="Enter Wikipedia topics separated by commas...",
                lines=5,
                value="Artificial Intelligence, History of Computing, Biology, Physics, Chemistry, Mathematics, World History, Geography, Literature, Philosophy"
            )
            init_button = gr.Button("Start Initialization")
            init_output = gr.Textbox(label="Initialization Output", interactive=False, lines=10)

            async def handle_initialization(wiki_topics):
                # Split the input string into a list of topics
                topics = [topic.strip() for topic in wiki_topics.split(",") if topic.strip()]
                if not topics:
                    return "Please enter at least one valid Wikipedia topic."
                # Learn from the provided Wikipedia topics
                result = await network.initialize_with_wikipedia_topics(topics)
                # Save the state after initialization
                save_result = network.save_state("fnn_state.json")
                return f"{result}\n\n{save_result}"

            init_button.click(fn=handle_initialization, inputs=wiki_input, outputs=init_output)

        with gr.Tab("API Training"):
            gr.Markdown("### Configure and Start API-Based Training")
            gr.Markdown("""
            **Instructions:**
            - Adjust the training parameters below according to your requirements.
            - Higher values will result in longer training times and increased computational load.
            - Click **"Start Training"** to begin the API-based training process.
            """)

            with gr.Row():
                num_sequences_input = gr.Number(label="Number of Sequences", value=50000, precision=0, step=1000)
                seq_length_input = gr.Number(label="Sequence Length", value=15, precision=0, step=1)
            with gr.Row():
                epochs_input = gr.Number(label="Number of Epochs", value=100, precision=0, step=1)
                batch_size_input = gr.Number(label="Batch Size", value=500, precision=0, step=50)
            with gr.Row():
                learning_rate_input = gr.Number(label="Learning Rate", value=0.0005, precision=5, step=0.0001)
            train_button = gr.Button("Start Training")
            train_output = gr.Textbox(label="Training Output", interactive=False, lines=10)

            def handle_api_training(num_sequences, seq_length, epochs, batch_size, learning_rate):
                if not network.neurons:
                    return "The network has no words. Please initialize it with Wikipedia topics first."
                if num_sequences <= 0 or seq_length <= 0 or epochs <= 0 or batch_size <= 0 or learning_rate <= 0:
                    return "All training parameters must be positive numbers."
                # Start training
                result = network.train_with_api_data(
                    num_sequences=int(num_sequences),
                    seq_length=int(seq_length),
                    epochs=int(epochs),
                    batch_size=int(batch_size),
                    learning_rate=float(learning_rate)
                )
                # Save the state after training
                save_result = network.save_state("fnn_state.json")
                return f"{result}\n\n{save_result}"

            train_button.click(
                fn=handle_api_training,
                inputs=[num_sequences_input, seq_length_input, epochs_input, batch_size_input, learning_rate_input],
                outputs=train_output
            )

        with gr.Tab("Visualization"):
            gr.Markdown("### Visualize the Fractal Neural Network")
            gr.Markdown("""
            **Instructions:**
            - Click **"Visualize Network"** to generate a 3D visualization of the network's structure.
            - Ensure the network has been initialized and trained before visualizing.
            """)

            visualize_button = gr.Button("Visualize Network")
            visualize_image = gr.Image(label="Network Visualization")

            def handle_visualize():
                if not network.neurons:
                    # Returning None leaves the Image component empty rather
                    # than handing it a string it cannot render
                    return None
                return network.visualize()

            visualize_button.click(fn=handle_visualize, inputs=None, outputs=visualize_image)

        with gr.Tab("Chat"):
            gr.Markdown("### Interact with the Fractal Neural Network")
            gr.Markdown("""
            **Instructions:**
            - Enter your message in the textbox below.
            - Adjust the **Temperature** slider to control the randomness of the response.
                - **Lower values (e.g., 0.2):** More deterministic and focused responses.
                - **Higher values (e.g., 0.8):** More creative and varied responses.
            - Click **"Chat"** to receive a generated response.
            """)

            with gr.Row():
                chat_input = gr.Textbox(label="Your Message", placeholder="Type your message here...", lines=2)
                chat_temperature = gr.Slider(minimum=0.1, maximum=1.0, value=0.5, step=0.1, label="Temperature")
            chat_button = gr.Button("Chat")
            chat_output = gr.Textbox(label="Response", interactive=False, lines=2)

            def handle_chat(input_text, temperature):
                if not input_text.strip():
                    return "Please enter a message to chat."
                response = network.chat(input_text, temperature=temperature)
                return response

            chat_button.click(fn=handle_chat, inputs=[chat_input, chat_temperature], outputs=chat_output)

        with gr.Tab("State Management"):
            gr.Markdown("### Save or Load the Network State")
            gr.Markdown("""
            **Instructions:**
            - **Save State:** Enter a filename and click **"Save State"** to save the current network configuration.
            - **Load State:** Enter a filename and click **"Load State"** to load a previously saved network configuration.
            - Ensure that the filenames are correctly specified and that the files exist when loading.
            """)

            with gr.Row():
                save_filename_input = gr.Textbox(label="Filename to Save State", value="fnn_state.json", placeholder="e.g., fnn_state.json")
                save_button = gr.Button("Save State")
            save_output = gr.Textbox(label="Save Output", interactive=False, lines=2)

            def handle_save(filename):
                if not filename.strip():
                    return "Please enter a valid filename."
                result = network.save_state(filename)
                return result

            save_button.click(fn=handle_save, inputs=save_filename_input, outputs=save_output)

            with gr.Row():
                load_filename_input = gr.Textbox(label="Filename to Load State", value="fnn_state.json", placeholder="e.g., fnn_state.json")
                load_button = gr.Button("Load State")
            load_output = gr.Textbox(label="Load Output", interactive=False, lines=2)

            def handle_load(filename):
                nonlocal network
                if not filename.strip():
                    return "Please enter a valid filename."
                loaded_network = FractalNeuralNetwork.load_state(filename)
                if loaded_network:
                    network = loaded_network
                    return f"Loaded state from {filename}."
                else:
                    return f"Failed to load state from {filename}."

            load_button.click(fn=handle_load, inputs=load_filename_input, outputs=load_output)

    return iface


if __name__ == "__main__":
    iface = create_gradio_interface()
    iface.launch()