Update node.py
node.py
CHANGED
@@ -1,49 +1,45 @@
-# cognitive_net/node.py
 import torch
 import torch.nn as nn
 from collections import deque
 from .memory import CognitiveMemory
 
 class CognitiveNode(nn.Module):
-    """
+    """Node with integrated dimension handling"""
     def __init__(self, node_id: int, input_size: int):
         super().__init__()
         self.id = node_id
         self.input_size = input_size
 
-        #
+        # Parameters sized to the input dimension
         self.weights = nn.Parameter(torch.randn(input_size) * 0.1)
         self.bias = nn.Parameter(torch.zeros(1))
         self.memory = CognitiveMemory(context_size=input_size)
 
-        #
+        # Neuromodulator system
         self.dopamine = nn.Parameter(torch.tensor(0.5))
         self.serotonin = nn.Parameter(torch.tensor(0.5))
 
-        #
+        # Recent activations
        self.recent_activations = deque(maxlen=100)
 
     def forward(self, inputs: torch.Tensor) -> torch.Tensor:
-        #
+        # Memory integration
         mem_context = self.memory.retrieve(inputs)
         combined = inputs * 0.7 + mem_context * 0.3
 
-        #
-
-        modulated =
-
+        # Activation with dimension control
+        activation = torch.tanh(torch.dot(combined, self.weights) + self.bias)
+        modulated = activation * (1 + torch.sigmoid(self.dopamine)
+                                  - torch.sigmoid(self.serotonin))
 
-        #
+        # Memory update
         self.memory.add_memory(inputs, modulated.item())
         self.recent_activations.append(modulated.item())
 
         return modulated
 
     def update_plasticity(self, reward: float):
-        """
+        """Update neurotransmitters with clamping"""
         with torch.no_grad():
-            self.dopamine
-            self.serotonin
-            # Maintain neurotransmitter bounds
-            self.dopamine.clamp_(0, 1)
-            self.serotonin.clamp_(0, 1)
+            self.dopamine.data = torch.clamp(self.dopamine + reward * 0.1, 0, 1)
+            self.serotonin.data = torch.clamp(self.serotonin - reward * 0.05, 0, 1)
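For context, node.py imports CognitiveMemory from cognitive_net/memory.py, which is not part of this change. A minimal stand-in that satisfies the interface node.py relies on (a context_size constructor argument, retrieve returning a context tensor shaped like the input, and add_memory taking an input/activation pair) could look like the sketch below; the mean-over-stored-inputs retrieval and the capacity argument are purely illustrative assumptions, not the actual implementation.

# Hypothetical stand-in for cognitive_net/memory.py. Only the constructor and
# method signatures used in node.py are taken from this diff; the storage and
# retrieval logic here is illustrative.
from collections import deque

import torch


class CognitiveMemory:
    def __init__(self, context_size: int, capacity: int = 64):
        self.context_size = context_size
        self.entries = deque(maxlen=capacity)  # (input_vector, activation) pairs

    def retrieve(self, inputs: torch.Tensor) -> torch.Tensor:
        # node.py only needs a tensor shaped like `inputs`; return zeros when
        # nothing has been stored yet, otherwise the mean of stored inputs.
        if not self.entries:
            return torch.zeros_like(inputs)
        stored = torch.stack([vec for vec, _ in self.entries])
        return stored.mean(dim=0)

    def add_memory(self, inputs: torch.Tensor, activation: float) -> None:
        # Detach so the buffer does not keep the autograd graph alive.
        self.entries.append((inputs.detach().clone(), activation))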
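With a compatible memory module in place (such as the stub above), a quick smoke test of the updated forward pass and plasticity rule might look like the following; the input size, random input, and reward value are arbitrary. Note that torch.dot only accepts 1-D tensors, so the forward pass expects a single unbatched input vector of length input_size.

import torch

from cognitive_net.node import CognitiveNode

node = CognitiveNode(node_id=0, input_size=8)

# Forward pass: the memory context is blended with the input (0.7 / 0.3),
# squashed with tanh, then scaled by the dopamine/serotonin gate.
x = torch.randn(8)
out = node(x)
print(out.shape)  # torch.Size([1])

# A positive reward nudges dopamine up (+0.1 * reward) and serotonin down
# (-0.05 * reward), both clamped to [0, 1].
node.update_plasticity(reward=1.0)
print(float(node.dopamine), float(node.serotonin))  # approximately 0.6 and 0.45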