Create node.py
node.py
ADDED
@@ -0,0 +1,43 @@
+import torch
+import torch.nn as nn
+from .memory import CognitiveMemory
+
+class CognitiveNode(nn.Module):
+    """Differentiable cognitive node with dynamic plasticity"""
+    def __init__(self, node_id: int, input_size: int):
+        super().__init__()
+        self.id = node_id
+        self.input_size = input_size
+        self.activation = 0.0
+
+        # Dynamic input weights with Hebbian plasticity
+        self.weights = nn.Parameter(torch.randn(input_size) * 0.1)
+        self.bias = nn.Parameter(torch.zeros(1))
+
+        # Memory system
+        self.memory = CognitiveMemory(context_size=input_size)
+
+        # Neurotransmitter levels
+        self.dopamine = nn.Parameter(torch.tensor(0.5))
+        self.serotonin = nn.Parameter(torch.tensor(0.5))
+
+    def forward(self, inputs: torch.Tensor) -> torch.Tensor:
+        # Memory influence
+        mem_context = self.memory.retrieve(inputs)
+
+        # Combine inputs with memory context
+        combined = inputs * 0.7 + mem_context * 0.3
+
+        # Adaptive activation with neurotransmitter modulation
+        base_activation = torch.tanh(combined @ self.weights + self.bias)
+        modulated = base_activation * (1 + self.dopamine - self.serotonin)
+
+        # Update memory
+        self.memory.add_memory(inputs, modulated.item())
+
+        return modulated
+
+    def update_plasticity(self, reward: float):
+        """Update neurotransmitter levels based on reward signal"""
+        self.dopamine.data = torch.sigmoid(self.dopamine + reward * 0.1)
+        self.serotonin.data = torch.sigmoid(self.serotonin - reward * 0.05)
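
For context, the only interface node.py assumes from memory.py (not part of this diff) is CognitiveMemory(context_size=...) with a retrieve(inputs) method returning a tensor shaped like inputs and an add_memory(inputs, activation) method. A minimal usage sketch follows; the "cognitive" package name and import path are hypothetical, and the real memory implementation may behave differently:

# Usage sketch (assumptions): the "cognitive" package path below is hypothetical;
# memory.py must provide CognitiveMemory(context_size=...) with
# retrieve(inputs) -> Tensor and add_memory(inputs, activation),
# which are the only calls node.py makes.
import torch
from cognitive.node import CognitiveNode  # hypothetical import path

node = CognitiveNode(node_id=0, input_size=8)
x = torch.randn(8)

out = node(x)                # tanh activation modulated by dopamine/serotonin, shape (1,)
node.update_plasticity(1.0)  # positive reward: dopamine nudged up, serotonin nudged down
out_after = node(x)          # the same input now yields a shifted activation
print(out.item(), out_after.item())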