import torch
import torch.nn as nn
from collections import deque
from .memory import CognitiveMemory

class CognitiveNode(nn.Module):
    """Node dengan penanganan dimensi terintegrasi"""
    def __init__(self, node_id: int, input_size: int):
        super().__init__()
        self.id = node_id
        self.input_size = input_size
        
        # Parameters sized to the input dimension
        self.weights = nn.Parameter(torch.randn(input_size) * 0.1)
        self.bias = nn.Parameter(torch.zeros(1))
        self.memory = CognitiveMemory(context_size=input_size)
        
        # Neuromodulator system
        self.dopamine = nn.Parameter(torch.tensor(0.5))
        self.serotonin = nn.Parameter(torch.tensor(0.5))
        
        # Rolling window of recent activations
        self.recent_activations = deque(maxlen=100)

    def forward(self, inputs: torch.Tensor) -> torch.Tensor:
        # Memory integration: blend the input with retrieved context
        mem_context = self.memory.retrieve(inputs)
        combined = inputs * 0.7 + mem_context * 0.3
        
        # Dimension-safe activation: the dot product reduces to a scalar
        activation = torch.tanh(torch.dot(combined, self.weights) + self.bias)
        modulated = activation * (1 + torch.sigmoid(self.dopamine) 
                                - torch.sigmoid(self.serotonin))
        
        # Write the modulated result back to memory
        self.memory.add_memory(inputs, modulated.item())
        self.recent_activations.append(modulated.item())
        
        return modulated

    def update_plasticity(self, reward: float):
        """Update neuromodulator levels, clamped to [0, 1]."""
        with torch.no_grad():
            self.dopamine.add_(reward * 0.1).clamp_(0, 1)
            self.serotonin.add_(-reward * 0.05).clamp_(0, 1)
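
# Minimal usage sketch (an illustration, not part of the original module). It
# assumes CognitiveMemory.retrieve(inputs) returns a tensor shaped like
# `inputs`, and that this file is run inside its package so the relative
# import of CognitiveMemory resolves.
if __name__ == "__main__":
    node = CognitiveNode(node_id=0, input_size=8)
    x = torch.randn(8)                       # one 1-D input vector
    out = node(x)                            # modulated activation, shape (1,)
    node.update_plasticity(reward=0.25)      # nudges dopamine up, serotonin down
    print(out.item(), node.dopamine.item(), node.serotonin.item())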