import torch
import torch.nn as nn
from collections import deque
from .memory import CognitiveMemory

class CognitiveNode(nn.Module):
    """Unit neuron dengan operasi tensor yang aman"""
    def __init__(self, node_id: int, input_size: int):
        super().__init__()
        self.id = node_id
        self.input_size = input_size
        
        # Parameters sized to match the input dimension
        self.weights = nn.Parameter(torch.randn(input_size) * 0.1)
        self.bias = nn.Parameter(torch.zeros(1))
        self.memory = CognitiveMemory(context_size=input_size)
        
        # Neuromodulator system
        self.dopamine = nn.Parameter(torch.tensor(0.5))
        self.serotonin = nn.Parameter(torch.tensor(0.5))
        self.recent_activations = deque(maxlen=100)

    def forward(self, inputs: torch.Tensor) -> torch.Tensor:
        # Flatten input to 1-D so the dot product below is well-defined
        inputs = inputs.view(-1)
        
        # Memory integration: blend the raw input with retrieved context
        mem_context = self.memory.retrieve(inputs)
        combined = inputs * 0.7 + mem_context * 0.3
        
        # Safe linear operation on matching 1-D tensors
        activation = torch.tanh(torch.dot(combined, self.weights) + self.bias)
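        # Neuromodulation: gain = 1 + sigmoid(dopamine) - sigmoid(serotonin), in (0, 2)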
        modulated = activation * (1 + torch.sigmoid(self.dopamine) 
                                - torch.sigmoid(self.serotonin))
        
        # Store the input with its scalar activation value in memory
        self.memory.add_memory(inputs, modulated.item())
        self.recent_activations.append(modulated.item())
        
        return modulated.squeeze()

    def update_plasticity(self, reward: float):
        """Update neurotransmitter dengan clamping"""
        with torch.no_grad():
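            # Reward moves the two modulators in opposite directions at different rates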
            self.dopamine.data = torch.clamp(self.dopamine + reward * 0.1, 0, 1)
            self.serotonin.data = torch.clamp(self.serotonin - reward * 0.05, 0, 1)
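
# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the module's public API). It
# assumes the package's CognitiveMemory is importable and that it exposes
# retrieve(inputs) -> Tensor and add_memory(inputs, scalar), as used in
# forward() above. A hypothetical stub with that interface is swapped in so
# the example stays self-contained about shapes and behavior.
if __name__ == "__main__":
    class _StubMemory(nn.Module):
        """Hypothetical stand-in for CognitiveMemory (same interface)."""
        def __init__(self, context_size: int):
            super().__init__()
            self.context_size = context_size
            self.traces = []

        def retrieve(self, inputs: torch.Tensor) -> torch.Tensor:
            # Return a zero context until something has been stored
            if not self.traces:
                return torch.zeros(self.context_size)
            return torch.stack([t for t, _ in self.traces]).mean(dim=0)

        def add_memory(self, inputs: torch.Tensor, value: float) -> None:
            self.traces.append((inputs.detach(), value))

    node = CognitiveNode(node_id=0, input_size=8)
    node.memory = _StubMemory(context_size=8)  # swap in the hypothetical stub

    x = torch.randn(8)
    out = node(x)                 # tanh activation scaled by the modulator gain
    node.update_plasticity(0.3)   # positive reward: dopamine up, serotonin down
    print(out.item(), node.dopamine.item(), node.serotonin.item())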