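"""Siren coordinate networks registered in the mmpl MODELS registry.

Defines a plain SIREN-style MLP (``Sirens``) and a modulated variant
(``ModulatedSirens``) whose hidden activations are shifted by features
derived from a conditioning tensor.
"""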
import torch
import torch.nn as nn

from mmpl.registry import MODELS
from mmengine.model import BaseModule


@MODELS.register_module()
class Sirens(BaseModule):
    """SIREN-style MLP that maps input coordinates to signal values.

    A stack of linear layers with sine activations and optional residual
    connections between hidden layers. Note: this implementation uses
    PyTorch's default linear initialization; the frequency factor (w0) and
    the specialized uniform initialization of the original SIREN paper are
    omitted here.
    """

    def __init__(self,
                 in_channels,
                 out_channels=3,
                 base_channels=256,
                 num_inner_layers=2,
                 is_residual=True):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.base_channels = base_channels
        self.num_inner_layers = num_inner_layers
        self.is_residual = is_residual

        self.first_coord = nn.Linear(in_channels, base_channels)
        self.inner_coords = nn.ModuleList(
            nn.Linear(base_channels, base_channels)
            for _ in range(self.num_inner_layers))
        self.last_coord = nn.Linear(base_channels, out_channels)

    def forward(self, x):
        # x: (..., in_channels) coordinate tensor.
        x = self.first_coord(x)
        x = torch.sin(x)
        for idx in range(self.num_inner_layers):
            residual = x
            x = self.inner_coords[idx](x)
            # Optional skip connection around each hidden layer, applied
            # before the sine nonlinearity.
            if self.is_residual:
                x = x + residual
            x = torch.sin(x)
        x = self.last_coord(x)  # no activation on the output layer
        return x
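
# A minimal usage sketch (illustrative, not from the original repo): with
# in_channels=2, ``Sirens`` maps 2-D pixel coordinates to RGB values and can
# be fitted to an image as an implicit neural representation, e.g.:
#
#     siren = MODELS.build(dict(type='Sirens', in_channels=2))
#     rgb = siren(coords)  # coords: (N, 2) in [-1, 1] -> rgb: (N, 3)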


@MODELS.register_module()
class ModulatedSirens(BaseModule):
    """SIREN variant whose hidden activations are shifted by modulations.

    Coordinates and modulations are dense (B, C, H, W) maps, so every layer
    is a 1x1 convolution. At each inner layer the modulation branch sees the
    raw conditioning tensor, the previous modulations, and the current
    hidden state; its output is added to the coordinate branch before the
    sine activation.
    """

    def __init__(self,
                 num_inner_layers,
                 in_dim,
                 modulation_dim,
                 out_dim=3,
                 base_channels=256,
                 is_residual=True):
        super().__init__()
        self.in_dim = in_dim
        self.num_inner_layers = num_inner_layers
        self.is_residual = is_residual

        self.first_mod = nn.Sequential(
            nn.Conv2d(modulation_dim, base_channels, 1),
            nn.ReLU()
        )
        self.first_coord = nn.Conv2d(in_dim, base_channels, 1)

        self.inner_mods = nn.ModuleList()
        self.inner_coords = nn.ModuleList()
        for _ in range(self.num_inner_layers):
            # Input: raw conditioning tensor concatenated with the previous
            # modulations and the current hidden state.
            self.inner_mods.append(
                nn.Sequential(
                    nn.Conv2d(modulation_dim + 2 * base_channels,
                              base_channels, 1),
                    nn.ReLU()
                )
            )
            self.inner_coords.append(
                nn.Conv2d(base_channels, base_channels, 1)
            )
        self.last_coord = nn.Sequential(
            nn.Conv2d(base_channels, out_dim, 1)
        )

    def forward(self, x, ori_modulations):
        # x: (B, in_dim, H, W) coordinate grid;
        # ori_modulations: (B, modulation_dim, H, W) conditioning features
        # (required; there is no unmodulated path).
        modulations = self.first_mod(ori_modulations)
        x = self.first_coord(x)  # B in_dim H W -> B C H W
        x = torch.sin(x + modulations)
        for i_layer in range(self.num_inner_layers):
            modulations = self.inner_mods[i_layer](
                torch.cat((ori_modulations, modulations, x), dim=1))
            residual = self.inner_coords[i_layer](x)
            residual = torch.sin(residual + modulations)
            if self.is_residual:
                x = x + residual
            else:
                x = residual
        x = self.last_coord(x)  # B C H W -> B out_dim H W
        return x
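

if __name__ == '__main__':
    # A minimal smoke test (illustrative, not from the original repo); the
    # channel sizes and spatial resolution below are assumptions, not values
    # taken from any config in this codebase.
    siren = Sirens(in_channels=2)
    pts = torch.rand(1024, 2) * 2 - 1            # 1024 points in [-1, 1]^2
    assert siren(pts).shape == (1024, 3)

    mod_siren = ModulatedSirens(num_inner_layers=2, in_dim=2,
                                modulation_dim=64)
    coords = torch.rand(1, 2, 32, 32) * 2 - 1    # B=1 coordinate grid
    mods = torch.rand(1, 64, 32, 32)             # per-pixel conditioning
    assert mod_siren(coords, mods).shape == (1, 3, 32, 32)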