from torch import nn
from transformers import PretrainedConfig, PreTrainedModel
class AutoEncoderConfig(PretrainedConfig):
    model_type = "autoencoder"

    def __init__(
        self,
        input_dim=None,
        latent_dim=None,
        layer_types=None,
        dropout_rate=None,
        num_layers=None,
        compression_rate=None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.input_dim = input_dim
        self.latent_dim = latent_dim
        self.layer_types = layer_types
        self.dropout_rate = dropout_rate
        self.num_layers = num_layers
        self.compression_rate = compression_rate
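
# A minimal sketch of constructing and round-tripping the config. The values
# below are illustrative assumptions, not defaults shipped with this model:
#
#   config = AutoEncoderConfig(
#       input_dim=784, latent_dim=32, layer_types="linear",
#       dropout_rate=0.1, num_layers=3, compression_rate=0.5,
#   )
#   config.save_pretrained("./autoencoder")              # writes config.json
#   config = AutoEncoderConfig.from_pretrained("./autoencoder")
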
def create_layers(model_section, layer_types, input_dim, latent_dim, num_layers, dropout_rate, compression_rate):
    """Build a stack of layers whose widths shrink from input_dim towards latent_dim
    (encoder) or grow back from latent_dim to input_dim (decoder)."""
    layers = []
    current_dim = input_dim
    input_dimensions = []
    output_dimensions = []
    for _ in range(num_layers):
        input_dimensions.append(current_dim)
        next_dim = max(int(current_dim * compression_rate), latent_dim)
        current_dim = next_dim
        output_dimensions.append(current_dim)
    # Force the innermost layer to end exactly at the latent dimension.
    output_dimensions[num_layers - 1] = latent_dim
    if model_section == "decoder":
        # The decoder mirrors the encoder: swap the dimension lists and reverse them.
        input_dimensions, output_dimensions = output_dimensions, input_dimensions
        input_dimensions.reverse()
        output_dimensions.reverse()
    for idx, (input_dim, output_dim) in enumerate(zip(input_dimensions, output_dimensions)):
        if layer_types == 'linear':
            layers.append(nn.Linear(input_dim, output_dim))
        elif layer_types == 'lstm':
            # Recurrent layers are used with batch_first=True so they return a sequence output
            layers.append(nn.LSTM(input_dim, output_dim, batch_first=True))
        elif layer_types == 'rnn':
            layers.append(nn.RNN(input_dim, output_dim, batch_first=True))
        elif layer_types == 'gru':
            layers.append(nn.GRU(input_dim, output_dim, batch_first=True))
        if idx != num_layers - 1 and dropout_rate is not None:
            layers.append(nn.Dropout(dropout_rate))
    return nn.Sequential(*layers)
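
# Worked example of the dimension schedule above (values are illustrative
# assumptions, not fixed by this file): with input_dim=784, latent_dim=32,
# num_layers=3, compression_rate=0.5 the encoder widths become
# 784 -> 392 -> 196 -> 32 (the last stage is clamped to latent_dim),
# and the decoder mirrors them as 32 -> 196 -> 392 -> 784.
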
class AutoEncoder(PreTrainedModel):
    config_class = AutoEncoderConfig

    def __init__(self, config):
        super().__init__(config)
        self.encoder = create_layers(
            "encoder",
            config.layer_types, config.input_dim, config.latent_dim,
            config.num_layers, config.dropout_rate, config.compression_rate
        )
        # The decoder is assumed to be symmetric to the encoder.
        self.decoder = create_layers(
            "decoder",
            config.layer_types, config.input_dim, config.latent_dim,
            config.num_layers, config.dropout_rate, config.compression_rate
        )
    def forward(self, x):
        # Recurrent layers return (output, hidden_state) tuples; keep only the sequence output.
        if self.config.layer_types in ('lstm', 'rnn', 'gru'):
            x, _ = self.encoder(x)
            x, _ = self.decoder(x)
        else:
            x = self.encoder(x)
            x = self.decoder(x)
        return x
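
# A minimal usage sketch (shapes and hyperparameters are assumptions for
# illustration; adapt them to your data):
#
#   import torch
#
#   config = AutoEncoderConfig(
#       input_dim=784, latent_dim=32, layer_types="linear",
#       dropout_rate=0.1, num_layers=3, compression_rate=0.5,
#   )
#   model = AutoEncoder(config)
#   batch = torch.randn(8, 784)           # (batch_size, input_dim)
#   reconstruction = model(batch)         # (batch_size, input_dim)
#   loss = nn.MSELoss()(reconstruction, batch)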