from transformers import PretrainedConfig


class AutoEncoderConfig(PretrainedConfig):
    # Identifier used by the Auto* classes to map this config to its model class.
    model_type = "autoencoder"

    def __init__(
        self,
        input_dim=None,         # dimensionality of the input features
        latent_dim=None,        # dimensionality of the latent (bottleneck) representation
        layer_types=None,       # type(s) of layers used in the encoder/decoder
        dropout_rate=None,      # dropout applied between layers
        num_layers=None,        # number of layers in the encoder/decoder
        compression_rate=None,  # how strongly dimensions shrink from layer to layer
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.input_dim = input_dim
        self.latent_dim = latent_dim
        self.layer_types = layer_types
        self.dropout_rate = dropout_rate
        self.num_layers = num_layers
        self.compression_rate = compression_rate
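
Because the class subclasses `PretrainedConfig`, it already supports the standard `save_pretrained` / `from_pretrained` round trip, and registering it with `AutoConfig` lets the Auto classes resolve the `"autoencoder"` model type. Below is a minimal usage sketch; it assumes `AutoEncoderConfig` from the snippet above is in scope, and the values and the `./autoencoder-config` directory are purely illustrative.

from transformers import AutoConfig

# Register the custom config so AutoConfig can resolve model_type "autoencoder".
AutoConfig.register("autoencoder", AutoEncoderConfig)

# Illustrative hyperparameters; choose whatever fits your data and architecture.
config = AutoEncoderConfig(
    input_dim=784,
    latent_dim=32,
    layer_types="linear",
    dropout_rate=0.1,
    num_layers=3,
    compression_rate=0.5,
)

config.save_pretrained("./autoencoder-config")  # writes config.json to the directory
reloaded = AutoConfig.from_pretrained("./autoencoder-config")
assert reloaded.latent_dim == 32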