from transformers import PretrainedConfig


class MapperConfig(PretrainedConfig):
    # model_type lets the transformers Auto* machinery identify this config.
    model_type = "embedding_mapper"

    def __init__(
        self,
        d_in: int = 64,                  # input embedding dimension
        d_hidden: int = 1024,            # width of the hidden layers
        n_layers: int = 6,               # number of hidden layers
        d_out: int = 64,                 # output embedding dimension
        n_out: int = 2,                  # number of output embeddings produced
        dropout: float = 0.1,            # dropout probability
        layer_norm_eps: float = 1e-12,   # LayerNorm epsilon for numerical stability
        **kwargs
    ):
        self.d_in = d_in
        self.d_hidden = d_hidden
        self.n_layers = n_layers
        self.d_out = d_out
        self.n_out = n_out
        self.dropout = dropout
        self.layer_norm_eps = layer_norm_eps
        # Forward any remaining keyword arguments to PretrainedConfig.
        super().__init__(**kwargs)
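

# A minimal usage sketch (not from the original; the directory name
# "mapper_config" is hypothetical). It relies only on the standard
# PretrainedConfig save/load API, which serializes the custom fields
# to config.json and restores them on load.
config = MapperConfig(d_in=128, n_layers=4)
config.save_pretrained("mapper_config")                   # writes mapper_config/config.json
reloaded = MapperConfig.from_pretrained("mapper_config")
assert reloaded.d_in == 128 and reloaded.n_layers == 4

# Optionally, register the config so AutoConfig.from_pretrained can resolve
# the "embedding_mapper" model_type automatically.
from transformers import AutoConfig
AutoConfig.register("embedding_mapper", MapperConfig)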