Update README.md
README.md (CHANGED)
````diff
@@ -20,6 +20,17 @@ from transformers import AutoConfig, AutoModel
 
 config = AutoConfig.from_pretrained("amaye15/autoencoder", trust_remote_code = True)
 
+# Let's say you want to change the input_dim and latent_dim
+config.input_dim = 1024          # New input dimension
+config.latent_dim = 64           # New latent dimension
+
+# Similarly, update other parameters as needed
+config.layer_types = 'gru'       # Change layer types to 'gru'
+config.dropout_rate = 0.2        # Update dropout rate
+config.num_layers = 4            # Change the number of layers
+config.compression_rate = 0.6    # Update compression rate
+config.bidirectional = False     # Change to unidirectional
+
 ### Change Configuration
 
 model = AutoModel.from_config(config, trust_remote_code = True)
@@ -31,16 +42,12 @@ input_data = torch.rand((32, 10, 784)) # Adjust shape according to your needs
 with torch.no_grad(): # Assuming inference only
     output = model(input_data)
 
-
-
 ### To-Do
 # The `output` is a dictionary with 'encoder_final' and 'decoder_final' keys
-encoded_representation = output['encoder_final']
-reconstructed_data = output['decoder_final']
+# encoded_representation = output['encoder_final']
+# reconstructed_data = output['decoder_final']
 ```
 
-Replace `your_model_directory` with the actual path where your `AutoEncoder` and `AutoEncoderConfig` classes are located.
-
 ## Training Data
 *Omitted - to be filled in with details about the training data used for the model.*
 
````
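For context, the right-hand side of this change assembles into roughly the following usage snippet. This is a sketch pieced together from the added and surrounding lines above, not independently verified against the repository: the attribute names (`input_dim`, `latent_dim`, `layer_types`, `dropout_rate`, `num_layers`, `compression_rate`, `bidirectional`) and the `encoder_final`/`decoder_final` output keys come from the README itself, while the dummy input shape is adjusted here on the assumption that the last dimension must match the overridden `input_dim` of 1024.

```python
import torch
from transformers import AutoConfig, AutoModel

# Load the config published with the model; trust_remote_code is needed because
# the custom AutoEncoder / AutoEncoderConfig classes live in the model repository.
config = AutoConfig.from_pretrained("amaye15/autoencoder", trust_remote_code=True)

# Override hyperparameters before building the model (values taken from the diff above)
config.input_dim = 1024          # new input dimension
config.latent_dim = 64           # new latent dimension
config.layer_types = 'gru'       # switch layer type to GRU
config.dropout_rate = 0.2        # updated dropout rate
config.num_layers = 4            # number of layers
config.compression_rate = 0.6    # updated compression rate
config.bidirectional = False     # unidirectional

# from_config builds a freshly initialized (untrained) model from the modified config
model = AutoModel.from_config(config, trust_remote_code=True)

# Dummy batch of shape (batch, sequence, features); 1024 is assumed here so the
# feature size matches the overridden input_dim
input_data = torch.rand((32, 10, 1024))

with torch.no_grad():  # inference only
    output = model(input_data)

# Per the README, the output is a dict with 'encoder_final' and 'decoder_final' keys
encoded_representation = output['encoder_final']
reconstructed_data = output['decoder_final']
```

Note that `AutoModel.from_config` initializes a model with random weights, unlike `from_pretrained`, so this path is for experimenting with the architecture rather than reusing the published checkpoint.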