exp_root_dir: "outputs"
name: "michelangelo-autoencoder/l256-e64-ne8-nd16"
tag: michelangelo-autoencoder+n4096+noise0.0+pfeat3+normembFalse+lr5e-05+qkvbiasFalse+nfreq8+ln_postTrue
resume: ./ckpts/vae_pretrained/model.ckpt
seed: 0
data_type: "objaverse-datamodule"
data:
  root_dir: 'data/objaverse-MIX'
  data_type: "occupancy"
  n_samples: 4096
  noise_sigma: 0.
  load_supervision: True
  supervision_type: "occupancy"
  n_supervision: 10000
  load_image: False    # whether to load images
  load_caption: False  # whether to load captions
  batch_size: 8
  num_workers: 16
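# Shape VAE matching the run name l256-e64-ne8-nd16: 256 latent tokens with a
# 64-dim embedding, 8 encoder and 16 decoder layers (width 768, 12 attention heads).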
system_type: "shape-autoencoder-system"
system:
  sample_posterior: true
  shape_model_type: "michelangelo-aligned-autoencoder"
  shape_model:
    num_latents: 256
    embed_dim: 64
    point_feats: 3
    out_dim: 1
    num_freqs: 8
    include_pi: false
    heads: 12
    width: 768
    num_encoder_layers: 8
    num_decoder_layers: 16
    use_ln_post: true
    init_scale: 0.25
    qkv_bias: false
    use_flash: true
    use_checkpoint: true

  loggers:
    wandb:
      enable: false
      project: "CraftsMan"
      name: shape-autoencoder+${name}+${tag}
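  # Loss weights: lambda_logits scales the occupancy reconstruction term,
  # lambda_kl the KL regularization of the latent posterior (interpretation
  # from the key names; the training code defines the exact terms).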
  loss:
    lambda_logits: 1.
    lambda_kl: 0.001

  optimizer:
    name: AdamW
    args:
      lr: 5e-05
      betas: [0.9, 0.99]
      eps: 1.e-6
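  # LR schedule: linear warmup from lr * 1e-6 up to the base lr over the first
  # 5000 steps, then cosine annealing to 0; SequentialLR switches schedulers at
  # the 5000-step milestone.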
  scheduler:
    name: SequentialLR
    interval: step
    schedulers:
      - name: LinearLR
        interval: step
        args:
          start_factor: 1e-6
          end_factor: 1.0
          total_iters: 5000
      - name: CosineAnnealingLR
        interval: step
        args:
          T_max: 5000
          eta_min: 0.
    milestones: [5000]
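# Lightning-style trainer settings: single node, FP16 mixed precision,
# validation after every training epoch.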
trainer:
  num_nodes: 1
  max_epochs: 100000
  log_every_n_steps: 5
  num_sanity_val_steps: 1
  # val_check_interval: 200
  check_val_every_n_epoch: 1
  enable_progress_bar: true
  precision: 16-mixed
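# Checkpointing: always keep the latest checkpoint and save one every 5000
# training steps (save_top_k: -1 disables top-k pruning, so all are kept).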
checkpoint:
  save_last: true
  save_top_k: -1
  every_n_train_steps: 5000