{
"_class_name": "AutoencoderKL",
"_commit_hash": null,
"_diffusers_version": "0.25.0",
"_name_or_path": "/home/ubuntu/.cache/huggingface/hub/models--Lykon--dreamshaper-xl-1-0/snapshots/41e6644752a8c9aa63930e6043c4fd83c7708420/vae",
"act_fn": "silu",
"block_out_channels": [
128,
256,
512,
512
],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D"
],
"force_upcast": true,
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 2,
"neuron": {
"auto_cast": "all",
"auto_cast_type": "bf16",
"compiler_type": "neuronx-cc",
"compiler_version": "2.12.68.0+4480452af",
"disable_fallback": false,
"disable_fast_relayout": false,
"dynamic_batch_size": false,
"input_names": [
"sample"
],
"model_type": "vae-encoder",
"optlevel": "2",
"output_attentions": false,
"output_hidden_states": false,
"output_names": [
"latent_sample"
],
"static_batch_size": 1,
"static_height": 1024,
"static_num_beams": 1,
"static_num_channels": 3,
"static_width": 1024
},
"norm_num_groups": 32,
"out_channels": 3,
"sample_size": 1024,
"scaling_factor": 0.13025,
"task": "semantic-segmentation",
"transformers_version": null,
"up_block_types": [
"UpDecoderBlock2D",
"UpDecoderBlock2D",
"UpDecoderBlock2D",
"UpDecoderBlock2D"
]
}