Upload folder using huggingface_hub
- .gitattributes +2 -0
- models/CogVideo/CogVideoX-5b-I2V/scheduler/scheduler_config.json +18 -0
- models/CogVideo/CogVideoX-5b-I2V/transformer/config.json +29 -0
- models/CogVideo/CogVideoX-5b-I2V/transformer/diffusion_pytorch_model-00001-of-00003.safetensors +3 -0
- models/CogVideo/CogVideoX-5b-I2V/transformer/diffusion_pytorch_model-00002-of-00003.safetensors +3 -0
- models/CogVideo/CogVideoX-5b-I2V/transformer/diffusion_pytorch_model-00003-of-00003.safetensors +3 -0
- models/CogVideo/CogVideoX-5b-I2V/transformer/diffusion_pytorch_model.safetensors.index.json +0 -0
- models/CogVideo/CogVideoX-5b-I2V/vae/config.json +40 -0
- models/CogVideo/CogVideoX-5b-I2V/vae/diffusion_pytorch_model.safetensors +3 -0
- models/RMBG/BEN2/__pycache__/BEN2.cpython-310.pyc +0 -0
- models/RMBG/RMBG-2.0/__pycache__/BiRefNet_config.cpython-310.pyc +0 -0
- models/checkpoints/bopbt/FT_Epoch_latest.pt +3 -0
- models/checkpoints/bopbt/FaceSR_512/latest_net_G.pth +3 -0
- models/checkpoints/bopbt/Setting_9_epoch_100/latest_net_G.pth +3 -0
- models/checkpoints/bopbt/mapping_Patch_Attention/latest_net_mapping_net.pth +3 -0
- models/checkpoints/bopbt/mapping_quality/latest_net_mapping_net.pth +3 -0
- models/checkpoints/bopbt/mapping_scratch/latest_net_mapping_net.pth +3 -0
- models/checkpoints/bopbtf +3 -0
- models/checkpoints/juggernautXL_juggXIByRundiffusion.safetensors +3 -0
- models/checkpoints/kantanmix_v10.safetensors +3 -0
- models/checkpoints/models/checkpoints/kantanmixSD15_v10.safetensors +3 -0
- models/checkpoints/tmp_795765.safetensors +3 -0
- models/clip/ViT-L-14-BEST-smooth-GmP-TE-only-HF-format.safetensors +3 -0
- models/clip_interrogator/models--timm--vit_large_patch14_clip_224.openai/refs/main +1 -1
- models/clip_interrogator/models--timm--vit_large_patch14_clip_224.openai/snapshots/18d0535469bb561bf468d76c1d73aa35156c922b/open_clip_model.safetensors +3 -0
- models/diffusion_models/flux/STOIQONewrealityFLUXSD_F1DAlpha.safetensors +3 -0
- models/diffusion_models/wan/Phantom-Wan-14B_fp8_e4m3fn.safetensors +3 -0
- models/diffusion_models/wan/aniWan2114BFp8E4m3fn_t2v.safetensors +3 -0
- models/face_parsing/config.json +111 -0
- models/face_parsing/model.safetensors +3 -0
- models/face_parsing/preprocessor_config.json +23 -0
- models/facedetection/shape_predictor_68_face_landmarks.dat +3 -0
- models/float/.gitattributes +35 -0
- models/float/float.pth +3 -0
- models/float/wav2vec-english-speech-emotion-recognition/.gitattributes +32 -0
- models/float/wav2vec-english-speech-emotion-recognition/README.md +86 -0
- models/float/wav2vec-english-speech-emotion-recognition/config.json +137 -0
- models/float/wav2vec-english-speech-emotion-recognition/preprocessor_config.json +10 -0
- models/float/wav2vec-english-speech-emotion-recognition/pytorch_model.bin +3 -0
- models/float/wav2vec-english-speech-emotion-recognition/training_args.bin +3 -0
- models/float/wav2vec2-base-960h/.gitattributes +18 -0
- models/float/wav2vec2-base-960h/README.md +128 -0
- models/float/wav2vec2-base-960h/config.json +77 -0
- models/float/wav2vec2-base-960h/feature_extractor_config.json +8 -0
- models/float/wav2vec2-base-960h/model.safetensors +3 -0
- models/float/wav2vec2-base-960h/preprocessor_config.json +8 -0
- models/float/wav2vec2-base-960h/pytorch_model.bin +3 -0
- models/float/wav2vec2-base-960h/special_tokens_map.json +1 -0
- models/float/wav2vec2-base-960h/tf_model.h5 +3 -0
- models/float/wav2vec2-base-960h/tokenizer_config.json +1 -0
.gitattributes
CHANGED
@@ -1022,3 +1022,5 @@ custom_nodes/mikey_nodes/noise_bw.png filter=lfs diff=lfs merge=lfs -text
 models/clipseg/models--CIDAS--clipseg-rd64-refined/blobs/d00ca85d6b859f9d07b7cfb8ef26fe9771cb275b34c9368f2ecf603139307f55 filter=lfs diff=lfs merge=lfs -text
 models/prompt_generator/opus-mt-id-en/source.spm filter=lfs diff=lfs merge=lfs -text
 models/prompt_generator/opus-mt-id-en/target.spm filter=lfs diff=lfs merge=lfs -text
+models/checkpoints/bopbtf filter=lfs diff=lfs merge=lfs -text
+models/facedetection/shape_predictor_68_face_landmarks.dat filter=lfs diff=lfs merge=lfs -text
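Every large file in this commit is tracked with Git LFS, so the repository itself stores only a three-line pointer (spec version, sha256 oid, byte size) while the `.gitattributes` rules above route the actual weights to LFS storage. As a hedged illustration (not part of the commit), the sketch below checks a locally pulled file against its pointer; the path, oid, and size are copied from the `models/checkpoints/bopbtf` entry further down.

```python
# Verify a locally materialized LFS file against its pointer (hashlib/os are stdlib).
import hashlib
import os

path = "models/checkpoints/bopbtf"
expected_oid = "bb193cc0c82a6f4ed1c746beec5366c8a1e4a1b8dad3c83148afd0f93814624b"
expected_size = 144549781

h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # stream in 1 MiB chunks
        h.update(chunk)

print("size ok:", os.path.getsize(path) == expected_size)
print("oid ok:", h.hexdigest() == expected_oid)
```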
models/CogVideo/CogVideoX-5b-I2V/scheduler/scheduler_config.json
ADDED
@@ -0,0 +1,18 @@
+{
+  "_class_name": "CogVideoXDDIMScheduler",
+  "_diffusers_version": "0.31.0.dev0",
+  "beta_end": 0.012,
+  "beta_schedule": "scaled_linear",
+  "beta_start": 0.00085,
+  "clip_sample": false,
+  "clip_sample_range": 1.0,
+  "num_train_timesteps": 1000,
+  "prediction_type": "v_prediction",
+  "rescale_betas_zero_snr": true,
+  "sample_max_value": 1.0,
+  "set_alpha_to_one": true,
+  "snr_shift_scale": 1.0,
+  "steps_offset": 0,
+  "timestep_spacing": "trailing",
+  "trained_betas": null
+}
models/CogVideo/CogVideoX-5b-I2V/transformer/config.json
ADDED
@@ -0,0 +1,29 @@
+{
+  "_class_name": "CogVideoXTransformer3DModel",
+  "_diffusers_version": "0.31.0.dev0",
+  "activation_fn": "gelu-approximate",
+  "attention_bias": true,
+  "attention_head_dim": 64,
+  "dropout": 0.0,
+  "flip_sin_to_cos": true,
+  "freq_shift": 0,
+  "in_channels": 32,
+  "max_text_seq_length": 226,
+  "norm_elementwise_affine": true,
+  "norm_eps": 1e-05,
+  "num_attention_heads": 48,
+  "num_layers": 42,
+  "out_channels": 16,
+  "patch_size": 2,
+  "sample_frames": 49,
+  "sample_height": 60,
+  "sample_width": 90,
+  "spatial_interpolation_scale": 1.875,
+  "temporal_compression_ratio": 4,
+  "temporal_interpolation_scale": 1.0,
+  "text_embed_dim": 4096,
+  "time_embed_dim": 512,
+  "timestep_activation_fn": "silu",
+  "use_learned_positional_embeddings": true,
+  "use_rotary_positional_embeddings": true
+}
models/CogVideo/CogVideoX-5b-I2V/transformer/diffusion_pytorch_model-00001-of-00003.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f2e3060199c34a0d18892a19d687455f938b0ac3d2ea7d48f37cb4090e141965
+size 4992465072
models/CogVideo/CogVideoX-5b-I2V/transformer/diffusion_pytorch_model-00002-of-00003.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1e8d0c62d366b0d9cc3476d2b21ca54afbecea154d54d923da120b2ec174c7e7
+size 4985800640
models/CogVideo/CogVideoX-5b-I2V/transformer/diffusion_pytorch_model-00003-of-00003.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:da91a0051da3f39caf10944b7c9aa66b14ddeffb37a25b087c49fc1692c1a361
+size 1272025856
models/CogVideo/CogVideoX-5b-I2V/transformer/diffusion_pytorch_model.safetensors.index.json
ADDED
The diff for this file is too large to render.
models/CogVideo/CogVideoX-5b-I2V/vae/config.json
ADDED
@@ -0,0 +1,40 @@
+{
+  "_class_name": "AutoencoderKLCogVideoX",
+  "_diffusers_version": "0.32.0.dev0",
+  "act_fn": "silu",
+  "block_out_channels": [
+    128,
+    256,
+    256,
+    512
+  ],
+  "down_block_types": [
+    "CogVideoXDownBlock3D",
+    "CogVideoXDownBlock3D",
+    "CogVideoXDownBlock3D",
+    "CogVideoXDownBlock3D"
+  ],
+  "force_upcast": true,
+  "in_channels": 3,
+  "latent_channels": 16,
+  "latents_mean": null,
+  "latents_std": null,
+  "layers_per_block": 3,
+  "norm_eps": 1e-06,
+  "norm_num_groups": 32,
+  "out_channels": 3,
+  "sample_height": 480,
+  "sample_width": 720,
+  "scaling_factor": 0.7,
+  "shift_factor": null,
+  "temporal_compression_ratio": 4,
+  "up_block_types": [
+    "CogVideoXUpBlock3D",
+    "CogVideoXUpBlock3D",
+    "CogVideoXUpBlock3D",
+    "CogVideoXUpBlock3D"
+  ],
+  "use_post_quant_conv": false,
+  "use_quant_conv": false,
+  "invert_scale_latents": false
+}
models/CogVideo/CogVideoX-5b-I2V/vae/diffusion_pytorch_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a410e48d988c8224cef392b68db0654485cfd41f345f4a3a81d3e6b765bb995e
+size 862388596
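The CogVideoX-5b-I2V subfolders added above (scheduler, transformer, vae) follow the usual per-component diffusers layout. A minimal sketch (not part of the upload), assuming diffusers >= 0.31 with the CogVideoX classes available and the LFS files pulled locally:

```python
import torch
from diffusers import AutoencoderKLCogVideoX, CogVideoXDDIMScheduler, CogVideoXTransformer3DModel

root = "models/CogVideo/CogVideoX-5b-I2V"

# Each subfolder in the diff maps to one diffusers component.
scheduler = CogVideoXDDIMScheduler.from_pretrained(root, subfolder="scheduler")
transformer = CogVideoXTransformer3DModel.from_pretrained(
    root, subfolder="transformer", torch_dtype=torch.bfloat16
)
vae = AutoencoderKLCogVideoX.from_pretrained(root, subfolder="vae", torch_dtype=torch.bfloat16)

print(transformer.config.num_layers, vae.config.scaling_factor)  # 42, 0.7 per the configs above
```

The remaining pipeline parts (text encoder, tokenizer, model_index.json) are not visible in this 50-file view, so building a full image-to-video pipeline from this folder alone is not guaranteed.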
models/RMBG/BEN2/__pycache__/BEN2.cpython-310.pyc
CHANGED
Binary files a/models/RMBG/BEN2/__pycache__/BEN2.cpython-310.pyc and b/models/RMBG/BEN2/__pycache__/BEN2.cpython-310.pyc differ
models/RMBG/RMBG-2.0/__pycache__/BiRefNet_config.cpython-310.pyc
ADDED
Binary file (643 Bytes).
models/checkpoints/bopbt/FT_Epoch_latest.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b2d7ab04e9b3885c6b1991bb7a0b823129dd6e3ac078a9fd059ebd2a7ba59a95
+size 451663663
models/checkpoints/bopbt/FaceSR_512/latest_net_G.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3b3dc71f29cc98a61d9a7ccc48f329726feca19b67c2d3710b69b2deb96aef22
+size 368779463
models/checkpoints/bopbt/Setting_9_epoch_100/latest_net_G.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eaafe424c22225891f60e672f331e14521bef870e6ce490f988ce6ada4068569
+size 368738923
models/checkpoints/bopbt/mapping_Patch_Attention/latest_net_mapping_net.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:15f7a145b4eb94565d2f5988f6ca9948d04cef1599f4ca6eb99429e7d2c7d783
+size 295645915
models/checkpoints/bopbt/mapping_quality/latest_net_mapping_net.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bb193cc0c82a6f4ed1c746beec5366c8a1e4a1b8dad3c83148afd0f93814624b
+size 144549781
models/checkpoints/bopbt/mapping_scratch/latest_net_mapping_net.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a63f9e33802a262fa62b160785fa3c53166f3d1449046ad9afc742848e195578
+size 205392667
models/checkpoints/bopbtf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bb193cc0c82a6f4ed1c746beec5366c8a1e4a1b8dad3c83148afd0f93814624b
+size 144549781
models/checkpoints/juggernautXL_juggXIByRundiffusion.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:33e58e86686f6b386c526682b5da9228ead4f91d994abd4b053442dc5b42719e
+size 7105350536
models/checkpoints/kantanmix_v10.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e6c46570180b4defbf0841cfe4f491f5eaa38f48a61f85f29cf83fdf0dd82ddf
+size 2132626758
models/checkpoints/models/checkpoints/kantanmixSD15_v10.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:97d38c162efd7bd6f9fd3298019360a3dcccbc2d79da9ca095e1052000398af3
+size 505062656
models/checkpoints/tmp_795765.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9a6a04edf5ec290a89838947ca683cf53bceab73b5ab95c30635464c731ce2d8
+size 19263640
models/clip/ViT-L-14-BEST-smooth-GmP-TE-only-HF-format.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d1bc257ac78ef7cf40c145b4319e759525557609b96820831f6eea2e49da99b5
+size 323409740
models/clip_interrogator/models--timm--vit_large_patch14_clip_224.openai/refs/main
CHANGED
@@ -1 +1 @@
-
+18d0535469bb561bf468d76c1d73aa35156c922b
models/clip_interrogator/models--timm--vit_large_patch14_clip_224.openai/snapshots/18d0535469bb561bf468d76c1d73aa35156c922b/open_clip_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9ce2e8a8ebfff3793d7d375ad6d3c35cb9aebf3de7ace0fc7308accab7cd207e
+size 1710517724
models/diffusion_models/flux/STOIQONewrealityFLUXSD_F1DAlpha.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0396ef03b03a2f73ee473127fbc4b15d009ce561e03c4ba3005f92452393d163
+size 11901517328
models/diffusion_models/wan/Phantom-Wan-14B_fp8_e4m3fn.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:205c2924aadcd4e1312d6aac0b4cfba80eeea33db99419b113c10eec4810cabc
+size 15001320640
models/diffusion_models/wan/aniWan2114BFp8E4m3fn_t2v.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8a59b04fc09ef456bbce817875684c05923bdb62fbe11f248f8e037293c04925
+size 14289631824
models/face_parsing/config.json
ADDED
@@ -0,0 +1,111 @@
+{
+  "_name_or_path": "jonathandinu/face-parsing",
+  "architectures": [
+    "SegformerForSemanticSegmentation"
+  ],
+  "attention_probs_dropout_prob": 0.0,
+  "classifier_dropout_prob": 0.1,
+  "decoder_hidden_size": 768,
+  "depths": [
+    3,
+    6,
+    40,
+    3
+  ],
+  "downsampling_rates": [
+    1,
+    4,
+    8,
+    16
+  ],
+  "drop_path_rate": 0.1,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.0,
+  "hidden_sizes": [
+    64,
+    128,
+    320,
+    512
+  ],
+  "id2label": {
+    "0": "background",
+    "1": "skin",
+    "2": "nose",
+    "3": "eye_g",
+    "4": "l_eye",
+    "5": "r_eye",
+    "6": "l_brow",
+    "7": "r_brow",
+    "8": "l_ear",
+    "9": "r_ear",
+    "10": "mouth",
+    "11": "u_lip",
+    "12": "l_lip",
+    "13": "hair",
+    "14": "hat",
+    "15": "ear_r",
+    "16": "neck_l",
+    "17": "neck",
+    "18": "cloth"
+  },
+  "image_size": 224,
+  "initializer_range": 0.02,
+  "label2id": {
+    "background": 0,
+    "skin": 1,
+    "nose": 2,
+    "eye_g": 3,
+    "l_eye": 4,
+    "r_eye": 5,
+    "l_brow": 6,
+    "r_brow": 7,
+    "l_ear": 8,
+    "r_ear": 9,
+    "mouth": 10,
+    "u_lip": 11,
+    "l_lip": 12,
+    "hair": 13,
+    "hat": 14,
+    "ear_r": 15,
+    "neck_l": 16,
+    "neck": 17,
+    "cloth": 18
+  },
+  "layer_norm_eps": 1e-06,
+  "mlp_ratios": [
+    4,
+    4,
+    4,
+    4
+  ],
+  "model_type": "segformer",
+  "num_attention_heads": [
+    1,
+    2,
+    5,
+    8
+  ],
+  "num_channels": 3,
+  "num_encoder_blocks": 4,
+  "patch_sizes": [
+    7,
+    3,
+    3,
+    3
+  ],
+  "reshape_last_stage": true,
+  "semantic_loss_ignore_index": 255,
+  "sr_ratios": [
+    8,
+    4,
+    2,
+    1
+  ],
+  "strides": [
+    4,
+    2,
+    2,
+    2
+  ],
+  "transformers_version": "4.37.0.dev0"
+}
models/face_parsing/model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c2bec795a8c243db71bd95be538fd62559003566466c71237e45c99b920f4b62
+size 338580732
models/face_parsing/preprocessor_config.json
ADDED
@@ -0,0 +1,23 @@
+{
+  "do_normalize": true,
+  "do_reduce_labels": false,
+  "do_rescale": true,
+  "do_resize": true,
+  "image_mean": [
+    0.485,
+    0.456,
+    0.406
+  ],
+  "image_processor_type": "SegformerFeatureExtractor",
+  "image_std": [
+    0.229,
+    0.224,
+    0.225
+  ],
+  "resample": 2,
+  "rescale_factor": 0.00392156862745098,
+  "size": {
+    "height": 512,
+    "width": 512
+  }
+}
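The config.json and preprocessor_config.json above describe a Segformer semantic-segmentation model whose 19 output classes are the face-parsing labels listed in id2label. A minimal sketch (not part of the upload), assuming transformers and Pillow are installed and using a hypothetical input image face.jpg:

```python
import torch
from PIL import Image
from transformers import AutoImageProcessor, SegformerForSemanticSegmentation

path = "models/face_parsing"
processor = AutoImageProcessor.from_pretrained(path)   # resolves the Segformer image processor
model = SegformerForSemanticSegmentation.from_pretrained(path)

image = Image.open("face.jpg").convert("RGB")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # (1, 19, H/4, W/4)

# Per-pixel class ids follow the id2label map in config.json (0=background, 1=skin, ...).
mask = logits.argmax(dim=1)[0]
print(mask.shape, model.config.id2label[int(mask.max())])
```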
models/facedetection/shape_predictor_68_face_landmarks.dat
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fbdc2cb80eb9aa7a758672cbfdda32ba6300efe9b6e6c7a299ff7e736b11b92f
+size 99693937
models/float/.gitattributes
ADDED
@@ -0,0 +1,35 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
models/float/float.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f5cd815510908284eca7c5b9e81bdd63fc4cbef761f83bfb8fbe5e2880b2b830
+size 826739645
models/float/wav2vec-english-speech-emotion-recognition/.gitattributes
ADDED
@@ -0,0 +1,32 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
models/float/wav2vec-english-speech-emotion-recognition/README.md
ADDED
@@ -0,0 +1,86 @@
+---
+license: apache-2.0
+tags:
+- generated_from_trainer
+metrics:
+- accuracy
+model_index:
+  name: wav2vec-english-speech-emotion-recognition
+---
+# Speech Emotion Recognition By Fine-Tuning Wav2Vec 2.0
+The model is a fine-tuned version of [jonatasgrosman/wav2vec2-large-xlsr-53-english](https://huggingface.co/jonatasgrosman/wav2vec2-large-xlsr-53-english) for a Speech Emotion Recognition (SER) task.
+
+Several datasets were used to fine-tune the original model:
+- Surrey Audio-Visual Expressed Emotion [(SAVEE)](http://kahlan.eps.surrey.ac.uk/savee/Database.html) - 480 audio files from 4 male actors
+- Ryerson Audio-Visual Database of Emotional Speech and Song [(RAVDESS)](https://zenodo.org/record/1188976) - 1440 audio files from 24 professional actors (12 female, 12 male)
+- Toronto emotional speech set [(TESS)](https://tspace.library.utoronto.ca/handle/1807/24487) - 2800 audio files from 2 female actors
+
+7 labels/emotions were used as classification labels
+```python
+emotions = ['angry', 'disgust', 'fear', 'happy', 'neutral', 'sad', 'surprise']
+```
+It achieves the following results on the evaluation set:
+- Loss: 0.104075
+- Accuracy: 0.97463
+
+## Model Usage
+```bash
+pip install transformers librosa torch
+```
+```python
+from transformers import Wav2Vec2FeatureExtractor, Wav2Vec2ForCTC
+import librosa
+import torch
+
+feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("r-f/wav2vec-english-speech-emotion-recognition")
+model = Wav2Vec2ForCTC.from_pretrained("r-f/wav2vec-english-speech-emotion-recognition")
+
+def predict_emotion(audio_path):
+    audio, rate = librosa.load(audio_path, sr=16000)
+    inputs = feature_extractor(audio, sampling_rate=rate, return_tensors="pt", padding=True)
+
+    with torch.no_grad():
+        outputs = model(inputs.input_values)
+    predictions = torch.nn.functional.softmax(outputs.logits.mean(dim=1), dim=-1)  # Average over sequence length
+    predicted_label = torch.argmax(predictions, dim=-1)
+    emotion = model.config.id2label[predicted_label.item()]
+    return emotion
+
+emotion = predict_emotion("example_audio.wav")
+print(f"Predicted emotion: {emotion}")
+>> Predicted emotion: angry
+```
+
+
+## Training procedure
+### Training hyperparameters
+The following hyperparameters were used during training:
+- learning_rate: 0.0001
+- train_batch_size: 4
+- eval_batch_size: 4
+- eval_steps: 500
+- seed: 42
+- gradient_accumulation_steps: 2
+- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+- num_epochs: 4
+- max_steps: 7500
+- save_steps: 1500
+
+### Training results
+| Step | Training Loss | Validation Loss | Accuracy |
+| ---- | ------------- | --------------- | -------- |
+| 500  | 1.8124        | 1.365212        | 0.486258 |
+| 1000 | 0.8872        | 0.773145        | 0.79704  |
+| 1500 | 0.7035        | 0.574954        | 0.852008 |
+| 2000 | 0.6879        | 1.286738        | 0.775899 |
+| 2500 | 0.6498        | 0.697455        | 0.832981 |
+| 3000 | 0.5696        | 0.33724         | 0.892178 |
+| 3500 | 0.4218        | 0.307072        | 0.911205 |
+| 4000 | 0.3088        | 0.374443        | 0.930233 |
+| 4500 | 0.2688        | 0.260444        | 0.936575 |
+| 5000 | 0.2973        | 0.302985        | 0.92389  |
+| 5500 | 0.1765        | 0.165439        | 0.961945 |
+| 6000 | 0.1475        | 0.170199        | 0.961945 |
+| 6500 | 0.1274        | 0.15531         | 0.966173 |
+| 7000 | 0.0699        | 0.103882        | 0.976744 |
+| 7500 | 0.083         | 0.104075        | 0.97463  |
models/float/wav2vec-english-speech-emotion-recognition/config.json
ADDED
@@ -0,0 +1,137 @@
+{
+  "_name_or_path": "jonatasgrosman/wav2vec2-large-xlsr-53-english",
+  "processor_class": "Wav2Vec2CTCTokenizer",
+  "activation_dropout": 0.05,
+  "adapter_kernel_size": 3,
+  "adapter_stride": 2,
+  "add_adapter": false,
+  "apply_spec_augment": true,
+  "architectures": [
+    "Wav2Vec2ForCTC"
+  ],
+  "attention_dropout": 0.1,
+  "bos_token_id": 1,
+  "classifier_proj_size": 256,
+  "codevector_dim": 256,
+  "contrastive_logits_temperature": 0.1,
+  "conv_bias": true,
+  "conv_dim": [
+    512,
+    512,
+    512,
+    512,
+    512,
+    512,
+    512
+  ],
+  "conv_kernel": [
+    10,
+    3,
+    3,
+    3,
+    3,
+    2,
+    2
+  ],
+  "conv_stride": [
+    5,
+    2,
+    2,
+    2,
+    2,
+    2,
+    2
+  ],
+  "ctc_loss_reduction": "mean",
+  "ctc_zero_infinity": true,
+  "diversity_loss_weight": 0.1,
+  "do_stable_layer_norm": true,
+  "eos_token_id": 2,
+  "feat_extract_activation": "gelu",
+  "feat_extract_dropout": 0.0,
+  "feat_extract_norm": "layer",
+  "feat_proj_dropout": 0.05,
+  "feat_quantizer_dropout": 0.0,
+  "final_dropout": 0.0,
+  "finetuning_task": "wav2vec2_clf",
+  "hidden_act": "gelu",
+  "hidden_dropout": 0.05,
+  "hidden_size": 1024,
+  "id2label": {
+    "0": "angry",
+    "1": "disgust",
+    "2": "fear",
+    "3": "happy",
+    "4": "neutral",
+    "5": "sad",
+    "6": "surprise"
+  },
+  "initializer_range": 0.02,
+  "intermediate_size": 4096,
+  "label2id": {
+    "angry": 0,
+    "disgust": 1,
+    "fear": 2,
+    "happy": 3,
+    "neutral": 4,
+    "sad": 5,
+    "surprise": 6
+  },
+  "layer_norm_eps": 1e-05,
+  "layerdrop": 0.05,
+  "mask_channel_length": 10,
+  "mask_channel_min_space": 1,
+  "mask_channel_other": 0.0,
+  "mask_channel_prob": 0.0,
+  "mask_channel_selection": "static",
+  "mask_feature_length": 10,
+  "mask_feature_min_masks": 0,
+  "mask_feature_prob": 0.0,
+  "mask_time_length": 10,
+  "mask_time_min_masks": 2,
+  "mask_time_min_space": 1,
+  "mask_time_other": 0.0,
+  "mask_time_prob": 0.05,
+  "mask_time_selection": "static",
+  "model_type": "wav2vec2",
+  "num_adapter_layers": 3,
+  "num_attention_heads": 16,
+  "num_codevector_groups": 2,
+  "num_codevectors_per_group": 320,
+  "num_conv_pos_embedding_groups": 16,
+  "num_conv_pos_embeddings": 128,
+  "num_feat_extract_layers": 7,
+  "num_hidden_layers": 24,
+  "num_negatives": 100,
+  "output_hidden_size": 1024,
+  "pad_token_id": 0,
+  "pooling_mode": "mean",
+  "problem_type": "single_label_classification",
+  "proj_codevector_dim": 256,
+  "tdnn_dilation": [
+    1,
+    2,
+    3,
+    1,
+    1
+  ],
+  "tdnn_dim": [
+    512,
+    512,
+    512,
+    512,
+    1500
+  ],
+  "tdnn_kernel": [
+    5,
+    3,
+    3,
+    1,
+    1
+  ],
+  "torch_dtype": "float32",
+  "transformers_version": "4.22.1",
+  "use_weighted_layer_sum": false,
+  "vocab_size": 33,
+  "xvector_output_dim": 512
+}
models/float/wav2vec-english-speech-emotion-recognition/preprocessor_config.json
ADDED
@@ -0,0 +1,10 @@
+{
+  "do_normalize": true,
+  "feature_extractor_type": "Wav2Vec2FeatureExtractor",
+  "feature_size": 1,
+  "padding_side": "right",
+  "padding_value": 0.0,
+  "processor_class": "Wav2Vec2ProcessorWithLM",
+  "return_attention_mask": true,
+  "sampling_rate": 16000
+}
models/float/wav2vec-english-speech-emotion-recognition/pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f6470434ecf20ae93b22284ac83877984fb8765e332037c36a54df6607e3a206
+size 1266126445
models/float/wav2vec-english-speech-emotion-recognition/training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6b7a4b18e6dd098bbeba86991ea3a66623c19570bf00ab392b2b8e7e72ee8598
+size 3439
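The README above loads this emotion model from the Hub id r-f/wav2vec-english-speech-emotion-recognition; a hedged variant (not part of the upload) is to point the same calls at the local copy added in this commit, which contains the config, preprocessor, and pytorch_model.bin. The audio filename is a placeholder.

```python
import torch
import librosa
from transformers import Wav2Vec2FeatureExtractor, Wav2Vec2ForCTC

local_path = "models/float/wav2vec-english-speech-emotion-recognition"
feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(local_path)
model = Wav2Vec2ForCTC.from_pretrained(local_path)

audio, rate = librosa.load("example_audio.wav", sr=16000)  # placeholder clip, resampled to 16 kHz
inputs = feature_extractor(audio, sampling_rate=rate, return_tensors="pt", padding=True)
with torch.no_grad():
    logits = model(inputs.input_values).logits
label_id = int(torch.nn.functional.softmax(logits.mean(dim=1), dim=-1).argmax(dim=-1))
print(model.config.id2label[label_id])  # one of the 7 emotion labels from config.json
```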
models/float/wav2vec2-base-960h/.gitattributes
ADDED
@@ -0,0 +1,18 @@
+*.bin.* filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tar.gz filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+model.safetensors filter=lfs diff=lfs merge=lfs -text
models/float/wav2vec2-base-960h/README.md
ADDED
@@ -0,0 +1,128 @@
+---
+language: en
+datasets:
+- librispeech_asr
+tags:
+- audio
+- automatic-speech-recognition
+- hf-asr-leaderboard
+license: apache-2.0
+widget:
+- example_title: Librispeech sample 1
+  src: https://cdn-media.huggingface.co/speech_samples/sample1.flac
+- example_title: Librispeech sample 2
+  src: https://cdn-media.huggingface.co/speech_samples/sample2.flac
+model-index:
+- name: wav2vec2-base-960h
+  results:
+  - task:
+      name: Automatic Speech Recognition
+      type: automatic-speech-recognition
+    dataset:
+      name: LibriSpeech (clean)
+      type: librispeech_asr
+      config: clean
+      split: test
+      args:
+        language: en
+    metrics:
+    - name: Test WER
+      type: wer
+      value: 3.4
+  - task:
+      name: Automatic Speech Recognition
+      type: automatic-speech-recognition
+    dataset:
+      name: LibriSpeech (other)
+      type: librispeech_asr
+      config: other
+      split: test
+      args:
+        language: en
+    metrics:
+    - name: Test WER
+      type: wer
+      value: 8.6
+---
+
+# Wav2Vec2-Base-960h
+
+[Facebook's Wav2Vec2](https://ai.facebook.com/blog/wav2vec-20-learning-the-structure-of-speech-from-raw-audio/)
+
+The base model pretrained and fine-tuned on 960 hours of Librispeech on 16kHz sampled speech audio. When using the model
+make sure that your speech input is also sampled at 16kHz.
+
+[Paper](https://arxiv.org/abs/2006.11477)
+
+Authors: Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli
+
+**Abstract**
+
+We show for the first time that learning powerful representations from speech audio alone followed by fine-tuning on transcribed speech can outperform the best semi-supervised methods while being conceptually simpler. wav2vec 2.0 masks the speech input in the latent space and solves a contrastive task defined over a quantization of the latent representations which are jointly learned. Experiments using all labeled data of Librispeech achieve 1.8/3.3 WER on the clean/other test sets. When lowering the amount of labeled data to one hour, wav2vec 2.0 outperforms the previous state of the art on the 100 hour subset while using 100 times less labeled data. Using just ten minutes of labeled data and pre-training on 53k hours of unlabeled data still achieves 4.8/8.2 WER. This demonstrates the feasibility of speech recognition with limited amounts of labeled data.
+
+The original model can be found under https://github.com/pytorch/fairseq/tree/master/examples/wav2vec#wav2vec-20.
+
+
+# Usage
+
+To transcribe audio files the model can be used as a standalone acoustic model as follows:
+
+```python
+from transformers import Wav2Vec2Processor, Wav2Vec2ForCTC
+from datasets import load_dataset
+import torch
+
+# load model and tokenizer
+processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
+model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h")
+
+# load dummy dataset and read soundfiles
+ds = load_dataset("patrickvonplaten/librispeech_asr_dummy", "clean", split="validation")
+
+# tokenize
+input_values = processor(ds[0]["audio"]["array"], return_tensors="pt", padding="longest").input_values  # Batch size 1
+
+# retrieve logits
+logits = model(input_values).logits
+
+# take argmax and decode
+predicted_ids = torch.argmax(logits, dim=-1)
+transcription = processor.batch_decode(predicted_ids)
+```
+
+## Evaluation
+
+This code snippet shows how to evaluate **facebook/wav2vec2-base-960h** on LibriSpeech's "clean" and "other" test data.
+
+```python
+from datasets import load_dataset
+from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
+import torch
+from jiwer import wer
+
+
+librispeech_eval = load_dataset("librispeech_asr", "clean", split="test")
+
+model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h").to("cuda")
+processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
+
+def map_to_pred(batch):
+    input_values = processor(batch["audio"]["array"], return_tensors="pt", padding="longest").input_values
+    with torch.no_grad():
+        logits = model(input_values.to("cuda")).logits
+
+    predicted_ids = torch.argmax(logits, dim=-1)
+    transcription = processor.batch_decode(predicted_ids)
+    batch["transcription"] = transcription
+    return batch
+
+result = librispeech_eval.map(map_to_pred, batched=True, batch_size=1, remove_columns=["audio"])
+
+print("WER:", wer(result["text"], result["transcription"]))
+```
+
+*Result (WER)*:
+
+| "clean" | "other" |
+|---|---|
+| 3.4 | 8.6 |
models/float/wav2vec2-base-960h/config.json
ADDED
@@ -0,0 +1,77 @@
+{
+  "_name_or_path": "facebook/wav2vec2-base-960h",
+  "activation_dropout": 0.1,
+  "apply_spec_augment": true,
+  "architectures": [
+    "Wav2Vec2ForCTC"
+  ],
+  "attention_dropout": 0.1,
+  "bos_token_id": 1,
+  "codevector_dim": 256,
+  "contrastive_logits_temperature": 0.1,
+  "conv_bias": false,
+  "conv_dim": [
+    512,
+    512,
+    512,
+    512,
+    512,
+    512,
+    512
+  ],
+  "conv_kernel": [
+    10,
+    3,
+    3,
+    3,
+    3,
+    2,
+    2
+  ],
+  "conv_stride": [
+    5,
+    2,
+    2,
+    2,
+    2,
+    2,
+    2
+  ],
+  "ctc_loss_reduction": "sum",
+  "ctc_zero_infinity": false,
+  "diversity_loss_weight": 0.1,
+  "do_stable_layer_norm": false,
+  "eos_token_id": 2,
+  "feat_extract_activation": "gelu",
+  "feat_extract_dropout": 0.0,
+  "feat_extract_norm": "group",
+  "feat_proj_dropout": 0.1,
+  "feat_quantizer_dropout": 0.0,
+  "final_dropout": 0.1,
+  "gradient_checkpointing": false,
+  "hidden_act": "gelu",
+  "hidden_dropout": 0.1,
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 768,
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "layer_norm_eps": 1e-05,
+  "layerdrop": 0.1,
+  "mask_feature_length": 10,
+  "mask_feature_prob": 0.0,
+  "mask_time_length": 10,
+  "mask_time_prob": 0.05,
+  "model_type": "wav2vec2",
+  "num_attention_heads": 12,
+  "num_codevector_groups": 2,
+  "num_codevectors_per_group": 320,
+  "num_conv_pos_embedding_groups": 16,
+  "num_conv_pos_embeddings": 128,
+  "num_feat_extract_layers": 7,
+  "num_hidden_layers": 12,
+  "num_negatives": 100,
+  "pad_token_id": 0,
+  "proj_codevector_dim": 256,
+  "transformers_version": "4.7.0.dev0",
+  "vocab_size": 32
+}
|
models/float/wav2vec2-base-960h/feature_extractor_config.json
ADDED
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"do_normalize": true,
|
3 |
+
"feature_dim": 1,
|
4 |
+
"padding_side": "right",
|
5 |
+
"padding_value": 0.0,
|
6 |
+
"return_attention_mask": false,
|
7 |
+
"sampling_rate": 16000
|
8 |
+
}
|
models/float/wav2vec2-base-960h/model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8aa76ab2243c81747a1f832954586bc566090c83a0ac167df6f31f0fa917d74a
+size 377607901
models/float/wav2vec2-base-960h/preprocessor_config.json
ADDED
@@ -0,0 +1,8 @@
+{
+  "do_normalize": true,
+  "feature_size": 1,
+  "padding_side": "right",
+  "padding_value": 0.0,
+  "return_attention_mask": false,
+  "sampling_rate": 16000
+}
models/float/wav2vec2-base-960h/pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c34f9827b034a1b9141dbf6f652f8a60eda61cdf5771c9e05bfa99033c92cd96
+size 377667514
models/float/wav2vec2-base-960h/special_tokens_map.json
ADDED
@@ -0,0 +1 @@
+{"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
models/float/wav2vec2-base-960h/tf_model.h5
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:412742825972a6e2e877255ccd8b3416e618df15a7f1e5e4f736aa3632ce33b5
+size 377840624
models/float/wav2vec2-base-960h/tokenizer_config.json
ADDED
@@ -0,0 +1 @@
+{"unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "<pad>", "do_lower_case": false, "return_attention_mask": false, "do_normalize": true}