Create app.py
app.py
ADDED
@@ -0,0 +1,107 @@
import torch
import numpy as np
from tqdm import tqdm
from model.DiffSynthSampler import DiffSynthSampler
import soundfile as sf
import pyrubberband as pyrb
from model.VQGAN import get_VQGAN
from model.diffusion import get_diffusion_model
from transformers import AutoTokenizer, ClapModel
from model.diffusion_components import linear_beta_schedule
from model.timbre_encoder_pretrain import get_timbre_encoder
from model.multimodal_model import get_multi_modal_model
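# The model.* imports are project-local modules: the VQ-GAN codec, the diffusion U-Net, the
# timbre encoder, and the multi-modal text/audio encoder. DiffSynthSampler, linear_beta_schedule,
# numpy, soundfile, and pyrubberband are imported but not referenced directly in this script.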

import gradio as gr
from webUI.natural_language_guided.gradio_webUI import GradioWebUI
from webUI.natural_language_guided.text2sound import get_text2sound_module
from webUI.natural_language_guided.sound2sound_with_text import get_sound2sound_with_text_module
from webUI.natural_language_guided.inpaint_with_text import get_inpaint_with_text_module
# from webUI.natural_language_guided.build_instrument import get_build_instrument_module
from webUI.natural_language_guided.README import get_readme_module


device = "cuda" if torch.cuda.is_available() else "cpu"
use_pretrained_CLAP = False
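# use_pretrained_CLAP switches the text encoder between the pretrained CLAP text tower and the
# fine-tuned multi-modal model loaded further below.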

# load VQ-GAN
VAE_model_name = "24_1_2024-52_4x_L_D"
modelConfig = {"in_channels": 3, "hidden_channels": [80, 160], "embedding_dim": 4, "out_channels": 3, "block_depth": 2,
               "attn_pos": [80, 160], "attn_with_skip": True,
               "num_embeddings": 8192, "commitment_cost": 0.25, "decay": 0.99,
               "norm_type": "groupnorm", "act_type": "swish", "num_groups": 16}
VAE = get_VQGAN(modelConfig, load_pretrain=True, model_name=VAE_model_name, device=device)

# load U-Net
UNet_model_name = "history/28_1_2024_CLAP_STFT_180000" if use_pretrained_CLAP else "history/28_1_2024_TE_STFT_300000"
unetConfig = {"in_dim": 4, "down_dims": [96, 96, 192, 384], "up_dims": [384, 384, 192, 96], "attn_type": "linear_add", "condition_type": "natural_language_prompt", "label_emb_dim": 512}
uNet = get_diffusion_model(unetConfig, load_pretrain=True, model_name=UNet_model_name, device=device)

# load LM
CLAP_temp = ClapModel.from_pretrained("laion/clap-htsat-unfused")  # 153,492,890 parameters
CLAP_tokenizer = AutoTokenizer.from_pretrained("laion/clap-htsat-unfused")

timbre_encoder_name = "24_1_2024_STFT"
timbre_encoder_Config = {"input_dim": 512, "feature_dim": 512, "hidden_dim": 1024, "num_instrument_classes": 1006, "num_instrument_family_classes": 11, "num_velocity_classes": 128, "num_qualities": 10, "num_layers": 3}
timbre_encoder = get_timbre_encoder(timbre_encoder_Config, load_pretrain=True, model_name=timbre_encoder_name, device=device)
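
# Choose the text encoder used for prompt conditioning: either the pretrained CLAP model directly,
# or (the default here) the fine-tuned multi-modal model built from the timbre encoder and CLAP.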
if use_pretrained_CLAP:
    text_encoder = CLAP_temp
else:
    multimodalmodel_name = "24_1_2024"
    multimodalmodel_config = {"text_feature_dim": 512, "spectrogram_feature_dim": 1024, "multi_modal_emb_dim": 512, "num_projection_layers": 2,
                              "temperature": 1.0, "dropout": 0.1, "freeze_text_encoder": False, "freeze_spectrogram_encoder": False}
    mmm = get_multi_modal_model(timbre_encoder, CLAP_temp, multimodalmodel_config, load_pretrain=True, model_name=multimodalmodel_name, device=device)

    text_encoder = mmm.to("cpu")


gradioWebUI = GradioWebUI(device, VAE, uNet, text_encoder, CLAP_tokenizer, freq_resolution=512, time_resolution=256, channels=4, timesteps=1000, squared=False,
                          VAE_scale=4, flexible_duration=True, noise_strategy="repeat", GAN_generator=None)
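# gradioWebUI bundles the loaded models with the sampling settings (resolutions, timesteps,
# VAE scale, noise strategy); each UI tab module below takes it as its first argument.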

with gr.Blocks(theme=gr.themes.Soft(), mode="dark") as demo:
    # with gr.Blocks(theme='WeixuanYuan/Soft_dark', mode="dark") as demo:
    gr.Markdown("DiffuSynth v0.2")

    reconstruction_state = gr.State(value={})
    text2sound_state = gr.State(value={})
    sound2sound_state = gr.State(value={})
    inpaint_state = gr.State(value={})
    super_resolution_state = gr.State(value={})
    virtual_instruments_state = gr.State(value={"virtual_instruments": {}})

    get_text2sound_module(gradioWebUI, text2sound_state, virtual_instruments_state)
    get_sound2sound_with_text_module(gradioWebUI, sound2sound_state, virtual_instruments_state)
    get_inpaint_with_text_module(gradioWebUI, inpaint_state, virtual_instruments_state)
    # get_build_instrument_module(gradioWebUI, virtual_instruments_state)
    get_readme_module()
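
# debug=True keeps launch() blocking and prints errors to the console; share=True additionally
# exposes the demo through a temporary public Gradio link.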
demo.launch(debug=True, share=True)