import os import numpy as np import gradio as gr import pyopenjtalk from pypinyin import lazy_pinyin from util import preprocess_input, get_tokenizer, load_pitch_dict from espnet_model_zoo.downloader import ModelDownloader from espnet2.fileio.read_text import read_label from espnet2.bin.svs_inference import SingingGenerate singer_embeddings = { "singer1 (female)": "resource/singer/singer_embedding_opencpop.npy", } langs = { "zh": 2, "jp": 1, } def gen_song(lang, tempo, texts, durs, pitchs, spk): fs = 44100 # PRETRAIN_MODEL = "espnet/aceopencpop_svs_visinger2_40singer_pretrain" # pretrain_downloaded = { # "train_config": "/data7/tyx/espnet/egs2/mixed/svs1/exp/svs_train_visinger2_spk_embed_lang_raw_phn_None_mix_all/config.yaml", # "model_file": "/data7/tyx/espnet/egs2/mixed/svs1/exp/svs_train_visinger2_spk_embed_lang_raw_phn_None_mix_all/500epoch.pth", # } if texts is None: return (fs, np.array([0.0])), "Error: No Text provided!" if durs is None: return (fs, np.array([0.0])), "Error: No Dur provided!" if pitchs is None: return (fs, np.array([0.0])), "Error: No Pitch provided!" # preprocess if lang == "zh": texts = preprocess_input(texts, "") text_list = lazy_pinyin(texts) elif lang == "jp": texts = preprocess_input(texts, " ") text_list = texts.strip().split() durs = preprocess_input(durs, " ") dur_list = durs.strip().split() pitchs = preprocess_input(pitchs, " ") pitch_list = pitchs.strip().split() if len(text_list) != len(dur_list): return (fs, np.array([0.0])), f"Error: len in text({len(text_list)}) mismatch with duration({len(dur_list)})!" if len(text_list) != len(pitch_list): return (fs, np.array([0.0])), f"Error: len in text({len(text_list)}) mismatch with pitch({len(pitch_list)})!" ## text to phoneme tokenizer = get_tokenizer(lang) sybs = [] for text in text_list: if text == "AP" or text == "SP": rev = [text] else: rev = tokenizer(text) rev = [phn + f"@{lang}" for phn in rev] if rev == False: return (fs, np.array([0.0])), f"Error: text `{text}` is invalid!" 
phns = "_".join(rev) sybs.append(phns) pitch_dict = load_pitch_dict() labels = [] notes = [] st = 0 for phns, dur, pitch in zip(sybs, dur_list, pitch_list): if pitch not in pitch_dict: return (fs, np.array([0.0])), f"Error: pitch `{pitch}` is invalid!" pitch = pitch_dict[pitch] dur = float(dur) phn_list = phns.split("_") lyric = "".join(phn_list) note = [st, st + dur, lyric, pitch, phns] st += dur notes.append(note) for phn in phn_list: labels.append(phn) phns_str = " ".join(labels) batch = { "score": ( int(tempo), notes, ), "text": phns_str, } print(batch) return (fs, np.array([0.0])), "success!" # Infer device = "cpu" # device = "cuda" if torch.cuda.is_available() else "cpu" # d = ModelDownloader() # pretrain_downloaded = d.download_and_unpack(PRETRAIN_MODEL) svs = SingingGenerate( train_config = pretrain_downloaded["train_config"], model_file = pretrain_downloaded["model_file"], device = device ) # sid = spks[spk] lid = langs[lang] spk_embed = np.load(singer_embeddings[spk]) # output_dict = svs(batch, sids=np.array([sid])) output_dict = svs(batch, lids=np.array([lid]), spembs=spk_embed) wav_info = output_dict["wav"].cpu().numpy() return (fs, wav_info), "success!" title = "Demo of Singing Voice Synthesis in Muskits-ESPnet" description = """

This is the demo page of our toolkit Muskits-ESPnet: A Comprehensive Toolkit for Singing Voice Synthesis in New Paradigm.

Singing Voice Synthesis (SVS) takes a music score as input and generates singing vocal with the voice of a specific singer. Music score contains information about tempo (singing speed), lyrics, as well as duration and pitch of each word in lyrics.

Muskits-ESPnet logo

How to use:

  1. Choose language ID: "zh" indicates lyrics input in Chinese, and "jp" indicates lyrics input in Japanese.
  2. Input tempo: tempo is a positive integer, typically ranging between 60 and 180, representing the number of beats per minute.
  3. Input lyrics, duration, pitch in equal length: lyrics should match the language setting. Each word of input should be split with " " (blankspace) or "\\n" (newline) without quotes. Examples are provided.
  4. Choose one singer
  5. Click submit button
Notice: Values outside this range may result in suboptimal generation quality!
""" article = """

References: Muskits-ESPnet paper | espnet GitHub | pretrained model

@inproceedings{wu2024muskits,
  title = {{Muskits-ESPnet}: A Comprehensive Toolkit for Singing Voice Synthesis in New Paradigm},
  author = {Yuning Wu and Jiatong Shi and Yifeng Yu and Yuxun Tang and Tao Qian and Yueqian Lin and Jionghao Han and Xinyi Bai and Shinji Watanabe and Qin Jin},
  booktitle={Proceedings of the 32nd ACM International Conference on Multimedia},
  year={2024},
}
""" # SP: silence, AP: aspirate. examples = [ ["zh", 89, "雨 淋 湿 了 SP 天 空 AP\n毁 的 SP 很 讲 究 AP", "0.23 0.16 0.36 0.16 0.07 0.28 0.5 0.21 0.3 0.12 0.12 0.25 0.5 0.48 0.34", "60 62 62 62 0 62 58 0 58 58 0 58 58 63 0", "singer1 (female)"], # ["zh", 89, "雨 淋 湿 了 SP 天 空 AP", "0.23 0.16 0.36 0.16 0.07 0.28 0.50 0.21", "C4 D4 D4 D4 rest D4 A#3 rest", "singer2 (male)"], # ["zh", 89, "雨 淋 湿 了 SP 天 空 AP", "0.23 0.16 0.36 0.16 0.07 0.28 0.50 0.21", "C4 D4 D4 D4 rest D4 Bb3 rest", "singer3 (female)"], ["jp", 152, "きっ と と べ ば そ ら ま で と ど く AP", "0.39 2.76 0.2 0.2 0.39 0.39 0.2 0.2 0.39 0.2 0.2 0.59 1.08", "64 71 68 69 71 71 69 68 66 68 69 68 0", "singer1 (female)"], ] app = gr.Interface( fn=gen_song, inputs=[ gr.Radio(label="language", choices=["zh", "jp"], value="zh"), gr.Textbox(label="Tempo"), gr.Textbox(label="Text"), gr.Textbox(label="Duration"), gr.Textbox(label="Pitch"), gr.Radio( label="Singer", choices=[ "singer1 (female)", ], value="singer1 (female)" ), ], outputs=[ gr.Audio(label="Generated Song", type="numpy"), gr.Textbox(label="Running Status"), ], title=title, description=description, article=article, examples=examples, ) app.launch()