diff --git a/.gitattributes b/.gitattributes
index a6344aac8c09253b3b630fb776ae94478aa0275b..c7d9f3332a950355d5a77d85000f05e6f45435ea 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -25,7 +25,6 @@
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
-*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..fb07aa3eb740e5d6b84172cbf73d2523768269ee
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,382 @@
+## Ignore Visual Studio temporary files, build results, and
+## files generated by popular Visual Studio add-ons.
+##
+## Get latest from https://github.com/github/gitignore/blob/master/VisualStudio.gitignore
+
+# User-specific files
+*.rsuser
+*.suo
+*.user
+*.userosscache
+*.sln.docstates
+
+# User-specific files (MonoDevelop/Xamarin Studio)
+*.userprefs
+
+# Mono auto generated files
+mono_crash.*
+
+# Build results
+[Dd]ebug/
+[Dd]ebugPublic/
+[Rr]elease/
+[Rr]eleases/
+x64/
+x86/
+[Ww][Ii][Nn]32/
+[Aa][Rr][Mm]/
+[Aa][Rr][Mm]64/
+bld/
+[Bb]in/
+[Oo]bj/
+[Oo]ut/
+[Ll]og/
+[Ll]ogs/
+
+# Visual Studio 2015/2017 cache/options directory
+.vs/
+# Uncomment if you have tasks that create the project's static files in wwwroot
+#wwwroot/
+
+# Visual Studio 2017 auto generated files
+Generated\ Files/
+
+# MSTest test Results
+[Tt]est[Rr]esult*/
+[Bb]uild[Ll]og.*
+
+# NUnit
+*.VisualState.xml
+TestResult.xml
+nunit-*.xml
+
+# Build Results of an ATL Project
+[Dd]ebugPS/
+[Rr]eleasePS/
+dlldata.c
+
+# Benchmark Results
+BenchmarkDotNet.Artifacts/
+
+# .NET Core
+project.lock.json
+project.fragment.lock.json
+artifacts/
+
+# ASP.NET Scaffolding
+ScaffoldingReadMe.txt
+
+# StyleCop
+StyleCopReport.xml
+
+# Files built by Visual Studio
+*_i.c
+*_p.c
+*_h.h
+*.ilk
+*.meta
+*.obj
+*.iobj
+*.pch
+*.pdb
+*.ipdb
+*.pgc
+*.pgd
+*.rsp
+*.sbr
+*.tlb
+*.tli
+*.tlh
+*.tmp
+*.tmp_proj
+*_wpftmp.csproj
+*.log
+*.vspscc
+*.vssscc
+.builds
+*.pidb
+*.svclog
+*.scc
+
+# Chutzpah Test files
+_Chutzpah*
+
+# Visual C++ cache files
+ipch/
+*.aps
+*.ncb
+*.opendb
+*.opensdf
+*.sdf
+*.cachefile
+*.VC.db
+*.VC.VC.opendb
+
+# Visual Studio profiler
+*.psess
+*.vsp
+*.vspx
+*.sap
+
+# Visual Studio Trace Files
+*.e2e
+
+# TFS 2012 Local Workspace
+$tf/
+
+# Guidance Automation Toolkit
+*.gpState
+
+# ReSharper is a .NET coding add-in
+_ReSharper*/
+*.[Rr]e[Ss]harper
+*.DotSettings.user
+
+# TeamCity is a build add-in
+_TeamCity*
+
+# DotCover is a Code Coverage Tool
+*.dotCover
+
+# AxoCover is a Code Coverage Tool
+.axoCover/*
+!.axoCover/settings.json
+
+# Coverlet is a free, cross platform Code Coverage Tool
+coverage*.json
+coverage*.xml
+coverage*.info
+
+# Visual Studio code coverage results
+*.coverage
+*.coveragexml
+
+# NCrunch
+_NCrunch_*
+.*crunch*.local.xml
+nCrunchTemp_*
+
+# MightyMoose
+*.mm.*
+AutoTest.Net/
+
+# Web workbench (sass)
+.sass-cache/
+
+# Installshield output folder
+[Ee]xpress/
+
+# DocProject is a documentation generator add-in
+DocProject/buildhelp/
+DocProject/Help/*.HxT
+DocProject/Help/*.HxC
+DocProject/Help/*.hhc
+DocProject/Help/*.hhk
+DocProject/Help/*.hhp
+DocProject/Help/Html2
+DocProject/Help/html
+
+# Click-Once directory
+publish/
+
+# Publish Web Output
+*.[Pp]ublish.xml
+*.azurePubxml
+# Note: Comment the next line if you want to checkin your web deploy settings,
+# but database connection strings (with potential passwords) will be unencrypted
+*.pubxml
+*.publishproj
+
+# Microsoft Azure Web App publish settings. Comment the next line if you want to
+# checkin your Azure Web App publish settings, but sensitive information contained
+# in these scripts will be unencrypted
+PublishScripts/
+
+# NuGet Packages
+*.nupkg
+# NuGet Symbol Packages
+*.snupkg
+# The packages folder can be ignored because of Package Restore
+**/[Pp]ackages/*
+# except build/, which is used as an MSBuild target.
+!**/[Pp]ackages/build/
+# Uncomment if necessary however generally it will be regenerated when needed
+#!**/[Pp]ackages/repositories.config
+# NuGet v3's project.json files produces more ignorable files
+*.nuget.props
+*.nuget.targets
+
+# Microsoft Azure Build Output
+csx/
+*.build.csdef
+
+# Microsoft Azure Emulator
+ecf/
+rcf/
+
+# Windows Store app package directories and files
+AppPackages/
+BundleArtifacts/
+Package.StoreAssociation.xml
+_pkginfo.txt
+*.appx
+*.appxbundle
+*.appxupload
+
+# Visual Studio cache files
+# files ending in .cache can be ignored
+*.[Cc]ache
+# but keep track of directories ending in .cache
+!?*.[Cc]ache/
+
+# Others
+ClientBin/
+~$*
+*~
+*.dbmdl
+*.dbproj.schemaview
+*.jfm
+*.pfx
+*.publishsettings
+orleans.codegen.cs
+
+# Including strong name files can present a security risk
+# (https://github.com/github/gitignore/pull/2483#issue-259490424)
+#*.snk
+
+# Since there are multiple workflows, uncomment next line to ignore bower_components
+# (https://github.com/github/gitignore/pull/1529#issuecomment-104372622)
+#bower_components/
+
+# RIA/Silverlight projects
+Generated_Code/
+
+# Backup & report files from converting an old project file
+# to a newer Visual Studio version. Backup files are not needed,
+# because we have git ;-)
+_UpgradeReport_Files/
+Backup*/
+UpgradeLog*.XML
+UpgradeLog*.htm
+ServiceFabricBackup/
+*.rptproj.bak
+
+# SQL Server files
+*.mdf
+*.ldf
+*.ndf
+
+# Business Intelligence projects
+*.rdl.data
+*.bim.layout
+*.bim_*.settings
+*.rptproj.rsuser
+*- [Bb]ackup.rdl
+*- [Bb]ackup ([0-9]).rdl
+*- [Bb]ackup ([0-9][0-9]).rdl
+
+# Microsoft Fakes
+FakesAssemblies/
+
+# GhostDoc plugin setting file
+*.GhostDoc.xml
+
+# Node.js Tools for Visual Studio
+.ntvs_analysis.dat
+node_modules/
+
+# Visual Studio 6 build log
+*.plg
+
+# Visual Studio 6 workspace options file
+*.opt
+
+# Visual Studio 6 auto-generated workspace file (contains which files were open etc.)
+*.vbw
+
+# Visual Studio LightSwitch build output
+**/*.HTMLClient/GeneratedArtifacts
+**/*.DesktopClient/GeneratedArtifacts
+**/*.DesktopClient/ModelManifest.xml
+**/*.Server/GeneratedArtifacts
+**/*.Server/ModelManifest.xml
+_Pvt_Extensions
+
+# Paket dependency manager
+.paket/paket.exe
+paket-files/
+
+# FAKE - F# Make
+.fake/
+
+# CodeRush personal settings
+.cr/personal
+
+# Python Tools for Visual Studio (PTVS)
+__pycache__/
+
+
+# Cake - Uncomment if you are using it
+# tools/**
+# !tools/packages.config
+
+# Tabs Studio
+*.tss
+
+# Telerik's JustMock configuration file
+*.jmconfig
+
+# BizTalk build output
+*.btp.cs
+*.btm.cs
+*.odx.cs
+*.xsd.cs
+
+# OpenCover UI analysis results
+OpenCover/
+
+# Azure Stream Analytics local run output
+ASALocalRun/
+
+# MSBuild Binary and Structured Log
+*.binlog
+
+# NVidia Nsight GPU debugger configuration file
+*.nvuser
+
+# MFractors (Xamarin productivity tool) working folder
+.mfractor/
+
+# Local History for Visual Studio
+.localhistory/
+
+# BeatPulse healthcheck temp database
+healthchecksdb
+
+# Backup folder for Package Reference Convert tool in Visual Studio 2017
+MigrationBackup/
+
+# Ionide (cross platform F# VS Code tools) working folder
+.ionide/
+
+# Fody - auto-generated XML schema
+FodyWeavers.xsd
+
+# build
+build
+monotonic_align/core.c
+*.o
+*.so
+*.dll
+
+# data
+/config.json
+/*.pth
+*.wav
+/monotonic_align/monotonic_align
+/resources
+/MoeGoe.spec
+/dist/MoeGoe
+/dist
+
+.idea
\ No newline at end of file
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..c7202d4281303c431d24ad9a0e3a24a0b37517f3
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2021 Jingyi Li
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/README.md b/README.md
index 5d3bb06c883a873cad0d9079597f751cda55cdc4..7e4436da629a4e5f6aeecfab4f6dddf2dc309798 100644
--- a/README.md
+++ b/README.md
@@ -1,13 +1,16 @@
---
-title: GTA SOVITS
-emoji: 🐨
-colorFrom: purple
-colorTo: blue
-sdk: streamlit
-sdk_version: 1.25.0
+title: Sovits Models
+emoji: 🎙️
+colorFrom: gray
+colorTo: pink
+sdk: gradio
+sdk_version: 3.18.0
app_file: app.py
pinned: false
-license: openrail
+license: mit
---
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+I am just an amateur; the inference UI design was borrowed from more experienced authors.
+
+**However, if you use the models hosted here, please be sure to credit the source (that is, me: Cyber蝈蝈总 on Bilibili). That is my only wish.**
+
diff --git a/app-slice.py b/app-slice.py
new file mode 100644
index 0000000000000000000000000000000000000000..909fc3d594aa3f89074d687d21af90ea41034f5e
--- /dev/null
+++ b/app-slice.py
@@ -0,0 +1,135 @@
+import os
+import gradio as gr
+import edge_tts
+from pathlib import Path
+import inference.infer_tool as infer_tool
+import utils
+from inference.infer_tool import Svc
+import logging
+import webbrowser
+import argparse
+import asyncio
+import librosa
+import soundfile
+import gradio.processing_utils as gr_processing_utils
+logging.getLogger('numba').setLevel(logging.WARNING)
+logging.getLogger('markdown_it').setLevel(logging.WARNING)
+logging.getLogger('urllib3').setLevel(logging.WARNING)
+logging.getLogger('matplotlib').setLevel(logging.WARNING)
+
+limitation = os.getenv("SYSTEM") == "spaces" # limit audio length in huggingface spaces
+
+audio_postprocess_ori = gr.Audio.postprocess
+
+def audio_postprocess(self, y):
+ data = audio_postprocess_ori(self, y)
+ if data is None:
+ return None
+ return gr_processing_utils.encode_url_or_file_to_base64(data["name"])
+
+
+gr.Audio.postprocess = audio_postprocess
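+# create_vc_fn builds one inference callback per loaded model: in TTS mode the text is first
+# synthesized with edge-tts, otherwise a file from the "raw" folder is used; the audio is then
+# converted by slice_inference, which splits it on silence and processes each chunk.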
+def create_vc_fn(model, sid):
+ def vc_fn(input_audio, vc_transform, auto_f0, slice_db, noise_scale, pad_seconds, tts_text, tts_voice, tts_mode):
+ if tts_mode:
+ if len(tts_text) > 100 and limitation:
+ return "Text is too long", None
+ if tts_text is None or tts_voice is None:
+ return "You need to enter text and select a voice", None
+ asyncio.run(edge_tts.Communicate(tts_text, "-".join(tts_voice.split('-')[:-1])).save("tts.mp3"))
+            audio, sr = librosa.load("tts.mp3")
+            # write at the sample rate librosa actually returned, so the TTS clip is not pitch-shifted
+            soundfile.write("tts.wav", audio, sr, format="wav")
+ wav_path = "tts.wav"
+ else:
+ if input_audio is None:
+ return "You need to select an audio", None
+ raw_audio_path = f"raw/{input_audio}"
+ if "." not in raw_audio_path:
+ raw_audio_path += ".wav"
+ infer_tool.format_wav(raw_audio_path)
+ wav_path = Path(raw_audio_path).with_suffix('.wav')
+ _audio = model.slice_inference(
+ wav_path, sid, vc_transform, slice_db,
+ cluster_infer_ratio=0,
+ auto_predict_f0=auto_f0,
+ noice_scale=noise_scale,
+ pad_seconds=pad_seconds)
+ model.clear_empty()
+ return "Success", (44100, _audio)
+ return vc_fn
+
+def refresh_raw_wav():
+ return gr.Dropdown.update(choices=os.listdir("raw"))
+
+def change_to_tts_mode(tts_mode):
+ if tts_mode:
+ return gr.Audio.update(visible=False), gr.Button.update(visible=False), gr.Textbox.update(visible=True), gr.Dropdown.update(visible=True)
+ else:
+ return gr.Audio.update(visible=True), gr.Button.update(visible=True), gr.Textbox.update(visible=False), gr.Dropdown.update(visible=False)
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--device', type=str, default='cpu')
+ parser.add_argument('--api', action="store_true", default=False)
+ parser.add_argument("--share", action="store_true", default=False, help="share gradio app")
+ parser.add_argument("--colab", action="store_true", default=False, help="share gradio app")
+ args = parser.parse_args()
+ hubert_model = utils.get_hubert_model().to(args.device)
+ models = []
+ voices = []
+ tts_voice_list = asyncio.get_event_loop().run_until_complete(edge_tts.list_voices())
+ for r in tts_voice_list:
+ voices.append(f"{r['ShortName']}-{r['Gender']}")
+ raw = os.listdir("raw")
+ for f in os.listdir("models"):
+ name = f
+ model = Svc(fr"models/{f}/{f}.pth", f"models/{f}/config.json", device=args.device)
+ cover = f"models/{f}/cover.png" if os.path.exists(f"models/{f}/cover.png") else None
+ models.append((name, cover, create_vc_fn(model, name)))
+ with gr.Blocks() as app:
+ gr.Markdown(
+ "#
Sovits Models\n"
+ "## The input audio should be clean and pure voice without background music.\n"
+ "![visitor badge](https://visitor-badge.glitch.me/badge?page_id=sayashi.Sovits-Umamusume)\n\n"
+ "[Open In Colab](https://colab.research.google.com/drive/1wfsBbMzmtLflOJeqc5ZnJiLY7L239hJW?usp=share_link)"
+ " without queue and length limitation.\n\n"
+ "[Original Repo](https://github.com/svc-develop-team/so-vits-svc)\n\n"
+ "Other models:\n"
+ "[rudolf](https://huggingface.co/spaces/sayashi/sovits-rudolf)\n"
+ "[teio](https://huggingface.co/spaces/sayashi/sovits-teio)\n"
+ "[goldship](https://huggingface.co/spaces/sayashi/sovits-goldship)\n"
+ "[tannhauser](https://huggingface.co/spaces/sayashi/sovits-tannhauser)\n"
+
+ )
+ with gr.Tabs():
+ for (name, cover, vc_fn) in models:
+ with gr.TabItem(name):
+ with gr.Row():
+ gr.Markdown(
+                        f'![](file/{cover})' if cover else ""
+ )
+ with gr.Row():
+ with gr.Column():
+ with gr.Row():
+ vc_input = gr.Dropdown(label="Input audio", choices=raw)
+ vc_refresh = gr.Button("🔁", variant="primary")
+ vc_transform = gr.Number(label="vc_transform", value=0)
+ slice_db = gr.Number(label="slice_db", value=-40)
+ noise_scale = gr.Number(label="noise_scale", value=0.4)
+ pad_seconds = gr.Number(label="pad_seconds", value=0.5)
+ auto_f0 = gr.Checkbox(label="auto_f0", value=False)
+ tts_mode = gr.Checkbox(label="tts (use edge-tts as input)", value=False)
+ tts_text = gr.Textbox(visible=False,label="TTS text (100 words limitation)" if limitation else "TTS text")
+ tts_voice = gr.Dropdown(choices=voices, visible=False)
+ vc_submit = gr.Button("Generate", variant="primary")
+ with gr.Column():
+ vc_output1 = gr.Textbox(label="Output Message")
+ vc_output2 = gr.Audio(label="Output Audio")
+ vc_submit.click(vc_fn, [vc_input, vc_transform, auto_f0, slice_db, noise_scale, pad_seconds, tts_text, tts_voice, tts_mode], [vc_output1, vc_output2])
+ vc_refresh.click(refresh_raw_wav, [], [vc_input])
+ tts_mode.change(change_to_tts_mode, [tts_mode], [vc_input, vc_refresh, tts_text, tts_voice])
+ if args.colab:
+ webbrowser.open("http://127.0.0.1:7860")
+ app.queue(concurrency_count=1, api_open=args.api).launch(share=args.share)
\ No newline at end of file
diff --git a/app.py b/app.py
new file mode 100644
index 0000000000000000000000000000000000000000..7574522e730f314581476c53cd68f16bc509fab1
--- /dev/null
+++ b/app.py
@@ -0,0 +1,141 @@
+import os
+import io
+import gradio as gr
+import librosa
+import numpy as np
+import utils
+from inference.infer_tool import Svc
+import logging
+import soundfile
+import asyncio
+import argparse
+import edge_tts
+import gradio.processing_utils as gr_processing_utils
+logging.getLogger('numba').setLevel(logging.WARNING)
+logging.getLogger('markdown_it').setLevel(logging.WARNING)
+logging.getLogger('urllib3').setLevel(logging.WARNING)
+logging.getLogger('matplotlib').setLevel(logging.WARNING)
+
+limitation = os.getenv("SYSTEM") == "spaces" # limit audio length in huggingface spaces
+
+audio_postprocess_ori = gr.Audio.postprocess
+
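+# Patch Gradio's Audio.postprocess so the output audio is returned inline as a base64
+# data URI rather than as a temporary file path.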
+def audio_postprocess(self, y):
+ data = audio_postprocess_ori(self, y)
+ if data is None:
+ return None
+ return gr_processing_utils.encode_url_or_file_to_base64(data["name"])
+
+
+gr.Audio.postprocess = audio_postprocess
+def create_vc_fn(model, sid):
+ def vc_fn(input_audio, vc_transform, auto_f0, tts_text, tts_voice, tts_mode):
+ if tts_mode:
+ if len(tts_text) > 100 and limitation:
+ return "Text is too long", None
+ if tts_text is None or tts_voice is None:
+ return "You need to enter text and select a voice", None
+ asyncio.run(edge_tts.Communicate(tts_text, "-".join(tts_voice.split('-')[:-1])).save("tts.mp3"))
+ audio, sr = librosa.load("tts.mp3", sr=16000, mono=True)
+ raw_path = io.BytesIO()
+ soundfile.write(raw_path, audio, 16000, format="wav")
+ raw_path.seek(0)
+ out_audio, out_sr = model.infer(sid, vc_transform, raw_path,
+ auto_predict_f0=auto_f0,
+ )
+ return "Success", (44100, out_audio.cpu().numpy())
+ if input_audio is None:
+ return "You need to upload an audio", None
+ sampling_rate, audio = input_audio
+ duration = audio.shape[0] / sampling_rate
+ if duration > 20 and limitation:
+ return "Please upload an audio file that is less than 20 seconds. If you need to generate a longer audio file, please use Colab.", None
+ audio = (audio / np.iinfo(audio.dtype).max).astype(np.float32)
+ if len(audio.shape) > 1:
+ audio = librosa.to_mono(audio.transpose(1, 0))
+ if sampling_rate != 16000:
+ audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=16000)
+ raw_path = io.BytesIO()
+ soundfile.write(raw_path, audio, 16000, format="wav")
+ raw_path.seek(0)
+ out_audio, out_sr = model.infer(sid, vc_transform, raw_path,
+ auto_predict_f0=auto_f0,
+ )
+ return "Success", (44100, out_audio.cpu().numpy())
+ return vc_fn
+
+def change_to_tts_mode(tts_mode):
+ if tts_mode:
+ return gr.Audio.update(visible=False), gr.Textbox.update(visible=True), gr.Dropdown.update(visible=True), gr.Checkbox.update(value=True)
+ else:
+ return gr.Audio.update(visible=True), gr.Textbox.update(visible=False), gr.Dropdown.update(visible=False), gr.Checkbox.update(value=False)
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--device', type=str, default='cpu')
+ parser.add_argument('--api', action="store_true", default=False)
+ parser.add_argument("--share", action="store_true", default=False, help="share gradio app")
+ args = parser.parse_args()
+ hubert_model = utils.get_hubert_model().to(args.device)
+ models = []
+    # others = {
+    #     "rudolf": "https://huggingface.co/spaces/sayashi/sovits-rudolf",
+    #     "teio": "https://huggingface.co/spaces/sayashi/sovits-teio",
+    #     "goldship": "https://huggingface.co/spaces/sayashi/sovits-goldship",
+    #     "tannhauser": "https://huggingface.co/spaces/sayashi/sovits-tannhauser"
+    # }
+    others = {}  # kept empty; restore the dict above to re-enable the external model tabs below
+ voices = []
+ tts_voice_list = asyncio.get_event_loop().run_until_complete(edge_tts.list_voices())
+ for r in tts_voice_list:
+ voices.append(f"{r['ShortName']}-{r['Gender']}")
+ for f in os.listdir("models"):
+ name = f
+ model = Svc(fr"models/{f}/{f}.pth", f"models/{f}/config_{f}.json", device=args.device)
+ cover = f"models/{f}/cover.png" if os.path.exists(f"models/{f}/cover.png") else None
+ models.append((name, cover, create_vc_fn(model, name)))
+ with gr.Blocks() as app:
+ gr.Markdown(
+ "# Sovits Models\n"
+ "## The input audio should be clean and pure voice without background music.\n"
+ "![visitor badge](https://visitor-badge.glitch.me/badge?page_id=sayashi.Sovits-Umamusume)\n\n"
+ "[![image](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1wfsBbMzmtLflOJeqc5ZnJiLY7L239hJW?usp=share_link)\n\n"
+ "[![Duplicate this Space](https://huggingface.co/datasets/huggingface/badges/raw/main/duplicate-this-space-sm-dark.svg)](https://huggingface.co/spaces/sayashi/sovits-models?duplicate=true)\n\n"
+ "[![Original Repo](https://badgen.net/badge/icon/github?icon=github&label=Original%20Repo)](https://github.com/svc-develop-team/so-vits-svc)"
+
+ )
+ with gr.Tabs():
+ for (name, cover, vc_fn) in models:
+ with gr.TabItem(name):
+ with gr.Row():
+ gr.Markdown(
+                        f'![](file/{cover})' if cover else ""
+ )
+ with gr.Row():
+ with gr.Column():
+ vc_input = gr.Audio(label="Input audio"+' (less than 20 seconds)' if limitation else '')
+ vc_transform = gr.Number(label="vc_transform", value=0)
+ auto_f0 = gr.Checkbox(label="auto_f0", value=False)
+ tts_mode = gr.Checkbox(label="tts (use edge-tts as input)", value=False)
+ tts_text = gr.Textbox(visible=False, label="TTS text (100 words limitation)" if limitation else "TTS text")
+ tts_voice = gr.Dropdown(choices=voices, visible=False)
+ vc_submit = gr.Button("Generate", variant="primary")
+ with gr.Column():
+ vc_output1 = gr.Textbox(label="Output Message")
+ vc_output2 = gr.Audio(label="Output Audio")
+ vc_submit.click(vc_fn, [vc_input, vc_transform, auto_f0, tts_text, tts_voice, tts_mode], [vc_output1, vc_output2])
+ tts_mode.change(change_to_tts_mode, [tts_mode], [vc_input, tts_text, tts_voice, auto_f0])
+ for category, link in others.items():
+ with gr.TabItem(category):
+ gr.Markdown(
+                    f'''
+                    [Click to Go]({link})
+                    '''
+ )
+ app.queue(concurrency_count=1, api_open=args.api).launch(share=args.share)
diff --git a/cluster/__init__.py b/cluster/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..f1b9bde04e73e9218a5d534227caa4c25332f424
--- /dev/null
+++ b/cluster/__init__.py
@@ -0,0 +1,29 @@
+import numpy as np
+import torch
+from sklearn.cluster import KMeans
+
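+# The cluster checkpoint is a dict mapping speaker name to the minimal KMeans attributes
+# (n_features_in_, _n_threads, cluster_centers_); rebuild sklearn KMeans objects from it.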
+def get_cluster_model(ckpt_path):
+ checkpoint = torch.load(ckpt_path)
+ kmeans_dict = {}
+ for spk, ckpt in checkpoint.items():
+ km = KMeans(ckpt["n_features_in_"])
+ km.__dict__["n_features_in_"] = ckpt["n_features_in_"]
+ km.__dict__["_n_threads"] = ckpt["_n_threads"]
+ km.__dict__["cluster_centers_"] = ckpt["cluster_centers_"]
+ kmeans_dict[spk] = km
+ return kmeans_dict
+
+def get_cluster_result(model, x, speaker):
+ """
+ x: np.array [t, 256]
+ return cluster class result
+ """
+ return model[speaker].predict(x)
+
+def get_cluster_center_result(model, x,speaker):
+ """x: np.array [t, 256]"""
+ predict = model[speaker].predict(x)
+ return model[speaker].cluster_centers_[predict]
+
+def get_center(model, x,speaker):
+ return model[speaker].cluster_centers_[x]
diff --git a/cluster/train_cluster.py b/cluster/train_cluster.py
new file mode 100644
index 0000000000000000000000000000000000000000..4ac025d400414226e66849407f477ae786c3d5d3
--- /dev/null
+++ b/cluster/train_cluster.py
@@ -0,0 +1,89 @@
+import os
+from glob import glob
+from pathlib import Path
+import torch
+import logging
+import argparse
+import numpy as np
+from sklearn.cluster import KMeans, MiniBatchKMeans
+import tqdm
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
+import time
+import random
+
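+# Fit a (MiniBatch)KMeans over all hubert ".soft.pt" features of one speaker and return
+# only the attributes needed at inference time (see cluster/__init__.py).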
+def train_cluster(in_dir, n_clusters, use_minibatch=True, verbose=False):
+
+ logger.info(f"Loading features from {in_dir}")
+ features = []
+ nums = 0
+ for path in tqdm.tqdm(in_dir.glob("*.soft.pt")):
+ features.append(torch.load(path).squeeze(0).numpy().T)
+ # print(features[-1].shape)
+ features = np.concatenate(features, axis=0)
+ print(nums, features.nbytes/ 1024**2, "MB , shape:",features.shape, features.dtype)
+ features = features.astype(np.float32)
+ logger.info(f"Clustering features of shape: {features.shape}")
+ t = time.time()
+ if use_minibatch:
+ kmeans = MiniBatchKMeans(n_clusters=n_clusters,verbose=verbose, batch_size=4096, max_iter=80).fit(features)
+ else:
+ kmeans = KMeans(n_clusters=n_clusters,verbose=verbose).fit(features)
+ print(time.time()-t, "s")
+
+ x = {
+ "n_features_in_": kmeans.n_features_in_,
+ "_n_threads": kmeans._n_threads,
+ "cluster_centers_": kmeans.cluster_centers_,
+ }
+ print("end")
+
+ return x
+
+
+if __name__ == "__main__":
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--dataset', type=Path, default="./dataset/44k",
+ help='path of training data directory')
+ parser.add_argument('--output', type=Path, default="logs/44k",
+ help='path of model output directory')
+
+ args = parser.parse_args()
+
+ checkpoint_dir = args.output
+ dataset = args.dataset
+ n_clusters = 10000
+
+ ckpt = {}
+ for spk in os.listdir(dataset):
+ if os.path.isdir(dataset/spk):
+ print(f"train kmeans for {spk}...")
+ in_dir = dataset/spk
+ x = train_cluster(in_dir, n_clusters, verbose=False)
+ ckpt[spk] = x
+
+ checkpoint_path = checkpoint_dir / f"kmeans_{n_clusters}.pt"
+ checkpoint_path.parent.mkdir(exist_ok=True, parents=True)
+ torch.save(
+ ckpt,
+ checkpoint_path,
+ )
+
+
+ # import cluster
+ # for spk in tqdm.tqdm(os.listdir("dataset")):
+ # if os.path.isdir(f"dataset/{spk}"):
+ # print(f"start kmeans inference for {spk}...")
+ # for feature_path in tqdm.tqdm(glob(f"dataset/{spk}/*.discrete.npy", recursive=True)):
+ # mel_path = feature_path.replace(".discrete.npy",".mel.npy")
+ # mel_spectrogram = np.load(mel_path)
+ # feature_len = mel_spectrogram.shape[-1]
+ # c = np.load(feature_path)
+ # c = utils.tools.repeat_expand_2d(torch.FloatTensor(c), feature_len).numpy()
+ # feature = c.T
+ # feature_class = cluster.get_cluster_result(feature, spk)
+ # np.save(feature_path.replace(".discrete.npy", ".discrete_class.npy"), feature_class)
+
+
diff --git a/configs/config.json b/configs/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/data_utils.py b/data_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..7c76fd1c3a45b8304d916161718c7763874f3e35
--- /dev/null
+++ b/data_utils.py
@@ -0,0 +1,155 @@
+import time
+import os
+import random
+import numpy as np
+import torch
+import torch.utils.data
+
+import modules.commons as commons
+import utils
+from modules.mel_processing import spectrogram_torch, spec_to_mel_torch
+from utils import load_wav_to_torch, load_filepaths_and_text
+
+# import h5py
+
+
+"""Multi speaker version"""
+
+
+class TextAudioSpeakerLoader(torch.utils.data.Dataset):
+ """
+ 1) loads audio, speaker_id, text pairs
+ 2) normalizes text and converts them to sequences of integers
+ 3) computes spectrograms from audio files.
+ """
+
+ def __init__(self, audiopaths, hparams, all_in_mem: bool = False):
+ self.audiopaths = load_filepaths_and_text(audiopaths)
+ self.max_wav_value = hparams.data.max_wav_value
+ self.sampling_rate = hparams.data.sampling_rate
+ self.filter_length = hparams.data.filter_length
+ self.hop_length = hparams.data.hop_length
+ self.win_length = hparams.data.win_length
+ self.sampling_rate = hparams.data.sampling_rate
+ self.use_sr = hparams.train.use_sr
+ self.spec_len = hparams.train.max_speclen
+ self.spk_map = hparams.spk
+
+ random.seed(1234)
+ random.shuffle(self.audiopaths)
+
+ self.all_in_mem = all_in_mem
+ if self.all_in_mem:
+ self.cache = [self.get_audio(p[0]) for p in self.audiopaths]
+
+ def get_audio(self, filename):
+ filename = filename.replace("\\", "/")
+ audio, sampling_rate = load_wav_to_torch(filename)
+ if sampling_rate != self.sampling_rate:
+ raise ValueError("{} SR doesn't match target {} SR".format(
+ sampling_rate, self.sampling_rate))
+ audio_norm = audio / self.max_wav_value
+ audio_norm = audio_norm.unsqueeze(0)
+ spec_filename = filename.replace(".wav", ".spec.pt")
+
+ # Ideally, all data generated after Mar 25 should have .spec.pt
+ if os.path.exists(spec_filename):
+ spec = torch.load(spec_filename)
+ else:
+ spec = spectrogram_torch(audio_norm, self.filter_length,
+ self.sampling_rate, self.hop_length, self.win_length,
+ center=False)
+ spec = torch.squeeze(spec, 0)
+ torch.save(spec, spec_filename)
+
+ spk = filename.split("/")[-2]
+ spk = torch.LongTensor([self.spk_map[spk]])
+
+ f0 = np.load(filename + ".f0.npy")
+ f0, uv = utils.interpolate_f0(f0)
+ f0 = torch.FloatTensor(f0)
+ uv = torch.FloatTensor(uv)
+
+ c = torch.load(filename+ ".soft.pt")
+ c = utils.repeat_expand_2d(c.squeeze(0), f0.shape[0])
+
+
+ lmin = min(c.size(-1), spec.size(-1))
+ assert abs(c.size(-1) - spec.size(-1)) < 3, (c.size(-1), spec.size(-1), f0.shape, filename)
+ assert abs(audio_norm.shape[1]-lmin * self.hop_length) < 3 * self.hop_length
+ spec, c, f0, uv = spec[:, :lmin], c[:, :lmin], f0[:lmin], uv[:lmin]
+ audio_norm = audio_norm[:, :lmin * self.hop_length]
+
+ return c, f0, spec, audio_norm, spk, uv
+
+ def random_slice(self, c, f0, spec, audio_norm, spk, uv):
+ # if spec.shape[1] < 30:
+ # print("skip too short audio:", filename)
+ # return None
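+        # Randomly crop utterances longer than 800 spectrogram frames down to 790 frames
+        # (and the matching waveform span) to bound the memory used per training sample.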
+ if spec.shape[1] > 800:
+ start = random.randint(0, spec.shape[1]-800)
+ end = start + 790
+ spec, c, f0, uv = spec[:, start:end], c[:, start:end], f0[start:end], uv[start:end]
+ audio_norm = audio_norm[:, start * self.hop_length : end * self.hop_length]
+
+ return c, f0, spec, audio_norm, spk, uv
+
+ def __getitem__(self, index):
+ if self.all_in_mem:
+ return self.random_slice(*self.cache[index])
+ else:
+ return self.random_slice(*self.get_audio(self.audiopaths[index][0]))
+
+ def __len__(self):
+ return len(self.audiopaths)
+
+
+class TextAudioCollate:
+
+ def __call__(self, batch):
+ batch = [b for b in batch if b is not None]
+
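+        # order batch items by content length (number of frames), longest first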
+ input_lengths, ids_sorted_decreasing = torch.sort(
+ torch.LongTensor([x[0].shape[1] for x in batch]),
+ dim=0, descending=True)
+
+ max_c_len = max([x[0].size(1) for x in batch])
+ max_wav_len = max([x[3].size(1) for x in batch])
+
+ lengths = torch.LongTensor(len(batch))
+
+ c_padded = torch.FloatTensor(len(batch), batch[0][0].shape[0], max_c_len)
+ f0_padded = torch.FloatTensor(len(batch), max_c_len)
+ spec_padded = torch.FloatTensor(len(batch), batch[0][2].shape[0], max_c_len)
+ wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len)
+ spkids = torch.LongTensor(len(batch), 1)
+ uv_padded = torch.FloatTensor(len(batch), max_c_len)
+
+ c_padded.zero_()
+ spec_padded.zero_()
+ f0_padded.zero_()
+ wav_padded.zero_()
+ uv_padded.zero_()
+
+ for i in range(len(ids_sorted_decreasing)):
+ row = batch[ids_sorted_decreasing[i]]
+
+ c = row[0]
+ c_padded[i, :, :c.size(1)] = c
+ lengths[i] = c.size(1)
+
+ f0 = row[1]
+ f0_padded[i, :f0.size(0)] = f0
+
+ spec = row[2]
+ spec_padded[i, :, :spec.size(1)] = spec
+
+ wav = row[3]
+ wav_padded[i, :, :wav.size(1)] = wav
+
+ spkids[i, 0] = row[4]
+
+ uv = row[5]
+ uv_padded[i, :uv.size(0)] = uv
+
+ return c_padded, f0_padded, spec_padded, wav_padded, spkids, lengths, uv_padded
diff --git a/hubert/__init__.py b/hubert/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/hubert/checkpoint_best_legacy_500.pt b/hubert/checkpoint_best_legacy_500.pt
new file mode 100644
index 0000000000000000000000000000000000000000..9a2f13fb9c7047dff746e2d5d88c0d0a5aecf643
--- /dev/null
+++ b/hubert/checkpoint_best_legacy_500.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:60d936ec5a566776fc392e69ad8b630d14eb588111233fe313436e200a7b187b
+size 1330114945
diff --git a/hubert/hubert_model.py b/hubert/hubert_model.py
new file mode 100644
index 0000000000000000000000000000000000000000..7fb642d89b07ca60792debab18e3454f52d8f357
--- /dev/null
+++ b/hubert/hubert_model.py
@@ -0,0 +1,222 @@
+import copy
+import random
+from typing import Optional, Tuple
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as t_func
+from torch.nn.modules.utils import consume_prefix_in_state_dict_if_present
+
+
+class Hubert(nn.Module):
+ def __init__(self, num_label_embeddings: int = 100, mask: bool = True):
+ super().__init__()
+ self._mask = mask
+ self.feature_extractor = FeatureExtractor()
+ self.feature_projection = FeatureProjection()
+ self.positional_embedding = PositionalConvEmbedding()
+ self.norm = nn.LayerNorm(768)
+ self.dropout = nn.Dropout(0.1)
+ self.encoder = TransformerEncoder(
+ nn.TransformerEncoderLayer(
+ 768, 12, 3072, activation="gelu", batch_first=True
+ ),
+ 12,
+ )
+ self.proj = nn.Linear(768, 256)
+
+ self.masked_spec_embed = nn.Parameter(torch.FloatTensor(768).uniform_())
+ self.label_embedding = nn.Embedding(num_label_embeddings, 256)
+
+ def mask(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
+ mask = None
+ if self.training and self._mask:
+ mask = _compute_mask((x.size(0), x.size(1)), 0.8, 10, x.device, 2)
+ x[mask] = self.masked_spec_embed.to(x.dtype)
+ return x, mask
+
+ def encode(
+ self, x: torch.Tensor, layer: Optional[int] = None
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
+ x = self.feature_extractor(x)
+ x = self.feature_projection(x.transpose(1, 2))
+ x, mask = self.mask(x)
+ x = x + self.positional_embedding(x)
+ x = self.dropout(self.norm(x))
+ x = self.encoder(x, output_layer=layer)
+ return x, mask
+
+ def logits(self, x: torch.Tensor) -> torch.Tensor:
+ logits = torch.cosine_similarity(
+ x.unsqueeze(2),
+ self.label_embedding.weight.unsqueeze(0).unsqueeze(0),
+ dim=-1,
+ )
+ return logits / 0.1
+
+ def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
+ x, mask = self.encode(x)
+ x = self.proj(x)
+ logits = self.logits(x)
+ return logits, mask
+
+
+class HubertSoft(Hubert):
+ def __init__(self):
+ super().__init__()
+
+ @torch.inference_mode()
+ def units(self, wav: torch.Tensor) -> torch.Tensor:
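+        # pad by (400 - 320) / 2 samples on each side so the 400-sample HuBERT windows
+        # (320-sample hop) stay centered on the input waveform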
+ wav = t_func.pad(wav, ((400 - 320) // 2, (400 - 320) // 2))
+ x, _ = self.encode(wav)
+ return self.proj(x)
+
+
+class FeatureExtractor(nn.Module):
+ def __init__(self):
+ super().__init__()
+ self.conv0 = nn.Conv1d(1, 512, 10, 5, bias=False)
+ self.norm0 = nn.GroupNorm(512, 512)
+ self.conv1 = nn.Conv1d(512, 512, 3, 2, bias=False)
+ self.conv2 = nn.Conv1d(512, 512, 3, 2, bias=False)
+ self.conv3 = nn.Conv1d(512, 512, 3, 2, bias=False)
+ self.conv4 = nn.Conv1d(512, 512, 3, 2, bias=False)
+ self.conv5 = nn.Conv1d(512, 512, 2, 2, bias=False)
+ self.conv6 = nn.Conv1d(512, 512, 2, 2, bias=False)
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ x = t_func.gelu(self.norm0(self.conv0(x)))
+ x = t_func.gelu(self.conv1(x))
+ x = t_func.gelu(self.conv2(x))
+ x = t_func.gelu(self.conv3(x))
+ x = t_func.gelu(self.conv4(x))
+ x = t_func.gelu(self.conv5(x))
+ x = t_func.gelu(self.conv6(x))
+ return x
+
+
+class FeatureProjection(nn.Module):
+ def __init__(self):
+ super().__init__()
+ self.norm = nn.LayerNorm(512)
+ self.projection = nn.Linear(512, 768)
+ self.dropout = nn.Dropout(0.1)
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ x = self.norm(x)
+ x = self.projection(x)
+ x = self.dropout(x)
+ return x
+
+
+class PositionalConvEmbedding(nn.Module):
+ def __init__(self):
+ super().__init__()
+ self.conv = nn.Conv1d(
+ 768,
+ 768,
+ kernel_size=128,
+ padding=128 // 2,
+ groups=16,
+ )
+ self.conv = nn.utils.weight_norm(self.conv, name="weight", dim=2)
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ x = self.conv(x.transpose(1, 2))
+ x = t_func.gelu(x[:, :, :-1])
+ return x.transpose(1, 2)
+
+
+class TransformerEncoder(nn.Module):
+ def __init__(
+ self, encoder_layer: nn.TransformerEncoderLayer, num_layers: int
+ ) -> None:
+ super(TransformerEncoder, self).__init__()
+ self.layers = nn.ModuleList(
+ [copy.deepcopy(encoder_layer) for _ in range(num_layers)]
+ )
+ self.num_layers = num_layers
+
+ def forward(
+ self,
+ src: torch.Tensor,
+ mask: torch.Tensor = None,
+ src_key_padding_mask: torch.Tensor = None,
+ output_layer: Optional[int] = None,
+ ) -> torch.Tensor:
+ output = src
+ for layer in self.layers[:output_layer]:
+ output = layer(
+ output, src_mask=mask, src_key_padding_mask=src_key_padding_mask
+ )
+ return output
+
+
+def _compute_mask(
+ shape: Tuple[int, int],
+ mask_prob: float,
+ mask_length: int,
+ device: torch.device,
+ min_masks: int = 0,
+) -> torch.Tensor:
+ batch_size, sequence_length = shape
+
+ if mask_length < 1:
+ raise ValueError("`mask_length` has to be bigger than 0.")
+
+ if mask_length > sequence_length:
+ raise ValueError(
+ f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length} and `sequence_length`: {sequence_length}`"
+ )
+
+ # compute number of masked spans in batch
+ num_masked_spans = int(mask_prob * sequence_length / mask_length + random.random())
+ num_masked_spans = max(num_masked_spans, min_masks)
+
+ # make sure num masked indices <= sequence_length
+ if num_masked_spans * mask_length > sequence_length:
+ num_masked_spans = sequence_length // mask_length
+
+ # SpecAugment mask to fill
+ mask = torch.zeros((batch_size, sequence_length), device=device, dtype=torch.bool)
+
+ # uniform distribution to sample from, make sure that offset samples are < sequence_length
+ uniform_dist = torch.ones(
+ (batch_size, sequence_length - (mask_length - 1)), device=device
+ )
+
+ # get random indices to mask
+ mask_indices = torch.multinomial(uniform_dist, num_masked_spans)
+
+ # expand masked indices to masked spans
+ mask_indices = (
+ mask_indices.unsqueeze(dim=-1)
+ .expand((batch_size, num_masked_spans, mask_length))
+ .reshape(batch_size, num_masked_spans * mask_length)
+ )
+ offsets = (
+ torch.arange(mask_length, device=device)[None, None, :]
+ .expand((batch_size, num_masked_spans, mask_length))
+ .reshape(batch_size, num_masked_spans * mask_length)
+ )
+ mask_idxs = mask_indices + offsets
+
+ # scatter indices to mask
+ mask = mask.scatter(1, mask_idxs, True)
+
+ return mask
+
+
+def hubert_soft(
+ path: str,
+) -> HubertSoft:
+ r"""HuBERT-Soft from `"A Comparison of Discrete and Soft Speech Units for Improved Voice Conversion"`.
+ Args:
+ path (str): path of a pretrained model
+ """
+ hubert = HubertSoft()
+ checkpoint = torch.load(path)
+ consume_prefix_in_state_dict_if_present(checkpoint, "module.")
+ hubert.load_state_dict(checkpoint)
+ hubert.eval()
+ return hubert
diff --git a/hubert/hubert_model_onnx.py b/hubert/hubert_model_onnx.py
new file mode 100644
index 0000000000000000000000000000000000000000..d18f3c2a0fc29592a573a9780308d38f059640b9
--- /dev/null
+++ b/hubert/hubert_model_onnx.py
@@ -0,0 +1,217 @@
+import copy
+import random
+from typing import Optional, Tuple
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as t_func
+from torch.nn.modules.utils import consume_prefix_in_state_dict_if_present
+
+
+class Hubert(nn.Module):
+ def __init__(self, num_label_embeddings: int = 100, mask: bool = True):
+ super().__init__()
+ self._mask = mask
+ self.feature_extractor = FeatureExtractor()
+ self.feature_projection = FeatureProjection()
+ self.positional_embedding = PositionalConvEmbedding()
+ self.norm = nn.LayerNorm(768)
+ self.dropout = nn.Dropout(0.1)
+ self.encoder = TransformerEncoder(
+ nn.TransformerEncoderLayer(
+ 768, 12, 3072, activation="gelu", batch_first=True
+ ),
+ 12,
+ )
+ self.proj = nn.Linear(768, 256)
+
+ self.masked_spec_embed = nn.Parameter(torch.FloatTensor(768).uniform_())
+ self.label_embedding = nn.Embedding(num_label_embeddings, 256)
+
+ def mask(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
+ mask = None
+ if self.training and self._mask:
+ mask = _compute_mask((x.size(0), x.size(1)), 0.8, 10, x.device, 2)
+ x[mask] = self.masked_spec_embed.to(x.dtype)
+ return x, mask
+
+ def encode(
+ self, x: torch.Tensor, layer: Optional[int] = None
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
+ x = self.feature_extractor(x)
+ x = self.feature_projection(x.transpose(1, 2))
+ x, mask = self.mask(x)
+ x = x + self.positional_embedding(x)
+ x = self.dropout(self.norm(x))
+ x = self.encoder(x, output_layer=layer)
+ return x, mask
+
+ def logits(self, x: torch.Tensor) -> torch.Tensor:
+ logits = torch.cosine_similarity(
+ x.unsqueeze(2),
+ self.label_embedding.weight.unsqueeze(0).unsqueeze(0),
+ dim=-1,
+ )
+ return logits / 0.1
+
+
+class HubertSoft(Hubert):
+ def __init__(self):
+ super().__init__()
+
+ def units(self, wav: torch.Tensor) -> torch.Tensor:
+ wav = t_func.pad(wav, ((400 - 320) // 2, (400 - 320) // 2))
+ x, _ = self.encode(wav)
+ return self.proj(x)
+
+ def forward(self, x):
+ return self.units(x)
+
+class FeatureExtractor(nn.Module):
+ def __init__(self):
+ super().__init__()
+ self.conv0 = nn.Conv1d(1, 512, 10, 5, bias=False)
+ self.norm0 = nn.GroupNorm(512, 512)
+ self.conv1 = nn.Conv1d(512, 512, 3, 2, bias=False)
+ self.conv2 = nn.Conv1d(512, 512, 3, 2, bias=False)
+ self.conv3 = nn.Conv1d(512, 512, 3, 2, bias=False)
+ self.conv4 = nn.Conv1d(512, 512, 3, 2, bias=False)
+ self.conv5 = nn.Conv1d(512, 512, 2, 2, bias=False)
+ self.conv6 = nn.Conv1d(512, 512, 2, 2, bias=False)
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ x = t_func.gelu(self.norm0(self.conv0(x)))
+ x = t_func.gelu(self.conv1(x))
+ x = t_func.gelu(self.conv2(x))
+ x = t_func.gelu(self.conv3(x))
+ x = t_func.gelu(self.conv4(x))
+ x = t_func.gelu(self.conv5(x))
+ x = t_func.gelu(self.conv6(x))
+ return x
+
+
+class FeatureProjection(nn.Module):
+ def __init__(self):
+ super().__init__()
+ self.norm = nn.LayerNorm(512)
+ self.projection = nn.Linear(512, 768)
+ self.dropout = nn.Dropout(0.1)
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ x = self.norm(x)
+ x = self.projection(x)
+ x = self.dropout(x)
+ return x
+
+
+class PositionalConvEmbedding(nn.Module):
+ def __init__(self):
+ super().__init__()
+ self.conv = nn.Conv1d(
+ 768,
+ 768,
+ kernel_size=128,
+ padding=128 // 2,
+ groups=16,
+ )
+ self.conv = nn.utils.weight_norm(self.conv, name="weight", dim=2)
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ x = self.conv(x.transpose(1, 2))
+ x = t_func.gelu(x[:, :, :-1])
+ return x.transpose(1, 2)
+
+
+class TransformerEncoder(nn.Module):
+ def __init__(
+ self, encoder_layer: nn.TransformerEncoderLayer, num_layers: int
+ ) -> None:
+ super(TransformerEncoder, self).__init__()
+ self.layers = nn.ModuleList(
+ [copy.deepcopy(encoder_layer) for _ in range(num_layers)]
+ )
+ self.num_layers = num_layers
+
+ def forward(
+ self,
+ src: torch.Tensor,
+ mask: torch.Tensor = None,
+ src_key_padding_mask: torch.Tensor = None,
+ output_layer: Optional[int] = None,
+ ) -> torch.Tensor:
+ output = src
+ for layer in self.layers[:output_layer]:
+ output = layer(
+ output, src_mask=mask, src_key_padding_mask=src_key_padding_mask
+ )
+ return output
+
+
+def _compute_mask(
+ shape: Tuple[int, int],
+ mask_prob: float,
+ mask_length: int,
+ device: torch.device,
+ min_masks: int = 0,
+) -> torch.Tensor:
+ batch_size, sequence_length = shape
+
+ if mask_length < 1:
+ raise ValueError("`mask_length` has to be bigger than 0.")
+
+ if mask_length > sequence_length:
+ raise ValueError(
+ f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length} and `sequence_length`: {sequence_length}`"
+ )
+
+ # compute number of masked spans in batch
+ num_masked_spans = int(mask_prob * sequence_length / mask_length + random.random())
+ num_masked_spans = max(num_masked_spans, min_masks)
+
+ # make sure num masked indices <= sequence_length
+ if num_masked_spans * mask_length > sequence_length:
+ num_masked_spans = sequence_length // mask_length
+
+ # SpecAugment mask to fill
+ mask = torch.zeros((batch_size, sequence_length), device=device, dtype=torch.bool)
+
+ # uniform distribution to sample from, make sure that offset samples are < sequence_length
+ uniform_dist = torch.ones(
+ (batch_size, sequence_length - (mask_length - 1)), device=device
+ )
+
+ # get random indices to mask
+ mask_indices = torch.multinomial(uniform_dist, num_masked_spans)
+
+ # expand masked indices to masked spans
+ mask_indices = (
+ mask_indices.unsqueeze(dim=-1)
+ .expand((batch_size, num_masked_spans, mask_length))
+ .reshape(batch_size, num_masked_spans * mask_length)
+ )
+ offsets = (
+ torch.arange(mask_length, device=device)[None, None, :]
+ .expand((batch_size, num_masked_spans, mask_length))
+ .reshape(batch_size, num_masked_spans * mask_length)
+ )
+ mask_idxs = mask_indices + offsets
+
+ # scatter indices to mask
+ mask = mask.scatter(1, mask_idxs, True)
+
+ return mask
+
+
+def hubert_soft(
+ path: str,
+) -> HubertSoft:
+ r"""HuBERT-Soft from `"A Comparison of Discrete and Soft Speech Units for Improved Voice Conversion"`.
+ Args:
+ path (str): path of a pretrained model
+ """
+ hubert = HubertSoft()
+ checkpoint = torch.load(path)
+ consume_prefix_in_state_dict_if_present(checkpoint, "module.")
+ hubert.load_state_dict(checkpoint)
+ hubert.eval()
+ return hubert
diff --git a/inference/__init__.py b/inference/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/inference/chunks_temp.json b/inference/chunks_temp.json
new file mode 100644
index 0000000000000000000000000000000000000000..a286da339c100056bd2f7abc8fa49e05ac2fa68a
--- /dev/null
+++ b/inference/chunks_temp.json
@@ -0,0 +1 @@
+{"info": "temp_dict"}
\ No newline at end of file
diff --git a/inference/infer_tool.py b/inference/infer_tool.py
new file mode 100644
index 0000000000000000000000000000000000000000..fed81f5abb6f2f525af616171ee9838ae341cb5f
--- /dev/null
+++ b/inference/infer_tool.py
@@ -0,0 +1,324 @@
+import hashlib
+import io
+import json
+import logging
+import os
+import time
+from pathlib import Path
+from inference import slicer
+
+import librosa
+import numpy as np
+# import onnxruntime
+import parselmouth
+import soundfile
+import torch
+import torchaudio
+
+import cluster
+from hubert import hubert_model
+import utils
+from models import SynthesizerTrn
+
+logging.getLogger('matplotlib').setLevel(logging.WARNING)
+
+
+def read_temp(file_name):
+ if not os.path.exists(file_name):
+ with open(file_name, "w") as f:
+ f.write(json.dumps({"info": "temp_dict"}))
+ return {}
+ else:
+ try:
+ with open(file_name, "r") as f:
+ data = f.read()
+ data_dict = json.loads(data)
+ if os.path.getsize(file_name) > 50 * 1024 * 1024:
+ f_name = file_name.replace("\\", "/").split("/")[-1]
+ print(f"clean {f_name}")
+ for wav_hash in list(data_dict.keys()):
+ if int(time.time()) - int(data_dict[wav_hash]["time"]) > 14 * 24 * 3600:
+ del data_dict[wav_hash]
+ except Exception as e:
+ print(e)
+ print(f"{file_name} error,auto rebuild file")
+ data_dict = {"info": "temp_dict"}
+ return data_dict
+
+
+def write_temp(file_name, data):
+ with open(file_name, "w") as f:
+ f.write(json.dumps(data))
+
+
+def timeit(func):
+ def run(*args, **kwargs):
+ t = time.time()
+ res = func(*args, **kwargs)
+        print('executing \'%s\' cost %.3fs' % (func.__name__, time.time() - t))
+ return res
+
+ return run
+
+
+def format_wav(audio_path):
+ if Path(audio_path).suffix == '.wav':
+ return
+ raw_audio, raw_sample_rate = librosa.load(audio_path, mono=True, sr=None)
+ soundfile.write(Path(audio_path).with_suffix(".wav"), raw_audio, raw_sample_rate)
+
+
+def get_end_file(dir_path, end):
+ file_lists = []
+ for root, dirs, files in os.walk(dir_path):
+ files = [f for f in files if f[0] != '.']
+ dirs[:] = [d for d in dirs if d[0] != '.']
+ for f_file in files:
+ if f_file.endswith(end):
+ file_lists.append(os.path.join(root, f_file).replace("\\", "/"))
+ return file_lists
+
+
+def get_md5(content):
+ return hashlib.new("md5", content).hexdigest()
+
+def fill_a_to_b(a, b):
+ if len(a) < len(b):
+ for _ in range(0, len(b) - len(a)):
+ a.append(a[0])
+
+def mkdir(paths: list):
+ for path in paths:
+ if not os.path.exists(path):
+ os.mkdir(path)
+
+def pad_array(arr, target_length):
+ current_length = arr.shape[0]
+ if current_length >= target_length:
+ return arr
+ else:
+ pad_width = target_length - current_length
+ pad_left = pad_width // 2
+ pad_right = pad_width - pad_left
+ padded_arr = np.pad(arr, (pad_left, pad_right), 'constant', constant_values=(0, 0))
+ return padded_arr
+
+def split_list_by_n(list_collection, n, pre=0):
+ for i in range(0, len(list_collection), n):
+ yield list_collection[i-pre if i-pre>=0 else i: i + n]
+
+
+class F0FilterException(Exception):
+ pass
+
+class Svc(object):
+ def __init__(self, net_g_path, config_path,
+ device=None,
+ cluster_model_path="logs/44k/kmeans_10000.pt"):
+ self.net_g_path = net_g_path
+ if device is None:
+ self.dev = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+ else:
+ self.dev = torch.device(device)
+ self.net_g_ms = None
+ self.hps_ms = utils.get_hparams_from_file(config_path)
+ self.target_sample = self.hps_ms.data.sampling_rate
+ self.hop_size = self.hps_ms.data.hop_length
+ self.spk2id = self.hps_ms.spk
+        # load the hubert content encoder
+ self.hubert_model = utils.get_hubert_model().to(self.dev)
+ self.load_model()
+ if os.path.exists(cluster_model_path):
+ self.cluster_model = cluster.get_cluster_model(cluster_model_path)
+
+ def load_model(self):
+        # build the synthesizer from the model configuration
+ self.net_g_ms = SynthesizerTrn(
+ self.hps_ms.data.filter_length // 2 + 1,
+ self.hps_ms.train.segment_size // self.hps_ms.data.hop_length,
+ **self.hps_ms.model)
+ _ = utils.load_checkpoint(self.net_g_path, self.net_g_ms, None)
+ if "half" in self.net_g_path and torch.cuda.is_available():
+ _ = self.net_g_ms.half().eval().to(self.dev)
+ else:
+ _ = self.net_g_ms.eval().to(self.dev)
+
+
+
+ def get_unit_f0(self, in_path, tran, cluster_infer_ratio, speaker, f0_filter ,F0_mean_pooling):
+
+ wav, sr = librosa.load(in_path, sr=self.target_sample)
+
+ if F0_mean_pooling == True:
+ f0, uv = utils.compute_f0_uv_torchcrepe(torch.FloatTensor(wav), sampling_rate=self.target_sample, hop_length=self.hop_size,device=self.dev)
+ if f0_filter and sum(f0) == 0:
+                raise F0FilterException("No voice detected")
+ f0 = torch.FloatTensor(list(f0))
+ uv = torch.FloatTensor(list(uv))
+ if F0_mean_pooling == False:
+ f0 = utils.compute_f0_parselmouth(wav, sampling_rate=self.target_sample, hop_length=self.hop_size)
+ if f0_filter and sum(f0) == 0:
+                raise F0FilterException("No voice detected")
+ f0, uv = utils.interpolate_f0(f0)
+ f0 = torch.FloatTensor(f0)
+ uv = torch.FloatTensor(uv)
+
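+        # transpose the pitch curve by `tran` semitones (one semitone is a factor of 2 ** (1 / 12))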
+ f0 = f0 * 2 ** (tran / 12)
+ f0 = f0.unsqueeze(0).to(self.dev)
+ uv = uv.unsqueeze(0).to(self.dev)
+
+ wav16k = librosa.resample(wav, orig_sr=self.target_sample, target_sr=16000)
+ wav16k = torch.from_numpy(wav16k).to(self.dev)
+ c = utils.get_hubert_content(self.hubert_model, wav_16k_tensor=wav16k)
+ c = utils.repeat_expand_2d(c.squeeze(0), f0.shape[1])
+
+ if cluster_infer_ratio !=0:
+ cluster_c = cluster.get_cluster_center_result(self.cluster_model, c.cpu().numpy().T, speaker).T
+ cluster_c = torch.FloatTensor(cluster_c).to(self.dev)
+ c = cluster_infer_ratio * cluster_c + (1 - cluster_infer_ratio) * c
+
+ c = c.unsqueeze(0)
+ return c, f0, uv
+
+ def infer(self, speaker, tran, raw_path,
+ cluster_infer_ratio=0,
+ auto_predict_f0=False,
+ noice_scale=0.4,
+ f0_filter=False,
+ F0_mean_pooling=False
+ ):
+
+ speaker_id = self.spk2id.__dict__.get(speaker)
+ if not speaker_id and type(speaker) is int:
+ if len(self.spk2id.__dict__) >= speaker:
+ speaker_id = speaker
+ sid = torch.LongTensor([int(speaker_id)]).to(self.dev).unsqueeze(0)
+ c, f0, uv = self.get_unit_f0(raw_path, tran, cluster_infer_ratio, speaker, f0_filter,F0_mean_pooling)
+ if "half" in self.net_g_path and torch.cuda.is_available():
+ c = c.half()
+ with torch.no_grad():
+ start = time.time()
+ audio = self.net_g_ms.infer(c, f0=f0, g=sid, uv=uv, predict_f0=auto_predict_f0, noice_scale=noice_scale)[0,0].data.float()
+ use_time = time.time() - start
+ print("vits use time:{}".format(use_time))
+ return audio, audio.shape[-1]
+
+ def clear_empty(self):
+        # free cached GPU memory
+ torch.cuda.empty_cache()
+
+ def slice_inference(self,
+ raw_audio_path,
+ spk,
+ tran,
+ slice_db,
+ cluster_infer_ratio,
+ auto_predict_f0,
+ noice_scale,
+ pad_seconds=0.5,
+ clip_seconds=0,
+ lg_num=0,
+ lgr_num =0.75,
+ F0_mean_pooling = False
+ ):
+ wav_path = raw_audio_path
+ chunks = slicer.cut(wav_path, db_thresh=slice_db)
+ audio_data, audio_sr = slicer.chunks2audio(wav_path, chunks)
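+        # clip_seconds splits long voiced chunks into fixed-size windows; lg_num seconds of
+        # overlap between windows are blended back together with the linear cross-fade ramp `lg`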
+ per_size = int(clip_seconds*audio_sr)
+ lg_size = int(lg_num*audio_sr)
+ lg_size_r = int(lg_size*lgr_num)
+ lg_size_c_l = (lg_size-lg_size_r)//2
+ lg_size_c_r = lg_size-lg_size_r-lg_size_c_l
+ lg = np.linspace(0,1,lg_size_r) if lg_size!=0 else 0
+
+ audio = []
+ for (slice_tag, data) in audio_data:
+ print(f'#=====segment start, {round(len(data) / audio_sr, 3)}s======')
+            # pad
+ length = int(np.ceil(len(data) / audio_sr * self.target_sample))
+ if slice_tag:
+ print('jump empty segment')
+ _audio = np.zeros(length)
+ audio.extend(list(pad_array(_audio, length)))
+ continue
+ if per_size != 0:
+ datas = split_list_by_n(data, per_size,lg_size)
+ else:
+ datas = [data]
+ for k,dat in enumerate(datas):
+ per_length = int(np.ceil(len(dat) / audio_sr * self.target_sample)) if clip_seconds!=0 else length
+ if clip_seconds!=0: print(f'###=====segment clip start, {round(len(dat) / audio_sr, 3)}s======')
+                # pad
+ pad_len = int(audio_sr * pad_seconds)
+ dat = np.concatenate([np.zeros([pad_len]), dat, np.zeros([pad_len])])
+ raw_path = io.BytesIO()
+ soundfile.write(raw_path, dat, audio_sr, format="wav")
+ raw_path.seek(0)
+ out_audio, out_sr = self.infer(spk, tran, raw_path,
+ cluster_infer_ratio=cluster_infer_ratio,
+ auto_predict_f0=auto_predict_f0,
+ noice_scale=noice_scale,
+ F0_mean_pooling = F0_mean_pooling
+ )
+ _audio = out_audio.cpu().numpy()
+ pad_len = int(self.target_sample * pad_seconds)
+ _audio = _audio[pad_len:-pad_len]
+ _audio = pad_array(_audio, per_length)
+ if lg_size!=0 and k!=0:
+ lg1 = audio[-(lg_size_r+lg_size_c_r):-lg_size_c_r] if lgr_num != 1 else audio[-lg_size:]
+ lg2 = _audio[lg_size_c_l:lg_size_c_l+lg_size_r] if lgr_num != 1 else _audio[0:lg_size]
+ lg_pre = lg1*(1-lg)+lg2*lg
+ audio = audio[0:-(lg_size_r+lg_size_c_r)] if lgr_num != 1 else audio[0:-lg_size]
+ audio.extend(lg_pre)
+ _audio = _audio[lg_size_c_l+lg_size_r:] if lgr_num != 1 else _audio[lg_size:]
+ audio.extend(list(_audio))
+ return np.array(audio)
+
+class RealTimeVC:
+ def __init__(self):
+ self.last_chunk = None
+ self.last_o = None
+        self.chunk_len = 16000  # chunk length
+        self.pre_len = 3840  # cross-fade length, a multiple of 640
+
+    """Both input and output are 1-D numpy audio waveform arrays."""
+
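+    # Keep the tail of the previous chunk, run inference on [previous tail + new chunk],
+    # then cross-fade the result against the previous output with maad.util.crossfade.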
+ def process(self, svc_model, speaker_id, f_pitch_change, input_wav_path,
+ cluster_infer_ratio=0,
+ auto_predict_f0=False,
+ noice_scale=0.4,
+ f0_filter=False):
+
+ import maad
+ audio, sr = torchaudio.load(input_wav_path)
+ audio = audio.cpu().numpy()[0]
+ temp_wav = io.BytesIO()
+ if self.last_chunk is None:
+ input_wav_path.seek(0)
+
+ audio, sr = svc_model.infer(speaker_id, f_pitch_change, input_wav_path,
+ cluster_infer_ratio=cluster_infer_ratio,
+ auto_predict_f0=auto_predict_f0,
+ noice_scale=noice_scale,
+ f0_filter=f0_filter)
+
+ audio = audio.cpu().numpy()
+ self.last_chunk = audio[-self.pre_len:]
+ self.last_o = audio
+ return audio[-self.chunk_len:]
+ else:
+ audio = np.concatenate([self.last_chunk, audio])
+ soundfile.write(temp_wav, audio, sr, format="wav")
+ temp_wav.seek(0)
+
+ audio, sr = svc_model.infer(speaker_id, f_pitch_change, temp_wav,
+ cluster_infer_ratio=cluster_infer_ratio,
+ auto_predict_f0=auto_predict_f0,
+ noice_scale=noice_scale,
+ f0_filter=f0_filter)
+
+ audio = audio.cpu().numpy()
+ ret = maad.util.crossfade(self.last_o, audio, self.pre_len)
+ self.last_chunk = audio[-self.pre_len:]
+ self.last_o = audio
+ return ret[self.chunk_len:2 * self.chunk_len]
diff --git a/inference/infer_tool_grad.py b/inference/infer_tool_grad.py
new file mode 100644
index 0000000000000000000000000000000000000000..b75af49c08e2e724839828bc419792ed580809bb
--- /dev/null
+++ b/inference/infer_tool_grad.py
@@ -0,0 +1,160 @@
+import hashlib
+import json
+import logging
+import os
+import time
+from pathlib import Path
+import io
+import librosa
+import maad
+import numpy as np
+from inference import slicer
+import parselmouth
+import soundfile
+import torch
+import torchaudio
+
+from hubert import hubert_model
+import utils
+from models import SynthesizerTrn
+logging.getLogger('numba').setLevel(logging.WARNING)
+logging.getLogger('matplotlib').setLevel(logging.WARNING)
+
+def resize2d_f0(x, target_len):
+ source = np.array(x)
+ source[source < 0.001] = np.nan
+ target = np.interp(np.arange(0, len(source) * target_len, len(source)) / target_len, np.arange(0, len(source)),
+ source)
+ res = np.nan_to_num(target)
+ return res
+
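+# Extract F0 with Praat (via parselmouth), pad it to p_len frames, apply the semitone shift
+# f0_up_key, and quantize it to 255 coarse bins on a mel-like scale (1 means unvoiced).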
+def get_f0(x, p_len,f0_up_key=0):
+
+ time_step = 160 / 16000 * 1000
+ f0_min = 50
+ f0_max = 1100
+ f0_mel_min = 1127 * np.log(1 + f0_min / 700)
+ f0_mel_max = 1127 * np.log(1 + f0_max / 700)
+
+ f0 = parselmouth.Sound(x, 16000).to_pitch_ac(
+ time_step=time_step / 1000, voicing_threshold=0.6,
+ pitch_floor=f0_min, pitch_ceiling=f0_max).selected_array['frequency']
+
+ pad_size=(p_len - len(f0) + 1) // 2
+ if(pad_size>0 or p_len - len(f0) - pad_size>0):
+ f0 = np.pad(f0,[[pad_size,p_len - len(f0) - pad_size]], mode='constant')
+
+ f0 *= pow(2, f0_up_key / 12)
+ f0_mel = 1127 * np.log(1 + f0 / 700)
+ f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (f0_mel_max - f0_mel_min) + 1
+ f0_mel[f0_mel <= 1] = 1
+ f0_mel[f0_mel > 255] = 255
+    f0_coarse = np.rint(f0_mel).astype(int)  # np.int was removed in NumPy 1.24; use the builtin int
+ return f0_coarse, f0
+
+def clean_pitch(input_pitch):
+ num_nan = np.sum(input_pitch == 1)
+ if num_nan / len(input_pitch) > 0.9:
+ input_pitch[input_pitch != 1] = 1
+ return input_pitch
+
+
+def plt_pitch(input_pitch):
+ input_pitch = input_pitch.astype(float)
+ input_pitch[input_pitch == 1] = np.nan
+ return input_pitch
+
+
+def f0_to_pitch(ff):
+ f0_pitch = 69 + 12 * np.log2(ff / 440)
+ return f0_pitch
+
+
+def fill_a_to_b(a, b):
+ if len(a) < len(b):
+ for _ in range(0, len(b) - len(a)):
+ a.append(a[0])
+
+
+def mkdir(paths: list):
+ for path in paths:
+ if not os.path.exists(path):
+ os.mkdir(path)
+
+
+class VitsSvc(object):
+ def __init__(self):
+ self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+ self.SVCVITS = None
+ self.hps = None
+ self.speakers = None
+ self.hubert_soft = utils.get_hubert_model()
+
+ def set_device(self, device):
+ self.device = torch.device(device)
+ self.hubert_soft.to(self.device)
+ if self.SVCVITS != None:
+ self.SVCVITS.to(self.device)
+
+ def loadCheckpoint(self, path):
+ self.hps = utils.get_hparams_from_file(f"checkpoints/{path}/config.json")
+ self.SVCVITS = SynthesizerTrn(
+ self.hps.data.filter_length // 2 + 1,
+ self.hps.train.segment_size // self.hps.data.hop_length,
+ **self.hps.model)
+ _ = utils.load_checkpoint(f"checkpoints/{path}/model.pth", self.SVCVITS, None)
+ _ = self.SVCVITS.eval().to(self.device)
+ self.speakers = self.hps.spk
+
+ def get_units(self, source, sr):
+ source = source.unsqueeze(0).to(self.device)
+ with torch.inference_mode():
+ units = self.hubert_soft.units(source)
+ return units
+
+
+ def get_unit_pitch(self, in_path, tran):
+ source, sr = torchaudio.load(in_path)
+ source = torchaudio.functional.resample(source, sr, 16000)
+ if len(source.shape) == 2 and source.shape[1] >= 2:
+ source = torch.mean(source, dim=0).unsqueeze(0)
+ soft = self.get_units(source, sr).squeeze(0).cpu().numpy()
+ f0_coarse, f0 = get_f0(source.cpu().numpy()[0], soft.shape[0]*2, tran)
+ return soft, f0
+
+ def infer(self, speaker_id, tran, raw_path):
+ speaker_id = self.speakers[speaker_id]
+ sid = torch.LongTensor([int(speaker_id)]).to(self.device).unsqueeze(0)
+ soft, pitch = self.get_unit_pitch(raw_path, tran)
+ f0 = torch.FloatTensor(clean_pitch(pitch)).unsqueeze(0).to(self.device)
+ stn_tst = torch.FloatTensor(soft)
+ with torch.no_grad():
+ x_tst = stn_tst.unsqueeze(0).to(self.device)
+ x_tst = torch.repeat_interleave(x_tst, repeats=2, dim=1).transpose(1, 2)
+ audio = self.SVCVITS.infer(x_tst, f0=f0, g=sid)[0,0].data.float()
+ return audio, audio.shape[-1]
+
+ def inference(self,srcaudio,chara,tran,slice_db):
+ sampling_rate, audio = srcaudio
+ audio = (audio / np.iinfo(audio.dtype).max).astype(np.float32)
+ if len(audio.shape) > 1:
+ audio = librosa.to_mono(audio.transpose(1, 0))
+ if sampling_rate != 16000:
+ audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=16000)
+ soundfile.write("tmpwav.wav", audio, 16000, format="wav")
+ chunks = slicer.cut("tmpwav.wav", db_thresh=slice_db)
+ audio_data, audio_sr = slicer.chunks2audio("tmpwav.wav", chunks)
+ audio = []
+ for (slice_tag, data) in audio_data:
+ length = int(np.ceil(len(data) / audio_sr * self.hps.data.sampling_rate))
+ raw_path = io.BytesIO()
+ soundfile.write(raw_path, data, audio_sr, format="wav")
+ raw_path.seek(0)
+ if slice_tag:
+ _audio = np.zeros(length)
+ else:
+ out_audio, out_sr = self.infer(chara, tran, raw_path)
+ _audio = out_audio.cpu().numpy()
+ audio.extend(list(_audio))
+ audio = (np.array(audio) * 32768.0).astype('int16')
+ return (self.hps.data.sampling_rate,audio)
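
VitsSvc.inference above takes Gradio-style audio, i.e. a (sample_rate, int16 ndarray) tuple, and returns the converted audio in the same form. Below is a short sketch of driving it directly, assuming a checkpoints/<name>/ folder laid out the way loadCheckpoint expects; the checkpoint name, wav path and speaker key are illustrative only.

import soundfile
from inference.infer_tool_grad import VitsSvc

svc = VitsSvc()
svc.loadCheckpoint("arthur")  # assumes checkpoints/arthur/{config.json, model.pth}
wav, sr = soundfile.read("raw/example.wav", dtype="int16")
out_sr, out_wav = svc.inference((sr, wav), "arthur", 0, -40)  # speaker key, pitch shift, slice dB
soundfile.write("results/example_converted.wav", out_wav, out_sr)
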
diff --git a/inference/slicer.py b/inference/slicer.py
new file mode 100644
index 0000000000000000000000000000000000000000..b05840bcf6bdced0b6e2adbecb1a1dd5b3dee462
--- /dev/null
+++ b/inference/slicer.py
@@ -0,0 +1,142 @@
+import librosa
+import torch
+import torchaudio
+
+
+class Slicer:
+ def __init__(self,
+ sr: int,
+ threshold: float = -40.,
+ min_length: int = 5000,
+ min_interval: int = 300,
+ hop_size: int = 20,
+ max_sil_kept: int = 5000):
+ if not min_length >= min_interval >= hop_size:
+ raise ValueError('The following condition must be satisfied: min_length >= min_interval >= hop_size')
+ if not max_sil_kept >= hop_size:
+ raise ValueError('The following condition must be satisfied: max_sil_kept >= hop_size')
+ min_interval = sr * min_interval / 1000
+ self.threshold = 10 ** (threshold / 20.)
+ self.hop_size = round(sr * hop_size / 1000)
+ self.win_size = min(round(min_interval), 4 * self.hop_size)
+ self.min_length = round(sr * min_length / 1000 / self.hop_size)
+ self.min_interval = round(min_interval / self.hop_size)
+ self.max_sil_kept = round(sr * max_sil_kept / 1000 / self.hop_size)
+
+ def _apply_slice(self, waveform, begin, end):
+ if len(waveform.shape) > 1:
+ return waveform[:, begin * self.hop_size: min(waveform.shape[1], end * self.hop_size)]
+ else:
+ return waveform[begin * self.hop_size: min(waveform.shape[0], end * self.hop_size)]
+
+ # @timeit
+ def slice(self, waveform):
+ if len(waveform.shape) > 1:
+ samples = librosa.to_mono(waveform)
+ else:
+ samples = waveform
+ if samples.shape[0] <= self.min_length:
+ return {"0": {"slice": False, "split_time": f"0,{len(waveform)}"}}
+ rms_list = librosa.feature.rms(y=samples, frame_length=self.win_size, hop_length=self.hop_size).squeeze(0)
+ sil_tags = []
+ silence_start = None
+ clip_start = 0
+ for i, rms in enumerate(rms_list):
+ # Keep looping while frame is silent.
+ if rms < self.threshold:
+ # Record start of silent frames.
+ if silence_start is None:
+ silence_start = i
+ continue
+ # Keep looping while frame is not silent and silence start has not been recorded.
+ if silence_start is None:
+ continue
+ # Clear recorded silence start if interval is not enough or clip is too short
+ is_leading_silence = silence_start == 0 and i > self.max_sil_kept
+ need_slice_middle = i - silence_start >= self.min_interval and i - clip_start >= self.min_length
+ if not is_leading_silence and not need_slice_middle:
+ silence_start = None
+ continue
+ # Need slicing. Record the range of silent frames to be removed.
+ if i - silence_start <= self.max_sil_kept:
+ pos = rms_list[silence_start: i + 1].argmin() + silence_start
+ if silence_start == 0:
+ sil_tags.append((0, pos))
+ else:
+ sil_tags.append((pos, pos))
+ clip_start = pos
+ elif i - silence_start <= self.max_sil_kept * 2:
+ pos = rms_list[i - self.max_sil_kept: silence_start + self.max_sil_kept + 1].argmin()
+ pos += i - self.max_sil_kept
+ pos_l = rms_list[silence_start: silence_start + self.max_sil_kept + 1].argmin() + silence_start
+ pos_r = rms_list[i - self.max_sil_kept: i + 1].argmin() + i - self.max_sil_kept
+ if silence_start == 0:
+ sil_tags.append((0, pos_r))
+ clip_start = pos_r
+ else:
+ sil_tags.append((min(pos_l, pos), max(pos_r, pos)))
+ clip_start = max(pos_r, pos)
+ else:
+ pos_l = rms_list[silence_start: silence_start + self.max_sil_kept + 1].argmin() + silence_start
+ pos_r = rms_list[i - self.max_sil_kept: i + 1].argmin() + i - self.max_sil_kept
+ if silence_start == 0:
+ sil_tags.append((0, pos_r))
+ else:
+ sil_tags.append((pos_l, pos_r))
+ clip_start = pos_r
+ silence_start = None
+ # Deal with trailing silence.
+ total_frames = rms_list.shape[0]
+ if silence_start is not None and total_frames - silence_start >= self.min_interval:
+ silence_end = min(total_frames, silence_start + self.max_sil_kept)
+ pos = rms_list[silence_start: silence_end + 1].argmin() + silence_start
+ sil_tags.append((pos, total_frames + 1))
+ # Apply and return slices.
+ if len(sil_tags) == 0:
+ return {"0": {"slice": False, "split_time": f"0,{len(waveform)}"}}
+ else:
+ chunks = []
+ # If the first silent segment does not start at the very beginning, prepend the leading voiced chunk
+ if sil_tags[0][0]:
+ chunks.append(
+ {"slice": False, "split_time": f"0,{min(waveform.shape[0], sil_tags[0][0] * self.hop_size)}"})
+ for i in range(0, len(sil_tags)):
+ # Mark the voiced chunks (skipped for the first tag)
+ if i:
+ chunks.append({"slice": False,
+ "split_time": f"{sil_tags[i - 1][1] * self.hop_size},{min(waveform.shape[0], sil_tags[i][0] * self.hop_size)}"})
+ # Mark every silent chunk
+ chunks.append({"slice": True,
+ "split_time": f"{sil_tags[i][0] * self.hop_size},{min(waveform.shape[0], sil_tags[i][1] * self.hop_size)}"})
+ # If the last silent segment does not reach the end, append the trailing voiced chunk
+ if sil_tags[-1][1] * self.hop_size < len(waveform):
+ chunks.append({"slice": False, "split_time": f"{sil_tags[-1][1] * self.hop_size},{len(waveform)}"})
+ chunk_dict = {}
+ for i in range(len(chunks)):
+ chunk_dict[str(i)] = chunks[i]
+ return chunk_dict
+
+
+def cut(audio_path, db_thresh=-30, min_len=5000):
+ audio, sr = librosa.load(audio_path, sr=None)
+ slicer = Slicer(
+ sr=sr,
+ threshold=db_thresh,
+ min_length=min_len
+ )
+ chunks = slicer.slice(audio)
+ return chunks
+
+
+def chunks2audio(audio_path, chunks):
+ chunks = dict(chunks)
+ audio, sr = torchaudio.load(audio_path)
+ if len(audio.shape) == 2 and audio.shape[1] >= 2:
+ audio = torch.mean(audio, dim=0).unsqueeze(0)
+ audio = audio.cpu().numpy()[0]
+ result = []
+ for k, v in chunks.items():
+ tag = v["split_time"].split(",")
+ if tag[0] != tag[1]:
+ result.append((v["slice"], audio[int(tag[0]):int(tag[1])]))
+ return result, sr
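
The slicer works entirely in RMS frames: the dB threshold is converted to a linear amplitude once, and every position stored in "split_time" is a frame index scaled back into samples by hop_size. A small worked example of that bookkeeping (values are illustrative, using the Slicer defaults):

sr = 44100
threshold_db = -40.0      # Slicer's default threshold
hop_ms = 20               # Slicer's default hop_size argument, in milliseconds

linear_threshold = 10 ** (threshold_db / 20.0)  # -40 dB -> 0.01 linear RMS
hop_size = round(sr * hop_ms / 1000)            # 20 ms -> 882 samples at 44.1 kHz

frame_index = 150                               # e.g. a frame chosen by argmin over the RMS curve
sample_position = frame_index * hop_size        # what ends up in a "split_time" string
print(linear_threshold, hop_size, sample_position)  # 0.01 882 132300
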
diff --git a/inference_main.py b/inference_main.py
new file mode 100644
index 0000000000000000000000000000000000000000..3b2c32ac9e29e6b016e656e937fede5d2c23e7e6
--- /dev/null
+++ b/inference_main.py
@@ -0,0 +1,130 @@
+import io
+import logging
+import time
+from pathlib import Path
+
+import librosa
+import matplotlib.pyplot as plt
+import numpy as np
+import soundfile
+
+from inference import infer_tool
+from inference import slicer
+from inference.infer_tool import Svc
+
+logging.getLogger('numba').setLevel(logging.WARNING)
+chunks_dict = infer_tool.read_temp("inference/chunks_temp.json")
+
+
+
+def main():
+ import argparse
+
+ parser = argparse.ArgumentParser(description='sovits4 inference')
+
+ # Required settings
+ parser.add_argument('-m', '--model_path', type=str, default="logs/44k/G_0.pth", help='path to the model')
+ parser.add_argument('-c', '--config_path', type=str, default="configs/config.json", help='path to the config file')
+ parser.add_argument('-cl', '--clip', type=float, default=0, help='force slicing of the audio; 0 (default) means automatic slicing, unit: seconds')
+ parser.add_argument('-n', '--clean_names', type=str, nargs='+', default=["君の知らない物語-src.wav"], help='list of wav file names, placed under the raw folder')
+ parser.add_argument('-t', '--trans', type=int, nargs='+', default=[0], help='pitch shift in semitones, positive or negative')
+ parser.add_argument('-s', '--spk_list', type=str, nargs='+', default=['nen'], help='target speaker name(s) for synthesis')
+
+ # Optional settings
+ parser.add_argument('-a', '--auto_predict_f0', action='store_true', default=False, help='automatically predict pitch for speech conversion; do not enable this when converting singing, it will go badly out of tune')
+ parser.add_argument('-cm', '--cluster_model_path', type=str, default="logs/44k/kmeans_10000.pt", help='path to the clustering model; any value works if no clustering model was trained')
+ parser.add_argument('-cr', '--cluster_infer_ratio', type=float, default=0, help='proportion of the clustering scheme, range 0-1; keep the default 0 if no clustering model was trained')
+ parser.add_argument('-lg', '--linear_gradient', type=float, default=0, help='cross-fade length between two audio slices, in seconds; raise it if forced slicing makes the vocals discontinuous, otherwise keep the default 0')
+ parser.add_argument('-fmp', '--f0_mean_pooling', type=bool, default=False, help='whether to apply mean filtering (pooling) to F0, which helps with some hoarse/muted segments; note that enabling it slows down inference, disabled by default')
+
+ # Settings that normally need no change
+ parser.add_argument('-sd', '--slice_db', type=int, default=-40, help='default -40; -30 works for noisy audio, -50 for dry vocals where breaths should be kept')
+ parser.add_argument('-d', '--device', type=str, default=None, help='inference device; None selects cpu or gpu automatically')
+ parser.add_argument('-ns', '--noice_scale', type=float, default=0.4, help='noise scale; affects articulation and audio quality, somewhat unpredictable')
+ parser.add_argument('-p', '--pad_seconds', type=float, default=0.5, help='seconds of padding for the inference audio; for unknown reasons artifacts appear at the start and end, padding with a short silence removes them')
+ parser.add_argument('-wf', '--wav_format', type=str, default='flac', help='audio output format')
+ parser.add_argument('-lgr', '--linear_gradient_retain', type=float, default=0.75, help='after automatic slicing, the head and tail of each slice are discarded; this sets the proportion of the cross-fade length that is kept, range (0, 1]')
+
+ args = parser.parse_args()
+
+ svc_model = Svc(args.model_path, args.config_path, args.device, args.cluster_model_path)
+ infer_tool.mkdir(["raw", "results"])
+ clean_names = args.clean_names
+ trans = args.trans
+ spk_list = args.spk_list
+ slice_db = args.slice_db
+ wav_format = args.wav_format
+ auto_predict_f0 = args.auto_predict_f0
+ cluster_infer_ratio = args.cluster_infer_ratio
+ noice_scale = args.noice_scale
+ pad_seconds = args.pad_seconds
+ clip = args.clip
+ lg = args.linear_gradient
+ lgr = args.linear_gradient_retain
+ F0_mean_pooling = args.f0_mean_pooling
+
+ infer_tool.fill_a_to_b(trans, clean_names)
+ for clean_name, tran in zip(clean_names, trans):
+ raw_audio_path = f"raw/{clean_name}"
+ if "." not in raw_audio_path:
+ raw_audio_path += ".wav"
+ infer_tool.format_wav(raw_audio_path)
+ wav_path = Path(raw_audio_path).with_suffix('.wav')
+ chunks = slicer.cut(wav_path, db_thresh=slice_db)
+ audio_data, audio_sr = slicer.chunks2audio(wav_path, chunks)
+ per_size = int(clip*audio_sr)
+ lg_size = int(lg*audio_sr)
+ lg_size_r = int(lg_size*lgr)
+ lg_size_c_l = (lg_size-lg_size_r)//2
+ lg_size_c_r = lg_size-lg_size_r-lg_size_c_l
+ lg = np.linspace(0,1,lg_size_r) if lg_size!=0 else 0
+
+ for spk in spk_list:
+ audio = []
+ for (slice_tag, data) in audio_data:
+ print(f'#=====segment start, {round(len(data) / audio_sr, 3)}s======')
+
+ length = int(np.ceil(len(data) / audio_sr * svc_model.target_sample))
+ if slice_tag:
+ print('jump empty segment')
+ _audio = np.zeros(length)
+ audio.extend(list(infer_tool.pad_array(_audio, length)))
+ continue
+ if per_size != 0:
+ datas = infer_tool.split_list_by_n(data, per_size,lg_size)
+ else:
+ datas = [data]
+ for k,dat in enumerate(datas):
+ per_length = int(np.ceil(len(dat) / audio_sr * svc_model.target_sample)) if clip!=0 else length
+ if clip!=0: print(f'###=====segment clip start, {round(len(dat) / audio_sr, 3)}s======')
+ # pad both ends with silence
+ pad_len = int(audio_sr * pad_seconds)
+ dat = np.concatenate([np.zeros([pad_len]), dat, np.zeros([pad_len])])
+ raw_path = io.BytesIO()
+ soundfile.write(raw_path, dat, audio_sr, format="wav")
+ raw_path.seek(0)
+ out_audio, out_sr = svc_model.infer(spk, tran, raw_path,
+ cluster_infer_ratio=cluster_infer_ratio,
+ auto_predict_f0=auto_predict_f0,
+ noice_scale=noice_scale,
+ F0_mean_pooling = F0_mean_pooling
+ )
+ _audio = out_audio.cpu().numpy()
+ pad_len = int(svc_model.target_sample * pad_seconds)
+ _audio = _audio[pad_len:-pad_len]
+ _audio = infer_tool.pad_array(_audio, per_length)
+ if lg_size!=0 and k!=0:
+ lg1 = audio[-(lg_size_r+lg_size_c_r):-lg_size_c_r] if lgr != 1 else audio[-lg_size:]
+ lg2 = _audio[lg_size_c_l:lg_size_c_l+lg_size_r] if lgr != 1 else _audio[0:lg_size]
+ lg_pre = lg1*(1-lg)+lg2*lg
+ audio = audio[0:-(lg_size_r+lg_size_c_r)] if lgr != 1 else audio[0:-lg_size]
+ audio.extend(lg_pre)
+ _audio = _audio[lg_size_c_l+lg_size_r:] if lgr != 1 else _audio[lg_size:]
+ audio.extend(list(_audio))
+ key = "auto" if auto_predict_f0 else f"{tran}key"
+ cluster_name = "" if cluster_infer_ratio == 0 else f"_{cluster_infer_ratio}"
+ res_path = f'./results/{clean_name}_{key}_{spk}{cluster_name}.{wav_format}'
+ soundfile.write(res_path, audio, svc_model.target_sample, format=wav_format)
+
+if __name__ == '__main__':
+ main()
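
When forced slicing is enabled (--clip > 0), the loop above stitches consecutive sub-slices with a linear cross-fade of lg_size_r samples, discarding lg_size_c_l and lg_size_c_r samples on either side of the overlap. A standalone sketch of that blend, using stand-in arrays rather than real model output:

import numpy as np

audio_sr = 44100
lg_seconds = 1.0              # --linear_gradient, in seconds (example value)
lgr = 0.75                    # --linear_gradient_retain

lg_size = int(lg_seconds * audio_sr)
lg_size_r = int(lg_size * lgr)                    # samples actually cross-faded
lg_size_c_l = (lg_size - lg_size_r) // 2          # samples dropped before the overlap
lg_size_c_r = lg_size - lg_size_r - lg_size_c_l   # samples dropped after it

ramp = np.linspace(0, 1, lg_size_r)
tail = np.ones(lg_size_r)                  # stand-in for the previous slice's tail
head = np.zeros(lg_size_r)                 # stand-in for the next slice's head
blended = tail * (1 - ramp) + head * ramp  # fades smoothly from tail into head
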
diff --git a/models.py b/models.py
new file mode 100644
index 0000000000000000000000000000000000000000..13278d680493970f5a670cf3fc955a6e9b7ab1d5
--- /dev/null
+++ b/models.py
@@ -0,0 +1,420 @@
+import copy
+import math
+import torch
+from torch import nn
+from torch.nn import functional as F
+
+import modules.attentions as attentions
+import modules.commons as commons
+import modules.modules as modules
+
+from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
+from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
+
+import utils
+from modules.commons import init_weights, get_padding
+from vdecoder.hifigan.models import Generator
+from utils import f0_to_coarse
+
+class ResidualCouplingBlock(nn.Module):
+ def __init__(self,
+ channels,
+ hidden_channels,
+ kernel_size,
+ dilation_rate,
+ n_layers,
+ n_flows=4,
+ gin_channels=0):
+ super().__init__()
+ self.channels = channels
+ self.hidden_channels = hidden_channels
+ self.kernel_size = kernel_size
+ self.dilation_rate = dilation_rate
+ self.n_layers = n_layers
+ self.n_flows = n_flows
+ self.gin_channels = gin_channels
+
+ self.flows = nn.ModuleList()
+ for i in range(n_flows):
+ self.flows.append(modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels, mean_only=True))
+ self.flows.append(modules.Flip())
+
+ def forward(self, x, x_mask, g=None, reverse=False):
+ if not reverse:
+ for flow in self.flows:
+ x, _ = flow(x, x_mask, g=g, reverse=reverse)
+ else:
+ for flow in reversed(self.flows):
+ x = flow(x, x_mask, g=g, reverse=reverse)
+ return x
+
+
+class Encoder(nn.Module):
+ def __init__(self,
+ in_channels,
+ out_channels,
+ hidden_channels,
+ kernel_size,
+ dilation_rate,
+ n_layers,
+ gin_channels=0):
+ super().__init__()
+ self.in_channels = in_channels
+ self.out_channels = out_channels
+ self.hidden_channels = hidden_channels
+ self.kernel_size = kernel_size
+ self.dilation_rate = dilation_rate
+ self.n_layers = n_layers
+ self.gin_channels = gin_channels
+
+ self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
+ self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels)
+ self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
+
+ def forward(self, x, x_lengths, g=None):
+ # print(x.shape,x_lengths.shape)
+ x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
+ x = self.pre(x) * x_mask
+ x = self.enc(x, x_mask, g=g)
+ stats = self.proj(x) * x_mask
+ m, logs = torch.split(stats, self.out_channels, dim=1)
+ z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
+ return z, m, logs, x_mask
+
+
+class TextEncoder(nn.Module):
+ def __init__(self,
+ out_channels,
+ hidden_channels,
+ kernel_size,
+ n_layers,
+ gin_channels=0,
+ filter_channels=None,
+ n_heads=None,
+ p_dropout=None):
+ super().__init__()
+ self.out_channels = out_channels
+ self.hidden_channels = hidden_channels
+ self.kernel_size = kernel_size
+ self.n_layers = n_layers
+ self.gin_channels = gin_channels
+ self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
+ self.f0_emb = nn.Embedding(256, hidden_channels)
+
+ self.enc_ = attentions.Encoder(
+ hidden_channels,
+ filter_channels,
+ n_heads,
+ n_layers,
+ kernel_size,
+ p_dropout)
+
+ def forward(self, x, x_mask, f0=None, noice_scale=1):
+ x = x + self.f0_emb(f0).transpose(1,2)
+ x = self.enc_(x * x_mask, x_mask)
+ stats = self.proj(x) * x_mask
+ m, logs = torch.split(stats, self.out_channels, dim=1)
+ z = (m + torch.randn_like(m) * torch.exp(logs) * noice_scale) * x_mask
+
+ return z, m, logs, x_mask
+
+
+
+class DiscriminatorP(torch.nn.Module):
+ def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
+ super(DiscriminatorP, self).__init__()
+ self.period = period
+ self.use_spectral_norm = use_spectral_norm
+ norm_f = weight_norm if use_spectral_norm == False else spectral_norm
+ self.convs = nn.ModuleList([
+ norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
+ norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
+ norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
+ norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
+ norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))),
+ ])
+ self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
+
+ def forward(self, x):
+ fmap = []
+
+ # 1d to 2d
+ b, c, t = x.shape
+ if t % self.period != 0: # pad first
+ n_pad = self.period - (t % self.period)
+ x = F.pad(x, (0, n_pad), "reflect")
+ t = t + n_pad
+ x = x.view(b, c, t // self.period, self.period)
+
+ for l in self.convs:
+ x = l(x)
+ x = F.leaky_relu(x, modules.LRELU_SLOPE)
+ fmap.append(x)
+ x = self.conv_post(x)
+ fmap.append(x)
+ x = torch.flatten(x, 1, -1)
+
+ return x, fmap
+
+
+class DiscriminatorS(torch.nn.Module):
+ def __init__(self, use_spectral_norm=False):
+ super(DiscriminatorS, self).__init__()
+ norm_f = weight_norm if use_spectral_norm == False else spectral_norm
+ self.convs = nn.ModuleList([
+ norm_f(Conv1d(1, 16, 15, 1, padding=7)),
+ norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
+ norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
+ norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
+ norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
+ norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
+ ])
+ self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
+
+ def forward(self, x):
+ fmap = []
+
+ for l in self.convs:
+ x = l(x)
+ x = F.leaky_relu(x, modules.LRELU_SLOPE)
+ fmap.append(x)
+ x = self.conv_post(x)
+ fmap.append(x)
+ x = torch.flatten(x, 1, -1)
+
+ return x, fmap
+
+
+class MultiPeriodDiscriminator(torch.nn.Module):
+ def __init__(self, use_spectral_norm=False):
+ super(MultiPeriodDiscriminator, self).__init__()
+ periods = [2,3,5,7,11]
+
+ discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
+ discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods]
+ self.discriminators = nn.ModuleList(discs)
+
+ def forward(self, y, y_hat):
+ y_d_rs = []
+ y_d_gs = []
+ fmap_rs = []
+ fmap_gs = []
+ for i, d in enumerate(self.discriminators):
+ y_d_r, fmap_r = d(y)
+ y_d_g, fmap_g = d(y_hat)
+ y_d_rs.append(y_d_r)
+ y_d_gs.append(y_d_g)
+ fmap_rs.append(fmap_r)
+ fmap_gs.append(fmap_g)
+
+ return y_d_rs, y_d_gs, fmap_rs, fmap_gs
+
+
+class SpeakerEncoder(torch.nn.Module):
+ def __init__(self, mel_n_channels=80, model_num_layers=3, model_hidden_size=256, model_embedding_size=256):
+ super(SpeakerEncoder, self).__init__()
+ self.lstm = nn.LSTM(mel_n_channels, model_hidden_size, model_num_layers, batch_first=True)
+ self.linear = nn.Linear(model_hidden_size, model_embedding_size)
+ self.relu = nn.ReLU()
+
+ def forward(self, mels):
+ self.lstm.flatten_parameters()
+ _, (hidden, _) = self.lstm(mels)
+ embeds_raw = self.relu(self.linear(hidden[-1]))
+ return embeds_raw / torch.norm(embeds_raw, dim=1, keepdim=True)
+
+ def compute_partial_slices(self, total_frames, partial_frames, partial_hop):
+ mel_slices = []
+ for i in range(0, total_frames-partial_frames, partial_hop):
+ mel_range = torch.arange(i, i+partial_frames)
+ mel_slices.append(mel_range)
+
+ return mel_slices
+
+ def embed_utterance(self, mel, partial_frames=128, partial_hop=64):
+ mel_len = mel.size(1)
+ last_mel = mel[:,-partial_frames:]
+
+ if mel_len > partial_frames:
+ mel_slices = self.compute_partial_slices(mel_len, partial_frames, partial_hop)
+ mels = list(mel[:,s] for s in mel_slices)
+ mels.append(last_mel)
+ mels = torch.stack(tuple(mels), 0).squeeze(1)
+
+ with torch.no_grad():
+ partial_embeds = self(mels)
+ embed = torch.mean(partial_embeds, axis=0).unsqueeze(0)
+ #embed = embed / torch.linalg.norm(embed, 2)
+ else:
+ with torch.no_grad():
+ embed = self(last_mel)
+
+ return embed
+
+class F0Decoder(nn.Module):
+ def __init__(self,
+ out_channels,
+ hidden_channels,
+ filter_channels,
+ n_heads,
+ n_layers,
+ kernel_size,
+ p_dropout,
+ spk_channels=0):
+ super().__init__()
+ self.out_channels = out_channels
+ self.hidden_channels = hidden_channels
+ self.filter_channels = filter_channels
+ self.n_heads = n_heads
+ self.n_layers = n_layers
+ self.kernel_size = kernel_size
+ self.p_dropout = p_dropout
+ self.spk_channels = spk_channels
+
+ self.prenet = nn.Conv1d(hidden_channels, hidden_channels, 3, padding=1)
+ self.decoder = attentions.FFT(
+ hidden_channels,
+ filter_channels,
+ n_heads,
+ n_layers,
+ kernel_size,
+ p_dropout)
+ self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
+ self.f0_prenet = nn.Conv1d(1, hidden_channels , 3, padding=1)
+ self.cond = nn.Conv1d(spk_channels, hidden_channels, 1)
+
+ def forward(self, x, norm_f0, x_mask, spk_emb=None):
+ x = torch.detach(x)
+ if (spk_emb is not None):
+ x = x + self.cond(spk_emb)
+ x += self.f0_prenet(norm_f0)
+ x = self.prenet(x) * x_mask
+ x = self.decoder(x * x_mask, x_mask)
+ x = self.proj(x) * x_mask
+ return x
+
+
+class SynthesizerTrn(nn.Module):
+ """
+ Synthesizer for Training
+ """
+
+ def __init__(self,
+ spec_channels,
+ segment_size,
+ inter_channels,
+ hidden_channels,
+ filter_channels,
+ n_heads,
+ n_layers,
+ kernel_size,
+ p_dropout,
+ resblock,
+ resblock_kernel_sizes,
+ resblock_dilation_sizes,
+ upsample_rates,
+ upsample_initial_channel,
+ upsample_kernel_sizes,
+ gin_channels,
+ ssl_dim,
+ n_speakers,
+ sampling_rate=44100,
+ **kwargs):
+
+ super().__init__()
+ self.spec_channels = spec_channels
+ self.inter_channels = inter_channels
+ self.hidden_channels = hidden_channels
+ self.filter_channels = filter_channels
+ self.n_heads = n_heads
+ self.n_layers = n_layers
+ self.kernel_size = kernel_size
+ self.p_dropout = p_dropout
+ self.resblock = resblock
+ self.resblock_kernel_sizes = resblock_kernel_sizes
+ self.resblock_dilation_sizes = resblock_dilation_sizes
+ self.upsample_rates = upsample_rates
+ self.upsample_initial_channel = upsample_initial_channel
+ self.upsample_kernel_sizes = upsample_kernel_sizes
+ self.segment_size = segment_size
+ self.gin_channels = gin_channels
+ self.ssl_dim = ssl_dim
+ self.emb_g = nn.Embedding(n_speakers, gin_channels)
+
+ self.pre = nn.Conv1d(ssl_dim, hidden_channels, kernel_size=5, padding=2)
+
+ self.enc_p = TextEncoder(
+ inter_channels,
+ hidden_channels,
+ filter_channels=filter_channels,
+ n_heads=n_heads,
+ n_layers=n_layers,
+ kernel_size=kernel_size,
+ p_dropout=p_dropout
+ )
+ hps = {
+ "sampling_rate": sampling_rate,
+ "inter_channels": inter_channels,
+ "resblock": resblock,
+ "resblock_kernel_sizes": resblock_kernel_sizes,
+ "resblock_dilation_sizes": resblock_dilation_sizes,
+ "upsample_rates": upsample_rates,
+ "upsample_initial_channel": upsample_initial_channel,
+ "upsample_kernel_sizes": upsample_kernel_sizes,
+ "gin_channels": gin_channels,
+ }
+ self.dec = Generator(h=hps)
+ self.enc_q = Encoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels)
+ self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels)
+ self.f0_decoder = F0Decoder(
+ 1,
+ hidden_channels,
+ filter_channels,
+ n_heads,
+ n_layers,
+ kernel_size,
+ p_dropout,
+ spk_channels=gin_channels
+ )
+ self.emb_uv = nn.Embedding(2, hidden_channels)
+
+ def forward(self, c, f0, uv, spec, g=None, c_lengths=None, spec_lengths=None):
+ g = self.emb_g(g).transpose(1,2)
+ # ssl prenet
+ x_mask = torch.unsqueeze(commons.sequence_mask(c_lengths, c.size(2)), 1).to(c.dtype)
+ x = self.pre(c) * x_mask + self.emb_uv(uv.long()).transpose(1,2)
+
+ # f0 predict
+ lf0 = 2595. * torch.log10(1. + f0.unsqueeze(1) / 700.) / 500
+ norm_lf0 = utils.normalize_f0(lf0, x_mask, uv)
+ pred_lf0 = self.f0_decoder(x, norm_lf0, x_mask, spk_emb=g)
+
+ # encoder
+ z_ptemp, m_p, logs_p, _ = self.enc_p(x, x_mask, f0=f0_to_coarse(f0))
+ z, m_q, logs_q, spec_mask = self.enc_q(spec, spec_lengths, g=g)
+
+ # flow
+ z_p = self.flow(z, spec_mask, g=g)
+ z_slice, pitch_slice, ids_slice = commons.rand_slice_segments_with_pitch(z, f0, spec_lengths, self.segment_size)
+
+ # nsf decoder
+ o = self.dec(z_slice, g=g, f0=pitch_slice)
+
+ return o, ids_slice, spec_mask, (z, z_p, m_p, logs_p, m_q, logs_q), pred_lf0, norm_lf0, lf0
+
+ def infer(self, c, f0, uv, g=None, noice_scale=0.35, predict_f0=False):
+ c_lengths = (torch.ones(c.size(0)) * c.size(-1)).to(c.device)
+ g = self.emb_g(g).transpose(1,2)
+ x_mask = torch.unsqueeze(commons.sequence_mask(c_lengths, c.size(2)), 1).to(c.dtype)
+ x = self.pre(c) * x_mask + self.emb_uv(uv.long()).transpose(1,2)
+
+ if predict_f0:
+ lf0 = 2595. * torch.log10(1. + f0.unsqueeze(1) / 700.) / 500
+ norm_lf0 = utils.normalize_f0(lf0, x_mask, uv, random_scale=False)
+ pred_lf0 = self.f0_decoder(x, norm_lf0, x_mask, spk_emb=g)
+ f0 = (700 * (torch.pow(10, pred_lf0 * 500 / 2595) - 1)).squeeze(1)
+
+ z_p, m_p, logs_p, c_mask = self.enc_p(x, x_mask, f0=f0_to_coarse(f0), noice_scale=noice_scale)
+ z = self.flow(z_p, c_mask, g=g, reverse=True)
+ o = self.dec(z * c_mask, g=g, f0=f0)
+ return o
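
SynthesizerTrn.forward compresses F0 as lf0 = 2595 * log10(1 + f0/700) / 500 and infer() inverts that mapping before handing the predicted pitch to the NSF decoder. A quick round-trip check of the two formulas (illustrative only):

import torch

f0 = torch.tensor([100.0, 220.0, 440.0])               # example pitches in Hz
lf0 = 2595. * torch.log10(1. + f0 / 700.) / 500         # encoding used in forward()
f0_back = 700 * (torch.pow(10, lf0 * 500 / 2595) - 1)   # inversion used in infer()
assert torch.allclose(f0, f0_back, atol=1e-3)
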
diff --git a/models/arthur/arthur.pth b/models/arthur/arthur.pth
new file mode 100644
index 0000000000000000000000000000000000000000..42409307834532e398abad6297cb943ceb622dbf
--- /dev/null
+++ b/models/arthur/arthur.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:70fc73a4bf772cbdabb3703d45a466f54a45e446e869dd655038bbb41784e8ca
+size 180653938
diff --git a/models/arthur/config_arthur.json b/models/arthur/config_arthur.json
new file mode 100644
index 0000000000000000000000000000000000000000..1623c88a9c1dc0802ae02528b25f551f105688ec
--- /dev/null
+++ b/models/arthur/config_arthur.json
@@ -0,0 +1,93 @@
+{
+ "train": {
+ "log_interval": 200,
+ "eval_interval": 800,
+ "seed": 1234,
+ "epochs": 10000,
+ "learning_rate": 0.0001,
+ "betas": [
+ 0.8,
+ 0.99
+ ],
+ "eps": 1e-09,
+ "batch_size": 6,
+ "fp16_run": false,
+ "lr_decay": 0.999875,
+ "segment_size": 10240,
+ "init_lr_ratio": 1,
+ "warmup_epochs": 0,
+ "c_mel": 45,
+ "c_kl": 1.0,
+ "use_sr": true,
+ "max_speclen": 512,
+ "port": "8001",
+ "keep_ckpts": 0
+ },
+ "data": {
+ "training_files": "filelists/train.txt",
+ "validation_files": "filelists/val.txt",
+ "max_wav_value": 32768.0,
+ "sampling_rate": 44100,
+ "filter_length": 2048,
+ "hop_length": 512,
+ "win_length": 2048,
+ "n_mel_channels": 80,
+ "mel_fmin": 0.0,
+ "mel_fmax": 22050
+ },
+ "model": {
+ "inter_channels": 192,
+ "hidden_channels": 192,
+ "filter_channels": 768,
+ "n_heads": 2,
+ "n_layers": 6,
+ "kernel_size": 3,
+ "p_dropout": 0.1,
+ "resblock": "1",
+ "resblock_kernel_sizes": [
+ 3,
+ 7,
+ 11
+ ],
+ "resblock_dilation_sizes": [
+ [
+ 1,
+ 3,
+ 5
+ ],
+ [
+ 1,
+ 3,
+ 5
+ ],
+ [
+ 1,
+ 3,
+ 5
+ ]
+ ],
+ "upsample_rates": [
+ 8,
+ 8,
+ 2,
+ 2,
+ 2
+ ],
+ "upsample_initial_channel": 512,
+ "upsample_kernel_sizes": [
+ 16,
+ 16,
+ 4,
+ 4,
+ 4
+ ],
+ "n_layers_q": 3,
+ "use_spectral_norm": false,
+ "gin_channels": 256,
+ "ssl_dim": 256,
+ "n_speakers": 1
+ },
+ "spk": {
+ "arthur": 0
+ }
+}
\ No newline at end of file
diff --git a/models/carl/carl.pth b/models/carl/carl.pth
new file mode 100644
index 0000000000000000000000000000000000000000..e072126ef5f1a844c92c64421fd04efbb6913018
--- /dev/null
+++ b/models/carl/carl.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:aa229fa4e8de8f14b3b5cfd4b21552e7e8139656cb1ac617ff83d79aff2f241f
+size 180665609
diff --git a/models/carl/config_carl.json b/models/carl/config_carl.json
new file mode 100644
index 0000000000000000000000000000000000000000..a700609679853b04494b08d8b504e7aa3415e782
--- /dev/null
+++ b/models/carl/config_carl.json
@@ -0,0 +1,93 @@
+{
+ "train": {
+ "log_interval": 200,
+ "eval_interval": 800,
+ "seed": 1234,
+ "epochs": 10000,
+ "learning_rate": 0.0001,
+ "betas": [
+ 0.8,
+ 0.99
+ ],
+ "eps": 1e-09,
+ "batch_size": 4,
+ "fp16_run": false,
+ "lr_decay": 0.999875,
+ "segment_size": 10240,
+ "init_lr_ratio": 1,
+ "warmup_epochs": 0,
+ "c_mel": 45,
+ "c_kl": 1.0,
+ "use_sr": true,
+ "max_speclen": 512,
+ "port": "8001",
+ "keep_ckpts": 20
+ },
+ "data": {
+ "training_files": "filelists/train.txt",
+ "validation_files": "filelists/val.txt",
+ "max_wav_value": 32768.0,
+ "sampling_rate": 44100,
+ "filter_length": 2048,
+ "hop_length": 512,
+ "win_length": 2048,
+ "n_mel_channels": 80,
+ "mel_fmin": 0.0,
+ "mel_fmax": 22050
+ },
+ "model": {
+ "inter_channels": 192,
+ "hidden_channels": 192,
+ "filter_channels": 768,
+ "n_heads": 2,
+ "n_layers": 6,
+ "kernel_size": 3,
+ "p_dropout": 0.1,
+ "resblock": "1",
+ "resblock_kernel_sizes": [
+ 3,
+ 7,
+ 11
+ ],
+ "resblock_dilation_sizes": [
+ [
+ 1,
+ 3,
+ 5
+ ],
+ [
+ 1,
+ 3,
+ 5
+ ],
+ [
+ 1,
+ 3,
+ 5
+ ]
+ ],
+ "upsample_rates": [
+ 8,
+ 8,
+ 2,
+ 2,
+ 2
+ ],
+ "upsample_initial_channel": 512,
+ "upsample_kernel_sizes": [
+ 16,
+ 16,
+ 4,
+ 4,
+ 4
+ ],
+ "n_layers_q": 3,
+ "use_spectral_norm": false,
+ "gin_channels": 256,
+ "ssl_dim": 256,
+ "n_speakers": 1
+ },
+ "spk": {
+ "carl": 0
+ }
+}
\ No newline at end of file
diff --git a/models/cesar/cesar.pth b/models/cesar/cesar.pth
new file mode 100644
index 0000000000000000000000000000000000000000..e14678f01e7079a323b519b47e9745a9eb733571
--- /dev/null
+++ b/models/cesar/cesar.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:371b46e33961609ce9ff7a3d22e11bb7e839e5c1ad8c0105a0ffb7e31c7832d6
+size 209238367
diff --git a/models/cesar/config_cesar.json b/models/cesar/config_cesar.json
new file mode 100644
index 0000000000000000000000000000000000000000..b798d067c5ca763c077393403689df68a4778324
--- /dev/null
+++ b/models/cesar/config_cesar.json
@@ -0,0 +1,99 @@
+{
+ "train": {
+ "log_interval": 200,
+ "eval_interval": 2000,
+ "seed": 1234,
+ "epochs": 10000,
+ "learning_rate": 0.0001,
+ "betas": [
+ 0.8,
+ 0.99
+ ],
+ "eps": 1e-09,
+ "batch_size": 6,
+ "fp16_run": false,
+ "lr_decay": 0.999875,
+ "segment_size": 10240,
+ "init_lr_ratio": 1,
+ "warmup_epochs": 0,
+ "c_mel": 45,
+ "c_kl": 1.0,
+ "use_sr": true,
+ "max_speclen": 512,
+ "port": "8001",
+ "keep_ckpts": 10,
+ "all_in_mem": false,
+ "vol_aug": true
+ },
+ "data": {
+ "training_files": "filelists/train.txt",
+ "validation_files": "filelists/val.txt",
+ "max_wav_value": 32768.0,
+ "sampling_rate": 44100,
+ "filter_length": 2048,
+ "hop_length": 512,
+ "win_length": 2048,
+ "n_mel_channels": 80,
+ "mel_fmin": 0.0,
+ "mel_fmax": 22050
+ },
+ "model": {
+ "inter_channels": 192,
+ "hidden_channels": 192,
+ "filter_channels": 768,
+ "n_heads": 2,
+ "n_layers": 6,
+ "kernel_size": 3,
+ "p_dropout": 0.1,
+ "resblock": "1",
+ "resblock_kernel_sizes": [
+ 3,
+ 7,
+ 11
+ ],
+ "resblock_dilation_sizes": [
+ [
+ 1,
+ 3,
+ 5
+ ],
+ [
+ 1,
+ 3,
+ 5
+ ],
+ [
+ 1,
+ 3,
+ 5
+ ]
+ ],
+ "upsample_rates": [
+ 8,
+ 8,
+ 2,
+ 2,
+ 2
+ ],
+ "upsample_initial_channel": 512,
+ "upsample_kernel_sizes": [
+ 16,
+ 16,
+ 4,
+ 4,
+ 4
+ ],
+ "n_layers_q": 3,
+ "use_spectral_norm": false,
+ "gin_channels": 768,
+ "ssl_dim": 768,
+ "n_speakers": 1,
+ "vocoder_name": "nsf-hifigan",
+ "speech_encoder": "vec768l12",
+ "speaker_embedding": false,
+ "vol_embedding": true
+ },
+ "spk": {
+ "cesar": 0
+ }
+}
\ No newline at end of file
diff --git a/models/katalina/config_katalina.json b/models/katalina/config_katalina.json
new file mode 100644
index 0000000000000000000000000000000000000000..a554e6d07c85f4936ca841b26b0bace1b054523f
--- /dev/null
+++ b/models/katalina/config_katalina.json
@@ -0,0 +1,99 @@
+{
+ "train": {
+ "log_interval": 200,
+ "eval_interval": 800,
+ "seed": 1234,
+ "epochs": 10000,
+ "learning_rate": 0.0001,
+ "betas": [
+ 0.8,
+ 0.99
+ ],
+ "eps": 1e-09,
+ "batch_size": 12,
+ "fp16_run": false,
+ "lr_decay": 0.999875,
+ "segment_size": 10240,
+ "init_lr_ratio": 1,
+ "warmup_epochs": 0,
+ "c_mel": 45,
+ "c_kl": 1.0,
+ "use_sr": true,
+ "max_speclen": 512,
+ "port": "8001",
+ "keep_ckpts": 3,
+ "all_in_mem": false,
+ "vol_aug": true
+ },
+ "data": {
+ "training_files": "filelists/train.txt",
+ "validation_files": "filelists/val.txt",
+ "max_wav_value": 32768.0,
+ "sampling_rate": 44100,
+ "filter_length": 2048,
+ "hop_length": 512,
+ "win_length": 2048,
+ "n_mel_channels": 80,
+ "mel_fmin": 0.0,
+ "mel_fmax": 22050
+ },
+ "model": {
+ "inter_channels": 192,
+ "hidden_channels": 192,
+ "filter_channels": 768,
+ "n_heads": 2,
+ "n_layers": 6,
+ "kernel_size": 3,
+ "p_dropout": 0.1,
+ "resblock": "1",
+ "resblock_kernel_sizes": [
+ 3,
+ 7,
+ 11
+ ],
+ "resblock_dilation_sizes": [
+ [
+ 1,
+ 3,
+ 5
+ ],
+ [
+ 1,
+ 3,
+ 5
+ ],
+ [
+ 1,
+ 3,
+ 5
+ ]
+ ],
+ "upsample_rates": [
+ 8,
+ 8,
+ 2,
+ 2,
+ 2
+ ],
+ "upsample_initial_channel": 512,
+ "upsample_kernel_sizes": [
+ 16,
+ 16,
+ 4,
+ 4,
+ 4
+ ],
+ "n_layers_q": 3,
+ "use_spectral_norm": false,
+ "gin_channels": 768,
+ "ssl_dim": 768,
+ "n_speakers": 1,
+ "vocoder_name": "nsf-hifigan",
+ "speech_encoder": "vec768l12",
+ "speaker_embedding": false,
+ "vol_embedding": true
+ },
+ "spk": {
+ "katalina": 0
+ }
+}
\ No newline at end of file
diff --git a/models/katalina/katalina.pth b/models/katalina/katalina.pth
new file mode 100644
index 0000000000000000000000000000000000000000..827801988e8f73287277feb14da8007bd59755aa
--- /dev/null
+++ b/models/katalina/katalina.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cf2d89ab9a7862128c62a4abc6c55fc7c6ef7b1c9c92cffde0d1fa43ba0bcadc
+size 209238367
diff --git a/models/kendl/config_kendl.json b/models/kendl/config_kendl.json
new file mode 100644
index 0000000000000000000000000000000000000000..a9667c090b0728e13e23324f731da0d25ceb8552
--- /dev/null
+++ b/models/kendl/config_kendl.json
@@ -0,0 +1,99 @@
+{
+ "train": {
+ "log_interval": 200,
+ "eval_interval": 800,
+ "seed": 1234,
+ "epochs": 10000,
+ "learning_rate": 0.0001,
+ "betas": [
+ 0.8,
+ 0.99
+ ],
+ "eps": 1e-09,
+ "batch_size": 6,
+ "fp16_run": false,
+ "lr_decay": 0.999875,
+ "segment_size": 10240,
+ "init_lr_ratio": 1,
+ "warmup_epochs": 0,
+ "c_mel": 45,
+ "c_kl": 1.0,
+ "use_sr": true,
+ "max_speclen": 512,
+ "port": "8001",
+ "keep_ckpts": 3,
+ "all_in_mem": false,
+ "vol_aug": true
+ },
+ "data": {
+ "training_files": "filelists/train.txt",
+ "validation_files": "filelists/val.txt",
+ "max_wav_value": 32768.0,
+ "sampling_rate": 44100,
+ "filter_length": 2048,
+ "hop_length": 512,
+ "win_length": 2048,
+ "n_mel_channels": 80,
+ "mel_fmin": 0.0,
+ "mel_fmax": 22050
+ },
+ "model": {
+ "inter_channels": 192,
+ "hidden_channels": 192,
+ "filter_channels": 768,
+ "n_heads": 2,
+ "n_layers": 6,
+ "kernel_size": 3,
+ "p_dropout": 0.1,
+ "resblock": "1",
+ "resblock_kernel_sizes": [
+ 3,
+ 7,
+ 11
+ ],
+ "resblock_dilation_sizes": [
+ [
+ 1,
+ 3,
+ 5
+ ],
+ [
+ 1,
+ 3,
+ 5
+ ],
+ [
+ 1,
+ 3,
+ 5
+ ]
+ ],
+ "upsample_rates": [
+ 8,
+ 8,
+ 2,
+ 2,
+ 2
+ ],
+ "upsample_initial_channel": 512,
+ "upsample_kernel_sizes": [
+ 16,
+ 16,
+ 4,
+ 4,
+ 4
+ ],
+ "n_layers_q": 3,
+ "use_spectral_norm": false,
+ "gin_channels": 768,
+ "ssl_dim": 768,
+ "n_speakers": 1,
+ "vocoder_name": "nsf-hifigan",
+ "speech_encoder": "vec768l12",
+ "speaker_embedding": false,
+ "vol_embedding": true
+ },
+ "spk": {
+ "kendl": 0
+ }
+}
\ No newline at end of file
diff --git a/models/kendl/kendl.pth b/models/kendl/kendl.pth
new file mode 100644
index 0000000000000000000000000000000000000000..3082fdd44dc0cadcb25353595892bd50868b5148
--- /dev/null
+++ b/models/kendl/kendl.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d122bdbda24bebeb25b88ad9f0c6ccdf454995bd06435ddc0bcce924505336ae
+size 209189561
diff --git a/models/ogloc/config_ogloc.json b/models/ogloc/config_ogloc.json
new file mode 100644
index 0000000000000000000000000000000000000000..a1907b1fc6cafe80efd1ffef035dcbd19a23b089
--- /dev/null
+++ b/models/ogloc/config_ogloc.json
@@ -0,0 +1,99 @@
+{
+ "train": {
+ "log_interval": 200,
+ "eval_interval": 800,
+ "seed": 1234,
+ "epochs": 20000,
+ "learning_rate": 0.0001,
+ "betas": [
+ 0.8,
+ 0.99
+ ],
+ "eps": 1e-09,
+ "batch_size": 4,
+ "fp16_run": false,
+ "lr_decay": 0.999875,
+ "segment_size": 10240,
+ "init_lr_ratio": 1,
+ "warmup_epochs": 0,
+ "c_mel": 45,
+ "c_kl": 1.0,
+ "use_sr": true,
+ "max_speclen": 512,
+ "port": "8001",
+ "keep_ckpts": 10,
+ "all_in_mem": false,
+ "vol_aug": false
+ },
+ "data": {
+ "training_files": "filelists/train.txt",
+ "validation_files": "filelists/val.txt",
+ "max_wav_value": 32768.0,
+ "sampling_rate": 44100,
+ "filter_length": 2048,
+ "hop_length": 512,
+ "win_length": 2048,
+ "n_mel_channels": 80,
+ "mel_fmin": 0.0,
+ "mel_fmax": 22050
+ },
+ "model": {
+ "inter_channels": 192,
+ "hidden_channels": 192,
+ "filter_channels": 768,
+ "n_heads": 2,
+ "n_layers": 6,
+ "kernel_size": 3,
+ "p_dropout": 0.1,
+ "resblock": "1",
+ "resblock_kernel_sizes": [
+ 3,
+ 7,
+ 11
+ ],
+ "resblock_dilation_sizes": [
+ [
+ 1,
+ 3,
+ 5
+ ],
+ [
+ 1,
+ 3,
+ 5
+ ],
+ [
+ 1,
+ 3,
+ 5
+ ]
+ ],
+ "upsample_rates": [
+ 8,
+ 8,
+ 2,
+ 2,
+ 2
+ ],
+ "upsample_initial_channel": 512,
+ "upsample_kernel_sizes": [
+ 16,
+ 16,
+ 4,
+ 4,
+ 4
+ ],
+ "n_layers_q": 3,
+ "use_spectral_norm": false,
+ "gin_channels": 768,
+ "ssl_dim": 768,
+ "n_speakers": 1,
+ "vocoder_name": "nsf-hifigan",
+ "speech_encoder": "vec768l12",
+ "speaker_embedding": false,
+ "vol_embedding": false
+ },
+ "spk": {
+ "ogloc": 0
+ }
+}
\ No newline at end of file
diff --git a/models/ogloc/kmeans_ogloc.pt b/models/ogloc/kmeans_ogloc.pt
new file mode 100644
index 0000000000000000000000000000000000000000..5ef64e6d8c54437ee628f8f02736c8a5b1e01756
--- /dev/null
+++ b/models/ogloc/kmeans_ogloc.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2dc2d3c53272a0f1caeee0863c47484ad83592a2780246b98bda77284440c95b
+size 31339961
diff --git a/models/ogloc/ogloc.pth b/models/ogloc/ogloc.pth
new file mode 100644
index 0000000000000000000000000000000000000000..f9759e2888a09ea0883f27612d5b2920b08eee3b
--- /dev/null
+++ b/models/ogloc/ogloc.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cb14244a02966c812cea2462fb6ce60bf7792199ce1d80e1307285f4ae36c60a
+size 209187585
diff --git a/models/pulaski/config_pulaski.json b/models/pulaski/config_pulaski.json
new file mode 100644
index 0000000000000000000000000000000000000000..97890f272157295d11424df6d2d1a7354d3e7d19
--- /dev/null
+++ b/models/pulaski/config_pulaski.json
@@ -0,0 +1,99 @@
+{
+ "train": {
+ "log_interval": 200,
+ "eval_interval": 800,
+ "seed": 1234,
+ "epochs": 10000,
+ "learning_rate": 0.0001,
+ "betas": [
+ 0.8,
+ 0.99
+ ],
+ "eps": 1e-09,
+ "batch_size": 6,
+ "fp16_run": false,
+ "lr_decay": 0.999875,
+ "segment_size": 10240,
+ "init_lr_ratio": 1,
+ "warmup_epochs": 0,
+ "c_mel": 45,
+ "c_kl": 1.0,
+ "use_sr": true,
+ "max_speclen": 512,
+ "port": "8001",
+ "keep_ckpts": 3,
+ "all_in_mem": false,
+ "vol_aug": true
+ },
+ "data": {
+ "training_files": "filelists/train.txt",
+ "validation_files": "filelists/val.txt",
+ "max_wav_value": 32768.0,
+ "sampling_rate": 44100,
+ "filter_length": 2048,
+ "hop_length": 512,
+ "win_length": 2048,
+ "n_mel_channels": 80,
+ "mel_fmin": 0.0,
+ "mel_fmax": 22050
+ },
+ "model": {
+ "inter_channels": 192,
+ "hidden_channels": 192,
+ "filter_channels": 768,
+ "n_heads": 2,
+ "n_layers": 6,
+ "kernel_size": 3,
+ "p_dropout": 0.1,
+ "resblock": "1",
+ "resblock_kernel_sizes": [
+ 3,
+ 7,
+ 11
+ ],
+ "resblock_dilation_sizes": [
+ [
+ 1,
+ 3,
+ 5
+ ],
+ [
+ 1,
+ 3,
+ 5
+ ],
+ [
+ 1,
+ 3,
+ 5
+ ]
+ ],
+ "upsample_rates": [
+ 8,
+ 8,
+ 2,
+ 2,
+ 2
+ ],
+ "upsample_initial_channel": 512,
+ "upsample_kernel_sizes": [
+ 16,
+ 16,
+ 4,
+ 4,
+ 4
+ ],
+ "n_layers_q": 3,
+ "use_spectral_norm": false,
+ "gin_channels": 768,
+ "ssl_dim": 768,
+ "n_speakers": 1,
+ "vocoder_name": "nsf-hifigan",
+ "speech_encoder": "vec768l12",
+ "speaker_embedding": false,
+ "vol_embedding": true
+ },
+ "spk": {
+ "pulaski": 0
+ }
+}
\ No newline at end of file
diff --git a/models/pulaski/pulaski.pth b/models/pulaski/pulaski.pth
new file mode 100644
index 0000000000000000000000000000000000000000..df74e241478a7baaeb2ec0538bc093a7dfc17519
--- /dev/null
+++ b/models/pulaski/pulaski.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:720f78d3a854892e6fc2b4e1f9869cf8cff0bb331cf4f6e55eaf60c3a34a7ae9
+size 209238367
diff --git a/models/ryder/config_ryder.json b/models/ryder/config_ryder.json
new file mode 100644
index 0000000000000000000000000000000000000000..1607067c9aed9962863b5646392ea444cb175e99
--- /dev/null
+++ b/models/ryder/config_ryder.json
@@ -0,0 +1,99 @@
+{
+ "train": {
+ "log_interval": 200,
+ "eval_interval": 800,
+ "seed": 1234,
+ "epochs": 30000,
+ "learning_rate": 0.0001,
+ "betas": [
+ 0.8,
+ 0.99
+ ],
+ "eps": 1e-09,
+ "batch_size": 6,
+ "fp16_run": false,
+ "lr_decay": 0.999875,
+ "segment_size": 10240,
+ "init_lr_ratio": 1,
+ "warmup_epochs": 0,
+ "c_mel": 45,
+ "c_kl": 1.0,
+ "use_sr": true,
+ "max_speclen": 512,
+ "port": "8001",
+ "keep_ckpts": 10,
+ "all_in_mem": false,
+ "vol_aug": true
+ },
+ "data": {
+ "training_files": "filelists/train.txt",
+ "validation_files": "filelists/val.txt",
+ "max_wav_value": 32768.0,
+ "sampling_rate": 44100,
+ "filter_length": 2048,
+ "hop_length": 512,
+ "win_length": 2048,
+ "n_mel_channels": 80,
+ "mel_fmin": 0.0,
+ "mel_fmax": 22050
+ },
+ "model": {
+ "inter_channels": 192,
+ "hidden_channels": 192,
+ "filter_channels": 768,
+ "n_heads": 2,
+ "n_layers": 6,
+ "kernel_size": 3,
+ "p_dropout": 0.1,
+ "resblock": "1",
+ "resblock_kernel_sizes": [
+ 3,
+ 7,
+ 11
+ ],
+ "resblock_dilation_sizes": [
+ [
+ 1,
+ 3,
+ 5
+ ],
+ [
+ 1,
+ 3,
+ 5
+ ],
+ [
+ 1,
+ 3,
+ 5
+ ]
+ ],
+ "upsample_rates": [
+ 8,
+ 8,
+ 2,
+ 2,
+ 2
+ ],
+ "upsample_initial_channel": 512,
+ "upsample_kernel_sizes": [
+ 16,
+ 16,
+ 4,
+ 4,
+ 4
+ ],
+ "n_layers_q": 3,
+ "use_spectral_norm": false,
+ "gin_channels": 768,
+ "ssl_dim": 768,
+ "n_speakers": 1,
+ "vocoder_name": "nsf-hifigan",
+ "speech_encoder": "vec768l12",
+ "speaker_embedding": false,
+ "vol_embedding": true
+ },
+ "spk": {
+ "ryder": 0
+ }
+}
\ No newline at end of file
diff --git a/models/ryder/ryder.pth b/models/ryder/ryder.pth
new file mode 100644
index 0000000000000000000000000000000000000000..cdc421ae088c829a64e6f45b7e8d0b24c0551989
--- /dev/null
+++ b/models/ryder/ryder.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:19aaac8e141faf1b4309f20e78705c6e6c47f540c78a1ebf8afbf89b36b5de5f
+size 209189561
diff --git a/models/smoke/config_smoke.json b/models/smoke/config_smoke.json
new file mode 100644
index 0000000000000000000000000000000000000000..e20837500574c2914301a4c89c05dc6f03f2d1ec
--- /dev/null
+++ b/models/smoke/config_smoke.json
@@ -0,0 +1,99 @@
+{
+ "train": {
+ "log_interval": 200,
+ "eval_interval": 800,
+ "seed": 1234,
+ "epochs": 10000,
+ "learning_rate": 0.0001,
+ "betas": [
+ 0.8,
+ 0.99
+ ],
+ "eps": 1e-09,
+ "batch_size": 6,
+ "fp16_run": false,
+ "lr_decay": 0.999875,
+ "segment_size": 10240,
+ "init_lr_ratio": 1,
+ "warmup_epochs": 0,
+ "c_mel": 45,
+ "c_kl": 1.0,
+ "use_sr": true,
+ "max_speclen": 512,
+ "port": "8001",
+ "keep_ckpts": 3,
+ "all_in_mem": false,
+ "vol_aug": true
+ },
+ "data": {
+ "training_files": "filelists/train.txt",
+ "validation_files": "filelists/val.txt",
+ "max_wav_value": 32768.0,
+ "sampling_rate": 44100,
+ "filter_length": 2048,
+ "hop_length": 512,
+ "win_length": 2048,
+ "n_mel_channels": 80,
+ "mel_fmin": 0.0,
+ "mel_fmax": 22050
+ },
+ "model": {
+ "inter_channels": 192,
+ "hidden_channels": 192,
+ "filter_channels": 768,
+ "n_heads": 2,
+ "n_layers": 6,
+ "kernel_size": 3,
+ "p_dropout": 0.1,
+ "resblock": "1",
+ "resblock_kernel_sizes": [
+ 3,
+ 7,
+ 11
+ ],
+ "resblock_dilation_sizes": [
+ [
+ 1,
+ 3,
+ 5
+ ],
+ [
+ 1,
+ 3,
+ 5
+ ],
+ [
+ 1,
+ 3,
+ 5
+ ]
+ ],
+ "upsample_rates": [
+ 8,
+ 8,
+ 2,
+ 2,
+ 2
+ ],
+ "upsample_initial_channel": 512,
+ "upsample_kernel_sizes": [
+ 16,
+ 16,
+ 4,
+ 4,
+ 4
+ ],
+ "n_layers_q": 3,
+ "use_spectral_norm": false,
+ "gin_channels": 768,
+ "ssl_dim": 768,
+ "n_speakers": 1,
+ "vocoder_name": "nsf-hifigan",
+ "speech_encoder": "vec768l12",
+ "speaker_embedding": false,
+ "vol_embedding": true
+ },
+ "spk": {
+ "smoke": 0
+ }
+}
\ No newline at end of file
diff --git a/models/smoke/smoke.pth b/models/smoke/smoke.pth
new file mode 100644
index 0000000000000000000000000000000000000000..7e410abf9cbac4c180f765adb897cb2b2ddd07f9
--- /dev/null
+++ b/models/smoke/smoke.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c302ae6ca088cedda29c8a8e10cec888bd15be504f6ae0e8b94cc81aca98bab7
+size 209189561
diff --git a/models/sweet/config_sweet.json b/models/sweet/config_sweet.json
new file mode 100644
index 0000000000000000000000000000000000000000..616f3264cf7cd02c5aecaa62af9ba45ad75d7acf
--- /dev/null
+++ b/models/sweet/config_sweet.json
@@ -0,0 +1,99 @@
+{
+ "train": {
+ "log_interval": 200,
+ "eval_interval": 800,
+ "seed": 1234,
+ "epochs": 10000,
+ "learning_rate": 0.0001,
+ "betas": [
+ 0.8,
+ 0.99
+ ],
+ "eps": 1e-09,
+ "batch_size": 6,
+ "fp16_run": false,
+ "lr_decay": 0.999875,
+ "segment_size": 10240,
+ "init_lr_ratio": 1,
+ "warmup_epochs": 0,
+ "c_mel": 45,
+ "c_kl": 1.0,
+ "use_sr": true,
+ "max_speclen": 512,
+ "port": "8001",
+ "keep_ckpts": 3,
+ "all_in_mem": false,
+ "vol_aug": true
+ },
+ "data": {
+ "training_files": "filelists/train.txt",
+ "validation_files": "filelists/val.txt",
+ "max_wav_value": 32768.0,
+ "sampling_rate": 44100,
+ "filter_length": 2048,
+ "hop_length": 512,
+ "win_length": 2048,
+ "n_mel_channels": 80,
+ "mel_fmin": 0.0,
+ "mel_fmax": 22050
+ },
+ "model": {
+ "inter_channels": 192,
+ "hidden_channels": 192,
+ "filter_channels": 768,
+ "n_heads": 2,
+ "n_layers": 6,
+ "kernel_size": 3,
+ "p_dropout": 0.1,
+ "resblock": "1",
+ "resblock_kernel_sizes": [
+ 3,
+ 7,
+ 11
+ ],
+ "resblock_dilation_sizes": [
+ [
+ 1,
+ 3,
+ 5
+ ],
+ [
+ 1,
+ 3,
+ 5
+ ],
+ [
+ 1,
+ 3,
+ 5
+ ]
+ ],
+ "upsample_rates": [
+ 8,
+ 8,
+ 2,
+ 2,
+ 2
+ ],
+ "upsample_initial_channel": 512,
+ "upsample_kernel_sizes": [
+ 16,
+ 16,
+ 4,
+ 4,
+ 4
+ ],
+ "n_layers_q": 3,
+ "use_spectral_norm": false,
+ "gin_channels": 768,
+ "ssl_dim": 768,
+ "n_speakers": 1,
+ "vocoder_name": "nsf-hifigan",
+ "speech_encoder": "vec768l12",
+ "speaker_embedding": false,
+ "vol_embedding": true
+ },
+ "spk": {
+ "sweet": 0
+ }
+}
\ No newline at end of file
diff --git a/models/sweet/sweet.pth b/models/sweet/sweet.pth
new file mode 100644
index 0000000000000000000000000000000000000000..29dd9bbc228e6e1c4ca613fdaa234671dad71dc2
--- /dev/null
+++ b/models/sweet/sweet.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6d79642f4d1ee8b7381c2067178c8332e25195e18ad37ca0447fdc9dc1c5eb1a
+size 209189561
diff --git a/models/tenpenny/config_tenpenny.json b/models/tenpenny/config_tenpenny.json
new file mode 100644
index 0000000000000000000000000000000000000000..95af80a64fd5eebf59ad28c8be4503a236fb9329
--- /dev/null
+++ b/models/tenpenny/config_tenpenny.json
@@ -0,0 +1,99 @@
+{
+ "train": {
+ "log_interval": 200,
+ "eval_interval": 800,
+ "seed": 1234,
+ "epochs": 10000,
+ "learning_rate": 0.0001,
+ "betas": [
+ 0.8,
+ 0.99
+ ],
+ "eps": 1e-09,
+ "batch_size": 12,
+ "fp16_run": false,
+ "lr_decay": 0.999875,
+ "segment_size": 10240,
+ "init_lr_ratio": 1,
+ "warmup_epochs": 0,
+ "c_mel": 45,
+ "c_kl": 1.0,
+ "use_sr": true,
+ "max_speclen": 512,
+ "port": "8001",
+ "keep_ckpts": 3,
+ "all_in_mem": false,
+ "vol_aug": true
+ },
+ "data": {
+ "training_files": "filelists/train.txt",
+ "validation_files": "filelists/val.txt",
+ "max_wav_value": 32768.0,
+ "sampling_rate": 44100,
+ "filter_length": 2048,
+ "hop_length": 512,
+ "win_length": 2048,
+ "n_mel_channels": 80,
+ "mel_fmin": 0.0,
+ "mel_fmax": 22050
+ },
+ "model": {
+ "inter_channels": 192,
+ "hidden_channels": 192,
+ "filter_channels": 768,
+ "n_heads": 2,
+ "n_layers": 6,
+ "kernel_size": 3,
+ "p_dropout": 0.1,
+ "resblock": "1",
+ "resblock_kernel_sizes": [
+ 3,
+ 7,
+ 11
+ ],
+ "resblock_dilation_sizes": [
+ [
+ 1,
+ 3,
+ 5
+ ],
+ [
+ 1,
+ 3,
+ 5
+ ],
+ [
+ 1,
+ 3,
+ 5
+ ]
+ ],
+ "upsample_rates": [
+ 8,
+ 8,
+ 2,
+ 2,
+ 2
+ ],
+ "upsample_initial_channel": 512,
+ "upsample_kernel_sizes": [
+ 16,
+ 16,
+ 4,
+ 4,
+ 4
+ ],
+ "n_layers_q": 3,
+ "use_spectral_norm": false,
+ "gin_channels": 768,
+ "ssl_dim": 768,
+ "n_speakers": 1,
+ "vocoder_name": "nsf-hifigan",
+ "speech_encoder": "vec768l12",
+ "speaker_embedding": false,
+ "vol_embedding": true
+ },
+ "spk": {
+ "tenpenny": 0
+ }
+}
\ No newline at end of file
diff --git a/models/tenpenny/tenpenny.pth b/models/tenpenny/tenpenny.pth
new file mode 100644
index 0000000000000000000000000000000000000000..2289b60d99f986fee474efe543f59282ced8b0d6
--- /dev/null
+++ b/models/tenpenny/tenpenny.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cfc76ce97c91e042abce0d454d4e140237287e170440b54109534f4404f32a73
+size 209238367
diff --git a/models/tommy/config_tommy.json b/models/tommy/config_tommy.json
new file mode 100644
index 0000000000000000000000000000000000000000..6b01241ed87ce9a00523463d2edab3119ca256bc
--- /dev/null
+++ b/models/tommy/config_tommy.json
@@ -0,0 +1,99 @@
+{
+ "train": {
+ "log_interval": 200,
+ "eval_interval": 800,
+ "seed": 1234,
+ "epochs": 100000,
+ "learning_rate": 0.0001,
+ "betas": [
+ 0.8,
+ 0.99
+ ],
+ "eps": 1e-09,
+ "batch_size": 6,
+ "fp16_run": false,
+ "lr_decay": 0.999875,
+ "segment_size": 10240,
+ "init_lr_ratio": 1,
+ "warmup_epochs": 0,
+ "c_mel": 45,
+ "c_kl": 1.0,
+ "use_sr": true,
+ "max_speclen": 512,
+ "port": "8001",
+ "keep_ckpts": 30,
+ "all_in_mem": false,
+ "vol_aug": true
+ },
+ "data": {
+ "training_files": "filelists/train.txt",
+ "validation_files": "filelists/val.txt",
+ "max_wav_value": 32768.0,
+ "sampling_rate": 44100,
+ "filter_length": 2048,
+ "hop_length": 512,
+ "win_length": 2048,
+ "n_mel_channels": 80,
+ "mel_fmin": 0.0,
+ "mel_fmax": 22050
+ },
+ "model": {
+ "inter_channels": 192,
+ "hidden_channels": 192,
+ "filter_channels": 768,
+ "n_heads": 2,
+ "n_layers": 6,
+ "kernel_size": 3,
+ "p_dropout": 0.1,
+ "resblock": "1",
+ "resblock_kernel_sizes": [
+ 3,
+ 7,
+ 11
+ ],
+ "resblock_dilation_sizes": [
+ [
+ 1,
+ 3,
+ 5
+ ],
+ [
+ 1,
+ 3,
+ 5
+ ],
+ [
+ 1,
+ 3,
+ 5
+ ]
+ ],
+ "upsample_rates": [
+ 8,
+ 8,
+ 2,
+ 2,
+ 2
+ ],
+ "upsample_initial_channel": 512,
+ "upsample_kernel_sizes": [
+ 16,
+ 16,
+ 4,
+ 4,
+ 4
+ ],
+ "n_layers_q": 3,
+ "use_spectral_norm": false,
+ "gin_channels": 768,
+ "ssl_dim": 768,
+ "n_speakers": 1,
+ "vocoder_name": "nsf-hifigan",
+ "speech_encoder": "vec768l12",
+ "speaker_embedding": false,
+ "vol_embedding": true
+ },
+ "spk": {
+ "tommy": 0
+ }
+}
\ No newline at end of file
diff --git a/models/tommy/tommy.pth b/models/tommy/tommy.pth
new file mode 100644
index 0000000000000000000000000000000000000000..65ce4a18fda260f0b9a4c2d1a1103d75aaebb2e9
--- /dev/null
+++ b/models/tommy/tommy.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:43045cbe7f62f212f93afb1ed9b1c7a05b778440e1ae227234ec984e4719a527
+size 209189561
diff --git a/models/tomori/config_tomori.json b/models/tomori/config_tomori.json
new file mode 100644
index 0000000000000000000000000000000000000000..eda27e815f4826a645c203025657de8d40c72a25
--- /dev/null
+++ b/models/tomori/config_tomori.json
@@ -0,0 +1,99 @@
+{
+ "train": {
+ "log_interval": 200,
+ "eval_interval": 3200,
+ "seed": 1234,
+ "epochs": 30000,
+ "learning_rate": 0.0001,
+ "betas": [
+ 0.8,
+ 0.99
+ ],
+ "eps": 1e-09,
+ "batch_size": 12,
+ "fp16_run": false,
+ "lr_decay": 0.999875,
+ "segment_size": 10240,
+ "init_lr_ratio": 1,
+ "warmup_epochs": 0,
+ "c_mel": 45,
+ "c_kl": 1.0,
+ "use_sr": true,
+ "max_speclen": 512,
+ "port": "8001",
+ "keep_ckpts": 3,
+ "all_in_mem": false,
+ "vol_aug": true
+ },
+ "data": {
+ "training_files": "filelists/train.txt",
+ "validation_files": "filelists/val.txt",
+ "max_wav_value": 32768.0,
+ "sampling_rate": 44100,
+ "filter_length": 2048,
+ "hop_length": 512,
+ "win_length": 2048,
+ "n_mel_channels": 80,
+ "mel_fmin": 0.0,
+ "mel_fmax": 22050
+ },
+ "model": {
+ "inter_channels": 192,
+ "hidden_channels": 192,
+ "filter_channels": 768,
+ "n_heads": 2,
+ "n_layers": 6,
+ "kernel_size": 3,
+ "p_dropout": 0.1,
+ "resblock": "1",
+ "resblock_kernel_sizes": [
+ 3,
+ 7,
+ 11
+ ],
+ "resblock_dilation_sizes": [
+ [
+ 1,
+ 3,
+ 5
+ ],
+ [
+ 1,
+ 3,
+ 5
+ ],
+ [
+ 1,
+ 3,
+ 5
+ ]
+ ],
+ "upsample_rates": [
+ 8,
+ 8,
+ 2,
+ 2,
+ 2
+ ],
+ "upsample_initial_channel": 512,
+ "upsample_kernel_sizes": [
+ 16,
+ 16,
+ 4,
+ 4,
+ 4
+ ],
+ "n_layers_q": 3,
+ "use_spectral_norm": false,
+ "gin_channels": 768,
+ "ssl_dim": 768,
+ "n_speakers": 1,
+ "vocoder_name": "nsf-hifigan",
+ "speech_encoder": "vec768l12",
+ "speaker_embedding": false,
+ "vol_embedding": true
+ },
+ "spk": {
+ "tomori": 0
+ }
+}
\ No newline at end of file
diff --git a/models/tomori/tomori.pth b/models/tomori/tomori.pth
new file mode 100644
index 0000000000000000000000000000000000000000..b3b930fb5f6bafc1f312b496303b9cf950342091
--- /dev/null
+++ b/models/tomori/tomori.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5e915470eed2322969ae9dfadb51393f2f5744092690c11149de0ed65f04413f
+size 209238367
diff --git a/models/tomori/tomori_index.pkl b/models/tomori/tomori_index.pkl
new file mode 100644
index 0000000000000000000000000000000000000000..687df3f55cf9f9b73af2d3855fd0896289c35eb8
--- /dev/null
+++ b/models/tomori/tomori_index.pkl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2a069b9dfacaec64cbfa500576134535cac59b9fc08087f52c2cbeae90db9076
+size 189669700
diff --git a/models/torino/config_torino.json b/models/torino/config_torino.json
new file mode 100644
index 0000000000000000000000000000000000000000..af6aa6aebfcea0940971079adda7a76e55a5c812
--- /dev/null
+++ b/models/torino/config_torino.json
@@ -0,0 +1,99 @@
+{
+ "train": {
+ "log_interval": 200,
+ "eval_interval": 800,
+ "seed": 1234,
+ "epochs": 10000,
+ "learning_rate": 0.0001,
+ "betas": [
+ 0.8,
+ 0.99
+ ],
+ "eps": 1e-09,
+ "batch_size": 6,
+ "fp16_run": false,
+ "lr_decay": 0.999875,
+ "segment_size": 10240,
+ "init_lr_ratio": 1,
+ "warmup_epochs": 0,
+ "c_mel": 45,
+ "c_kl": 1.0,
+ "use_sr": true,
+ "max_speclen": 512,
+ "port": "8001",
+ "keep_ckpts": 3,
+ "all_in_mem": false,
+ "vol_aug": true
+ },
+ "data": {
+ "training_files": "filelists/train.txt",
+ "validation_files": "filelists/val.txt",
+ "max_wav_value": 32768.0,
+ "sampling_rate": 44100,
+ "filter_length": 2048,
+ "hop_length": 512,
+ "win_length": 2048,
+ "n_mel_channels": 80,
+ "mel_fmin": 0.0,
+ "mel_fmax": 22050
+ },
+ "model": {
+ "inter_channels": 192,
+ "hidden_channels": 192,
+ "filter_channels": 768,
+ "n_heads": 2,
+ "n_layers": 6,
+ "kernel_size": 3,
+ "p_dropout": 0.1,
+ "resblock": "1",
+ "resblock_kernel_sizes": [
+ 3,
+ 7,
+ 11
+ ],
+ "resblock_dilation_sizes": [
+ [
+ 1,
+ 3,
+ 5
+ ],
+ [
+ 1,
+ 3,
+ 5
+ ],
+ [
+ 1,
+ 3,
+ 5
+ ]
+ ],
+ "upsample_rates": [
+ 8,
+ 8,
+ 2,
+ 2,
+ 2
+ ],
+ "upsample_initial_channel": 512,
+ "upsample_kernel_sizes": [
+ 16,
+ 16,
+ 4,
+ 4,
+ 4
+ ],
+ "n_layers_q": 3,
+ "use_spectral_norm": false,
+ "gin_channels": 768,
+ "ssl_dim": 768,
+ "n_speakers": 1,
+ "vocoder_name": "nsf-hifigan",
+ "speech_encoder": "vec768l12",
+ "speaker_embedding": false,
+ "vol_embedding": true
+ },
+ "spk": {
+ "torino": 0
+ }
+}
\ No newline at end of file
diff --git a/models/torino/torino.pth b/models/torino/torino.pth
new file mode 100644
index 0000000000000000000000000000000000000000..41bedcabc0983c8f3ae65a4f2b56211b54a1947e
--- /dev/null
+++ b/models/torino/torino.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2121fdd4bddec88abadf14d9b985d1ed104cb8e02d0ceecc1fa991b14296eed7
+size 209238367
diff --git a/models/truth/config_truth.json b/models/truth/config_truth.json
new file mode 100644
index 0000000000000000000000000000000000000000..52e8976f23290d979b2a437634351ff6f221d760
--- /dev/null
+++ b/models/truth/config_truth.json
@@ -0,0 +1,99 @@
+{
+ "train": {
+ "log_interval": 200,
+ "eval_interval": 800,
+ "seed": 1234,
+ "epochs": 40000,
+ "learning_rate": 0.0001,
+ "betas": [
+ 0.8,
+ 0.99
+ ],
+ "eps": 1e-09,
+ "batch_size": 12,
+ "fp16_run": false,
+ "lr_decay": 0.999875,
+ "segment_size": 10240,
+ "init_lr_ratio": 1,
+ "warmup_epochs": 0,
+ "c_mel": 45,
+ "c_kl": 1.0,
+ "use_sr": true,
+ "max_speclen": 512,
+ "port": "8001",
+ "keep_ckpts": 3,
+ "all_in_mem": false,
+ "vol_aug": true
+ },
+ "data": {
+ "training_files": "filelists/train.txt",
+ "validation_files": "filelists/val.txt",
+ "max_wav_value": 32768.0,
+ "sampling_rate": 44100,
+ "filter_length": 2048,
+ "hop_length": 512,
+ "win_length": 2048,
+ "n_mel_channels": 80,
+ "mel_fmin": 0.0,
+ "mel_fmax": 22050
+ },
+ "model": {
+ "inter_channels": 192,
+ "hidden_channels": 192,
+ "filter_channels": 768,
+ "n_heads": 2,
+ "n_layers": 6,
+ "kernel_size": 3,
+ "p_dropout": 0.1,
+ "resblock": "1",
+ "resblock_kernel_sizes": [
+ 3,
+ 7,
+ 11
+ ],
+ "resblock_dilation_sizes": [
+ [
+ 1,
+ 3,
+ 5
+ ],
+ [
+ 1,
+ 3,
+ 5
+ ],
+ [
+ 1,
+ 3,
+ 5
+ ]
+ ],
+ "upsample_rates": [
+ 8,
+ 8,
+ 2,
+ 2,
+ 2
+ ],
+ "upsample_initial_channel": 512,
+ "upsample_kernel_sizes": [
+ 16,
+ 16,
+ 4,
+ 4,
+ 4
+ ],
+ "n_layers_q": 3,
+ "use_spectral_norm": false,
+ "gin_channels": 768,
+ "ssl_dim": 768,
+ "n_speakers": 1,
+ "vocoder_name": "nsf-hifigan",
+ "speech_encoder": "vec768l12",
+ "speaker_embedding": false,
+ "vol_embedding": true
+ },
+ "spk": {
+ "truth": 0
+ }
+}
\ No newline at end of file
diff --git a/models/truth/truth.pth b/models/truth/truth.pth
new file mode 100644
index 0000000000000000000000000000000000000000..d6e0bd68e3abcbd3fb72caa5029a9b145beaa231
--- /dev/null
+++ b/models/truth/truth.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:591d080d17da589982501a2cd2fb3f79c7215e1166114f963113a030f3f728d6
+size 209238367
diff --git a/models/wuzimu/config_wuzimu.json b/models/wuzimu/config_wuzimu.json
new file mode 100644
index 0000000000000000000000000000000000000000..38ab9df01b454bd88ecb8a4a0315b287ec927568
--- /dev/null
+++ b/models/wuzimu/config_wuzimu.json
@@ -0,0 +1,99 @@
+{
+ "train": {
+ "log_interval": 200,
+ "eval_interval": 800,
+ "seed": 1234,
+ "epochs": 10000,
+ "learning_rate": 0.0001,
+ "betas": [
+ 0.8,
+ 0.99
+ ],
+ "eps": 1e-09,
+ "batch_size": 6,
+ "fp16_run": false,
+ "lr_decay": 0.999875,
+ "segment_size": 10240,
+ "init_lr_ratio": 1,
+ "warmup_epochs": 0,
+ "c_mel": 45,
+ "c_kl": 1.0,
+ "use_sr": true,
+ "max_speclen": 512,
+ "port": "8001",
+ "keep_ckpts": 3,
+ "all_in_mem": false,
+ "vol_aug": true
+ },
+ "data": {
+ "training_files": "filelists/train.txt",
+ "validation_files": "filelists/val.txt",
+ "max_wav_value": 32768.0,
+ "sampling_rate": 44100,
+ "filter_length": 2048,
+ "hop_length": 512,
+ "win_length": 2048,
+ "n_mel_channels": 80,
+ "mel_fmin": 0.0,
+ "mel_fmax": 22050
+ },
+ "model": {
+ "inter_channels": 192,
+ "hidden_channels": 192,
+ "filter_channels": 768,
+ "n_heads": 2,
+ "n_layers": 6,
+ "kernel_size": 3,
+ "p_dropout": 0.1,
+ "resblock": "1",
+ "resblock_kernel_sizes": [
+ 3,
+ 7,
+ 11
+ ],
+ "resblock_dilation_sizes": [
+ [
+ 1,
+ 3,
+ 5
+ ],
+ [
+ 1,
+ 3,
+ 5
+ ],
+ [
+ 1,
+ 3,
+ 5
+ ]
+ ],
+ "upsample_rates": [
+ 8,
+ 8,
+ 2,
+ 2,
+ 2
+ ],
+ "upsample_initial_channel": 512,
+ "upsample_kernel_sizes": [
+ 16,
+ 16,
+ 4,
+ 4,
+ 4
+ ],
+ "n_layers_q": 3,
+ "use_spectral_norm": false,
+ "gin_channels": 768,
+ "ssl_dim": 768,
+ "n_speakers": 1,
+ "vocoder_name": "nsf-hifigan",
+ "speech_encoder": "vec768l12",
+ "speaker_embedding": false,
+ "vol_embedding": true
+ },
+ "spk": {
+ "wuzimu": 0
+ }
+}
\ No newline at end of file
diff --git a/models/wuzimu/wuzimu.pth b/models/wuzimu/wuzimu.pth
new file mode 100644
index 0000000000000000000000000000000000000000..2bc7ae84aea0c576646e23ff6e593e19b1a87520
--- /dev/null
+++ b/models/wuzimu/wuzimu.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:25229f0b332877bea3f4fd71c4b0b70766d5b5e9c721473356b6383dcae4271e
+size 209238367
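
Every speaker config added above shares the same model section (vec768l12 content encoder with ssl_dim 768, NSF-HiFiGAN vocoder, 44.1 kHz data, a single speaker, volume embedding enabled); only the training schedule and the speaker map differ. As a quick sanity check, such a config can be read with plain json; the sketch below is illustrative only and is not part of the patch files above.

import json

# Illustrative sketch, not part of the patch: inspect one of the configs added above.
with open("models/tomori/config_tomori.json", "r", encoding="utf-8") as f:
    hps = json.load(f)

assert hps["model"]["ssl_dim"] == 768          # vec768l12 content features
assert hps["data"]["sampling_rate"] == 44100   # 44.1 kHz audio
speaker_id = hps["spk"]["tomori"]              # single-speaker map, id 0
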
diff --git a/modules/__init__.py b/modules/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/modules/attentions.py b/modules/attentions.py
new file mode 100644
index 0000000000000000000000000000000000000000..f9c11ca4a3acb86bf1abc04d9dcfa82a4ed4061f
--- /dev/null
+++ b/modules/attentions.py
@@ -0,0 +1,349 @@
+import copy
+import math
+import numpy as np
+import torch
+from torch import nn
+from torch.nn import functional as F
+
+import modules.commons as commons
+import modules.modules as modules
+from modules.modules import LayerNorm
+
+
+class FFT(nn.Module):
+ def __init__(self, hidden_channels, filter_channels, n_heads, n_layers=1, kernel_size=1, p_dropout=0.,
+ proximal_bias=False, proximal_init=True, **kwargs):
+ super().__init__()
+ self.hidden_channels = hidden_channels
+ self.filter_channels = filter_channels
+ self.n_heads = n_heads
+ self.n_layers = n_layers
+ self.kernel_size = kernel_size
+ self.p_dropout = p_dropout
+ self.proximal_bias = proximal_bias
+ self.proximal_init = proximal_init
+
+ self.drop = nn.Dropout(p_dropout)
+ self.self_attn_layers = nn.ModuleList()
+ self.norm_layers_0 = nn.ModuleList()
+ self.ffn_layers = nn.ModuleList()
+ self.norm_layers_1 = nn.ModuleList()
+ for i in range(self.n_layers):
+ self.self_attn_layers.append(
+ MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias,
+ proximal_init=proximal_init))
+ self.norm_layers_0.append(LayerNorm(hidden_channels))
+ self.ffn_layers.append(
+ FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True))
+ self.norm_layers_1.append(LayerNorm(hidden_channels))
+
+ def forward(self, x, x_mask):
+ """
+ x: decoder input
+ x_mask: mask over the time dimension of x
+ """
+ self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype)
+ x = x * x_mask
+ for i in range(self.n_layers):
+ y = self.self_attn_layers[i](x, x, self_attn_mask)
+ y = self.drop(y)
+ x = self.norm_layers_0[i](x + y)
+
+ y = self.ffn_layers[i](x, x_mask)
+ y = self.drop(y)
+ x = self.norm_layers_1[i](x + y)
+ x = x * x_mask
+ return x
+
+
+class Encoder(nn.Module):
+ def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4, **kwargs):
+ super().__init__()
+ self.hidden_channels = hidden_channels
+ self.filter_channels = filter_channels
+ self.n_heads = n_heads
+ self.n_layers = n_layers
+ self.kernel_size = kernel_size
+ self.p_dropout = p_dropout
+ self.window_size = window_size
+
+ self.drop = nn.Dropout(p_dropout)
+ self.attn_layers = nn.ModuleList()
+ self.norm_layers_1 = nn.ModuleList()
+ self.ffn_layers = nn.ModuleList()
+ self.norm_layers_2 = nn.ModuleList()
+ for i in range(self.n_layers):
+ self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size))
+ self.norm_layers_1.append(LayerNorm(hidden_channels))
+ self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout))
+ self.norm_layers_2.append(LayerNorm(hidden_channels))
+
+ def forward(self, x, x_mask):
+ attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
+ x = x * x_mask
+ for i in range(self.n_layers):
+ y = self.attn_layers[i](x, x, attn_mask)
+ y = self.drop(y)
+ x = self.norm_layers_1[i](x + y)
+
+ y = self.ffn_layers[i](x, x_mask)
+ y = self.drop(y)
+ x = self.norm_layers_2[i](x + y)
+ x = x * x_mask
+ return x
+
+
+class Decoder(nn.Module):
+ def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., proximal_bias=False, proximal_init=True, **kwargs):
+ super().__init__()
+ self.hidden_channels = hidden_channels
+ self.filter_channels = filter_channels
+ self.n_heads = n_heads
+ self.n_layers = n_layers
+ self.kernel_size = kernel_size
+ self.p_dropout = p_dropout
+ self.proximal_bias = proximal_bias
+ self.proximal_init = proximal_init
+
+ self.drop = nn.Dropout(p_dropout)
+ self.self_attn_layers = nn.ModuleList()
+ self.norm_layers_0 = nn.ModuleList()
+ self.encdec_attn_layers = nn.ModuleList()
+ self.norm_layers_1 = nn.ModuleList()
+ self.ffn_layers = nn.ModuleList()
+ self.norm_layers_2 = nn.ModuleList()
+ for i in range(self.n_layers):
+ self.self_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias, proximal_init=proximal_init))
+ self.norm_layers_0.append(LayerNorm(hidden_channels))
+ self.encdec_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout))
+ self.norm_layers_1.append(LayerNorm(hidden_channels))
+ self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True))
+ self.norm_layers_2.append(LayerNorm(hidden_channels))
+
+ def forward(self, x, x_mask, h, h_mask):
+ """
+ x: decoder input
+ h: encoder output
+ """
+ self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype)
+ encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
+ x = x * x_mask
+ for i in range(self.n_layers):
+ y = self.self_attn_layers[i](x, x, self_attn_mask)
+ y = self.drop(y)
+ x = self.norm_layers_0[i](x + y)
+
+ y = self.encdec_attn_layers[i](x, h, encdec_attn_mask)
+ y = self.drop(y)
+ x = self.norm_layers_1[i](x + y)
+
+ y = self.ffn_layers[i](x, x_mask)
+ y = self.drop(y)
+ x = self.norm_layers_2[i](x + y)
+ x = x * x_mask
+ return x
+
+
+class MultiHeadAttention(nn.Module):
+ def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False):
+ super().__init__()
+ assert channels % n_heads == 0
+
+ self.channels = channels
+ self.out_channels = out_channels
+ self.n_heads = n_heads
+ self.p_dropout = p_dropout
+ self.window_size = window_size
+ self.heads_share = heads_share
+ self.block_length = block_length
+ self.proximal_bias = proximal_bias
+ self.proximal_init = proximal_init
+ self.attn = None
+
+ self.k_channels = channels // n_heads
+ self.conv_q = nn.Conv1d(channels, channels, 1)
+ self.conv_k = nn.Conv1d(channels, channels, 1)
+ self.conv_v = nn.Conv1d(channels, channels, 1)
+ self.conv_o = nn.Conv1d(channels, out_channels, 1)
+ self.drop = nn.Dropout(p_dropout)
+
+ if window_size is not None:
+ n_heads_rel = 1 if heads_share else n_heads
+ rel_stddev = self.k_channels**-0.5
+ self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
+ self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
+
+ nn.init.xavier_uniform_(self.conv_q.weight)
+ nn.init.xavier_uniform_(self.conv_k.weight)
+ nn.init.xavier_uniform_(self.conv_v.weight)
+ if proximal_init:
+ with torch.no_grad():
+ self.conv_k.weight.copy_(self.conv_q.weight)
+ self.conv_k.bias.copy_(self.conv_q.bias)
+
+ def forward(self, x, c, attn_mask=None):
+ q = self.conv_q(x)
+ k = self.conv_k(c)
+ v = self.conv_v(c)
+
+ x, self.attn = self.attention(q, k, v, mask=attn_mask)
+
+ x = self.conv_o(x)
+ return x
+
+ def attention(self, query, key, value, mask=None):
+ # reshape [b, d, t] -> [b, n_h, t, d_k]
+ b, d, t_s, t_t = (*key.size(), query.size(2))
+ query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
+ key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
+ value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
+
+ scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
+ if self.window_size is not None:
+ assert t_s == t_t, "Relative attention is only available for self-attention."
+ key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
+ rel_logits = self._matmul_with_relative_keys(query /math.sqrt(self.k_channels), key_relative_embeddings)
+ scores_local = self._relative_position_to_absolute_position(rel_logits)
+ scores = scores + scores_local
+ if self.proximal_bias:
+ assert t_s == t_t, "Proximal bias is only available for self-attention."
+ scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype)
+ if mask is not None:
+ scores = scores.masked_fill(mask == 0, -1e4)
+ if self.block_length is not None:
+ assert t_s == t_t, "Local attention is only available for self-attention."
+ block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length)
+ scores = scores.masked_fill(block_mask == 0, -1e4)
+ p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s]
+ p_attn = self.drop(p_attn)
+ output = torch.matmul(p_attn, value)
+ if self.window_size is not None:
+ relative_weights = self._absolute_position_to_relative_position(p_attn)
+ value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s)
+ output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings)
+ output = output.transpose(2, 3).contiguous().view(b, d, t_t) # [b, n_h, t_t, d_k] -> [b, d, t_t]
+ return output, p_attn
+
+ def _matmul_with_relative_values(self, x, y):
+ """
+ x: [b, h, l, m]
+ y: [h or 1, m, d]
+ ret: [b, h, l, d]
+ """
+ ret = torch.matmul(x, y.unsqueeze(0))
+ return ret
+
+ def _matmul_with_relative_keys(self, x, y):
+ """
+ x: [b, h, l, d]
+ y: [h or 1, m, d]
+ ret: [b, h, l, m]
+ """
+ ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
+ return ret
+
+ def _get_relative_embeddings(self, relative_embeddings, length):
+ max_relative_position = 2 * self.window_size + 1
+ # Pad first before slice to avoid using cond ops.
+ pad_length = max(length - (self.window_size + 1), 0)
+ slice_start_position = max((self.window_size + 1) - length, 0)
+ slice_end_position = slice_start_position + 2 * length - 1
+ if pad_length > 0:
+ padded_relative_embeddings = F.pad(
+ relative_embeddings,
+ commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]))
+ else:
+ padded_relative_embeddings = relative_embeddings
+ used_relative_embeddings = padded_relative_embeddings[:,slice_start_position:slice_end_position]
+ return used_relative_embeddings
+
+ def _relative_position_to_absolute_position(self, x):
+ """
+ x: [b, h, l, 2*l-1]
+ ret: [b, h, l, l]
+ """
+ batch, heads, length, _ = x.size()
+ # Concat columns of pad to shift from relative to absolute indexing.
+ x = F.pad(x, commons.convert_pad_shape([[0,0],[0,0],[0,0],[0,1]]))
+
+ # Concat extra elements so that the shape adds up to (len+1, 2*len-1).
+ x_flat = x.view([batch, heads, length * 2 * length])
+ x_flat = F.pad(x_flat, commons.convert_pad_shape([[0,0],[0,0],[0,length-1]]))
+
+ # Reshape and slice out the padded elements.
+ x_final = x_flat.view([batch, heads, length+1, 2*length-1])[:, :, :length, length-1:]
+ return x_final
+
+ def _absolute_position_to_relative_position(self, x):
+ """
+ x: [b, h, l, l]
+ ret: [b, h, l, 2*l-1]
+ """
+ batch, heads, length, _ = x.size()
+ # pad along the last (column) dimension
+ x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length-1]]))
+ x_flat = x.view([batch, heads, length**2 + length*(length -1)])
+ # add zeros at the beginning that will skew the elements after the reshape
+ x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
+ x_final = x_flat.view([batch, heads, length, 2*length])[:,:,:,1:]
+ return x_final
+
+ def _attention_bias_proximal(self, length):
+ """Bias for self-attention to encourage attention to close positions.
+ Args:
+ length: an integer scalar.
+ Returns:
+ a Tensor with shape [1, 1, length, length]
+ """
+ r = torch.arange(length, dtype=torch.float32)
+ diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
+ return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)
+
+
+class FFN(nn.Module):
+ def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None, causal=False):
+ super().__init__()
+ self.in_channels = in_channels
+ self.out_channels = out_channels
+ self.filter_channels = filter_channels
+ self.kernel_size = kernel_size
+ self.p_dropout = p_dropout
+ self.activation = activation
+ self.causal = causal
+
+ if causal:
+ self.padding = self._causal_padding
+ else:
+ self.padding = self._same_padding
+
+ self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
+ self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
+ self.drop = nn.Dropout(p_dropout)
+
+ def forward(self, x, x_mask):
+ x = self.conv_1(self.padding(x * x_mask))
+ if self.activation == "gelu":
+ x = x * torch.sigmoid(1.702 * x)
+ else:
+ x = torch.relu(x)
+ x = self.drop(x)
+ x = self.conv_2(self.padding(x * x_mask))
+ return x * x_mask
+
+ def _causal_padding(self, x):
+ if self.kernel_size == 1:
+ return x
+ pad_l = self.kernel_size - 1
+ pad_r = 0
+ padding = [[0, 0], [0, 0], [pad_l, pad_r]]
+ x = F.pad(x, commons.convert_pad_shape(padding))
+ return x
+
+ def _same_padding(self, x):
+ if self.kernel_size == 1:
+ return x
+ pad_l = (self.kernel_size - 1) // 2
+ pad_r = self.kernel_size // 2
+ padding = [[0, 0], [0, 0], [pad_l, pad_r]]
+ x = F.pad(x, commons.convert_pad_shape(padding))
+ return x
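
For reference, the Encoder above is the relative-position self-attention stack applied to content features; it expects channel-first tensors and a [batch, 1, frames] mask. A minimal usage sketch (illustrative only; shapes chosen to match the configs in this patch):

import torch
import modules.commons as commons
from modules.attentions import Encoder

# Illustrative sketch, not part of the patch: run the attention encoder on padded features.
enc = Encoder(hidden_channels=192, filter_channels=768, n_heads=2,
              n_layers=6, kernel_size=3, p_dropout=0.1)   # values from the configs above
x = torch.randn(2, 192, 100)                               # [batch, channels, frames]
lengths = torch.tensor([100, 63])
x_mask = commons.sequence_mask(lengths, 100).unsqueeze(1).float()  # [batch, 1, frames]
y = enc(x, x_mask)                                         # [2, 192, 100]; padded frames zeroed
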
diff --git a/modules/commons.py b/modules/commons.py
new file mode 100644
index 0000000000000000000000000000000000000000..074888006392e956ce204d8368362dbb2cd4e304
--- /dev/null
+++ b/modules/commons.py
@@ -0,0 +1,188 @@
+import math
+import numpy as np
+import torch
+from torch import nn
+from torch.nn import functional as F
+
+def slice_pitch_segments(x, ids_str, segment_size=4):
+ ret = torch.zeros_like(x[:, :segment_size])
+ for i in range(x.size(0)):
+ idx_str = ids_str[i]
+ idx_end = idx_str + segment_size
+ ret[i] = x[i, idx_str:idx_end]
+ return ret
+
+def rand_slice_segments_with_pitch(x, pitch, x_lengths=None, segment_size=4):
+ b, d, t = x.size()
+ if x_lengths is None:
+ x_lengths = t
+ ids_str_max = x_lengths - segment_size + 1
+ ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long)
+ ret = slice_segments(x, ids_str, segment_size)
+ ret_pitch = slice_pitch_segments(pitch, ids_str, segment_size)
+ return ret, ret_pitch, ids_str
+
+def init_weights(m, mean=0.0, std=0.01):
+ classname = m.__class__.__name__
+ if classname.find("Conv") != -1:
+ m.weight.data.normal_(mean, std)
+
+
+def get_padding(kernel_size, dilation=1):
+ return int((kernel_size*dilation - dilation)/2)
+
+
+def convert_pad_shape(pad_shape):
+ l = pad_shape[::-1]
+ pad_shape = [item for sublist in l for item in sublist]
+ return pad_shape
+
+
+def intersperse(lst, item):
+ result = [item] * (len(lst) * 2 + 1)
+ result[1::2] = lst
+ return result
+
+
+def kl_divergence(m_p, logs_p, m_q, logs_q):
+ """KL(P||Q)"""
+ kl = (logs_q - logs_p) - 0.5
+ kl += 0.5 * (torch.exp(2. * logs_p) + ((m_p - m_q)**2)) * torch.exp(-2. * logs_q)
+ return kl
+
+
+def rand_gumbel(shape):
+ """Sample from the Gumbel distribution, protect from overflows."""
+ uniform_samples = torch.rand(shape) * 0.99998 + 0.00001
+ return -torch.log(-torch.log(uniform_samples))
+
+
+def rand_gumbel_like(x):
+ g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device)
+ return g
+
+
+def slice_segments(x, ids_str, segment_size=4):
+ ret = torch.zeros_like(x[:, :, :segment_size])
+ for i in range(x.size(0)):
+ idx_str = ids_str[i]
+ idx_end = idx_str + segment_size
+ ret[i] = x[i, :, idx_str:idx_end]
+ return ret
+
+
+def rand_slice_segments(x, x_lengths=None, segment_size=4):
+ b, d, t = x.size()
+ if x_lengths is None:
+ x_lengths = t
+ ids_str_max = x_lengths - segment_size + 1
+ ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long)
+ ret = slice_segments(x, ids_str, segment_size)
+ return ret, ids_str
+
+
+def rand_spec_segments(x, x_lengths=None, segment_size=4):
+ b, d, t = x.size()
+ if x_lengths is None:
+ x_lengths = t
+ ids_str_max = x_lengths - segment_size
+ ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long)
+ ret = slice_segments(x, ids_str, segment_size)
+ return ret, ids_str
+
+
+def get_timing_signal_1d(
+ length, channels, min_timescale=1.0, max_timescale=1.0e4):
+ position = torch.arange(length, dtype=torch.float)
+ num_timescales = channels // 2
+ log_timescale_increment = (
+ math.log(float(max_timescale) / float(min_timescale)) /
+ (num_timescales - 1))
+ inv_timescales = min_timescale * torch.exp(
+ torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment)
+ scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1)
+ signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0)
+ signal = F.pad(signal, [0, 0, 0, channels % 2])
+ signal = signal.view(1, channels, length)
+ return signal
+
+
+def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):
+ b, channels, length = x.size()
+ signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
+ return x + signal.to(dtype=x.dtype, device=x.device)
+
+
+def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1):
+ b, channels, length = x.size()
+ signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
+ return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis)
+
+
+def subsequent_mask(length):
+ mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0)
+ return mask
+
+
+@torch.jit.script
+def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
+ n_channels_int = n_channels[0]
+ in_act = input_a + input_b
+ t_act = torch.tanh(in_act[:, :n_channels_int, :])
+ s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
+ acts = t_act * s_act
+ return acts
+
+
+def convert_pad_shape(pad_shape):
+ l = pad_shape[::-1]
+ pad_shape = [item for sublist in l for item in sublist]
+ return pad_shape
+
+
+def shift_1d(x):
+ x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1]
+ return x
+
+
+def sequence_mask(length, max_length=None):
+ if max_length is None:
+ max_length = length.max()
+ x = torch.arange(max_length, dtype=length.dtype, device=length.device)
+ return x.unsqueeze(0) < length.unsqueeze(1)
+
+
+def generate_path(duration, mask):
+ """
+ duration: [b, 1, t_x]
+ mask: [b, 1, t_y, t_x]
+ """
+ device = duration.device
+
+ b, _, t_y, t_x = mask.shape
+ cum_duration = torch.cumsum(duration, -1)
+
+ cum_duration_flat = cum_duration.view(b * t_x)
+ path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype)
+ path = path.view(b, t_x, t_y)
+ path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1]
+ path = path.unsqueeze(1).transpose(2,3) * mask
+ return path
+
+
+def clip_grad_value_(parameters, clip_value, norm_type=2):
+ if isinstance(parameters, torch.Tensor):
+ parameters = [parameters]
+ parameters = list(filter(lambda p: p.grad is not None, parameters))
+ norm_type = float(norm_type)
+ if clip_value is not None:
+ clip_value = float(clip_value)
+
+ total_norm = 0
+ for p in parameters:
+ param_norm = p.grad.data.norm(norm_type)
+ total_norm += param_norm.item() ** norm_type
+ if clip_value is not None:
+ p.grad.data.clamp_(min=-clip_value, max=clip_value)
+ total_norm = total_norm ** (1. / norm_type)
+ return total_norm
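
Most helpers in commons.py are shape utilities used during training; for example, rand_slice_segments crops a random fixed-length window per batch item while respecting each item's valid length. A small illustrative sketch (not part of the patch):

import torch
import modules.commons as commons

# Illustrative sketch, not part of the patch: random segment cropping for training batches.
z = torch.randn(4, 192, 400)                       # [batch, channels, frames]
lengths = torch.tensor([400, 350, 280, 390])
z_slice, ids_str = commons.rand_slice_segments(z, lengths, segment_size=20)
print(z_slice.shape)                               # torch.Size([4, 192, 20])
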
diff --git a/modules/crepe.py b/modules/crepe.py
new file mode 100644
index 0000000000000000000000000000000000000000..0bff0e3474de6483290b56993f9b845e91ef9702
--- /dev/null
+++ b/modules/crepe.py
@@ -0,0 +1,327 @@
+from typing import Optional,Union
+try:
+ from typing import Literal
+except Exception as e:
+ from typing_extensions import Literal
+import numpy as np
+import torch
+import torchcrepe
+from torch import nn
+from torch.nn import functional as F
+import scipy
+
+# from: https://github.com/fishaudio/fish-diffusion
+
+def repeat_expand(
+ content: Union[torch.Tensor, np.ndarray], target_len: int, mode: str = "nearest"
+):
+ """Repeat content to target length.
+ This is a wrapper of torch.nn.functional.interpolate.
+
+ Args:
+ content (torch.Tensor): tensor
+ target_len (int): target length
+ mode (str, optional): interpolation mode. Defaults to "nearest".
+
+ Returns:
+ torch.Tensor: tensor
+ """
+
+ ndim = content.ndim
+
+ if content.ndim == 1:
+ content = content[None, None]
+ elif content.ndim == 2:
+ content = content[None]
+
+ assert content.ndim == 3
+
+ is_np = isinstance(content, np.ndarray)
+ if is_np:
+ content = torch.from_numpy(content)
+
+ results = torch.nn.functional.interpolate(content, size=target_len, mode=mode)
+
+ if is_np:
+ results = results.numpy()
+
+ if ndim == 1:
+ return results[0, 0]
+ elif ndim == 2:
+ return results[0]
+ return results
+
+
+class BasePitchExtractor:
+ def __init__(
+ self,
+ hop_length: int = 512,
+ f0_min: float = 50.0,
+ f0_max: float = 1100.0,
+ keep_zeros: bool = True,
+ ):
+ """Base pitch extractor.
+
+ Args:
+ hop_length (int, optional): Hop length. Defaults to 512.
+ f0_min (float, optional): Minimum f0. Defaults to 50.0.
+ f0_max (float, optional): Maximum f0. Defaults to 1100.0.
+ keep_zeros (bool, optional): Whether to keep zero (unvoiced) frames in the pitch. Defaults to True.
+ """
+
+ self.hop_length = hop_length
+ self.f0_min = f0_min
+ self.f0_max = f0_max
+ self.keep_zeros = keep_zeros
+
+ def __call__(self, x, sampling_rate=44100, pad_to=None):
+ raise NotImplementedError("BasePitchExtractor is not callable.")
+
+ def post_process(self, x, sampling_rate, f0, pad_to):
+ if isinstance(f0, np.ndarray):
+ f0 = torch.from_numpy(f0).float().to(x.device)
+
+ if pad_to is None:
+ return f0
+
+ f0 = repeat_expand(f0, pad_to)
+
+ if self.keep_zeros:
+ return f0
+
+ vuv_vector = torch.zeros_like(f0)
+ vuv_vector[f0 > 0.0] = 1.0
+ vuv_vector[f0 <= 0.0] = 0.0
+
+ # Drop zero-frequency (unvoiced) frames and linearly interpolate over them
+ nzindex = torch.nonzero(f0).squeeze()
+ f0 = torch.index_select(f0, dim=0, index=nzindex).cpu().numpy()
+ time_org = self.hop_length / sampling_rate * nzindex.cpu().numpy()
+ time_frame = np.arange(pad_to) * self.hop_length / sampling_rate
+
+ if f0.shape[0] <= 0:
+ return torch.zeros(pad_to, dtype=torch.float, device=x.device),torch.zeros(pad_to, dtype=torch.float, device=x.device)
+
+ if f0.shape[0] == 1:
+ return torch.ones(pad_to, dtype=torch.float, device=x.device) * f0[0],torch.ones(pad_to, dtype=torch.float, device=x.device)
+
+ # This could probably be rewritten in pure torch.
+ f0 = np.interp(time_frame, time_org, f0, left=f0[0], right=f0[-1])
+ vuv_vector = vuv_vector.cpu().numpy()
+ vuv_vector = np.ceil(scipy.ndimage.zoom(vuv_vector,pad_to/len(vuv_vector),order = 0))
+
+ return f0,vuv_vector
+
+
+class MaskedAvgPool1d(nn.Module):
+ def __init__(
+ self, kernel_size: int, stride: Optional[int] = None, padding: Optional[int] = 0
+ ):
+ """An implementation of mean pooling that supports masked values.
+
+ Args:
+ kernel_size (int): The size of the average pooling window.
+ stride (int, optional): The stride of the average pooling window. Defaults to None.
+ padding (int, optional): The padding of the average pooling window. Defaults to 0.
+ """
+
+ super(MaskedAvgPool1d, self).__init__()
+ self.kernel_size = kernel_size
+ self.stride = stride or kernel_size
+ self.padding = padding
+
+ def forward(self, x, mask=None):
+ ndim = x.dim()
+ if ndim == 2:
+ x = x.unsqueeze(1)
+
+ assert (
+ x.dim() == 3
+ ), "Input tensor must have 2 or 3 dimensions (batch_size, channels, width)"
+
+ # Apply the mask by setting masked elements to zero, or make NaNs zero
+ if mask is None:
+ mask = ~torch.isnan(x)
+
+ # Ensure mask has the same shape as the input tensor
+ assert x.shape == mask.shape, "Input tensor and mask must have the same shape"
+
+ masked_x = torch.where(mask, x, torch.zeros_like(x))
+ # Create a ones kernel with the same number of channels as the input tensor
+ ones_kernel = torch.ones(x.size(1), 1, self.kernel_size, device=x.device)
+
+ # Perform sum pooling
+ sum_pooled = nn.functional.conv1d(
+ masked_x,
+ ones_kernel,
+ stride=self.stride,
+ padding=self.padding,
+ groups=x.size(1),
+ )
+
+ # Count the non-masked (valid) elements in each pooling window
+ valid_count = nn.functional.conv1d(
+ mask.float(),
+ ones_kernel,
+ stride=self.stride,
+ padding=self.padding,
+ groups=x.size(1),
+ )
+ valid_count = valid_count.clamp(min=1) # Avoid division by zero
+
+ # Perform masked average pooling
+ avg_pooled = sum_pooled / valid_count
+
+ # Fill zero values with NaNs
+ avg_pooled[avg_pooled == 0] = float("nan")
+
+ if ndim == 2:
+ return avg_pooled.squeeze(1)
+
+ return avg_pooled
+
+
+class MaskedMedianPool1d(nn.Module):
+ def __init__(
+ self, kernel_size: int, stride: Optional[int] = None, padding: Optional[int] = 0
+ ):
+ """An implementation of median pooling that supports masked values.
+
+ This implementation is inspired by the median pooling implementation in
+ https://gist.github.com/rwightman/f2d3849281624be7c0f11c85c87c1598
+
+ Args:
+ kernel_size (int): The size of the median pooling window.
+ stride (int, optional): The stride of the median pooling window. Defaults to None.
+ padding (int, optional): The padding of the median pooling window. Defaults to 0.
+ """
+
+ super(MaskedMedianPool1d, self).__init__()
+ self.kernel_size = kernel_size
+ self.stride = stride or kernel_size
+ self.padding = padding
+
+ def forward(self, x, mask=None):
+ ndim = x.dim()
+ if ndim == 2:
+ x = x.unsqueeze(1)
+
+ assert (
+ x.dim() == 3
+ ), "Input tensor must have 2 or 3 dimensions (batch_size, channels, width)"
+
+ if mask is None:
+ mask = ~torch.isnan(x)
+
+ assert x.shape == mask.shape, "Input tensor and mask must have the same shape"
+
+ masked_x = torch.where(mask, x, torch.zeros_like(x))
+
+ x = F.pad(masked_x, (self.padding, self.padding), mode="reflect")
+ mask = F.pad(
+ mask.float(), (self.padding, self.padding), mode="constant", value=0
+ )
+
+ x = x.unfold(2, self.kernel_size, self.stride)
+ mask = mask.unfold(2, self.kernel_size, self.stride)
+
+ x = x.contiguous().view(x.size()[:3] + (-1,))
+ mask = mask.contiguous().view(mask.size()[:3] + (-1,)).to(x.device)
+
+ # Combine the mask with the input tensor
+ #x_masked = torch.where(mask.bool(), x, torch.fill_(torch.zeros_like(x),float("inf")))
+ x_masked = torch.where(mask.bool(), x, torch.FloatTensor([float("inf")]).to(x.device))
+
+ # Sort the masked tensor along the last dimension
+ x_sorted, _ = torch.sort(x_masked, dim=-1)
+
+ # Compute the count of non-masked (valid) values
+ valid_count = mask.sum(dim=-1)
+
+ # Calculate the index of the median value for each pooling window
+ median_idx = (torch.div((valid_count - 1), 2, rounding_mode='trunc')).clamp(min=0)
+
+ # Gather the median values using the calculated indices
+ median_pooled = x_sorted.gather(-1, median_idx.unsqueeze(-1).long()).squeeze(-1)
+
+ # Fill infinite values with NaNs
+ median_pooled[torch.isinf(median_pooled)] = float("nan")
+
+ if ndim == 2:
+ return median_pooled.squeeze(1)
+
+ return median_pooled
+
+
+class CrepePitchExtractor(BasePitchExtractor):
+ def __init__(
+ self,
+ hop_length: int = 512,
+ f0_min: float = 50.0,
+ f0_max: float = 1100.0,
+ threshold: float = 0.05,
+ keep_zeros: bool = False,
+ device = None,
+ model: Literal["full", "tiny"] = "full",
+ use_fast_filters: bool = True,
+ ):
+ super().__init__(hop_length, f0_min, f0_max, keep_zeros)
+
+ self.threshold = threshold
+ self.model = model
+ self.use_fast_filters = use_fast_filters
+ self.hop_length = hop_length
+ if device is None:
+ self.dev = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+ else:
+ self.dev = torch.device(device)
+ if self.use_fast_filters:
+ self.median_filter = MaskedMedianPool1d(3, 1, 1).to(self.dev)
+ self.mean_filter = MaskedAvgPool1d(3, 1, 1).to(self.dev)
+
+ def __call__(self, x, sampling_rate=44100, pad_to=None):
+ """Extract pitch using crepe.
+
+
+ Args:
+ x (torch.Tensor): Audio signal, shape (1, T).
+ sampling_rate (int, optional): Sampling rate. Defaults to 44100.
+ pad_to (int, optional): Pad to length. Defaults to None.
+
+ Returns:
+ torch.Tensor: Pitch, shape (T // hop_length,).
+ """
+
+ assert x.ndim == 2, f"Expected 2D tensor, got {x.ndim}D tensor."
+ assert x.shape[0] == 1, f"Expected 1 channel, got {x.shape[0]} channels."
+
+ x = x.to(self.dev)
+ f0, pd = torchcrepe.predict(
+ x,
+ sampling_rate,
+ self.hop_length,
+ self.f0_min,
+ self.f0_max,
+ pad=True,
+ model=self.model,
+ batch_size=1024,
+ device=x.device,
+ return_periodicity=True,
+ )
+
+ # Filter the periodicity, remove silence, and apply the voiced/unvoiced threshold; see the original repository's README.
+ if self.use_fast_filters:
+ pd = self.median_filter(pd)
+ else:
+ pd = torchcrepe.filter.median(pd, 3)
+
+ pd = torchcrepe.threshold.Silence(-60.0)(pd, x, sampling_rate, 512)
+ f0 = torchcrepe.threshold.At(self.threshold)(f0, pd)
+
+ if self.use_fast_filters:
+ f0 = self.mean_filter(f0)
+ else:
+ f0 = torchcrepe.filter.mean(f0, 3)
+
+ f0 = torch.where(torch.isnan(f0), torch.full_like(f0, 0), f0)[0]
+
+ return self.post_process(x, sampling_rate, f0, pad_to)
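
CrepePitchExtractor wraps torchcrepe and, with keep_zeros=False (its default), returns an interpolated F0 curve plus a voiced/unvoiced vector resampled to pad_to frames. A usage sketch, assuming torchcrepe and its pretrained "full" model are available (illustrative only, not part of the patch):

import torch
from modules.crepe import CrepePitchExtractor

# Illustrative sketch, not part of the patch: F0 extraction from one second of 44.1 kHz audio.
extractor = CrepePitchExtractor(hop_length=512, f0_min=50.0, f0_max=1100.0,
                                model="full", device="cpu")
audio = torch.randn(1, 44100)                  # (1, T) mono signal, assumed in [-1, 1]
n_frames = audio.shape[-1] // 512
f0, vuv = extractor(audio, sampling_rate=44100, pad_to=n_frames)
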
diff --git a/modules/ddsp.py b/modules/ddsp.py
new file mode 100644
index 0000000000000000000000000000000000000000..b09ac5c5c19d165e75e1780877a857be8c104ed7
--- /dev/null
+++ b/modules/ddsp.py
@@ -0,0 +1,190 @@
+import torch
+import torch.nn as nn
+from torch.nn import functional as F
+import torch.fft as fft
+import numpy as np
+import librosa as li
+import math
+from scipy.signal import get_window
+
+
+def safe_log(x):
+ return torch.log(x + 1e-7)
+
+
+@torch.no_grad()
+def mean_std_loudness(dataset):
+ mean = 0
+ std = 0
+ n = 0
+ for _, _, l in dataset:
+ n += 1
+ mean += (l.mean().item() - mean) / n
+ std += (l.std().item() - std) / n
+ return mean, std
+
+
+def multiscale_fft(signal, scales, overlap):
+ stfts = []
+ for s in scales:
+ S = torch.stft(
+ signal,
+ s,
+ int(s * (1 - overlap)),
+ s,
+ torch.hann_window(s).to(signal),
+ True,
+ normalized=True,
+ return_complex=True,
+ ).abs()
+ stfts.append(S)
+ return stfts
+
+
+def resample(x, factor: int):
+ batch, frame, channel = x.shape
+ x = x.permute(0, 2, 1).reshape(batch * channel, 1, frame)
+
+ window = torch.hann_window(
+ factor * 2,
+ dtype=x.dtype,
+ device=x.device,
+ ).reshape(1, 1, -1)
+ y = torch.zeros(x.shape[0], x.shape[1], factor * x.shape[2]).to(x)
+ y[..., ::factor] = x
+ y[..., -1:] = x[..., -1:]
+ y = torch.nn.functional.pad(y, [factor, factor])
+ y = torch.nn.functional.conv1d(y, window)[..., :-1]
+
+ y = y.reshape(batch, channel, factor * frame).permute(0, 2, 1)
+
+ return y
+
+
+def upsample(signal, factor):
+ signal = signal.permute(0, 2, 1)
+ signal = nn.functional.interpolate(signal, size=signal.shape[-1] * factor)
+ return signal.permute(0, 2, 1)
+
+
+def remove_above_nyquist(amplitudes, pitch, sampling_rate):
+ n_harm = amplitudes.shape[-1]
+ pitches = pitch * torch.arange(1, n_harm + 1).to(pitch)
+ aa = (pitches < sampling_rate / 2).float() + 1e-4
+ return amplitudes * aa
+
+
+def scale_function(x):
+ return 2 * torch.sigmoid(x) ** (math.log(10)) + 1e-7
+
+
+def extract_loudness(signal, sampling_rate, block_size, n_fft=2048):
+ S = li.stft(
+ signal,
+ n_fft=n_fft,
+ hop_length=block_size,
+ win_length=n_fft,
+ center=True,
+ )
+ S = np.log(abs(S) + 1e-7)
+ f = li.fft_frequencies(sr=sampling_rate, n_fft=n_fft)
+ a_weight = li.A_weighting(f)
+
+ S = S + a_weight.reshape(-1, 1)
+
+ S = np.mean(S, 0)[..., :-1]
+
+ return S
+
+
+def extract_pitch(signal, sampling_rate, block_size):
+ import crepe # lazy import; only extract_pitch needs the crepe package
+ length = signal.shape[-1] // block_size
+ f0 = crepe.predict(
+ signal,
+ sampling_rate,
+ step_size=int(1000 * block_size / sampling_rate),
+ verbose=1,
+ center=True,
+ viterbi=True,
+ )
+ f0 = f0[1].reshape(-1)[:-1]
+
+ if f0.shape[-1] != length:
+ f0 = np.interp(
+ np.linspace(0, 1, length, endpoint=False),
+ np.linspace(0, 1, f0.shape[-1], endpoint=False),
+ f0,
+ )
+
+ return f0
+
+
+def mlp(in_size, hidden_size, n_layers):
+ channels = [in_size] + (n_layers) * [hidden_size]
+ net = []
+ for i in range(n_layers):
+ net.append(nn.Linear(channels[i], channels[i + 1]))
+ net.append(nn.LayerNorm(channels[i + 1]))
+ net.append(nn.LeakyReLU())
+ return nn.Sequential(*net)
+
+
+def gru(n_input, hidden_size):
+ return nn.GRU(n_input * hidden_size, hidden_size, batch_first=True)
+
+
+def harmonic_synth(pitch, amplitudes, sampling_rate):
+ n_harmonic = amplitudes.shape[-1]
+ omega = torch.cumsum(2 * math.pi * pitch / sampling_rate, 1)
+ omegas = omega * torch.arange(1, n_harmonic + 1).to(omega)
+ signal = (torch.sin(omegas) * amplitudes).sum(-1, keepdim=True)
+ return signal
+
+
+def amp_to_impulse_response(amp, target_size):
+ amp = torch.stack([amp, torch.zeros_like(amp)], -1)
+ amp = torch.view_as_complex(amp)
+ amp = fft.irfft(amp)
+
+ filter_size = amp.shape[-1]
+
+ amp = torch.roll(amp, filter_size // 2, -1)
+ win = torch.hann_window(filter_size, dtype=amp.dtype, device=amp.device)
+
+ amp = amp * win
+
+ amp = nn.functional.pad(amp, (0, int(target_size) - int(filter_size)))
+ amp = torch.roll(amp, -filter_size // 2, -1)
+
+ return amp
+
+
+def fft_convolve(signal, kernel):
+ signal = nn.functional.pad(signal, (0, signal.shape[-1]))
+ kernel = nn.functional.pad(kernel, (kernel.shape[-1], 0))
+
+ output = fft.irfft(fft.rfft(signal) * fft.rfft(kernel))
+ output = output[..., output.shape[-1] // 2:]
+
+ return output
+
+
+def init_kernels(win_len, win_inc, fft_len, win_type=None, invers=False):
+ if win_type == 'None' or win_type is None:
+ window = np.ones(win_len)
+ else:
+ window = get_window(win_type, win_len, fftbins=True) # **0.5
+
+ N = fft_len
+ fourier_basis = np.fft.rfft(np.eye(N))[:win_len]
+ real_kernel = np.real(fourier_basis)
+ imag_kernel = np.imag(fourier_basis)
+ kernel = np.concatenate([real_kernel, imag_kernel], 1).T
+
+ if invers:
+ kernel = np.linalg.pinv(kernel).T
+
+ kernel = kernel * window
+ kernel = kernel[:, None, :]
+ return torch.from_numpy(kernel.astype(np.float32)), torch.from_numpy(window[None, :, None].astype(np.float32))
+
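
Of the DDSP helpers above, multiscale_fft is the piece typically used for multi-resolution spectral losses: it returns one magnitude STFT per scale. A brief illustrative sketch (not part of the patch):

import torch
from modules.ddsp import multiscale_fft

# Illustrative sketch, not part of the patch: multi-scale STFT magnitudes.
signal = torch.randn(2, 16384)
stfts = multiscale_fft(signal, scales=[2048, 1024, 512], overlap=0.75)
for s in stfts:
    print(s.shape)          # [2, scale // 2 + 1, frames] for each scale
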
diff --git a/modules/losses.py b/modules/losses.py
new file mode 100644
index 0000000000000000000000000000000000000000..cd21799eccde350c3aac0bdd661baf96ed220147
--- /dev/null
+++ b/modules/losses.py
@@ -0,0 +1,61 @@
+import torch
+from torch.nn import functional as F
+
+import modules.commons as commons
+
+
+def feature_loss(fmap_r, fmap_g):
+ loss = 0
+ for dr, dg in zip(fmap_r, fmap_g):
+ for rl, gl in zip(dr, dg):
+ rl = rl.float().detach()
+ gl = gl.float()
+ loss += torch.mean(torch.abs(rl - gl))
+
+ return loss * 2
+
+
+def discriminator_loss(disc_real_outputs, disc_generated_outputs):
+ loss = 0
+ r_losses = []
+ g_losses = []
+ for dr, dg in zip(disc_real_outputs, disc_generated_outputs):
+ dr = dr.float()
+ dg = dg.float()
+ r_loss = torch.mean((1-dr)**2)
+ g_loss = torch.mean(dg**2)
+ loss += (r_loss + g_loss)
+ r_losses.append(r_loss.item())
+ g_losses.append(g_loss.item())
+
+ return loss, r_losses, g_losses
+
+
+def generator_loss(disc_outputs):
+ loss = 0
+ gen_losses = []
+ for dg in disc_outputs:
+ dg = dg.float()
+ l = torch.mean((1-dg)**2)
+ gen_losses.append(l)
+ loss += l
+
+ return loss, gen_losses
+
+
+def kl_loss(z_p, logs_q, m_p, logs_p, z_mask):
+ """
+ z_p, logs_q: [b, h, t_t]
+ m_p, logs_p: [b, h, t_t]
+ """
+ z_p = z_p.float()
+ logs_q = logs_q.float()
+ m_p = m_p.float()
+ logs_p = logs_p.float()
+ z_mask = z_mask.float()
+ #print(logs_p)
+ kl = logs_p - logs_q - 0.5
+ kl += 0.5 * ((z_p - m_p)**2) * torch.exp(-2. * logs_p)
+ kl = torch.sum(kl * z_mask)
+ l = kl / torch.sum(z_mask)
+ return l
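
These are the usual LSGAN-style VITS losses: discriminator_loss and generator_loss take lists of sub-discriminator outputs, one tensor per discriminator. A minimal illustrative sketch with dummy tensors (not part of the patch):

import torch
from modules.losses import discriminator_loss, generator_loss

# Illustrative sketch, not part of the patch: losses over three sub-discriminators.
d_real = [torch.rand(2, 1, 50) for _ in range(3)]
d_fake = [torch.rand(2, 1, 50) for _ in range(3)]
loss_d, r_losses, g_losses = discriminator_loss(d_real, d_fake)
loss_g, gen_losses = generator_loss(d_fake)
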
diff --git a/modules/mel_processing.py b/modules/mel_processing.py
new file mode 100644
index 0000000000000000000000000000000000000000..99c5b35beb83f3b288af0fac5b49ebf2c69f062c
--- /dev/null
+++ b/modules/mel_processing.py
@@ -0,0 +1,112 @@
+import math
+import os
+import random
+import torch
+from torch import nn
+import torch.nn.functional as F
+import torch.utils.data
+import numpy as np
+import librosa
+import librosa.util as librosa_util
+from librosa.util import normalize, pad_center, tiny
+from scipy.signal import get_window
+from scipy.io.wavfile import read
+from librosa.filters import mel as librosa_mel_fn
+
+MAX_WAV_VALUE = 32768.0
+
+
+def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
+ """
+ PARAMS
+ ------
+ C: compression factor
+ """
+ return torch.log(torch.clamp(x, min=clip_val) * C)
+
+
+def dynamic_range_decompression_torch(x, C=1):
+ """
+ PARAMS
+ ------
+ C: compression factor used to compress
+ """
+ return torch.exp(x) / C
+
+
+def spectral_normalize_torch(magnitudes):
+ output = dynamic_range_compression_torch(magnitudes)
+ return output
+
+
+def spectral_de_normalize_torch(magnitudes):
+ output = dynamic_range_decompression_torch(magnitudes)
+ return output
+
+
+mel_basis = {}
+hann_window = {}
+
+
+def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False):
+ if torch.min(y) < -1.:
+ print('min value is ', torch.min(y))
+ if torch.max(y) > 1.:
+ print('max value is ', torch.max(y))
+
+ global hann_window
+ dtype_device = str(y.dtype) + '_' + str(y.device)
+ wnsize_dtype_device = str(win_size) + '_' + dtype_device
+ if wnsize_dtype_device not in hann_window:
+ hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)
+
+ y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')
+ y = y.squeeze(1)
+
+ spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device],
+ center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False)
+
+ spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
+ return spec
+
+
+def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax):
+ global mel_basis
+ dtype_device = str(spec.dtype) + '_' + str(spec.device)
+ fmax_dtype_device = str(fmax) + '_' + dtype_device
+ if fmax_dtype_device not in mel_basis:
+ mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax)
+ mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device)
+ spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
+ spec = spectral_normalize_torch(spec)
+ return spec
+
+
+def mel_spectrogram_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False):
+ if torch.min(y) < -1.:
+ print('min value is ', torch.min(y))
+ if torch.max(y) > 1.:
+ print('max value is ', torch.max(y))
+
+ global mel_basis, hann_window
+ dtype_device = str(y.dtype) + '_' + str(y.device)
+ fmax_dtype_device = str(fmax) + '_' + dtype_device
+ wnsize_dtype_device = str(win_size) + '_' + dtype_device
+ if fmax_dtype_device not in mel_basis:
+ mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax)
+ mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=y.dtype, device=y.device)
+ if wnsize_dtype_device not in hann_window:
+ hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)
+
+ y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')
+ y = y.squeeze(1)
+
+ spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device],
+ center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False)
+
+ spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
+
+ spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
+ spec = spectral_normalize_torch(spec)
+
+ return spec
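
spectrogram_torch and mel_spectrogram_torch cache the Hann window and mel filterbank per dtype/device and pad the signal so that frame timing matches hop_length. With the STFT settings from the configs in this patch, a quick illustrative sketch (not part of the patch) looks like:

import torch
from modules.mel_processing import spectrogram_torch, mel_spectrogram_torch

# Illustrative sketch, not part of the patch: settings match the configs above
# (44.1 kHz, n_fft/win 2048, hop 512, 80 mel bins, fmax 22050).
y = torch.randn(1, 44100).clamp(-1.0, 1.0)     # one second of audio in [-1, 1]
spec = spectrogram_torch(y, 2048, 44100, 512, 2048, center=False)
mel = mel_spectrogram_torch(y, 2048, 80, 44100, 512, 2048, 0.0, 22050, center=False)
print(spec.shape, mel.shape)                   # [1, 1025, frames], [1, 80, frames]
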
diff --git a/modules/modules.py b/modules/modules.py
new file mode 100644
index 0000000000000000000000000000000000000000..54290fd207b25e93831bd21005990ea137e6b50e
--- /dev/null
+++ b/modules/modules.py
@@ -0,0 +1,342 @@
+import copy
+import math
+import numpy as np
+import scipy
+import torch
+from torch import nn
+from torch.nn import functional as F
+
+from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
+from torch.nn.utils import weight_norm, remove_weight_norm
+
+import modules.commons as commons
+from modules.commons import init_weights, get_padding
+
+
+LRELU_SLOPE = 0.1
+
+
+class LayerNorm(nn.Module):
+ def __init__(self, channels, eps=1e-5):
+ super().__init__()
+ self.channels = channels
+ self.eps = eps
+
+ self.gamma = nn.Parameter(torch.ones(channels))
+ self.beta = nn.Parameter(torch.zeros(channels))
+
+ def forward(self, x):
+ x = x.transpose(1, -1)
+ x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
+ return x.transpose(1, -1)
+
+
+class ConvReluNorm(nn.Module):
+ def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout):
+ super().__init__()
+ self.in_channels = in_channels
+ self.hidden_channels = hidden_channels
+ self.out_channels = out_channels
+ self.kernel_size = kernel_size
+ self.n_layers = n_layers
+ self.p_dropout = p_dropout
+ assert n_layers > 1, "Number of layers should be larger than 1."
+
+ self.conv_layers = nn.ModuleList()
+ self.norm_layers = nn.ModuleList()
+ self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size//2))
+ self.norm_layers.append(LayerNorm(hidden_channels))
+ self.relu_drop = nn.Sequential(
+ nn.ReLU(),
+ nn.Dropout(p_dropout))
+ for _ in range(n_layers-1):
+ self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size//2))
+ self.norm_layers.append(LayerNorm(hidden_channels))
+ self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
+ self.proj.weight.data.zero_()
+ self.proj.bias.data.zero_()
+
+ def forward(self, x, x_mask):
+ x_org = x
+ for i in range(self.n_layers):
+ x = self.conv_layers[i](x * x_mask)
+ x = self.norm_layers[i](x)
+ x = self.relu_drop(x)
+ x = x_org + self.proj(x)
+ return x * x_mask
+
+
+class DDSConv(nn.Module):
+ """
+ Dilated and Depth-Separable Convolution
+ """
+ def __init__(self, channels, kernel_size, n_layers, p_dropout=0.):
+ super().__init__()
+ self.channels = channels
+ self.kernel_size = kernel_size
+ self.n_layers = n_layers
+ self.p_dropout = p_dropout
+
+ self.drop = nn.Dropout(p_dropout)
+ self.convs_sep = nn.ModuleList()
+ self.convs_1x1 = nn.ModuleList()
+ self.norms_1 = nn.ModuleList()
+ self.norms_2 = nn.ModuleList()
+ for i in range(n_layers):
+ dilation = kernel_size ** i
+ padding = (kernel_size * dilation - dilation) // 2
+ self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size,
+ groups=channels, dilation=dilation, padding=padding
+ ))
+ self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
+ self.norms_1.append(LayerNorm(channels))
+ self.norms_2.append(LayerNorm(channels))
+
+ def forward(self, x, x_mask, g=None):
+ if g is not None:
+ x = x + g
+ for i in range(self.n_layers):
+ y = self.convs_sep[i](x * x_mask)
+ y = self.norms_1[i](y)
+ y = F.gelu(y)
+ y = self.convs_1x1[i](y)
+ y = self.norms_2[i](y)
+ y = F.gelu(y)
+ y = self.drop(y)
+ x = x + y
+ return x * x_mask
+
+
+class WN(torch.nn.Module):
+ def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0):
+ super(WN, self).__init__()
+ assert(kernel_size % 2 == 1)
+ self.hidden_channels =hidden_channels
+ self.kernel_size = kernel_size
+ self.dilation_rate = dilation_rate
+ self.n_layers = n_layers
+ self.gin_channels = gin_channels
+ self.p_dropout = p_dropout
+
+ self.in_layers = torch.nn.ModuleList()
+ self.res_skip_layers = torch.nn.ModuleList()
+ self.drop = nn.Dropout(p_dropout)
+
+ if gin_channels != 0:
+ cond_layer = torch.nn.Conv1d(gin_channels, 2*hidden_channels*n_layers, 1)
+ self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight')
+
+ for i in range(n_layers):
+ dilation = dilation_rate ** i
+ padding = int((kernel_size * dilation - dilation) / 2)
+ in_layer = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, kernel_size,
+ dilation=dilation, padding=padding)
+ in_layer = torch.nn.utils.weight_norm(in_layer, name='weight')
+ self.in_layers.append(in_layer)
+
+ # last one is not necessary
+ if i < n_layers - 1:
+ res_skip_channels = 2 * hidden_channels
+ else:
+ res_skip_channels = hidden_channels
+
+ res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
+ res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight')
+ self.res_skip_layers.append(res_skip_layer)
+
+ def forward(self, x, x_mask, g=None, **kwargs):
+ output = torch.zeros_like(x)
+ n_channels_tensor = torch.IntTensor([self.hidden_channels])
+
+ if g is not None:
+ g = self.cond_layer(g)
+
+ for i in range(self.n_layers):
+ x_in = self.in_layers[i](x)
+ if g is not None:
+ cond_offset = i * 2 * self.hidden_channels
+ g_l = g[:,cond_offset:cond_offset+2*self.hidden_channels,:]
+ else:
+ g_l = torch.zeros_like(x_in)
+
+ acts = commons.fused_add_tanh_sigmoid_multiply(
+ x_in,
+ g_l,
+ n_channels_tensor)
+ acts = self.drop(acts)
+
+ res_skip_acts = self.res_skip_layers[i](acts)
+ if i < self.n_layers - 1:
+ res_acts = res_skip_acts[:,:self.hidden_channels,:]
+ x = (x + res_acts) * x_mask
+ output = output + res_skip_acts[:,self.hidden_channels:,:]
+ else:
+ output = output + res_skip_acts
+ return output * x_mask
+
+ def remove_weight_norm(self):
+ if self.gin_channels != 0:
+ torch.nn.utils.remove_weight_norm(self.cond_layer)
+ for l in self.in_layers:
+ torch.nn.utils.remove_weight_norm(l)
+ for l in self.res_skip_layers:
+ torch.nn.utils.remove_weight_norm(l)
+
+
+class ResBlock1(torch.nn.Module):
+ def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
+ super(ResBlock1, self).__init__()
+ self.convs1 = nn.ModuleList([
+ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
+ padding=get_padding(kernel_size, dilation[0]))),
+ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
+ padding=get_padding(kernel_size, dilation[1]))),
+ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2],
+ padding=get_padding(kernel_size, dilation[2])))
+ ])
+ self.convs1.apply(init_weights)
+
+ self.convs2 = nn.ModuleList([
+ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
+ padding=get_padding(kernel_size, 1))),
+ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
+ padding=get_padding(kernel_size, 1))),
+ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
+ padding=get_padding(kernel_size, 1)))
+ ])
+ self.convs2.apply(init_weights)
+
+ def forward(self, x, x_mask=None):
+ for c1, c2 in zip(self.convs1, self.convs2):
+ xt = F.leaky_relu(x, LRELU_SLOPE)
+ if x_mask is not None:
+ xt = xt * x_mask
+ xt = c1(xt)
+ xt = F.leaky_relu(xt, LRELU_SLOPE)
+ if x_mask is not None:
+ xt = xt * x_mask
+ xt = c2(xt)
+ x = xt + x
+ if x_mask is not None:
+ x = x * x_mask
+ return x
+
+ def remove_weight_norm(self):
+ for l in self.convs1:
+ remove_weight_norm(l)
+ for l in self.convs2:
+ remove_weight_norm(l)
+
+
+class ResBlock2(torch.nn.Module):
+ def __init__(self, channels, kernel_size=3, dilation=(1, 3)):
+ super(ResBlock2, self).__init__()
+ self.convs = nn.ModuleList([
+ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
+ padding=get_padding(kernel_size, dilation[0]))),
+ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
+ padding=get_padding(kernel_size, dilation[1])))
+ ])
+ self.convs.apply(init_weights)
+
+ def forward(self, x, x_mask=None):
+ for c in self.convs:
+ xt = F.leaky_relu(x, LRELU_SLOPE)
+ if x_mask is not None:
+ xt = xt * x_mask
+ xt = c(xt)
+ x = xt + x
+ if x_mask is not None:
+ x = x * x_mask
+ return x
+
+ def remove_weight_norm(self):
+ for l in self.convs:
+ remove_weight_norm(l)
+
+
+class Log(nn.Module):
+ def forward(self, x, x_mask, reverse=False, **kwargs):
+ if not reverse:
+ y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask
+ logdet = torch.sum(-y, [1, 2])
+ return y, logdet
+ else:
+ x = torch.exp(x) * x_mask
+ return x
+
+
+class Flip(nn.Module):
+ def forward(self, x, *args, reverse=False, **kwargs):
+ x = torch.flip(x, [1])
+ if not reverse:
+ logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device)
+ return x, logdet
+ else:
+ return x
+
+
+class ElementwiseAffine(nn.Module):
+ def __init__(self, channels):
+ super().__init__()
+ self.channels = channels
+ self.m = nn.Parameter(torch.zeros(channels,1))
+ self.logs = nn.Parameter(torch.zeros(channels,1))
+
+ def forward(self, x, x_mask, reverse=False, **kwargs):
+ if not reverse:
+ y = self.m + torch.exp(self.logs) * x
+ y = y * x_mask
+ logdet = torch.sum(self.logs * x_mask, [1,2])
+ return y, logdet
+ else:
+ x = (x - self.m) * torch.exp(-self.logs) * x_mask
+ return x
+
+
+class ResidualCouplingLayer(nn.Module):
+ def __init__(self,
+ channels,
+ hidden_channels,
+ kernel_size,
+ dilation_rate,
+ n_layers,
+ p_dropout=0,
+ gin_channels=0,
+ mean_only=False):
+ assert channels % 2 == 0, "channels should be divisible by 2"
+ super().__init__()
+ self.channels = channels
+ self.hidden_channels = hidden_channels
+ self.kernel_size = kernel_size
+ self.dilation_rate = dilation_rate
+ self.n_layers = n_layers
+ self.half_channels = channels // 2
+ self.mean_only = mean_only
+
+ self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
+ self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, gin_channels=gin_channels)
+ self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
+ self.post.weight.data.zero_()
+ self.post.bias.data.zero_()
+
+ def forward(self, x, x_mask, g=None, reverse=False):
+ x0, x1 = torch.split(x, [self.half_channels]*2, 1)
+ h = self.pre(x0) * x_mask
+ h = self.enc(h, x_mask, g=g)
+ stats = self.post(h) * x_mask
+ if not self.mean_only:
+ m, logs = torch.split(stats, [self.half_channels]*2, 1)
+ else:
+ m = stats
+ logs = torch.zeros_like(m)
+
+ if not reverse:
+ x1 = m + x1 * torch.exp(logs) * x_mask
+ x = torch.cat([x0, x1], 1)
+ logdet = torch.sum(logs, [1,2])
+ return x, logdet
+ else:
+ x1 = (x1 - m) * torch.exp(-logs) * x_mask
+ x = torch.cat([x0, x1], 1)
+ return x
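
ResidualCouplingLayer is an affine coupling flow step: half of the channels condition a WN network that shifts (and optionally scales) the other half, so the transform is exactly invertible. A small illustrative check (not part of the patch):

import torch
from modules.modules import ResidualCouplingLayer

# Illustrative sketch, not part of the patch: forward followed by reverse recovers the input.
layer = ResidualCouplingLayer(channels=192, hidden_channels=192, kernel_size=5,
                              dilation_rate=1, n_layers=4, mean_only=True)
x = torch.randn(2, 192, 60)
x_mask = torch.ones(2, 1, 60)
y, logdet = layer(x, x_mask)                   # forward pass, returns log-determinant
x_rec = layer(y, x_mask, reverse=True)         # inverse pass
print(torch.allclose(x, x_rec, atol=1e-5))     # True (up to numerical error)
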
diff --git a/onnxexport/model_onnx.py b/onnxexport/model_onnx.py
new file mode 100644
index 0000000000000000000000000000000000000000..e28bae95ec1e53aa05d06fc784ff86d55f228d60
--- /dev/null
+++ b/onnxexport/model_onnx.py
@@ -0,0 +1,335 @@
+import torch
+from torch import nn
+from torch.nn import functional as F
+
+import modules.attentions as attentions
+import modules.commons as commons
+import modules.modules as modules
+
+from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
+from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
+
+import utils
+from modules.commons import init_weights, get_padding
+from vdecoder.hifigan.models import Generator
+from utils import f0_to_coarse
+
+
+class ResidualCouplingBlock(nn.Module):
+ def __init__(self,
+ channels,
+ hidden_channels,
+ kernel_size,
+ dilation_rate,
+ n_layers,
+ n_flows=4,
+ gin_channels=0):
+ super().__init__()
+ self.channels = channels
+ self.hidden_channels = hidden_channels
+ self.kernel_size = kernel_size
+ self.dilation_rate = dilation_rate
+ self.n_layers = n_layers
+ self.n_flows = n_flows
+ self.gin_channels = gin_channels
+
+ self.flows = nn.ModuleList()
+ for i in range(n_flows):
+ self.flows.append(
+ modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers,
+ gin_channels=gin_channels, mean_only=True))
+ self.flows.append(modules.Flip())
+
+ def forward(self, x, x_mask, g=None, reverse=False):
+ if not reverse:
+ for flow in self.flows:
+ x, _ = flow(x, x_mask, g=g, reverse=reverse)
+ else:
+ for flow in reversed(self.flows):
+ x = flow(x, x_mask, g=g, reverse=reverse)
+ return x
+
+
+class Encoder(nn.Module):
+ def __init__(self,
+ in_channels,
+ out_channels,
+ hidden_channels,
+ kernel_size,
+ dilation_rate,
+ n_layers,
+ gin_channels=0):
+ super().__init__()
+ self.in_channels = in_channels
+ self.out_channels = out_channels
+ self.hidden_channels = hidden_channels
+ self.kernel_size = kernel_size
+ self.dilation_rate = dilation_rate
+ self.n_layers = n_layers
+ self.gin_channels = gin_channels
+
+ self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
+ self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels)
+ self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
+
+ def forward(self, x, x_lengths, g=None):
+ # print(x.shape,x_lengths.shape)
+ x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
+ x = self.pre(x) * x_mask
+ x = self.enc(x, x_mask, g=g)
+ stats = self.proj(x) * x_mask
+ m, logs = torch.split(stats, self.out_channels, dim=1)
+ z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
+ return z, m, logs, x_mask
+
+
+class TextEncoder(nn.Module):
+ def __init__(self,
+ out_channels,
+ hidden_channels,
+ kernel_size,
+ n_layers,
+ gin_channels=0,
+ filter_channels=None,
+ n_heads=None,
+ p_dropout=None):
+ super().__init__()
+ self.out_channels = out_channels
+ self.hidden_channels = hidden_channels
+ self.kernel_size = kernel_size
+ self.n_layers = n_layers
+ self.gin_channels = gin_channels
+ self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
+ self.f0_emb = nn.Embedding(256, hidden_channels)
+
+ self.enc_ = attentions.Encoder(
+ hidden_channels,
+ filter_channels,
+ n_heads,
+ n_layers,
+ kernel_size,
+ p_dropout)
+
+ def forward(self, x, x_mask, f0=None, z=None):
+ x = x + self.f0_emb(f0).transpose(1, 2)
+ x = self.enc_(x * x_mask, x_mask)
+ stats = self.proj(x) * x_mask
+ m, logs = torch.split(stats, self.out_channels, dim=1)
+ z = (m + z * torch.exp(logs)) * x_mask
+ return z, m, logs, x_mask
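+
+# Unlike the training-time encoder, this ONNX-export variant takes the sampling
+# noise z as an explicit input (supplied by SynthesizerTrn.forward below), so
+# the exported graph stays deterministic given its inputs.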
+
+
+class DiscriminatorP(torch.nn.Module):
+ def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
+ super(DiscriminatorP, self).__init__()
+ self.period = period
+ self.use_spectral_norm = use_spectral_norm
+ norm_f = weight_norm if not use_spectral_norm else spectral_norm
+ self.convs = nn.ModuleList([
+ norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
+ norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
+ norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
+ norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
+ norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))),
+ ])
+ self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
+
+ def forward(self, x):
+ fmap = []
+
+ # 1d to 2d
+ b, c, t = x.shape
+ if t % self.period != 0: # pad first
+ n_pad = self.period - (t % self.period)
+ x = F.pad(x, (0, n_pad), "reflect")
+ t = t + n_pad
+ x = x.view(b, c, t // self.period, self.period)
+
+ for l in self.convs:
+ x = l(x)
+ x = F.leaky_relu(x, modules.LRELU_SLOPE)
+ fmap.append(x)
+ x = self.conv_post(x)
+ fmap.append(x)
+ x = torch.flatten(x, 1, -1)
+
+ return x, fmap
+
+
+class DiscriminatorS(torch.nn.Module):
+ def __init__(self, use_spectral_norm=False):
+ super(DiscriminatorS, self).__init__()
+ norm_f = weight_norm if not use_spectral_norm else spectral_norm
+ self.convs = nn.ModuleList([
+ norm_f(Conv1d(1, 16, 15, 1, padding=7)),
+ norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
+ norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
+ norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
+ norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
+ norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
+ ])
+ self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
+
+ def forward(self, x):
+ fmap = []
+
+ for l in self.convs:
+ x = l(x)
+ x = F.leaky_relu(x, modules.LRELU_SLOPE)
+ fmap.append(x)
+ x = self.conv_post(x)
+ fmap.append(x)
+ x = torch.flatten(x, 1, -1)
+
+ return x, fmap
+
+
+class F0Decoder(nn.Module):
+ def __init__(self,
+ out_channels,
+ hidden_channels,
+ filter_channels,
+ n_heads,
+ n_layers,
+ kernel_size,
+ p_dropout,
+ spk_channels=0):
+ super().__init__()
+ self.out_channels = out_channels
+ self.hidden_channels = hidden_channels
+ self.filter_channels = filter_channels
+ self.n_heads = n_heads
+ self.n_layers = n_layers
+ self.kernel_size = kernel_size
+ self.p_dropout = p_dropout
+ self.spk_channels = spk_channels
+
+ self.prenet = nn.Conv1d(hidden_channels, hidden_channels, 3, padding=1)
+ self.decoder = attentions.FFT(
+ hidden_channels,
+ filter_channels,
+ n_heads,
+ n_layers,
+ kernel_size,
+ p_dropout)
+ self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
+ self.f0_prenet = nn.Conv1d(1, hidden_channels, 3, padding=1)
+ self.cond = nn.Conv1d(spk_channels, hidden_channels, 1)
+
+ def forward(self, x, norm_f0, x_mask, spk_emb=None):
+ x = torch.detach(x)
+ if spk_emb is not None:
+ x = x + self.cond(spk_emb)
+ x += self.f0_prenet(norm_f0)
+ x = self.prenet(x) * x_mask
+ x = self.decoder(x * x_mask, x_mask)
+ x = self.proj(x) * x_mask
+ return x
+
+
+class SynthesizerTrn(nn.Module):
+ """
+ Synthesizer for Training
+ """
+
+ def __init__(self,
+ spec_channels,
+ segment_size,
+ inter_channels,
+ hidden_channels,
+ filter_channels,
+ n_heads,
+ n_layers,
+ kernel_size,
+ p_dropout,
+ resblock,
+ resblock_kernel_sizes,
+ resblock_dilation_sizes,
+ upsample_rates,
+ upsample_initial_channel,
+ upsample_kernel_sizes,
+ gin_channels,
+ ssl_dim,
+ n_speakers,
+ sampling_rate=44100,
+ **kwargs):
+ super().__init__()
+ self.spec_channels = spec_channels
+ self.inter_channels = inter_channels
+ self.hidden_channels = hidden_channels
+ self.filter_channels = filter_channels
+ self.n_heads = n_heads
+ self.n_layers = n_layers
+ self.kernel_size = kernel_size
+ self.p_dropout = p_dropout
+ self.resblock = resblock
+ self.resblock_kernel_sizes = resblock_kernel_sizes
+ self.resblock_dilation_sizes = resblock_dilation_sizes
+ self.upsample_rates = upsample_rates
+ self.upsample_initial_channel = upsample_initial_channel
+ self.upsample_kernel_sizes = upsample_kernel_sizes
+ self.segment_size = segment_size
+ self.gin_channels = gin_channels
+ self.ssl_dim = ssl_dim
+ self.emb_g = nn.Embedding(n_speakers, gin_channels)
+
+ self.pre = nn.Conv1d(ssl_dim, hidden_channels, kernel_size=5, padding=2)
+
+ self.enc_p = TextEncoder(
+ inter_channels,
+ hidden_channels,
+ filter_channels=filter_channels,
+ n_heads=n_heads,
+ n_layers=n_layers,
+ kernel_size=kernel_size,
+ p_dropout=p_dropout
+ )
+ hps = {
+ "sampling_rate": sampling_rate,
+ "inter_channels": inter_channels,
+ "resblock": resblock,
+ "resblock_kernel_sizes": resblock_kernel_sizes,
+ "resblock_dilation_sizes": resblock_dilation_sizes,
+ "upsample_rates": upsample_rates,
+ "upsample_initial_channel": upsample_initial_channel,
+ "upsample_kernel_sizes": upsample_kernel_sizes,
+ "gin_channels": gin_channels,
+ }
+ self.dec = Generator(h=hps)
+ self.enc_q = Encoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels)
+ self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels)
+ self.f0_decoder = F0Decoder(
+ 1,
+ hidden_channels,
+ filter_channels,
+ n_heads,
+ n_layers,
+ kernel_size,
+ p_dropout,
+ spk_channels=gin_channels
+ )
+ self.emb_uv = nn.Embedding(2, hidden_channels)
+ self.predict_f0 = False
+
+ def forward(self, c, f0, mel2ph, uv, noise=None, g=None):
+
+ decoder_inp = F.pad(c, [0, 0, 1, 0])
+ mel2ph_ = mel2ph.unsqueeze(2).repeat([1, 1, c.shape[-1]])
+ c = torch.gather(decoder_inp, 1, mel2ph_).transpose(1, 2) # [B, T, H]
+
+ c_lengths = (torch.ones(c.size(0)) * c.size(-1)).to(c.device)
+ g = g.unsqueeze(0)
+ g = self.emb_g(g).transpose(1, 2)
+ x_mask = torch.unsqueeze(commons.sequence_mask(c_lengths, c.size(2)), 1).to(c.dtype)
+ x = self.pre(c) * x_mask + self.emb_uv(uv.long()).transpose(1, 2)
+
+ if self.predict_f0:
+ lf0 = 2595. * torch.log10(1. + f0.unsqueeze(1) / 700.) / 500
+ norm_lf0 = utils.normalize_f0(lf0, x_mask, uv, random_scale=False)
+ pred_lf0 = self.f0_decoder(x, norm_lf0, x_mask, spk_emb=g)
+ f0 = (700 * (torch.pow(10, pred_lf0 * 500 / 2595) - 1)).squeeze(1)
+
+ z_p, m_p, logs_p, c_mask = self.enc_p(x, x_mask, f0=f0_to_coarse(f0), z=noise)
+ z = self.flow(z_p, c_mask, g=g, reverse=True)
+ o = self.dec(z * c_mask, g=g, f0=f0)
+ return o
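+
+# For reference: the predict_f0 branch converts f0 (Hz) to a scaled mel-like
+# value, lf0 = 2595 * log10(1 + f0 / 700) / 500, and inverts the prediction
+# with f0 = 700 * (10 ** (lf0 * 500 / 2595) - 1); e.g. 440 Hz corresponds to
+# lf0 of roughly 1.10.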
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..9a866c149844c0317a430dd1eb407a497af720fa
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,22 @@
+Flask
+Flask_Cors
+gradio==3.18.0
+numpy
+playsound
+pydub
+requests
+scipy
+sounddevice
+SoundFile
+starlette
+torch
+torchaudio
+tqdm
+scikit-maad
+praat-parselmouth
+onnx
+onnxsim
+onnxoptimizer
+fairseq
+librosa
+edge-tts
diff --git a/utils.py b/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..e19cac39c57f213bbf6f1435ab48fe7948a1b17b
--- /dev/null
+++ b/utils.py
@@ -0,0 +1,501 @@
+import os
+import glob
+import re
+import sys
+import argparse
+import logging
+import json
+import subprocess
+import random
+
+import librosa
+import numpy as np
+from scipy.io.wavfile import read
+import torch
+from torch.nn import functional as F
+from modules.commons import sequence_mask
+from hubert import hubert_model
+MATPLOTLIB_FLAG = False
+
+logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
+logger = logging
+
+f0_bin = 256
+f0_max = 1100.0
+f0_min = 50.0
+f0_mel_min = 1127 * np.log(1 + f0_min / 700)
+f0_mel_max = 1127 * np.log(1 + f0_max / 700)
+
+
+# def normalize_f0(f0, random_scale=True):
+# f0_norm = f0.clone() # create a copy of the input Tensor
+# batch_size, _, frame_length = f0_norm.shape
+# for i in range(batch_size):
+# means = torch.mean(f0_norm[i, 0, :])
+# if random_scale:
+# factor = random.uniform(0.8, 1.2)
+# else:
+# factor = 1
+# f0_norm[i, 0, :] = (f0_norm[i, 0, :] - means) * factor
+# return f0_norm
+# def normalize_f0(f0, random_scale=True):
+# means = torch.mean(f0[:, 0, :], dim=1, keepdim=True)
+# if random_scale:
+# factor = torch.Tensor(f0.shape[0],1).uniform_(0.8, 1.2).to(f0.device)
+# else:
+# factor = torch.ones(f0.shape[0], 1, 1).to(f0.device)
+# f0_norm = (f0 - means.unsqueeze(-1)) * factor.unsqueeze(-1)
+# return f0_norm
+def normalize_f0(f0, x_mask, uv, random_scale=True):
+ # calculate means based on x_mask
+ uv_sum = torch.sum(uv, dim=1, keepdim=True)
+ uv_sum[uv_sum == 0] = 9999
+ means = torch.sum(f0[:, 0, :] * uv, dim=1, keepdim=True) / uv_sum
+
+ if random_scale:
+ factor = torch.Tensor(f0.shape[0], 1).uniform_(0.8, 1.2).to(f0.device)
+ else:
+ factor = torch.ones(f0.shape[0], 1).to(f0.device)
+ # normalize f0 based on means and factor
+ f0_norm = (f0 - means.unsqueeze(-1)) * factor.unsqueeze(-1)
+ if torch.isnan(f0_norm).any():
+ raise ValueError("normalize_f0 produced NaN values")
+ return f0_norm * x_mask
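+
+# Shape contract as used by callers in this repo: f0 is (B, 1, T), x_mask is
+# (B, 1, T) and uv is (B, T); the voiced-frame mean is removed per utterance and
+# the result keeps f0's shape. The random 0.8-1.2 scale is only applied when
+# random_scale=True.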
+
+
+def plot_data_to_numpy(x, y):
+ global MATPLOTLIB_FLAG
+ if not MATPLOTLIB_FLAG:
+ import matplotlib
+ matplotlib.use("Agg")
+ MATPLOTLIB_FLAG = True
+ mpl_logger = logging.getLogger('matplotlib')
+ mpl_logger.setLevel(logging.WARNING)
+ import matplotlib.pylab as plt
+ import numpy as np
+
+ fig, ax = plt.subplots(figsize=(10, 2))
+ plt.plot(x)
+ plt.plot(y)
+ plt.tight_layout()
+
+ fig.canvas.draw()
+ data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
+ data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
+ plt.close()
+ return data
+
+
+
+def interpolate_f0(f0):
+ '''
+ Interpolate the F0 contour: fill unvoiced (zero) frames by linear interpolation between neighbouring voiced frames.
+ '''
+
+ data = np.reshape(f0, (f0.size, 1))
+
+ vuv_vector = np.zeros((data.size, 1), dtype=np.float32)
+ vuv_vector[data > 0.0] = 1.0
+ vuv_vector[data <= 0.0] = 0.0
+
+ ip_data = data
+
+ frame_number = data.size
+ last_value = 0.0
+ for i in range(frame_number):
+ if data[i] <= 0.0:
+ j = i + 1
+ for j in range(i + 1, frame_number):
+ if data[j] > 0.0:
+ break
+ if j < frame_number - 1:
+ if last_value > 0.0:
+ step = (data[j] - data[i - 1]) / float(j - i)
+ for k in range(i, j):
+ ip_data[k] = data[i - 1] + step * (k - i + 1)
+ else:
+ for k in range(i, j):
+ ip_data[k] = data[j]
+ else:
+ for k in range(i, frame_number):
+ ip_data[k] = last_value
+ else:
+ ip_data[i] = data[i]
+ last_value = data[i]
+
+ return ip_data[:,0], vuv_vector[:,0]
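+
+# Illustrative behaviour on a made-up contour:
+#   interpolate_f0(np.array([0., 220., 0., 0., 230., 0.]))
+# returns approximately ([220, 220, 225, 230, 230, 230], [0, 1, 0, 0, 1, 0]):
+# unvoiced frames are filled from neighbouring voiced frames, while the second
+# array keeps the original voiced/unvoiced flags.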
+
+
+def compute_f0_parselmouth(wav_numpy, p_len=None, sampling_rate=44100, hop_length=512):
+ import parselmouth
+ x = wav_numpy
+ if p_len is None:
+ p_len = x.shape[0]//hop_length
+ else:
+ assert abs(p_len-x.shape[0]//hop_length) < 4, "pad length error"
+ time_step = hop_length / sampling_rate * 1000
+ f0_min = 50
+ f0_max = 1100
+ f0 = parselmouth.Sound(x, sampling_rate).to_pitch_ac(
+ time_step=time_step / 1000, voicing_threshold=0.6,
+ pitch_floor=f0_min, pitch_ceiling=f0_max).selected_array['frequency']
+
+ pad_size=(p_len - len(f0) + 1) // 2
+ if(pad_size>0 or p_len - len(f0) - pad_size>0):
+ f0 = np.pad(f0,[[pad_size,p_len - len(f0) - pad_size]], mode='constant')
+ return f0
+
+def resize_f0(x, target_len):
+ source = np.array(x)
+ source[source<0.001] = np.nan
+ target = np.interp(np.arange(0, len(source)*target_len, len(source))/ target_len, np.arange(0, len(source)), source)
+ res = np.nan_to_num(target)
+ return res
+
+def compute_f0_dio(wav_numpy, p_len=None, sampling_rate=44100, hop_length=512):
+ import pyworld
+ if p_len is None:
+ p_len = wav_numpy.shape[0]//hop_length
+ f0, t = pyworld.dio(
+ wav_numpy.astype(np.double),
+ fs=sampling_rate,
+ f0_ceil=800,
+ frame_period=1000 * hop_length / sampling_rate,
+ )
+ f0 = pyworld.stonemask(wav_numpy.astype(np.double), f0, t, sampling_rate)
+ for index, pitch in enumerate(f0):
+ f0[index] = round(pitch, 1)
+ return resize_f0(f0, p_len)
+
+def f0_to_coarse(f0):
+ is_torch = isinstance(f0, torch.Tensor)
+ f0_mel = 1127 * (1 + f0 / 700).log() if is_torch else 1127 * np.log(1 + f0 / 700)
+ f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * (f0_bin - 2) / (f0_mel_max - f0_mel_min) + 1
+
+ f0_mel[f0_mel <= 1] = 1
+ f0_mel[f0_mel > f0_bin - 1] = f0_bin - 1
+ f0_coarse = (f0_mel + 0.5).long() if is_torch else np.rint(f0_mel).astype(int)
+ assert f0_coarse.max() <= 255 and f0_coarse.min() >= 1, (f0_coarse.max(), f0_coarse.min())
+ return f0_coarse
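+
+# Quantization sketch: f0 is mapped onto a mel scale (1127 * ln(1 + f0/700)) and
+# then linearly rescaled into bins 1..255 between f0_min=50 Hz and f0_max=1100 Hz,
+# with bin 1 reserved for unvoiced frames; e.g. 440 Hz lands around bin 122.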
+
+
+def get_hubert_model():
+ vec_path = "hubert/checkpoint_best_legacy_500.pt"
+ print("load model(s) from {}".format(vec_path))
+ from fairseq import checkpoint_utils
+ models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task(
+ [vec_path],
+ suffix="",
+ )
+ model = models[0]
+ model.eval()
+ return model
+
+def get_hubert_content(hmodel, wav_16k_tensor):
+ feats = wav_16k_tensor
+ if feats.dim() == 2: # double channels
+ feats = feats.mean(-1)
+ assert feats.dim() == 1, feats.dim()
+ feats = feats.view(1, -1)
+ padding_mask = torch.BoolTensor(feats.shape).fill_(False)
+ inputs = {
+ "source": feats.to(wav_16k_tensor.device),
+ "padding_mask": padding_mask.to(wav_16k_tensor.device),
+ "output_layer": 9, # layer 9
+ }
+ with torch.no_grad():
+ logits = hmodel.extract_features(**inputs)
+ feats = hmodel.final_proj(logits[0])
+ return feats.transpose(1, 2)
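+
+# Rough usage sketch (illustrative; "example.wav" is a placeholder path and the
+# feature size assumes the checkpoint_best_legacy_500 model, whose final_proj is
+# 256-dimensional):
+#   hmodel = get_hubert_model().to(device)
+#   wav16k, _ = librosa.load("example.wav", sr=16000)
+#   c = get_hubert_content(hmodel, torch.from_numpy(wav16k).to(device))  # (1, 256, T)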
+
+
+def get_content(cmodel, y):
+ with torch.no_grad():
+ c = cmodel.extract_features(y.squeeze(1))[0]
+ c = c.transpose(1, 2)
+ return c
+
+
+
+def load_checkpoint(checkpoint_path, model, optimizer=None, skip_optimizer=False):
+ assert os.path.isfile(checkpoint_path)
+ checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')
+ iteration = checkpoint_dict['iteration']
+ learning_rate = checkpoint_dict['learning_rate']
+ if optimizer is not None and not skip_optimizer and checkpoint_dict['optimizer'] is not None:
+ optimizer.load_state_dict(checkpoint_dict['optimizer'])
+ saved_state_dict = checkpoint_dict['model']
+ if hasattr(model, 'module'):
+ state_dict = model.module.state_dict()
+ else:
+ state_dict = model.state_dict()
+ new_state_dict = {}
+ for k, v in state_dict.items():
+ try:
+ # assert "dec" in k or "disc" in k
+ # print("load", k)
+ new_state_dict[k] = saved_state_dict[k]
+ assert saved_state_dict[k].shape == v.shape, (saved_state_dict[k].shape, v.shape)
+ except Exception:
+ print("error, %s is not in the checkpoint" % k)
+ logger.info("%s is not in the checkpoint" % k)
+ new_state_dict[k] = v
+ if hasattr(model, 'module'):
+ model.module.load_state_dict(new_state_dict)
+ else:
+ model.load_state_dict(new_state_dict)
+ logger.info("Loaded checkpoint '{}' (iteration {})".format(
+ checkpoint_path, iteration))
+ return model, optimizer, learning_rate, iteration
+
+
+def save_checkpoint(model, optimizer, learning_rate, iteration, checkpoint_path):
+ logger.info("Saving model and optimizer state at iteration {} to {}".format(
+ iteration, checkpoint_path))
+ if hasattr(model, 'module'):
+ state_dict = model.module.state_dict()
+ else:
+ state_dict = model.state_dict()
+ torch.save({'model': state_dict,
+ 'iteration': iteration,
+ 'optimizer': optimizer.state_dict(),
+ 'learning_rate': learning_rate}, checkpoint_path)
+
+def clean_checkpoints(path_to_models='logs/44k/', n_ckpts_to_keep=2, sort_by_time=True):
+ """Freeing up space by deleting saved ckpts
+
+ Arguments:
+ path_to_models -- Path to the model directory
+ n_ckpts_to_keep -- Number of ckpts to keep, excluding G_0.pth and D_0.pth
+ sort_by_time -- True -> chronologically delete ckpts
+ False -> lexicographically delete ckpts
+ """
+ ckpts_files = [f for f in os.listdir(path_to_models) if os.path.isfile(os.path.join(path_to_models, f))]
+ name_key = (lambda _f: int(re.compile(r'._(\d+)\.pth').match(_f).group(1)))
+ time_key = (lambda _f: os.path.getmtime(os.path.join(path_to_models, _f)))
+ sort_key = time_key if sort_by_time else name_key
+ x_sorted = lambda _x: sorted([f for f in ckpts_files if f.startswith(_x) and not f.endswith('_0.pth')], key=sort_key)
+ to_del = [os.path.join(path_to_models, fn) for fn in
+ (x_sorted('G')[:-n_ckpts_to_keep] + x_sorted('D')[:-n_ckpts_to_keep])]
+ del_info = lambda fn: logger.info(f".. Free up space by deleting ckpt {fn}")
+ del_routine = lambda x: [os.remove(x), del_info(x)]
+ rs = [del_routine(fn) for fn in to_del]
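+
+# Example call matching the default training layout used elsewhere in this repo:
+#   clean_checkpoints(path_to_models='logs/44k/', n_ckpts_to_keep=2, sort_by_time=True)
+# keeps the two newest G_*.pth / D_*.pth files and never touches G_0.pth / D_0.pth.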
+
+def summarize(writer, global_step, scalars={}, histograms={}, images={}, audios={}, audio_sampling_rate=22050):
+ for k, v in scalars.items():
+ writer.add_scalar(k, v, global_step)
+ for k, v in histograms.items():
+ writer.add_histogram(k, v, global_step)
+ for k, v in images.items():
+ writer.add_image(k, v, global_step, dataformats='HWC')
+ for k, v in audios.items():
+ writer.add_audio(k, v, global_step, audio_sampling_rate)
+
+
+def latest_checkpoint_path(dir_path, regex="G_*.pth"):
+ f_list = glob.glob(os.path.join(dir_path, regex))
+ f_list.sort(key=lambda f: int("".join(filter(str.isdigit, f))))
+ x = f_list[-1]
+ print(x)
+ return x
+
+
+def plot_spectrogram_to_numpy(spectrogram):
+ global MATPLOTLIB_FLAG
+ if not MATPLOTLIB_FLAG:
+ import matplotlib
+ matplotlib.use("Agg")
+ MATPLOTLIB_FLAG = True
+ mpl_logger = logging.getLogger('matplotlib')
+ mpl_logger.setLevel(logging.WARNING)
+ import matplotlib.pylab as plt
+ import numpy as np
+
+ fig, ax = plt.subplots(figsize=(10,2))
+ im = ax.imshow(spectrogram, aspect="auto", origin="lower",
+ interpolation='none')
+ plt.colorbar(im, ax=ax)
+ plt.xlabel("Frames")
+ plt.ylabel("Channels")
+ plt.tight_layout()
+
+ fig.canvas.draw()
+ data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
+ data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
+ plt.close()
+ return data
+
+
+def plot_alignment_to_numpy(alignment, info=None):
+ global MATPLOTLIB_FLAG
+ if not MATPLOTLIB_FLAG:
+ import matplotlib
+ matplotlib.use("Agg")
+ MATPLOTLIB_FLAG = True
+ mpl_logger = logging.getLogger('matplotlib')
+ mpl_logger.setLevel(logging.WARNING)
+ import matplotlib.pylab as plt
+ import numpy as np
+
+ fig, ax = plt.subplots(figsize=(6, 4))
+ im = ax.imshow(alignment.transpose(), aspect='auto', origin='lower',
+ interpolation='none')
+ fig.colorbar(im, ax=ax)
+ xlabel = 'Decoder timestep'
+ if info is not None:
+ xlabel += '\n\n' + info
+ plt.xlabel(xlabel)
+ plt.ylabel('Encoder timestep')
+ plt.tight_layout()
+
+ fig.canvas.draw()
+ data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
+ data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
+ plt.close()
+ return data
+
+
+def load_wav_to_torch(full_path):
+ sampling_rate, data = read(full_path)
+ return torch.FloatTensor(data.astype(np.float32)), sampling_rate
+
+
+def load_filepaths_and_text(filename, split="|"):
+ with open(filename, encoding='utf-8') as f:
+ filepaths_and_text = [line.strip().split(split) for line in f]
+ return filepaths_and_text
+
+
+def get_hparams(init=True):
+ parser = argparse.ArgumentParser()
+ parser.add_argument('-c', '--config', type=str, default="./configs/base.json",
+ help='JSON file for configuration')
+ parser.add_argument('-m', '--model', type=str, required=True,
+ help='Model name')
+
+ args = parser.parse_args()
+ model_dir = os.path.join("./logs", args.model)
+
+ if not os.path.exists(model_dir):
+ os.makedirs(model_dir)
+
+ config_path = args.config
+ config_save_path = os.path.join(model_dir, "config.json")
+ if init:
+ with open(config_path, "r") as f:
+ data = f.read()
+ with open(config_save_path, "w") as f:
+ f.write(data)
+ else:
+ with open(config_save_path, "r") as f:
+ data = f.read()
+ config = json.loads(data)
+
+ hparams = HParams(**config)
+ hparams.model_dir = model_dir
+ return hparams
+
+
+def get_hparams_from_dir(model_dir):
+ config_save_path = os.path.join(model_dir, "config.json")
+ with open(config_save_path, "r") as f:
+ data = f.read()
+ config = json.loads(data)
+
+ hparams = HParams(**config)
+ hparams.model_dir = model_dir
+ return hparams
+
+
+def get_hparams_from_file(config_path):
+ with open(config_path, "r") as f:
+ data = f.read()
+ config = json.loads(data)
+
+ hparams = HParams(**config)
+ return hparams
+
+
+def check_git_hash(model_dir):
+ source_dir = os.path.dirname(os.path.realpath(__file__))
+ if not os.path.exists(os.path.join(source_dir, ".git")):
+ logger.warn("{} is not a git repository, therefore hash value comparison will be ignored.".format(
+ source_dir
+ ))
+ return
+
+ cur_hash = subprocess.getoutput("git rev-parse HEAD")
+
+ path = os.path.join(model_dir, "githash")
+ if os.path.exists(path):
+ saved_hash = open(path).read()
+ if saved_hash != cur_hash:
+ logger.warn("git hash values are different. {}(saved) != {}(current)".format(
+ saved_hash[:8], cur_hash[:8]))
+ else:
+ open(path, "w").write(cur_hash)
+
+
+def get_logger(model_dir, filename="train.log"):
+ global logger
+ logger = logging.getLogger(os.path.basename(model_dir))
+ logger.setLevel(logging.DEBUG)
+
+ formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s")
+ if not os.path.exists(model_dir):
+ os.makedirs(model_dir)
+ h = logging.FileHandler(os.path.join(model_dir, filename))
+ h.setLevel(logging.DEBUG)
+ h.setFormatter(formatter)
+ logger.addHandler(h)
+ return logger
+
+
+def repeat_expand_2d(content, target_len):
+ # content : [h, t]
+
+ src_len = content.shape[-1]
+ target = torch.zeros([content.shape[0], target_len], dtype=torch.float).to(content.device)
+ temp = torch.arange(src_len+1) * target_len / src_len
+ current_pos = 0
+ for i in range(target_len):
+ if i < temp[current_pos+1]:
+ target[:, i] = content[:, current_pos]
+ else:
+ current_pos += 1
+ target[:, i] = content[:, current_pos]
+
+ return target
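+
+# Illustration with hypothetical sizes: expanding 100 content frames to 250
+# output frames repeats each source frame two or three times:
+#   out = repeat_expand_2d(torch.randn(256, 100), 250)  # out.shape == (256, 250)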
+
+
+class HParams():
+ def __init__(self, **kwargs):
+ for k, v in kwargs.items():
+ if isinstance(v, dict):
+ v = HParams(**v)
+ self[k] = v
+
+ def keys(self):
+ return self.__dict__.keys()
+
+ def items(self):
+ return self.__dict__.items()
+
+ def values(self):
+ return self.__dict__.values()
+
+ def __len__(self):
+ return len(self.__dict__)
+
+ def __getitem__(self, key):
+ return getattr(self, key)
+
+ def __setitem__(self, key, value):
+ return setattr(self, key, value)
+
+ def __contains__(self, key):
+ return key in self.__dict__
+
+ def __repr__(self):
+ return self.__dict__.__repr__()
+
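+# Minimal illustration with a hypothetical config dict (mirroring how
+# get_hparams() builds HParams from config.json): nested dicts become nested
+# HParams, so values are reachable both as attributes and as items:
+#   hps = HParams(**{"train": {"batch_size": 16}, "model": {"hidden_channels": 192}})
+#   hps.train.batch_size             # -> 16
+#   hps["model"]["hidden_channels"]  # -> 192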
diff --git a/vdecoder/__init__.py b/vdecoder/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/vdecoder/hifigan/env.py b/vdecoder/hifigan/env.py
new file mode 100644
index 0000000000000000000000000000000000000000..2bdbc95d4f7a8bad8fd4f5eef657e2b51d946056
--- /dev/null
+++ b/vdecoder/hifigan/env.py
@@ -0,0 +1,15 @@
+import os
+import shutil
+
+
+class AttrDict(dict):
+ def __init__(self, *args, **kwargs):
+ super(AttrDict, self).__init__(*args, **kwargs)
+ self.__dict__ = self
+
+
+def build_env(config, config_name, path):
+ t_path = os.path.join(path, config_name)
+ if config != t_path:
+ os.makedirs(path, exist_ok=True)
+ shutil.copyfile(config, os.path.join(path, config_name))
diff --git a/vdecoder/hifigan/models.py b/vdecoder/hifigan/models.py
new file mode 100644
index 0000000000000000000000000000000000000000..9747301f350bb269e62601017fe4633ce271b27e
--- /dev/null
+++ b/vdecoder/hifigan/models.py
@@ -0,0 +1,503 @@
+import os
+import json
+from .env import AttrDict
+import numpy as np
+import torch
+import torch.nn.functional as F
+import torch.nn as nn
+from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
+from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
+from .utils import init_weights, get_padding
+
+LRELU_SLOPE = 0.1
+
+
+def load_model(model_path, device='cuda'):
+ config_file = os.path.join(os.path.split(model_path)[0], 'config.json')
+ with open(config_file) as f:
+ data = f.read()
+
+ global h
+ json_config = json.loads(data)
+ h = AttrDict(json_config)
+
+ generator = Generator(h).to(device)
+
+ cp_dict = torch.load(model_path)
+ generator.load_state_dict(cp_dict['generator'])
+ generator.eval()
+ generator.remove_weight_norm()
+ del cp_dict
+ return generator, h
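+
+# Standalone usage sketch (paths are placeholders; the checkpoint is expected to
+# be a plain HiFi-GAN dict with a 'generator' entry and a config.json beside it):
+#   generator, h = load_model("path/to/generator.pt", device="cuda")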
+
+
+class ResBlock1(torch.nn.Module):
+ def __init__(self, h, channels, kernel_size=3, dilation=(1, 3, 5)):
+ super(ResBlock1, self).__init__()
+ self.h = h
+ self.convs1 = nn.ModuleList([
+ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
+ padding=get_padding(kernel_size, dilation[0]))),
+ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
+ padding=get_padding(kernel_size, dilation[1]))),
+ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2],
+ padding=get_padding(kernel_size, dilation[2])))
+ ])
+ self.convs1.apply(init_weights)
+
+ self.convs2 = nn.ModuleList([
+ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
+ padding=get_padding(kernel_size, 1))),
+ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
+ padding=get_padding(kernel_size, 1))),
+ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
+ padding=get_padding(kernel_size, 1)))
+ ])
+ self.convs2.apply(init_weights)
+
+ def forward(self, x):
+ for c1, c2 in zip(self.convs1, self.convs2):
+ xt = F.leaky_relu(x, LRELU_SLOPE)
+ xt = c1(xt)
+ xt = F.leaky_relu(xt, LRELU_SLOPE)
+ xt = c2(xt)
+ x = xt + x
+ return x
+
+ def remove_weight_norm(self):
+ for l in self.convs1:
+ remove_weight_norm(l)
+ for l in self.convs2:
+ remove_weight_norm(l)
+
+
+class ResBlock2(torch.nn.Module):
+ def __init__(self, h, channels, kernel_size=3, dilation=(1, 3)):
+ super(ResBlock2, self).__init__()
+ self.h = h
+ self.convs = nn.ModuleList([
+ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
+ padding=get_padding(kernel_size, dilation[0]))),
+ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
+ padding=get_padding(kernel_size, dilation[1])))
+ ])
+ self.convs.apply(init_weights)
+
+ def forward(self, x):
+ for c in self.convs:
+ xt = F.leaky_relu(x, LRELU_SLOPE)
+ xt = c(xt)
+ x = xt + x
+ return x
+
+ def remove_weight_norm(self):
+ for l in self.convs:
+ remove_weight_norm(l)
+
+
+def padDiff(x):
+ return F.pad(F.pad(x, (0,0,-1,1), 'constant', 0) - x, (0,0,0,-1), 'constant', 0)
+
+class SineGen(torch.nn.Module):
+ """ Definition of sine generator
+ SineGen(samp_rate, harmonic_num = 0,
+ sine_amp = 0.1, noise_std = 0.003,
+ voiced_threshold = 0,
+ flag_for_pulse=False)
+ samp_rate: sampling rate in Hz
+ harmonic_num: number of harmonic overtones (default 0)
+ sine_amp: amplitude of sine-wavefrom (default 0.1)
+ noise_std: std of Gaussian noise (default 0.003)
+ voiced_thoreshold: F0 threshold for U/V classification (default 0)
+ flag_for_pulse: this SinGen is used inside PulseGen (default False)
+ Note: when flag_for_pulse is True, the first time step of a voiced
+ segment is always sin(np.pi) or cos(0)
+ """
+
+ def __init__(self, samp_rate, harmonic_num=0,
+ sine_amp=0.1, noise_std=0.003,
+ voiced_threshold=0,
+ flag_for_pulse=False):
+ super(SineGen, self).__init__()
+ self.sine_amp = sine_amp
+ self.noise_std = noise_std
+ self.harmonic_num = harmonic_num
+ self.dim = self.harmonic_num + 1
+ self.sampling_rate = samp_rate
+ self.voiced_threshold = voiced_threshold
+ self.flag_for_pulse = flag_for_pulse
+
+ def _f02uv(self, f0):
+ # generate uv signal
+ uv = (f0 > self.voiced_threshold).type(torch.float32)
+ return uv
+
+ def _f02sine(self, f0_values):
+ """ f0_values: (batchsize, length, dim)
+ where dim indicates fundamental tone and overtones
+ """
+ # convert to F0 in rad. The interger part n can be ignored
+ # because 2 * np.pi * n doesn't affect phase
+ rad_values = (f0_values / self.sampling_rate) % 1
+
+ # initial phase noise (no noise for fundamental component)
+ rand_ini = torch.rand(f0_values.shape[0], f0_values.shape[2], \
+ device=f0_values.device)
+ rand_ini[:, 0] = 0
+ rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini
+
+ # instantanouse phase sine[t] = sin(2*pi \sum_i=1 ^{t} rad)
+ if not self.flag_for_pulse:
+ # for normal case
+
+ # To prevent torch.cumsum numerical overflow,
+ # it is necessary to add -1 whenever \sum_k=1^n rad_value_k > 1.
+ # Buffer tmp_over_one_idx indicates the time step to add -1.
+ # This will not change F0 of sine because (x-1) * 2*pi = x * 2*pi
+ tmp_over_one = torch.cumsum(rad_values, 1) % 1
+ tmp_over_one_idx = (padDiff(tmp_over_one)) < 0
+ cumsum_shift = torch.zeros_like(rad_values)
+ cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0
+
+ sines = torch.sin(torch.cumsum(rad_values + cumsum_shift, dim=1)
+ * 2 * np.pi)
+ else:
+ # If necessary, make sure that the first time step of every
+ # voiced segments is sin(pi) or cos(0)
+ # This is used for pulse-train generation
+
+ # identify the last time step in unvoiced segments
+ uv = self._f02uv(f0_values)
+ uv_1 = torch.roll(uv, shifts=-1, dims=1)
+ uv_1[:, -1, :] = 1
+ u_loc = (uv < 1) * (uv_1 > 0)
+
+ # get the instantanouse phase
+ tmp_cumsum = torch.cumsum(rad_values, dim=1)
+ # different batch needs to be processed differently
+ for idx in range(f0_values.shape[0]):
+ temp_sum = tmp_cumsum[idx, u_loc[idx, :, 0], :]
+ temp_sum[1:, :] = temp_sum[1:, :] - temp_sum[0:-1, :]
+ # stores the accumulation of i.phase within
+ # each voiced segments
+ tmp_cumsum[idx, :, :] = 0
+ tmp_cumsum[idx, u_loc[idx, :, 0], :] = temp_sum
+
+ # rad_values - tmp_cumsum: remove the accumulation of i.phase
+ # within the previous voiced segment.
+ i_phase = torch.cumsum(rad_values - tmp_cumsum, dim=1)
+
+ # get the sines
+ sines = torch.cos(i_phase * 2 * np.pi)
+ return sines
+
+ def forward(self, f0):
+ """ sine_tensor, uv = forward(f0)
+ input F0: tensor(batchsize=1, length, dim=1)
+ f0 for unvoiced steps should be 0
+ output sine_tensor: tensor(batchsize=1, length, dim)
+ output uv: tensor(batchsize=1, length, 1)
+ """
+ with torch.no_grad():
+ f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim,
+ device=f0.device)
+ # fundamental component
+ fn = torch.multiply(f0, torch.FloatTensor([[range(1, self.harmonic_num + 2)]]).to(f0.device))
+
+ # generate sine waveforms
+ sine_waves = self._f02sine(fn) * self.sine_amp
+
+ # generate uv signal
+ # uv = torch.ones(f0.shape)
+ # uv = uv * (f0 > self.voiced_threshold)
+ uv = self._f02uv(f0)
+
+ # noise: for unvoiced should be similar to sine_amp
+ # std = self.sine_amp/3 -> max value ~ self.sine_amp
+ # . for voiced regions is self.noise_std
+ noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3
+ noise = noise_amp * torch.randn_like(sine_waves)
+
+ # first: set the unvoiced part to 0 by uv
+ # then: additive noise
+ sine_waves = sine_waves * uv + noise
+ return sine_waves, uv, noise
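+
+# Minimal sketch of the generator on its own (illustrative values; in this file
+# it is only instantiated by SourceModuleHnNSF with samp_rate = h["sampling_rate"]):
+#   sine_gen = SineGen(samp_rate=44100, harmonic_num=8)
+#   f0 = torch.full((1, 100, 1), 220.0)       # 100 frames of constant 220 Hz
+#   sine_waves, uv, noise = sine_gen(f0)      # shapes (1, 100, 9), (1, 100, 1), (1, 100, 9)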
+
+
+class SourceModuleHnNSF(torch.nn.Module):
+ """ SourceModule for hn-nsf
+ SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1,
+ add_noise_std=0.003, voiced_threshod=0)
+ sampling_rate: sampling_rate in Hz
+ harmonic_num: number of harmonic above F0 (default: 0)
+ sine_amp: amplitude of sine source signal (default: 0.1)
+ add_noise_std: std of additive Gaussian noise (default: 0.003)
+ note that amplitude of noise in unvoiced is decided
+ by sine_amp
+ voiced_threshold: threhold to set U/V given F0 (default: 0)
+ Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
+ F0_sampled (batchsize, length, 1)
+ Sine_source (batchsize, length, 1)
+ noise_source (batchsize, length 1)
+ uv (batchsize, length, 1)
+ """
+
+ def __init__(self, sampling_rate, harmonic_num=0, sine_amp=0.1,
+ add_noise_std=0.003, voiced_threshod=0):
+ super(SourceModuleHnNSF, self).__init__()
+
+ self.sine_amp = sine_amp
+ self.noise_std = add_noise_std
+
+ # to produce sine waveforms
+ self.l_sin_gen = SineGen(sampling_rate, harmonic_num,
+ sine_amp, add_noise_std, voiced_threshod)
+
+ # to merge source harmonics into a single excitation
+ self.l_linear = torch.nn.Linear(harmonic_num + 1, 1)
+ self.l_tanh = torch.nn.Tanh()
+
+ def forward(self, x):
+ """
+ Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
+ F0_sampled (batchsize, length, 1)
+ Sine_source (batchsize, length, 1)
+ noise_source (batchsize, length 1)
+ """
+ # source for harmonic branch
+ sine_wavs, uv, _ = self.l_sin_gen(x)
+ sine_merge = self.l_tanh(self.l_linear(sine_wavs))
+
+ # source for noise branch, in the same shape as uv
+ noise = torch.randn_like(uv) * self.sine_amp / 3
+ return sine_merge, noise, uv
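+
+# Compared to SineGen, this module additionally collapses the fundamental plus
+# harmonic_num overtones into a single excitation channel via Linear + Tanh, so
+# sine_merge has shape (batchsize, length, 1) regardless of harmonic_num.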
+
+
+class Generator(torch.nn.Module):
+ def __init__(self, h):
+ super(Generator, self).__init__()
+ self.h = h
+
+ self.num_kernels = len(h["resblock_kernel_sizes"])
+ self.num_upsamples = len(h["upsample_rates"])
+ self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(h["upsample_rates"]))
+ self.m_source = SourceModuleHnNSF(
+ sampling_rate=h["sampling_rate"],
+ harmonic_num=8)
+ self.noise_convs = nn.ModuleList()
+ self.conv_pre = weight_norm(Conv1d(h["inter_channels"], h["upsample_initial_channel"], 7, 1, padding=3))
+ resblock = ResBlock1 if h["resblock"] == '1' else ResBlock2
+ self.ups = nn.ModuleList()
+ for i, (u, k) in enumerate(zip(h["upsample_rates"], h["upsample_kernel_sizes"])):
+ c_cur = h["upsample_initial_channel"] // (2 ** (i + 1))
+ self.ups.append(weight_norm(
+ ConvTranspose1d(h["upsample_initial_channel"] // (2 ** i), h["upsample_initial_channel"] // (2 ** (i + 1)),
+ k, u, padding=(k - u) // 2)))
+ if i + 1 < len(h["upsample_rates"]): #
+ stride_f0 = np.prod(h["upsample_rates"][i + 1:])
+ self.noise_convs.append(Conv1d(
+ 1, c_cur, kernel_size=stride_f0 * 2, stride=stride_f0, padding=stride_f0 // 2))
+ else:
+ self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1))
+ self.resblocks = nn.ModuleList()
+ for i in range(len(self.ups)):
+ ch = h["upsample_initial_channel"] // (2 ** (i + 1))
+ for j, (k, d) in enumerate(zip(h["resblock_kernel_sizes"], h["resblock_dilation_sizes"])):
+ self.resblocks.append(resblock(h, ch, k, d))
+
+ self.conv_post = weight_norm(Conv1d(ch, 1, 7, 1, padding=3))
+ self.ups.apply(init_weights)
+ self.conv_post.apply(init_weights)
+ self.cond = nn.Conv1d(h['gin_channels'], h['upsample_initial_channel'], 1)
+
+ def forward(self, x, f0, g=None):
+ # print(1,x.shape,f0.shape,f0[:, None].shape)
+ f0 = self.f0_upsamp(f0[:, None]).transpose(1, 2) # bs,n,t
+ # print(2,f0.shape)
+ har_source, noi_source, uv = self.m_source(f0)
+ har_source = har_source.transpose(1, 2)
+ x = self.conv_pre(x)
+ x = x + self.cond(g)
+ # print(124,x.shape,har_source.shape)
+ for i in range(self.num_upsamples):
+ x = F.leaky_relu(x, LRELU_SLOPE)
+ # print(3,x.shape)
+ x = self.ups[i](x)
+ x_source = self.noise_convs[i](har_source)
+ # print(4,x_source.shape,har_source.shape,x.shape)
+ x = x + x_source
+ xs = None
+ for j in range(self.num_kernels):
+ if xs is None:
+ xs = self.resblocks[i * self.num_kernels + j](x)
+ else:
+ xs += self.resblocks[i * self.num_kernels + j](x)
+ x = xs / self.num_kernels
+ x = F.leaky_relu(x)
+ x = self.conv_post(x)
+ x = torch.tanh(x)
+
+ return x
+
+ def remove_weight_norm(self):
+ print('Removing weight norm...')
+ for l in self.ups:
+ remove_weight_norm(l)
+ for l in self.resblocks:
+ l.remove_weight_norm()
+ remove_weight_norm(self.conv_pre)
+ remove_weight_norm(self.conv_post)
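+
+# Shape sketch for a forward pass (hypothetical sizes; the time upsampling
+# factor is prod(h["upsample_rates"]), e.g. 512 for a 44.1 kHz config):
+#   x: (B, h["inter_channels"], T_frames), f0: (B, T_frames), g: (B, h["gin_channels"], 1)
+#   out = generator(x, f0, g)   # (B, 1, T_frames * prod(h["upsample_rates"]))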
+
+
+class DiscriminatorP(torch.nn.Module):
+ def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
+ super(DiscriminatorP, self).__init__()
+ self.period = period
+ norm_f = weight_norm if not use_spectral_norm else spectral_norm
+ self.convs = nn.ModuleList([
+ norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
+ norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
+ norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
+ norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
+ norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(2, 0))),
+ ])
+ self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
+
+ def forward(self, x):
+ fmap = []
+
+ # 1d to 2d
+ b, c, t = x.shape
+ if t % self.period != 0: # pad first
+ n_pad = self.period - (t % self.period)
+ x = F.pad(x, (0, n_pad), "reflect")
+ t = t + n_pad
+ x = x.view(b, c, t // self.period, self.period)
+
+ for l in self.convs:
+ x = l(x)
+ x = F.leaky_relu(x, LRELU_SLOPE)
+ fmap.append(x)
+ x = self.conv_post(x)
+ fmap.append(x)
+ x = torch.flatten(x, 1, -1)
+
+ return x, fmap
+
+
+class MultiPeriodDiscriminator(torch.nn.Module):
+ def __init__(self, periods=None):
+ super(MultiPeriodDiscriminator, self).__init__()
+ self.periods = periods if periods is not None else [2, 3, 5, 7, 11]
+ self.discriminators = nn.ModuleList()
+ for period in self.periods:
+ self.discriminators.append(DiscriminatorP(period))
+
+ def forward(self, y, y_hat):
+ y_d_rs = []
+ y_d_gs = []
+ fmap_rs = []
+ fmap_gs = []
+ for i, d in enumerate(self.discriminators):
+ y_d_r, fmap_r = d(y)
+ y_d_g, fmap_g = d(y_hat)
+ y_d_rs.append(y_d_r)
+ fmap_rs.append(fmap_r)
+ y_d_gs.append(y_d_g)
+ fmap_gs.append(fmap_g)
+
+ return y_d_rs, y_d_gs, fmap_rs, fmap_gs
+
+
+class DiscriminatorS(torch.nn.Module):
+ def __init__(self, use_spectral_norm=False):
+ super(DiscriminatorS, self).__init__()
+ norm_f = weight_norm if not use_spectral_norm else spectral_norm
+ self.convs = nn.ModuleList([
+ norm_f(Conv1d(1, 128, 15, 1, padding=7)),
+ norm_f(Conv1d(128, 128, 41, 2, groups=4, padding=20)),
+ norm_f(Conv1d(128, 256, 41, 2, groups=16, padding=20)),
+ norm_f(Conv1d(256, 512, 41, 4, groups=16, padding=20)),
+ norm_f(Conv1d(512, 1024, 41, 4, groups=16, padding=20)),
+ norm_f(Conv1d(1024, 1024, 41, 1, groups=16, padding=20)),
+ norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
+ ])
+ self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
+
+ def forward(self, x):
+ fmap = []
+ for l in self.convs:
+ x = l(x)
+ x = F.leaky_relu(x, LRELU_SLOPE)
+ fmap.append(x)
+ x = self.conv_post(x)
+ fmap.append(x)
+ x = torch.flatten(x, 1, -1)
+
+ return x, fmap
+
+
+class MultiScaleDiscriminator(torch.nn.Module):
+ def __init__(self):
+ super(MultiScaleDiscriminator, self).__init__()
+ self.discriminators = nn.ModuleList([
+ DiscriminatorS(use_spectral_norm=True),
+ DiscriminatorS(),
+ DiscriminatorS(),
+ ])
+ self.meanpools = nn.ModuleList([
+ AvgPool1d(4, 2, padding=2),
+ AvgPool1d(4, 2, padding=2)
+ ])
+
+ def forward(self, y, y_hat):
+ y_d_rs = []
+ y_d_gs = []
+ fmap_rs = []
+ fmap_gs = []
+ for i, d in enumerate(self.discriminators):
+ if i != 0:
+ y = self.meanpools[i - 1](y)
+ y_hat = self.meanpools[i - 1](y_hat)
+ y_d_r, fmap_r = d(y)
+ y_d_g, fmap_g = d(y_hat)
+ y_d_rs.append(y_d_r)
+ fmap_rs.append(fmap_r)
+ y_d_gs.append(y_d_g)
+ fmap_gs.append(fmap_g)
+
+ return y_d_rs, y_d_gs, fmap_rs, fmap_gs
+
+
+def feature_loss(fmap_r, fmap_g):
+ loss = 0
+ for dr, dg in zip(fmap_r, fmap_g):
+ for rl, gl in zip(dr, dg):
+ loss += torch.mean(torch.abs(rl - gl))
+
+ return loss * 2
+
+
+def discriminator_loss(disc_real_outputs, disc_generated_outputs):
+ loss = 0
+ r_losses = []
+ g_losses = []
+ for dr, dg in zip(disc_real_outputs, disc_generated_outputs):
+ r_loss = torch.mean((1 - dr) ** 2)
+ g_loss = torch.mean(dg ** 2)
+ loss += (r_loss + g_loss)
+ r_losses.append(r_loss.item())
+ g_losses.append(g_loss.item())
+
+ return loss, r_losses, g_losses
+
+
+def generator_loss(disc_outputs):
+ loss = 0
+ gen_losses = []
+ for dg in disc_outputs:
+ l = torch.mean((1 - dg) ** 2)
+ gen_losses.append(l)
+ loss += l
+
+ return loss, gen_losses
diff --git a/vdecoder/hifigan/nvSTFT.py b/vdecoder/hifigan/nvSTFT.py
new file mode 100644
index 0000000000000000000000000000000000000000..88597d62a505715091f9ba62d38bf0a85a31b95a
--- /dev/null
+++ b/vdecoder/hifigan/nvSTFT.py
@@ -0,0 +1,111 @@
+import math
+import os
+os.environ["LRU_CACHE_CAPACITY"] = "3"
+import random
+import torch
+import torch.utils.data
+import numpy as np
+import librosa
+from librosa.util import normalize
+from librosa.filters import mel as librosa_mel_fn
+from scipy.io.wavfile import read
+import soundfile as sf
+
+def load_wav_to_torch(full_path, target_sr=None, return_empty_on_exception=False):
+ sampling_rate = None
+ try:
+ data, sampling_rate = sf.read(full_path, always_2d=True)  # load audio with soundfile
+ except Exception as ex:
+ print(f"'{full_path}' failed to load.\nException:")
+ print(ex)
+ if return_empty_on_exception:
+ return [], sampling_rate or target_sr or 32000
+ else:
+ raise Exception(ex)
+
+ if len(data.shape) > 1:
+ data = data[:, 0]
+ assert len(data) > 2# check duration of audio file is > 2 samples (because otherwise the slice operation was on the wrong dimension)
+
+ if np.issubdtype(data.dtype, np.integer): # if audio data is type int
+ max_mag = -np.iinfo(data.dtype).min # maximum magnitude = min possible value of intXX
+ else: # if audio data is type fp32
+ max_mag = max(np.amax(data), -np.amin(data))
+ max_mag = (2**31)+1 if max_mag > (2**15) else ((2**15)+1 if max_mag > 1.01 else 1.0) # data should be either 16-bit INT, 32-bit INT or [-1 to 1] float32
+
+ data = torch.FloatTensor(data.astype(np.float32))/max_mag
+
+ if (torch.isinf(data) | torch.isnan(data)).any() and return_empty_on_exception:# resample will crash with inf/NaN inputs. return_empty_on_exception will return empty arr instead of except
+ return [], sampling_rate or target_sr or 32000
+ if target_sr is not None and sampling_rate != target_sr:
+ data = torch.from_numpy(librosa.core.resample(data.numpy(), orig_sr=sampling_rate, target_sr=target_sr))
+ sampling_rate = target_sr
+
+ return data, sampling_rate
+
+def dynamic_range_compression(x, C=1, clip_val=1e-5):
+ return np.log(np.clip(x, a_min=clip_val, a_max=None) * C)
+
+def dynamic_range_decompression(x, C=1):
+ return np.exp(x) / C
+
+def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
+ return torch.log(torch.clamp(x, min=clip_val) * C)
+
+def dynamic_range_decompression_torch(x, C=1):
+ return torch.exp(x) / C
+
+class STFT():
+ def __init__(self, sr=22050, n_mels=80, n_fft=1024, win_size=1024, hop_length=256, fmin=20, fmax=11025, clip_val=1e-5):
+ self.target_sr = sr
+
+ self.n_mels = n_mels
+ self.n_fft = n_fft
+ self.win_size = win_size
+ self.hop_length = hop_length
+ self.fmin = fmin
+ self.fmax = fmax
+ self.clip_val = clip_val
+ self.mel_basis = {}
+ self.hann_window = {}
+
+ def get_mel(self, y, center=False):
+ sampling_rate = self.target_sr
+ n_mels = self.n_mels
+ n_fft = self.n_fft
+ win_size = self.win_size
+ hop_length = self.hop_length
+ fmin = self.fmin
+ fmax = self.fmax
+ clip_val = self.clip_val
+
+ if torch.min(y) < -1.:
+ print('min value is ', torch.min(y))
+ if torch.max(y) > 1.:
+ print('max value is ', torch.max(y))
+
+ if str(fmax)+'_'+str(y.device) not in self.mel_basis:  # key must match the one stored below
+ mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=n_mels, fmin=fmin, fmax=fmax)
+ self.mel_basis[str(fmax)+'_'+str(y.device)] = torch.from_numpy(mel).float().to(y.device)
+ self.hann_window[str(y.device)] = torch.hann_window(self.win_size).to(y.device)
+
+ y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_length)/2), int((n_fft-hop_length)/2)), mode='reflect')
+ y = y.squeeze(1)
+
+ spec = torch.stft(y, n_fft, hop_length=hop_length, win_length=win_size, window=self.hann_window[str(y.device)],
+ center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False)
+ # print(111,spec)
+ spec = torch.sqrt(spec.pow(2).sum(-1)+(1e-9))
+ # print(222,spec)
+ spec = torch.matmul(self.mel_basis[str(fmax)+'_'+str(y.device)], spec)
+ # print(333,spec)
+ spec = dynamic_range_compression_torch(spec, clip_val=clip_val)
+ # print(444,spec)
+ return spec
+
+ def __call__(self, audiopath):
+ audio, sr = load_wav_to_torch(audiopath, target_sr=self.target_sr)
+ spect = self.get_mel(audio.unsqueeze(0)).squeeze(0)
+ return spect
+
+stft = STFT()
diff --git a/vdecoder/hifigan/utils.py b/vdecoder/hifigan/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..9c93c996d3cc73c30d71c1fc47056e4230f35c0f
--- /dev/null
+++ b/vdecoder/hifigan/utils.py
@@ -0,0 +1,68 @@
+import glob
+import os
+import matplotlib
+import torch
+from torch.nn.utils import weight_norm
+# matplotlib.use("Agg")
+import matplotlib.pylab as plt
+
+
+def plot_spectrogram(spectrogram):
+ fig, ax = plt.subplots(figsize=(10, 2))
+ im = ax.imshow(spectrogram, aspect="auto", origin="lower",
+ interpolation='none')
+ plt.colorbar(im, ax=ax)
+
+ fig.canvas.draw()
+ plt.close()
+
+ return fig
+
+
+def init_weights(m, mean=0.0, std=0.01):
+ classname = m.__class__.__name__
+ if classname.find("Conv") != -1:
+ m.weight.data.normal_(mean, std)
+
+
+def apply_weight_norm(m):
+ classname = m.__class__.__name__
+ if classname.find("Conv") != -1:
+ weight_norm(m)
+
+
+def get_padding(kernel_size, dilation=1):
+ return int((kernel_size*dilation - dilation)/2)
+
+
+def load_checkpoint(filepath, device):
+ assert os.path.isfile(filepath)
+ print("Loading '{}'".format(filepath))
+ checkpoint_dict = torch.load(filepath, map_location=device)
+ print("Complete.")
+ return checkpoint_dict
+
+
+def save_checkpoint(filepath, obj):
+ print("Saving checkpoint to {}".format(filepath))
+ torch.save(obj, filepath)
+ print("Complete.")
+
+
+def del_old_checkpoints(cp_dir, prefix, n_models=2):
+ pattern = os.path.join(cp_dir, prefix + '????????')
+ cp_list = glob.glob(pattern) # get checkpoint paths
+ cp_list = sorted(cp_list)# sort by iter
+ if len(cp_list) > n_models: # if more than n_models models are found
+ for cp in cp_list[:-n_models]:# delete the oldest models other than lastest n_models
+ open(cp, 'w').close()# empty file contents
+ os.unlink(cp)# delete file (move to trash when using Colab)
+
+
+def scan_checkpoint(cp_dir, prefix):
+ pattern = os.path.join(cp_dir, prefix + '????????')
+ cp_list = glob.glob(pattern)
+ if len(cp_list) == 0:
+ return None
+ return sorted(cp_list)[-1]
+