Update app.py
app.py
CHANGED
@@ -1,69 +1,461 @@
 import gradio as gr

-#
-
-
-
-
-
 }

-
-
-    return image_data[image_name]["mp3"]

-
-
-    pass

-
-
-
-

-#
-
-
-
-
-    gr.Slider(minimum=0.5, maximum=2.0, step=0.1, value=1.0, label="長さ調整", info="1.0未満で速度を上げ、1.0以上で速度を遅くします"),
-    gr.Slider(minimum=0.0, maximum=1.0, step=0.1, value=0.7, label="推論CFG率", info="わずかな影響があります"),
-    gr.Checkbox(label="F0条件付きモデルを使用", value=False, info="歌声変換には必須です"),
-    gr.Checkbox(label="F0自動調整", value=True, info="F0をおおよそ調整して目標音声に合わせます。F0条件付きモデル使用時にのみ有効です"),
-    gr.Slider(label='音程変換', minimum=-24, maximum=24, step=1, value=0, info="半音単位の音程変換。F0条件付きモデル使用時にのみ有効です"),
-]
-
-examples = [["examples/source/yae_0.wav", "examples/reference/dingzhen_0.wav", 25, 1.0, 0.7, False, True, 0],
-            ["examples/source/jay_0.wav", "examples/reference/azuma_0.wav", 25, 1.0, 0.7, True, True, 0],
-            ["examples/source/Wiz Khalifa,Charlie Puth - See You Again [vocals]_[cut_28sec].wav",
-             "examples/reference/teio_0.wav", 100, 1.0, 0.7, True, False, 0],
-            ["examples/source/TECHNOPOLIS - 2085 [vocals]_[cut_14sec].wav",
-             "examples/reference/trump_0.wav", 50, 1.0, 0.7, True, False, -12],
-            ]
-
-outputs = [gr.Audio(label="ストリーム出力音声", streaming=True, format='mp3'),
-           gr.Audio(label="完全出力音声", streaming=False, format='wav')]
-
-# List of image paths handed to the gallery
-gallery_images = [image_data["sikokumetan"]["webp"]]  # pass the image file path directly
-
-# Add the gallery
-gallery = gr.Gallery(
-    value=gallery_images,
-    label="選択した画像に基づく参考音声",
-    elem_id="image_gallery",
-    interactive=True,
-    grid=2  # lay the images out in two columns
 )

-
-gallery.change(fn=on_image_select, inputs=gallery, outputs=inputs[1])  # update the reference audio
-
-gr.Interface(fn=voice_conversion,
-             description=description,
-             inputs=inputs,
-             outputs=outputs,
-             title="Seed Voice Conversion",
-             examples=examples,
-             cache_examples=False,
-             ).launch()
+import os
+import spaces
 import gradio as gr
+import torch
+import torchaudio
+import librosa
+from modules.commons import build_model, load_checkpoint, recursive_munch
+import yaml
+from hf_utils import load_custom_model_from_hf
+import numpy as np
+from pydub import AudioSegment

+# Load model and configuration
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+dit_checkpoint_path, dit_config_path = load_custom_model_from_hf("Plachta/Seed-VC",
+                                                                 "DiT_seed_v2_uvit_whisper_small_wavenet_bigvgan_pruned.pth",
+                                                                 "config_dit_mel_seed_uvit_whisper_small_wavenet.yml")
+# dit_checkpoint_path = "E:/DiT_epoch_00018_step_801000.pth"
+# dit_config_path = "configs/config_dit_mel_seed_uvit_whisper_small_encoder_wavenet.yml"
+config = yaml.safe_load(open(dit_config_path, 'r'))
+model_params = recursive_munch(config['model_params'])
+model = build_model(model_params, stage='DiT')
+hop_length = config['preprocess_params']['spect_params']['hop_length']
+sr = config['preprocess_params']['sr']
+
+# Load checkpoints
+model, _, _, _ = load_checkpoint(model, None, dit_checkpoint_path,
+                                 load_only_params=True, ignore_modules=[], is_distributed=False)
+for key in model:
+    model[key].eval()
+    model[key].to(device)
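+# Pre-allocate inference caches for the DiT flow-matching estimator (batch size 1, sequences up to 8192 positions).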
+model.cfm.estimator.setup_caches(max_batch_size=1, max_seq_length=8192)
+
+# Load additional modules
+from modules.campplus.DTDNN import CAMPPlus
+
+campplus_ckpt_path = load_custom_model_from_hf("funasr/campplus", "campplus_cn_common.bin", config_filename=None)
+campplus_model = CAMPPlus(feat_dim=80, embedding_size=192)
+campplus_model.load_state_dict(torch.load(campplus_ckpt_path, map_location="cpu"))
+campplus_model.eval()
+campplus_model.to(device)
+
+from modules.bigvgan import bigvgan
+
+bigvgan_model = bigvgan.BigVGAN.from_pretrained('nvidia/bigvgan_v2_22khz_80band_256x', use_cuda_kernel=False)
+
+# remove weight norm in the model and set to eval mode
+bigvgan_model.remove_weight_norm()
+bigvgan_model = bigvgan_model.eval().to(device)
+
+ckpt_path, config_path = load_custom_model_from_hf("Plachta/FAcodec", 'pytorch_model.bin', 'config.yml')
+
+codec_config = yaml.safe_load(open(config_path))
+codec_model_params = recursive_munch(codec_config['model_params'])
+codec_encoder = build_model(codec_model_params, stage="codec")
+
+ckpt_params = torch.load(ckpt_path, map_location="cpu")
+
+for key in codec_encoder:
+    codec_encoder[key].load_state_dict(ckpt_params[key], strict=False)
+_ = [codec_encoder[key].eval() for key in codec_encoder]
+_ = [codec_encoder[key].to(device) for key in codec_encoder]
+
+# whisper
+from transformers import AutoFeatureExtractor, WhisperModel
+
+whisper_name = model_params.speech_tokenizer.whisper_name if hasattr(model_params.speech_tokenizer,
+                                                                     'whisper_name') else "openai/whisper-small"
+whisper_model = WhisperModel.from_pretrained(whisper_name, torch_dtype=torch.float16).to(device)
+del whisper_model.decoder
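+# Only the Whisper encoder is needed here (it serves as the semantic content extractor), hence the decoder is discarded above to save memory.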
+whisper_feature_extractor = AutoFeatureExtractor.from_pretrained(whisper_name)
+
+# Generate mel spectrograms
+mel_fn_args = {
+    "n_fft": config['preprocess_params']['spect_params']['n_fft'],
+    "win_size": config['preprocess_params']['spect_params']['win_length'],
+    "hop_size": config['preprocess_params']['spect_params']['hop_length'],
+    "num_mels": config['preprocess_params']['spect_params']['n_mels'],
+    "sampling_rate": sr,
+    "fmin": 0,
+    "fmax": None,
+    "center": False
 }
+from modules.audio import mel_spectrogram
+
+to_mel = lambda x: mel_spectrogram(x, **mel_fn_args)
+
+# f0 conditioned model
+dit_checkpoint_path, dit_config_path = load_custom_model_from_hf("Plachta/Seed-VC",
+                                                                 "DiT_seed_v2_uvit_whisper_base_f0_44k_bigvgan_pruned_ft_ema.pth",
+                                                                 "config_dit_mel_seed_uvit_whisper_base_f0_44k.yml")
+
+config = yaml.safe_load(open(dit_config_path, 'r'))
+model_params = recursive_munch(config['model_params'])
+model_f0 = build_model(model_params, stage='DiT')
+hop_length = config['preprocess_params']['spect_params']['hop_length']
+sr = config['preprocess_params']['sr']
+
+# Load checkpoints
+model_f0, _, _, _ = load_checkpoint(model_f0, None, dit_checkpoint_path,
+                                    load_only_params=True, ignore_modules=[], is_distributed=False)
+for key in model_f0:
+    model_f0[key].eval()
+    model_f0[key].to(device)
+model_f0.cfm.estimator.setup_caches(max_batch_size=1, max_seq_length=8192)
+
+# f0 extractor
+from modules.rmvpe import RMVPE
+
+model_path = load_custom_model_from_hf("lj1995/VoiceConversionWebUI", "rmvpe.pt", None)
+rmvpe = RMVPE(model_path, is_half=False, device=device)
+
+mel_fn_args_f0 = {
+    "n_fft": config['preprocess_params']['spect_params']['n_fft'],
+    "win_size": config['preprocess_params']['spect_params']['win_length'],
+    "hop_size": config['preprocess_params']['spect_params']['hop_length'],
+    "num_mels": config['preprocess_params']['spect_params']['n_mels'],
+    "sampling_rate": sr,
+    "fmin": 0,
+    "fmax": None,
+    "center": False
+}
+to_mel_f0 = lambda x: mel_spectrogram(x, **mel_fn_args_f0)
+bigvgan_44k_model = bigvgan.BigVGAN.from_pretrained('nvidia/bigvgan_v2_44khz_128band_512x', use_cuda_kernel=False)
+
+# remove weight norm in the model and set to eval mode
+bigvgan_44k_model.remove_weight_norm()
+bigvgan_44k_model = bigvgan_44k_model.eval().to(device)
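+# The base model vocodes at 22.05 kHz; when F0 conditioning is enabled, this 44.1 kHz BigVGAN is used instead.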
+
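+# Convert a shift in semitones to a multiplicative F0 factor: n semitones scale F0 by 2**(n/12).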
+def adjust_f0_semitones(f0_sequence, n_semitones):
+    factor = 2 ** (n_semitones / 12)
+    return f0_sequence * factor
+
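+# Equal-gain crossfade: the squared-cosine fade-in and fade-out windows sum to one across the overlap, so chunk boundaries blend smoothly.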
+def crossfade(chunk1, chunk2, overlap):
+    fade_out = np.cos(np.linspace(0, np.pi / 2, overlap)) ** 2
+    fade_in = np.cos(np.linspace(np.pi / 2, 0, overlap)) ** 2
+    chunk2[:overlap] = chunk2[:overlap] * fade_in + chunk1[-overlap:] * fade_out
+    return chunk2
+
+# streaming and chunk processing related params
+bitrate = "320k"
+overlap_frame_len = 16
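+# 16 overlapping mel frames (about 0.19 s at either output rate) are crossfaded between consecutive streamed chunks.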
+@spaces.GPU
+@torch.no_grad()
+@torch.inference_mode()
+def voice_conversion(source, target, diffusion_steps, length_adjust, inference_cfg_rate, f0_condition, auto_f0_adjust, pitch_shift):
+    inference_module = model if not f0_condition else model_f0
+    mel_fn = to_mel if not f0_condition else to_mel_f0
+    bigvgan_fn = bigvgan_model if not f0_condition else bigvgan_44k_model
+    sr = 22050 if not f0_condition else 44100
+    hop_length = 256 if not f0_condition else 512
+    max_context_window = sr // hop_length * 30
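+    # i.e. the number of mel frames covering 30 seconds of audio, the model's maximum context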
+    overlap_wave_len = overlap_frame_len * hop_length
+    # Load audio
+    source_audio = librosa.load(source, sr=sr)[0]
+    ref_audio = librosa.load(target, sr=sr)[0]
+
+    # Process audio
+    source_audio = torch.tensor(source_audio).unsqueeze(0).float().to(device)
+    ref_audio = torch.tensor(ref_audio[:sr * 25]).unsqueeze(0).float().to(device)
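+    # the reference (target speaker) audio is truncated to at most 25 seconds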
+
+    # Resample
+    ref_waves_16k = torchaudio.functional.resample(ref_audio, sr, 16000)
+    converted_waves_16k = torchaudio.functional.resample(source_audio, sr, 16000)
+    # if source audio less than 30 seconds, whisper can handle in one forward
+    if converted_waves_16k.size(-1) <= 16000 * 30:
+        alt_inputs = whisper_feature_extractor([converted_waves_16k.squeeze(0).cpu().numpy()],
+                                               return_tensors="pt",
+                                               return_attention_mask=True,
+                                               sampling_rate=16000)
+        alt_input_features = whisper_model._mask_input_features(
+            alt_inputs.input_features, attention_mask=alt_inputs.attention_mask).to(device)
+        alt_outputs = whisper_model.encoder(
+            alt_input_features.to(whisper_model.encoder.dtype),
+            head_mask=None,
+            output_attentions=False,
+            output_hidden_states=False,
+            return_dict=True,
+        )
+        S_alt = alt_outputs.last_hidden_state.to(torch.float32)
+        S_alt = S_alt[:, :converted_waves_16k.size(-1) // 320 + 1]
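+        # the Whisper encoder emits one frame per 320 input samples (50 frames/s at 16 kHz), so trim the padded frames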
+    else:
+        overlapping_time = 5  # 5 seconds
+        S_alt_list = []
+        buffer = None
+        traversed_time = 0
+        while traversed_time < converted_waves_16k.size(-1):
+            if buffer is None:  # first chunk
+                chunk = converted_waves_16k[:, traversed_time:traversed_time + 16000 * 30]
+            else:
+                chunk = torch.cat([buffer, converted_waves_16k[:, traversed_time:traversed_time + 16000 * (30 - overlapping_time)]], dim=-1)
+            alt_inputs = whisper_feature_extractor([chunk.squeeze(0).cpu().numpy()],
+                                                   return_tensors="pt",
+                                                   return_attention_mask=True,
+                                                   sampling_rate=16000)
+            alt_input_features = whisper_model._mask_input_features(
+                alt_inputs.input_features, attention_mask=alt_inputs.attention_mask).to(device)
+            alt_outputs = whisper_model.encoder(
+                alt_input_features.to(whisper_model.encoder.dtype),
+                head_mask=None,
+                output_attentions=False,
+                output_hidden_states=False,
+                return_dict=True,
+            )
+            S_alt = alt_outputs.last_hidden_state.to(torch.float32)
+            S_alt = S_alt[:, :chunk.size(-1) // 320 + 1]
+            if traversed_time == 0:
+                S_alt_list.append(S_alt)
+            else:
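+                # at 50 encoder frames per second, skip the frames covering the 5-second overlap already included in the previous chunk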
+                S_alt_list.append(S_alt[:, 50 * overlapping_time:])
+            buffer = chunk[:, -16000 * overlapping_time:]
+            traversed_time += 30 * 16000 if traversed_time == 0 else chunk.size(-1) - 16000 * overlapping_time
+        S_alt = torch.cat(S_alt_list, dim=1)
+
+    ori_waves_16k = torchaudio.functional.resample(ref_audio, sr, 16000)
+    ori_inputs = whisper_feature_extractor([ori_waves_16k.squeeze(0).cpu().numpy()],
+                                           return_tensors="pt",
+                                           return_attention_mask=True)
+    ori_input_features = whisper_model._mask_input_features(
+        ori_inputs.input_features, attention_mask=ori_inputs.attention_mask).to(device)
+    with torch.no_grad():
+        ori_outputs = whisper_model.encoder(
+            ori_input_features.to(whisper_model.encoder.dtype),
+            head_mask=None,
+            output_attentions=False,
+            output_hidden_states=False,
+            return_dict=True,
+        )
+    S_ori = ori_outputs.last_hidden_state.to(torch.float32)
+    S_ori = S_ori[:, :ori_waves_16k.size(-1) // 320 + 1]

+    mel = mel_fn(source_audio.to(device).float())
+    mel2 = mel_fn(ref_audio.to(device).float())

+    target_lengths = torch.LongTensor([int(mel.size(2) * length_adjust)]).to(mel.device)
+    target2_lengths = torch.LongTensor([mel2.size(2)]).to(mel2.device)

+    feat2 = torchaudio.compliance.kaldi.fbank(ref_waves_16k,
+                                              num_mel_bins=80,
+                                              dither=0,
+                                              sample_frequency=16000)
+    feat2 = feat2 - feat2.mean(dim=0, keepdim=True)
+    style2 = campplus_model(feat2.unsqueeze(0))
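+    # CAMP++ turns the reference fbank features into a 192-dimensional speaker embedding, used as the global style condition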
+
+    if f0_condition:
+        F0_ori = rmvpe.infer_from_audio(ref_waves_16k[0], thred=0.5)
+        F0_alt = rmvpe.infer_from_audio(converted_waves_16k[0], thred=0.5)
+
+        F0_ori = torch.from_numpy(F0_ori).to(device)[None]
+        F0_alt = torch.from_numpy(F0_alt).to(device)[None]
+
+        voiced_F0_ori = F0_ori[F0_ori > 1]
+        voiced_F0_alt = F0_alt[F0_alt > 1]
+
+        log_f0_alt = torch.log(F0_alt + 1e-5)
+        voiced_log_f0_ori = torch.log(voiced_F0_ori + 1e-5)
+        voiced_log_f0_alt = torch.log(voiced_F0_alt + 1e-5)
+        median_log_f0_ori = torch.median(voiced_log_f0_ori)
+        median_log_f0_alt = torch.median(voiced_log_f0_alt)
+
+        # shift alt log f0 level to ori log f0 level
+        shifted_log_f0_alt = log_f0_alt.clone()
+        if auto_f0_adjust:
+            shifted_log_f0_alt[F0_alt > 1] = log_f0_alt[F0_alt > 1] - median_log_f0_alt + median_log_f0_ori
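+            # matching medians in log-F0 space transposes the source pitch into the reference speaker's range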
+        shifted_f0_alt = torch.exp(shifted_log_f0_alt)
+        if pitch_shift != 0:
+            shifted_f0_alt[F0_alt > 1] = adjust_f0_semitones(shifted_f0_alt[F0_alt > 1], pitch_shift)
+    else:
+        F0_ori = None
+        F0_alt = None
+        shifted_f0_alt = None
+
+    # Length regulation
+    cond, _, codes, commitment_loss, codebook_loss = inference_module.length_regulator(S_alt, ylens=target_lengths, n_quantizers=3, f0=shifted_f0_alt)
+    prompt_condition, _, codes, commitment_loss, codebook_loss = inference_module.length_regulator(S_ori, ylens=target2_lengths, n_quantizers=3, f0=F0_ori)
+
+    max_source_window = max_context_window - mel2.size(2)
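+    # reserve room for the reference mel inside the 30-second context window; the remainder is available for source frames per chunk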
+    # split source condition (cond) into chunks
+    processed_frames = 0
+    generated_wave_chunks = []
+    # generate chunk by chunk and stream the output
+    while processed_frames < cond.size(1):
+        chunk_cond = cond[:, processed_frames:processed_frames + max_source_window]
+        is_last_chunk = processed_frames + max_source_window >= cond.size(1)
+        cat_condition = torch.cat([prompt_condition, chunk_cond], dim=1)
+        with torch.autocast(device_type='cuda', dtype=torch.float16):
+            # Voice Conversion
+            vc_target = inference_module.cfm.inference(cat_condition,
+                                                       torch.LongTensor([cat_condition.size(1)]).to(mel2.device),
+                                                       mel2, style2, None, diffusion_steps,
+                                                       inference_cfg_rate=inference_cfg_rate)
+            vc_target = vc_target[:, :, mel2.size(-1):]
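+            # drop the frames belonging to the reference prompt, keeping only the newly converted part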
+        vc_wave = bigvgan_fn(vc_target.float())[0]
+        if processed_frames == 0:
+            if is_last_chunk:
+                output_wave = vc_wave[0].cpu().numpy()
+                generated_wave_chunks.append(output_wave)
+                output_wave = (output_wave * 32768.0).astype(np.int16)
+                mp3_bytes = AudioSegment(
+                    output_wave.tobytes(), frame_rate=sr,
+                    sample_width=output_wave.dtype.itemsize, channels=1
+                ).export(format="mp3", bitrate=bitrate).read()
+                yield mp3_bytes, (sr, np.concatenate(generated_wave_chunks))
+                break
+            output_wave = vc_wave[0, :-overlap_wave_len].cpu().numpy()
+            generated_wave_chunks.append(output_wave)
+            previous_chunk = vc_wave[0, -overlap_wave_len:]
+            processed_frames += vc_target.size(2) - overlap_frame_len
+            output_wave = (output_wave * 32768.0).astype(np.int16)
+            mp3_bytes = AudioSegment(
+                output_wave.tobytes(), frame_rate=sr,
+                sample_width=output_wave.dtype.itemsize, channels=1
+            ).export(format="mp3", bitrate=bitrate).read()
+            yield mp3_bytes, None
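+            # intermediate chunks yield only MP3 bytes for the streaming player; the final chunk also yields the complete waveform for the non-streaming output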
+        elif is_last_chunk:
+            output_wave = crossfade(previous_chunk.cpu().numpy(), vc_wave[0].cpu().numpy(), overlap_wave_len)
+            generated_wave_chunks.append(output_wave)
+            processed_frames += vc_target.size(2) - overlap_frame_len
+            output_wave = (output_wave * 32768.0).astype(np.int16)
+            mp3_bytes = AudioSegment(
+                output_wave.tobytes(), frame_rate=sr,
+                sample_width=output_wave.dtype.itemsize, channels=1
+            ).export(format="mp3", bitrate=bitrate).read()
+            yield mp3_bytes, (sr, np.concatenate(generated_wave_chunks))
+            break
+        else:
+            output_wave = crossfade(previous_chunk.cpu().numpy(), vc_wave[0, :-overlap_wave_len].cpu().numpy(), overlap_wave_len)
+            generated_wave_chunks.append(output_wave)
+            previous_chunk = vc_wave[0, -overlap_wave_len:]
+            processed_frames += vc_target.size(2) - overlap_frame_len
+            output_wave = (output_wave * 32768.0).astype(np.int16)
+            mp3_bytes = AudioSegment(
+                output_wave.tobytes(), frame_rate=sr,
+                sample_width=output_wave.dtype.itemsize, channels=1
+            ).export(format="mp3", bitrate=bitrate).read()
+            yield mp3_bytes, None
+
+
+import gradio as gr
+from g4f.client import Client
+import markdown2  # richer Markdown support
+import base64
+from io import BytesIO
+import json
+
+client = Client()
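+# g4f's Client mimics the OpenAI client interface (chat.completions.create) and is used here without an API key.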
+
+def format_output(text):
+    """
+    Formatting helper so that ChatGPT-style Markdown is rendered properly.
+    """
+    return markdown2.markdown(text, extras=[
+        "fenced-code-blocks",
+        "tables",
+        "task_list",
+        "strike",
+        "spoiler",
+        "markdown-in-html"
+    ])
+
+def image_to_data_url(image):
+    """
+    Convert an image to a Base64 data URL.
+    """
+    buffered = BytesIO()
+    image.save(buffered, format="PNG")
+    img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
+    return f"data:image/png;base64,{img_str}"
+
+def respond(message, history, system_message, max_tokens, temperature, top_p, model_choice, web_search, image=None):
+    # Put the system message first
+    messages = [{"role": "system", "content": system_message}]
+
+    # Add the conversation history so far
+    for user_msg, assistant_msg in history:
+        if user_msg:
+            messages.append({"role": "user", "content": user_msg})
+        if assistant_msg:
+            messages.append({"role": "assistant", "content": assistant_msg})
+
+    # Add the current user message
+    if image:
+        message += f"\n![image]({image})"  # append the image (its data URL) as Markdown
+    messages.append({"role": "user", "content": message})
+
+    # API call
+    response = client.chat.completions.create(
+        model=model_choice,
+        messages=messages,
+        max_tokens=max_tokens,
+        temperature=temperature,
+        top_p=top_p,
+        web_search=web_search
+    )
+
+    formatted_response = format_output(response.choices[0].message.content)
+    return formatted_response
+
+def chat(message, history, system_message, max_tokens, temperature, top_p, model_choice, web_search, image):
+    if message.strip() == "" and not image:
+        return "", history, history
+    if image:
+        image_data_url = image_to_data_url(image)
+    else:
+        image_data_url = None
+
+    print("メッセージ送信直後の履歴:")
+    print(json.dumps(history, ensure_ascii=False, indent=2))  # dump the history as JSON before sending the message
+
+    response = respond(message, history, system_message, max_tokens, temperature, top_p, model_choice, web_search, image_data_url)
+    history = history + [(message, response)]
+
+    print("AIの回答直後の履歴:")
+    print(json.dumps(history, ensure_ascii=False, indent=2))  # dump the history as JSON after the AI's reply
+
+    # Clear the input box and return the updated chat history and state
+    return "", history, history
+
+with gr.Blocks() as demo:
+    with gr.Row():
+        # Left column: input box and the various options
+        with gr.Column():
+            # Options (system message and generation parameters)
+            system_message = gr.Textbox(
+                value="あなたは日本語しか話せません。あなたは最新の医療支援AIです。薬の紹介、薬の提案、薬の作成など、さまざまなタスクに答えます。また、新しい薬を開発する際は、既存のものに頼らずに画期的なアイデアを出します。",
+                label="システムメッセージ"
+            )
+            max_tokens = gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="トークン制限")
+            temperature = gr.Slider(minimum=0.1, maximum=4.0, value=2, step=0.1, label="Temperature (数値が大きいほど様々な回答をします。)")
+            top_p = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling) (数値が低いと回答候補が上位のみになります。)")
+            model_choice = gr.Radio(choices=["gpt-4o-mini", "o3-mini"], value="gpt-4o-mini", label="モデル選択")
+            web_search = gr.Checkbox(value=True, label="WEB検索")
+
+            # Chat input box and send button
+            chatbot_input = gr.Textbox(show_label=False, placeholder="ここにメッセージを入力してください...", lines=2)
+            image_input = gr.Image(type="pil", label="画像をアップロード", visible=False)  # image upload
+            submit_btn = gr.Button("送信")
+
+        # Right column: display the chat history
+        with gr.Column():
+            chat_history_display = gr.Chatbot(label="チャット履歴")
+
+    # State component that holds the conversation state (history)
+    state = gr.State([])
+
+    # Behaviour when the send button is clicked
+    submit_btn.click(
+        chat,
+        inputs=[chatbot_input, state, system_message, max_tokens, temperature, top_p, model_choice, web_search, image_input],
+        outputs=[chatbot_input, chat_history_display, state]
+    )
+
+    # Also submit when the Enter key is pressed
+    chatbot_input.submit(
+        chat,
+        inputs=[chatbot_input, state, system_message, max_tokens, temperature, top_p, model_choice, web_search, image_input],
+        outputs=[chatbot_input, chat_history_display, state]
 )

+demo.launch()