import os
import torch
import shutil
import librosa
import warnings
import numpy as np
import gradio as gr
import librosa.display
import matplotlib.pyplot as plt
import torchvision.transforms as transforms
from collections import Counter
from PIL import Image
from tqdm import tqdm
from model import net, MODEL_DIR

MODEL = net()  # classification network defined in model.py
# Map model class labels to display names; the insertion order must match the
# index order of the model's output logits, since predictions are decoded
# via CLASSES[pred_id] below.
TRANS = {
    "PearlRiver": "Pearl River",
    "YoungChang": "YOUNG CHANG",
    "Steinway-T": "STEINWAY Theater",
    "Hsinghai": "HSINGHAI",
    "Kawai": "KAWAI",
    "Steinway": "STEINWAY",
    "Kawai-G": "KAWAI Grand",
    "Yamaha": "YAMAHA",
}
CLASSES = list(TRANS.keys())
CACHE_DIR = "./__pycache__/tmp"  # scratch dir for intermediate spectrogram jpgs
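# For example, an output index of 4 decodes as CLASSES[4] == "Kawai" and is
# displayed as TRANS["Kawai"] == "KAWAI".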


def most_common_element(input_list):
    # Majority vote: return the value that occurs most often in input_list
    counter = Counter(input_list)
    mce, _ = counter.most_common(1)[0]
    return mce
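
# Usage sketch (illustrative values, not app data):
# most_common_element([4, 2, 4, 4]) == 4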


def wav_to_mel(audio_path: str, width=0.18):
    # Render the log-mel spectrogram of a wav file and save it as a series of
    # fixed-width jpg chips under CACHE_DIR. Note that no silence trimming is
    # applied: the full signal is used.
    os.makedirs(CACHE_DIR, exist_ok=True)
    try:
        y, sr = librosa.load(audio_path, sr=48000)
        mel_spec = librosa.feature.melspectrogram(y=y, sr=sr)
        log_mel_spec = librosa.power_to_db(mel_spec, ref=np.max)
        dur = librosa.get_duration(y=y, sr=sr)
        total_frames = log_mel_spec.shape[1]
        # One chip spans `width` seconds, expressed in spectrogram frames;
        # guard against a zero step for very small width values
        step = max(1, int(width * total_frames / dur))
        count = total_frames // step
        # Center the chips so leftover frames are discarded evenly at both ends
        begin = int(0.5 * (total_frames - count * step))
        end = begin + step * count
        for i in tqdm(range(begin, end, step), desc="Converting wav to jpgs..."):
            librosa.display.specshow(log_mel_spec[:, i : i + step])
            plt.axis("off")
            plt.savefig(
                f"{CACHE_DIR}/{os.path.basename(audio_path)[:-4]}_{i}.jpg",
                bbox_inches="tight",
                pad_inches=0.0,
            )
            plt.close()
    except Exception as e:
        print(f"Error converting {audio_path} : {e}")
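
# Back-of-envelope check (assuming librosa's default hop_length of 512):
# at sr=48000 the spectrogram has about 48000 / 512 = 93.75 frames per second,
# so width=0.18 s yields step = int(0.18 * 93.75) = 16 frames per chip.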


def embed_img(img_path, input_size=224):
    # Resize to the network input size and normalize each channel to [-1, 1]
    transform = transforms.Compose(
        [
            transforms.Resize([input_size, input_size]),
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
        ]
    )
    img = Image.open(img_path).convert("RGB")
    return transform(img).unsqueeze(0)  # add a batch dimension: (1, 3, 224, 224)
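
# Normalize applies (x - 0.5) / 0.5 per channel, so ToTensor's [0, 1] range
# becomes [-1, 1]; e.g. a mid-gray pixel value of 0.5 maps to 0.0.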


def inference(wav_path, folder_path=CACHE_DIR):
    # Start from a clean cache so chips from a previous run cannot leak in
    if os.path.exists(folder_path):
        shutil.rmtree(folder_path)

    if not wav_path:
        return None, "Please input an audio!"

    wav_to_mel(wav_path)
    outputs = []
    for file_name in os.listdir(folder_path):
        if file_name.lower().endswith(".jpg"):
            file_path = os.path.join(folder_path, file_name)
            with torch.no_grad():  # no gradients needed at inference time
                output: torch.Tensor = MODEL(embed_img(file_path))
            # .item() turns the 1-element index tensor into a plain int so
            # that equal predictions compare equal inside the Counter
            pred_id = torch.max(output.data, 1)[1].item()
            outputs.append(pred_id)

    if not outputs:  # conversion produced no chips, so there is nothing to vote on
        shutil.rmtree(folder_path, ignore_errors=True)
        return os.path.basename(wav_path), "Failed to convert the audio!"

    # Majority vote across all chips decides the final label
    max_count_item = most_common_element(outputs)
    shutil.rmtree(folder_path)
    return os.path.basename(wav_path), TRANS[CLASSES[max_count_item]]
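
# Usage sketch (hypothetical local path; the Gradio app below passes the
# uploaded file's path):
# name, label = inference("Kawai.wav")  # e.g. -> ("Kawai.wav", "KAWAI")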


if __name__ == "__main__":
    warnings.filterwarnings("ignore")
    # One bundled example recording per piano class
    example_wavs = [f"{MODEL_DIR}/examples/{cls}.wav" for cls in CLASSES]
    with gr.Blocks() as demo:
        gr.Interface(
            fn=inference,
            inputs=gr.Audio(type="filepath", label="Upload a piano recording"),
            outputs=[
                gr.Textbox(label="Audio filename", show_copy_button=True),
                gr.Textbox(
                    label="Piano classification result",
                    show_copy_button=True,
                ),
            ],
            examples=example_wavs,
            cache_examples=False,
            allow_flagging="never",
            title="It is recommended to keep the recording around 3 s; overly long audio slows recognition down.",
        )
        gr.Markdown(
            """
# Cite
```bibtex
@inproceedings{zhou2023holistic,
    title        = {A Holistic Evaluation of Piano Sound Quality},
    author       = {Monan Zhou and Shangda Wu and Shaohua Ji and Zijin Li and Wei Li},
    booktitle    = {National Conference on Sound and Music Technology},
    pages        = {3--17},
    year         = {2023},
    organization = {Springer}
}
```"""
        )

    demo.launch()