admin committed
Commit b09dc83
1 Parent(s): f22c953

upl script

Files changed (3)
  1. .gitignore +1 -0
  2. Guzheng_Tech99.py +297 -0
  3. README.md +167 -1
.gitignore ADDED
@@ -0,0 +1 @@
+ rename.sh
Guzheng_Tech99.py ADDED
@@ -0,0 +1,297 @@
+ import os
+ import csv
+ import random
+ import datasets
+ import numpy as np
+ from glob import glob
+
+ _NAMES = {
+     "chanyin": 0,
+     "dianyin": 6,
+     "shanghua": 2,
+     "xiahua": 3,
+     "huazhi": 4,
+     "guazou": 4,
+     "lianmo": 4,
+     "liantuo": 4,
+     "yaozhi": 5,
+     "boxian": 1,
+ }
+
+ _NAME = [
+     "chanyin",
+     "boxian",
+     "shanghua",
+     "xiahua",
+     "huazhi/guazou/lianmo/liantuo",
+     "yaozhi",
+     "dianyin",
+ ]
+
+ _DBNAME = os.path.basename(__file__).split(".")[0]
+
+ _HOMEPAGE = f"https://www.modelscope.cn/datasets/ccmusic-database/{_DBNAME}"
+
+ _DOMAIN = f"https://www.modelscope.cn/api/v1/datasets/ccmusic-database/{_DBNAME}/repo?Revision=master&FilePath=data"
+
+ _URLS = {
+     "audio": f"{_DOMAIN}/audio.zip",
+     "mel": f"{_DOMAIN}/mel.zip",
+     "label": f"{_DOMAIN}/label.zip",
+ }
+
+ _TIME_LENGTH = 3  # seconds
+ _SAMPLE_RATE = 44100
+ _HOP_LENGTH = 512  # SAMPLE_RATE * frame_length_in_ms // 1000
+
+
+ class Guzheng_Tech99(datasets.GeneratorBasedBuilder):
+     def _info(self):
+         return datasets.DatasetInfo(
+             features=(
+                 datasets.Features(
+                     {
+                         "audio": datasets.Audio(sampling_rate=44100),
+                         "mel": datasets.Image(),
+                         "label": datasets.Sequence(
+                             feature={
+                                 "onset_time": datasets.Value("float32"),
+                                 "offset_time": datasets.Value("float32"),
+                                 "IPT": datasets.ClassLabel(num_classes=7, names=_NAME),
+                                 "note": datasets.Value("int8"),
+                             }
+                         ),
+                     }
+                 )
+                 if self.config.name == "default"
+                 else datasets.Features(
+                     {
+                         "data": datasets.features.Array3D(
+                             dtype="float32", shape=(88, 258, 1)
+                         ),
+                         "label": datasets.features.Array2D(
+                             dtype="float32", shape=(7, 258)
+                         ),
+                     }
+                 )
+             ),
+             homepage=_HOMEPAGE,
+             license="CC-BY-NC-ND",
+             version="1.2.0",
+         )
+
+     def _RoW_norm(self, data):
+         # Row-wise stats: per-CQT-bin mean and std over all non-silent frames
+         common_sum = 0
+         square_sum = 0
+         tfle = 0
+         for i in range(len(data)):
+             tfle += (data[i].sum(-1).sum(0) != 0).astype("int").sum()
+             common_sum += data[i].sum(-1).sum(-1)
+             square_sum += (data[i] ** 2).sum(-1).sum(-1)
+
+         common_avg = common_sum / tfle
+         square_avg = square_sum / tfle
+         std = np.sqrt(square_avg - common_avg**2)
+         return common_avg, std
+
+     def _norm(self, avg, std, data, size):
+         # Broadcast the per-bin statistics to the input shape and standardize
+         avg = np.tile(avg.reshape((1, -1, 1, 1)), (size[0], 1, size[2], size[3]))
+         std = np.tile(std.reshape((1, -1, 1, 1)), (size[0], 1, size[2], size[3]))
+         data = (data - avg) / std
+         return data
+
+     def _load(self, wav_dir, csv_dir, groups, avg=None, std=None):
+         # Return a list of (audio path, corresponding csv path) pairs
+         if std is None:
+             std = np.array([None])
+
+         if avg is None:
+             avg = np.array([None])
+
+         def files(wav_dir, csv_dir, group):
+             flacs = sorted(glob(os.path.join(wav_dir, group, "*.flac")))
+             if len(flacs) == 0:
+                 flacs = sorted(glob(os.path.join(wav_dir, group, "*.wav")))
+
+             csvs = sorted(glob(os.path.join(csv_dir, group, "*.csv")))
+             files = list(zip(flacs, csvs))
+             if len(files) == 0:
+                 raise RuntimeError(f"Group {group} is empty")
+
+             result = []
+             for audio_path, csv_path in files:
+                 result.append((audio_path, csv_path))
+
+             return result
+
+         # Returns the log-scaled CQT of the input audio
+         def logCQT(file):
+             import librosa
+
+             sr = _SAMPLE_RATE
+             y, sr = librosa.load(file, sr=sr)
+             # Frame length is 32 ms (1000 ms / (16000 / 512) = 32 ms); the frequency of D2 is 73.418 Hz
+             cqt = librosa.cqt(
+                 y,
+                 sr=sr,
+                 hop_length=_HOP_LENGTH,
+                 fmin=27.5,
+                 n_bins=88,
+                 bins_per_octave=12,
+             )
+             return (
+                 (1.0 / 80.0) * librosa.core.amplitude_to_db(np.abs(cqt), ref=np.max)
+             ) + 1.0
+
+         def chunk_data(f):
+             # Split a (bins, frames) matrix into 3-second chunks of s frames each,
+             # zero-padding the tail so every chunk has the same length
+             s = int(_SAMPLE_RATE * _TIME_LENGTH / _HOP_LENGTH)
+             xdata = np.transpose(f)
+             x = []
+             length = int(np.ceil((int(len(xdata) / s) + 1) * s))
+             app = np.zeros((length - xdata.shape[0], xdata.shape[1]))
+             xdata = np.concatenate((xdata, app), 0)
+             for i in range(int(length / s)):
+                 data = xdata[int(i * s) : int(i * s + s)]
+                 x.append(np.transpose(data[:s, :]))
+
+             return np.array(x)
+
+         def load_all(audio_path, csv_path):
+             # Load audio features: the CQT has shape (88, n_frames), e.g. (88, 8520),
+             # where the second axis is the number of frames on the time axis
+             cqt = logCQT(audio_path)
+             # Load the ground-truth label
+             hop = _HOP_LENGTH
+             n_steps = cqt.shape[1]
+             n_IPTs = 7
+             technique = _NAMES
+             IPT_label = np.zeros([n_IPTs, n_steps], dtype=int)
+             with open(csv_path, "r") as f:  # csv file for each audio
+                 reader = csv.DictReader(f, delimiter=",")
+                 for label in reader:  # each note
+                     onset = float(label["onset_time"])
+                     offset = float(label["offset_time"])
+                     IPT = int(technique[label["IPT"]])
+                     left = int(round(onset * _SAMPLE_RATE / hop))
+                     frame_right = int(round(offset * _SAMPLE_RATE / hop))
+                     frame_right = min(n_steps, frame_right)
+                     IPT_label[IPT, left:frame_right] = 1
+
+             return dict(
+                 audio_path=audio_path, csv_path=csv_path, cqt=cqt, IPT_label=IPT_label
+             )
+
+         data = []
+         # print(f"Loading {len(groups)} group{'s' if len(groups) > 1 else ''} ")
+         for group in groups:
+             for input_files in files(wav_dir, csv_dir, group):
+                 data.append(load_all(*input_files))
+
+         i = 0
+         for dic in data:
+             x = dic["cqt"]
+             x = chunk_data(x)
+             y_i = dic["IPT_label"]
+             y_i = chunk_data(y_i)
+             if i == 0:
+                 Xtr = x
+                 Ytr_i = y_i
+                 i += 1
+
+             else:
+                 Xtr = np.concatenate([Xtr, x], axis=0)
+                 Ytr_i = np.concatenate([Ytr_i, y_i], axis=0)
+
+         # Transform the shape of the input
+         Xtr = np.expand_dims(Xtr, axis=3)
+         # Compute the mean and standard deviation of the input if not provided
+         if avg[0] is None and std[0] is None:
+             avg, std = self._RoW_norm(Xtr)
+         # Normalize
+         Xtr = self._norm(avg, std, Xtr, Xtr.shape)
+         return list(Xtr), list(Ytr_i)
+
+     def _parse_csv_label(self, csv_file):
+         label = []
+         with open(csv_file, mode="r", encoding="utf-8") as file:
+             for row in csv.DictReader(file):
+                 label.append(
+                     {
+                         "onset_time": float(row["onset_time"]),
+                         "offset_time": float(row["offset_time"]),
+                         "IPT": _NAME[_NAMES[row["IPT"]]],
+                         "note": int(row["note"]),
+                     }
+                 )
+
+         return label
+
+     def _split_generators(self, dl_manager):
+         audio_files = dl_manager.download_and_extract(_URLS["audio"])
+         csv_files = dl_manager.download_and_extract(_URLS["label"])
+         trainset, validset, testset = [], [], []
+         if self.config.name == "default":
+             files = {}
+             mel_files = dl_manager.download_and_extract(_URLS["mel"])
+             for path in dl_manager.iter_files([audio_files]):
+                 fname: str = os.path.basename(path)
+                 if fname.endswith(".flac"):
+                     item_id = fname.split(".")[0]
+                     files[item_id] = {"audio": path}
+
+             for path in dl_manager.iter_files([mel_files]):
+                 fname = os.path.basename(path)
+                 if fname.endswith(".jpg"):
+                     item_id = fname.split(".")[0]
+                     files[item_id]["mel"] = path
+
+             for path in dl_manager.iter_files([csv_files]):
+                 fname = os.path.basename(path)
+                 if fname.endswith(".csv"):
+                     item_id = fname.split(".")[0]
+                     files[item_id]["label"] = self._parse_csv_label(path)
+
+             for item in files.values():
+                 if "train" in item["audio"]:
+                     trainset.append(item)
+
+                 elif "validation" in item["audio"]:
+                     validset.append(item)
+
+                 elif "test" in item["audio"]:
+                     testset.append(item)
+
+         else:
+             audio_dir = os.path.join(audio_files, "audio")
+             csv_dir = os.path.join(csv_files, "label")
+             X_train, Y_train = self._load(audio_dir, csv_dir, ["train"])
+             X_valid, Y_valid = self._load(audio_dir, csv_dir, ["validation"])
+             X_test, Y_test = self._load(audio_dir, csv_dir, ["test"])
+
+             for i in range(len(X_train)):
+                 trainset.append({"data": X_train[i], "label": Y_train[i]})
+
+             for i in range(len(X_valid)):
+                 validset.append({"data": X_valid[i], "label": Y_valid[i]})
+
+             for i in range(len(X_test)):
+                 testset.append({"data": X_test[i], "label": Y_test[i]})
+
+         random.shuffle(trainset)
+         random.shuffle(validset)
+         random.shuffle(testset)
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN, gen_kwargs={"files": trainset}
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION, gen_kwargs={"files": validset}
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST, gen_kwargs={"files": testset}
+             ),
+         ]
+
+     def _generate_examples(self, files):
+         for i, path in enumerate(files):
+             yield i, path
README.md CHANGED
@@ -1,3 +1,169 @@
  ---
- license: mit
+ license: cc-by-nc-nd-4.0
+ task_categories:
+ - audio-classification
+ language:
+ - zh
+ - en
+ tags:
+ - music
+ - art
+ pretty_name: Guzheng Technique 99 Dataset
+ size_categories:
+ - n<1K
+ viewer: false
  ---
+
+ # Dataset Card for Guzheng Technique 99 Dataset
+ The raw dataset, sourced from [Guzheng_Tech99](https://ccmusic-database.github.io/en/database/csmtd.html#Tech99), comprises 99 guzheng solo compositions recorded by professional musicians in a studio, with a cumulative duration of 9,064.6 seconds. Every note of each composition is annotated with its onset, offset, pitch, and playing technique, the techniques being chanyin, boxian, shanghua, xiahua, huazhi/guazou/lianmo/liantuo, yaozhi, and dianyin. This annotation effort yielded a total of 63,352 labels across the dataset.
+
+ Based on this raw data, we performed data processing and constructed the `default subset` of the current integrated version of the dataset; the details of its data structure can be viewed through the [viewer](https://www.modelscope.cn/datasets/ccmusic-database/Guzheng_Tech99/dataPeview).
+
+ Because the dataset has been referenced and evaluated in a published article, we reproduce here the preprocessing used in that article's evaluation: each audio clip is a 3-second segment sampled at 44,100 Hz, converted into a log Constant-Q Transform (CQT) spectrogram. A CQT paired with a label constitutes a single data entry, forming the first and second columns, respectively. The CQT is a 3-dimensional array of shape 88 × 258 × 1, representing the frequency-time structure of the audio; the label is a 2-dimensional array of shape 7 × 258, indicating the presence of the seven techniques in each time frame. Finally, since the raw dataset is already divided into train, validation, and test sets, we integrated this feature extraction method into the API, thereby constructing the `eval subset`.
+
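+ As a rough sketch of that pipeline (not the authoritative implementation; it assumes `librosa` is installed, and `song.flac` is a hypothetical file name), mirroring the feature extraction in `Guzheng_Tech99.py` above:
+
+ ```python
+ import librosa
+ import numpy as np
+
+ SR, HOP, SECS = 44100, 512, 3
+ FRAMES = SR * SECS // HOP  # 258 frames per 3-second chunk
+
+ y, _ = librosa.load("song.flac", sr=SR)  # hypothetical input file
+ cqt = librosa.cqt(y, sr=SR, hop_length=HOP, fmin=27.5, n_bins=88, bins_per_octave=12)
+ log_cqt = librosa.amplitude_to_db(np.abs(cqt), ref=np.max) / 80.0 + 1.0  # (88, n_frames)
+
+ # Zero-pad the time axis, then split into (88, 258, 1) chunks (cf. chunk_data above)
+ n_chunks = int(np.ceil(log_cqt.shape[1] / FRAMES))
+ padded = np.pad(log_cqt, ((0, 0), (0, n_chunks * FRAMES - log_cqt.shape[1])))
+ chunks = padded.reshape(88, n_chunks, FRAMES).transpose(1, 0, 2)[..., np.newaxis]
+ print(chunks.shape)  # (n_chunks, 88, 258, 1)
+ ```
+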
+ ## Viewer
+ <https://www.modelscope.cn/datasets/ccmusic-database/Guzheng_Tech99/dataPeview>
+
+ ## Dataset Structure
+ ### Default Subset
+ <style>
+     .datastructure td {
+         vertical-align: middle !important;
+         text-align: center;
+     }
+     .datastructure th {
+         text-align: center;
+     }
+ </style>
+ <table class="datastructure">
+     <tr>
+         <th>audio</th>
+         <th>mel</th>
+         <th>label</th>
+     </tr>
+     <tr>
+         <td>.flac, 44100Hz</td>
+         <td>.jpg, 44100Hz</td>
+         <td>{onset_time: float32, offset_time: float32, IPT: 7-class, note: int8}</td>
+     </tr>
+     <tr>
+         <td>...</td>
+         <td>...</td>
+         <td>...</td>
+     </tr>
+ </table>
+
+ ### Eval Subset
+ | data (log-CQT spectrogram) | label |
+ | :------------------------: | :--------------: |
+ | float32, 88 x 258 x 1 | float32, 7 x 258 |
+ | ... | ... |
+
+ ### Data Instances
+ .zip(.flac, .csv)
+
+ ### Data Fields
+ The dataset comprises 99 guzheng solo compositions recorded by professionals in a studio, totaling 9,064.6 seconds. Every note is labeled with its onset, offset, and pitch, plus one of seven playing techniques (vibrato, plucks, upward portamento, downward portamento, glissando, tremolo, and point note), resulting in 63,352 annotated labels. The dataset is divided into 79, 10, and 10 songs for the training, validation, and test sets, respectively.
+
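+ For reference, the loading script above collapses the ten annotated technique names into these seven classes (four glissando-type names share one class); a minimal sketch of the mapping, mirroring `_NAMES`/`_NAME` in `Guzheng_Tech99.py`:
+
+ ```python
+ # 10 annotated names -> 7 IPT classes (huazhi/guazou/lianmo/liantuo share class 4)
+ NAMES = {"chanyin": 0, "boxian": 1, "shanghua": 2, "xiahua": 3,
+          "huazhi": 4, "guazou": 4, "lianmo": 4, "liantuo": 4,
+          "yaozhi": 5, "dianyin": 6}
+ CLASSES = ["chanyin", "boxian", "shanghua", "xiahua",
+            "huazhi/guazou/lianmo/liantuo", "yaozhi", "dianyin"]
+ print(CLASSES[NAMES["guazou"]])  # huazhi/guazou/lianmo/liantuo
+ ```
+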
+ ### Data Splits
+ train, validation, test
+
+ ## Dataset Description
+ - **Homepage:** <https://ccmusic-database.github.io>
+ - **Repository:** <https://huggingface.co/datasets/ccmusic-database/Guzheng_Tech99>
+ - **Paper:** <https://doi.org/10.5281/zenodo.5676893>
+ - **Leaderboard:** <https://www.modelscope.cn/datasets/ccmusic-database/Guzheng_Tech99>
+ - **Point of Contact:** <https://github.com/LiDCC/GuzhengTech99/tree/windows>
+
+ ### Dataset Summary
+ The integrated version provides both the original content and the spectrograms generated in the experimental part of the paper cited above. For the latter, the preprocessing in the paper is replicated: each audio clip is a 3-second segment sampled at 44,100 Hz and converted into a log Constant-Q Transform (CQT) spectrogram. A CQT paired with a label constitutes a single data entry, forming the first and second columns, respectively. The CQT is a 3-dimensional array of shape 88 × 258 × 1, representing the frequency-time structure of the audio, while the label is a 2-dimensional array of shape 7 × 258, indicating the presence of the seven techniques in each time frame. Since the raw dataset is already split into train, validation, and test sets, the integrated dataset maintains the same split. This dataset can be used for frame-level guzheng playing technique detection.
+
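+ To make the frame-level label construction concrete, here is a hedged sketch mirroring `load_all` in the loading script; the onset/offset values below are invented for illustration:
+
+ ```python
+ import numpy as np
+
+ SR, HOP, N_CLASSES, N_FRAMES = 44100, 512, 7, 258
+ label = np.zeros((N_CLASSES, N_FRAMES), dtype=np.float32)
+
+ # Hypothetical note: yaozhi (class 5) played from 0.50 s to 1.25 s
+ onset, offset, ipt = 0.50, 1.25, 5
+ left = int(round(onset * SR / HOP))                   # frame 43
+ right = min(N_FRAMES, int(round(offset * SR / HOP)))  # frame 108
+ label[ipt, left:right] = 1  # mark the technique as active in those frames
+ ```
+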
+ ### Supported Tasks and Leaderboards
+ MIR, audio classification
+
+ ### Languages
+ Chinese, English
+
+ ## Usage
+ ### Default Subset
+ ```python
+ from datasets import load_dataset
+
+ dataset = load_dataset("ccmusic-database/Guzheng_Tech99", name="default", split="train")
+ for item in dataset:
+     print(item)
+ ```
+
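+ Each `item` in the default subset bundles the audio, the mel-spectrogram image, and the note-level labels. A hedged access sketch (note that `datasets` typically returns a `Sequence` of dicts as a single dict of lists):
+
+ ```python
+ item = dataset[0]
+ print(item["audio"]["sampling_rate"])   # 44100
+ print(item["label"]["onset_time"][:3])  # onsets of the first three notes
+ print(item["label"]["IPT"][:3])         # technique class indices of those notes
+ ```
+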
+ ### Eval Subset
+ ```python
+ from datasets import load_dataset
+
+ dataset = load_dataset("ccmusic-database/Guzheng_Tech99", name="eval")
+ for item in dataset["train"]:
+     print(item)
+
+ for item in dataset["validation"]:
+     print(item)
+
+ for item in dataset["test"]:
+     print(item)
+ ```
+
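+ Each eval item should carry one 3-second frame stack; a quick sanity check of the documented shapes (assuming the `eval` subset loaded above):
+
+ ```python
+ import numpy as np
+
+ item = dataset["train"][0]
+ print(np.array(item["data"]).shape)   # (88, 258, 1)
+ print(np.array(item["label"]).shape)  # (7, 258)
+ ```
+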
+ ## Maintenance
+ ```bash
+ GIT_LFS_SKIP_SMUDGE=1 git clone [email protected]:datasets/ccmusic-database/Guzheng_Tech99
+ cd Guzheng_Tech99
+ ```
+
+ ## Dataset Creation
+ ### Curation Rationale
+ Instrument playing technique (IPT) is a key element of musical presentation.
+
+ ### Source Data
+ #### Initial Data Collection and Normalization
+ Dichucheng Li, Monan Zhou
+
+ #### Who are the source language producers?
+ Students from FD-LAMT
+
+ ### Annotations
+ #### Annotation process
+ The guzheng is a polyphonic instrument. In guzheng performance, notes played with different IPTs frequently overlap, and mixed IPTs that can be decomposed into multiple independent IPTs are common. Most existing work on IPT detection uses datasets of monophonic instrumental solo pieces, so this dataset fills a gap in the research field.
+
+ #### Who are the annotators?
+ Students from FD-LAMT
+
+ ### Personal and Sensitive Information
+ None
+
+ ## Considerations for Using the Data
+ ### Social Impact of Dataset
+ Promoting the development of the music AI industry
+
+ ### Discussion of Biases
+ Only for traditional Chinese instruments
+
+ ### Other Known Limitations
+ Insufficient samples
+
+ ## Additional Information
+ ### Dataset Curators
+ Dichucheng Li
+
+ ### Evaluation
+ [Dichucheng Li, Mingjin Che, Wenwu Meng, Yulun Wu, Yi Yu, Fan Xia and Wei Li. "Frame-Level Multi-Label Playing Technique Detection Using Multi-Scale Network and Self-Attention Mechanism", in IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP 2023).](https://arxiv.org/pdf/2303.13272.pdf)
+
+ ### Citation Information
+ ```bibtex
+ @dataset{zhaorui_liu_2021_5676893,
+     author    = {Monan Zhou and Shenyang Xu and Zhaorui Liu and Zhaowen Wang and Feng Yu and Wei Li and Baoqiang Han},
+     title     = {CCMusic: an Open and Diverse Database for Chinese and General Music Information Retrieval Research},
+     month     = {mar},
+     year      = {2024},
+     publisher = {HuggingFace},
+     version   = {1.2},
+     url       = {https://huggingface.co/ccmusic-database}
+ }
+ ```
+
+ ### Contributions
+ Promoting the development of the music AI industry