Spaces: Running on Zero
Upload TMIDIX.py
TMIDIX.py CHANGED
@@ -5,9 +5,8 @@ r'''############################################################################
|
|
5 |
#
|
6 |
#
|
7 |
# Tegridy MIDI X Module (TMIDI X / tee-midi eks)
|
8 |
-
# Version 1.0
|
9 |
#
|
10 |
-
# NOTE: TMIDI X Module starts after the partial MIDI.py module @ line
|
11 |
#
|
12 |
# Based upon MIDI.py module v.6.7. by Peter Billam / pjb.com.au
|
13 |
#
|
@@ -26,7 +25,7 @@ r'''############################################################################
|
|
26 |
# you may not use this file except in compliance with the License.
|
27 |
# You may obtain a copy of the License at
|
28 |
#
|
29 |
-
#
|
30 |
#
|
31 |
# Unless required by applicable law or agreed to in writing, software
|
32 |
# distributed under the License is distributed on an "AS IS" BASIS,
|
@@ -47,7 +46,20 @@ r'''############################################################################
|
|
47 |
# Copyright 2020 Peter Billam
|
48 |
#
|
49 |
###################################################################################
|
50 |
-
###################################################################################
|
51 |
|
52 |
import sys, struct, copy
|
53 |
|
@@ -1440,7 +1452,6 @@ def _encode(events_lol, unknown_callback=None, never_add_eot=False,
|
|
1440 |
###################################################################################
|
1441 |
#
|
1442 |
# Tegridy MIDI X Module (TMIDI X / tee-midi eks)
|
1443 |
-
# Version 1.0
|
1444 |
#
|
1445 |
# Based upon and includes the amazing MIDI.py module v.6.7. by Peter Billam
|
1446 |
# pjb.com.au
|
@@ -1477,6 +1488,7 @@ from itertools import groupby
|
|
1477 |
|
1478 |
from collections import Counter
|
1479 |
from collections import defaultdict
|
|
|
1480 |
|
1481 |
from operator import itemgetter
|
1482 |
|
@@ -1497,6 +1509,13 @@ from pathlib import Path
|
|
1497 |
|
1498 |
import shutil
|
1499 |
|
1500 |
###################################################################################
|
1501 |
#
|
1502 |
# Original TMIDI Tegridy helper functions
|
@@ -3705,19 +3724,52 @@ def validate_pitches(chord, channel_to_check = 0, return_sorted = True):
|
|
3705 |
chord.sort(key = lambda x: x[4], reverse=True)
|
3706 |
return chord
|
3707 |
|
3708 |
-
def adjust_score_velocities(score,
|
3709 |
|
3710 |
-
|
3711 |
-
|
3712 |
-
|
3713 |
|
3714 |
-
max_channel_velocity = max([c[5] for c in score])
|
3715 |
-
if max_channel_velocity < min_velocity:
|
3716 |
-
factor = max_velocity / min_velocity
|
3717 |
else:
|
3718 |
-
|
3719 |
-
|
3720 |
-
|
3721 |
|
3722 |
def chordify_score(score,
|
3723 |
return_choridfied_score=True,
|
@@ -3849,7 +3901,10 @@ def chordify_score(score,
|
|
3849 |
else:
|
3850 |
return None
|
3851 |
|
3852 |
-
def fix_monophonic_score_durations(monophonic_score):
|
3853 |
|
3854 |
fixed_score = []
|
3855 |
|
@@ -3861,15 +3916,17 @@ def fix_monophonic_score_durations(monophonic_score):
|
|
3861 |
nmt = monophonic_score[i+1][1]
|
3862 |
|
3863 |
if note[1]+note[2] >= nmt:
|
3864 |
-
note_dur = nmt-note[1]-
|
3865 |
else:
|
3866 |
note_dur = note[2]
|
3867 |
|
3868 |
new_note = [note[0], note[1], note_dur] + note[3:]
|
3869 |
-
|
3870 |
-
|
3871 |
-
|
3872 |
-
|
|
|
|
|
3873 |
|
3874 |
elif type(monophonic_score[0][0]) == int:
|
3875 |
|
@@ -3879,15 +3936,17 @@ def fix_monophonic_score_durations(monophonic_score):
|
|
3879 |
nmt = monophonic_score[i+1][0]
|
3880 |
|
3881 |
if note[0]+note[1] >= nmt:
|
3882 |
-
note_dur = nmt-note[0]-
|
3883 |
else:
|
3884 |
note_dur = note[1]
|
3885 |
-
|
3886 |
new_note = [note[0], note_dur] + note[2:]
|
3887 |
-
|
3888 |
-
|
3889 |
-
|
3890 |
-
|
|
|
|
|
3891 |
|
3892 |
return fixed_score
|
3893 |
|
@@ -5003,54 +5062,101 @@ def patch_list_from_enhanced_score_notes(enhanced_score_notes,
|
|
5003 |
|
5004 |
###################################################################################
|
5005 |
|
5006 |
-
def patch_enhanced_score_notes(
|
5007 |
-
|
5008 |
-
|
5009 |
-
|
5010 |
-
|
|
|
|
|
5011 |
|
5012 |
-
#===========================================================================
|
5013 |
|
5014 |
enhanced_score_notes_with_patch_changes = []
|
5015 |
|
5016 |
patches = [-1] * 16
|
5017 |
|
5018 |
overflow_idx = -1
|
5019 |
|
5020 |
for idx, e in enumerate(enhanced_score_notes):
|
5021 |
-
|
5022 |
-
|
5023 |
-
|
5024 |
-
|
5025 |
-
|
5026 |
-
|
5027 |
-
|
5028 |
-
e[3] =
|
5029 |
-
|
5030 |
-
|
5031 |
-
|
5032 |
-
|
5033 |
-
|
5034 |
-
|
5035 |
-
|
5036 |
-
|
5037 |
|
5038 |
#===========================================================================
|
5039 |
|
5040 |
overflow_patches = []
|
5041 |
|
5042 |
if overflow_idx != -1:
|
5043 |
-
|
5044 |
-
|
5045 |
-
|
5046 |
-
|
5047 |
-
|
5048 |
-
|
5049 |
-
|
5050 |
-
|
5051 |
-
|
5052 |
|
5053 |
-
|
5054 |
|
5055 |
#===========================================================================
|
5056 |
|
@@ -5060,9 +5166,13 @@ def patch_enhanced_score_notes(enhanced_score_notes,
|
|
5060 |
|
5061 |
#===========================================================================
|
5062 |
|
5063 |
if verbose:
|
5064 |
print('=' * 70)
|
5065 |
-
print('
|
5066 |
print('=' * 70)
|
5067 |
for c, p in enumerate(patches):
|
5068 |
print('Cha', str(c).zfill(2), '---', str(p).zfill(3), Number2patch[p])
|
@@ -5075,6 +5185,8 @@ def patch_enhanced_score_notes(enhanced_score_notes,
|
|
5075 |
print(str(p).zfill(3), Number2patch[p])
|
5076 |
print('=' * 70)
|
5077 |
|
|
|
|
|
5078 |
return enhanced_score_notes_with_patch_changes, patches, overflow_patches
|
5079 |
|
5080 |
###################################################################################
|
@@ -11170,13 +11282,17 @@ def escore_notes_core(escore_notes, core_len=128):
|
|
11170 |
|
11171 |
###################################################################################
|
11172 |
|
11173 |
-
def multiprocessing_wrapper(function, data_list):
|
11174 |
|
11175 |
with multiprocessing.Pool() as pool:
|
11176 |
|
11177 |
results = []
|
11178 |
|
11179 |
-
for result in tqdm.tqdm(pool.imap_unordered(function, data_list),
|
11180 |
results.append(result)
|
11181 |
|
11182 |
return results
|
@@ -11325,28 +11441,83 @@ def system_memory_utilization(return_dict=False):
|
|
11325 |
|
11326 |
###################################################################################
|
11327 |
|
11328 |
def create_files_list(datasets_paths=['./'],
|
11329 |
files_exts=['.mid', '.midi', '.kar', '.MID', '.MIDI', '.KAR'],
|
11330 |
randomize_files_list=True,
|
11331 |
verbose=True
|
11332 |
):
|
|
|
11333 |
if verbose:
|
11334 |
print('=' * 70)
|
11335 |
print('Searching for files...')
|
11336 |
print('This may take a while on a large dataset in particular...')
|
11337 |
print('=' * 70)
|
11338 |
|
11339 |
-
filez_set = defaultdict(None)
|
11340 |
-
|
11341 |
files_exts = tuple(files_exts)
|
11342 |
|
11343 |
-
|
11344 |
-
|
11345 |
-
|
11346 |
-
|
11347 |
-
|
11348 |
-
|
11349 |
-
|
11350 |
|
11351 |
if verbose:
|
11352 |
print('Done!')
|
@@ -11366,6 +11537,7 @@ def create_files_list(datasets_paths=['./'],
|
|
11366 |
|
11367 |
if verbose:
|
11368 |
print('Found', len(filez), 'files.')
|
|
|
11369 |
print('=' * 70)
|
11370 |
|
11371 |
else:
|
@@ -11373,8 +11545,20 @@ def create_files_list(datasets_paths=['./'],
|
|
11373 |
print('Could not find any files...')
|
11374 |
print('Please check dataset dirs and files extensions...')
|
11375 |
print('=' * 70)
|
11376 |
|
11377 |
-
|
11378 |
|
11379 |
###################################################################################
|
11380 |
|
@@ -12196,6 +12380,12 @@ def escore_notes_pitches_chords_signature(escore_notes,
|
|
12196 |
use_full_chords=False
|
12197 |
):
|
12198 |
|
12199 |
max_patch = max(0, min(128, max_patch))
|
12200 |
|
12201 |
escore_notes = [e for e in escore_notes if e[6] <= max_patch]
|
@@ -12207,7 +12397,7 @@ def escore_notes_pitches_chords_signature(escore_notes,
|
|
12207 |
sig = []
|
12208 |
dsig = []
|
12209 |
|
12210 |
-
drums_offset =
|
12211 |
|
12212 |
bad_chords_counter = 0
|
12213 |
|
@@ -12224,10 +12414,10 @@ def escore_notes_pitches_chords_signature(escore_notes,
|
|
12224 |
tones_chord = sorted(set([p % 12 for p in pitches]))
|
12225 |
|
12226 |
try:
|
12227 |
-
sig_token =
|
12228 |
except:
|
12229 |
checked_tones_chord = check_and_fix_tones_chord(tones_chord, use_full_chords=use_full_chords)
|
12230 |
-
sig_token =
|
12231 |
bad_chords_counter += 1
|
12232 |
|
12233 |
elif len(pitches) == 1:
|
@@ -12438,10 +12628,10 @@ def merge_chords(chord1, chord2, sort_drums_last=False):
|
|
12438 |
e[1] = mchord[0][1]
|
12439 |
|
12440 |
if sort_drums_last:
|
12441 |
-
mchord.sort(key=lambda x: (x[4], x[6]) if x[6] != 128 else (x[6], -x[4]))
|
12442 |
|
12443 |
else:
|
12444 |
-
mchord.sort(key=lambda x: (x[4], x[6]))
|
12445 |
|
12446 |
return mchord
|
12447 |
|
@@ -12753,6 +12943,672 @@ def multi_instrumental_escore_notes_tokenized(escore_notes, compress_seq=False):
|
|
12753 |
|
12754 |
return melody_chords
|
12755 |
|
12756 |
###################################################################################
|
12757 |
# This is the end of the TMIDI X Python module
|
12758 |
###################################################################################
|
|
|
5 |
#
|
6 |
#
|
7 |
# Tegridy MIDI X Module (TMIDI X / tee-midi eks)
|
|
|
8 |
#
|
9 |
+
# NOTE: TMIDI X Module starts after the partial MIDI.py module @ line 1450
|
10 |
#
|
11 |
# Based upon MIDI.py module v.6.7. by Peter Billam / pjb.com.au
|
12 |
#
|
|
|
25 |
# you may not use this file except in compliance with the License.
|
26 |
# You may obtain a copy of the License at
|
27 |
#
|
28 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
29 |
#
|
30 |
# Unless required by applicable law or agreed to in writing, software
|
31 |
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
46 |
# Copyright 2020 Peter Billam
|
47 |
#
|
48 |
###################################################################################
|
49 |
+
###################################################################################
|
50 |
+
'''
|
51 |
+
|
52 |
+
###################################################################################
|
53 |
+
|
54 |
+
__version__ = "25.7.8"
|
55 |
+
|
56 |
+
print('=' * 70)
|
57 |
+
print('TMIDIX Python module')
|
58 |
+
print('Version:', __version__)
|
59 |
+
print('=' * 70)
|
60 |
+
print('Loading module...')
|
61 |
+
|
62 |
+
###################################################################################
|
63 |
|
64 |
import sys, struct, copy
|
65 |
|
|
|
1452 |
###################################################################################
|
1453 |
#
|
1454 |
# Tegridy MIDI X Module (TMIDI X / tee-midi eks)
|
|
|
1455 |
#
|
1456 |
# Based upon and includes the amazing MIDI.py module v.6.7. by Peter Billam
|
1457 |
# pjb.com.au
|
|
|
1488 |
|
1489 |
from collections import Counter
|
1490 |
from collections import defaultdict
|
1491 |
+
from collections import OrderedDict
|
1492 |
|
1493 |
from operator import itemgetter
|
1494 |
|
|
|
1509 |
|
1510 |
import shutil
|
1511 |
|
1512 |
+
import hashlib
|
1513 |
+
|
1514 |
+
from array import array
|
1515 |
+
|
1516 |
+
from pathlib import Path
|
1517 |
+
from fnmatch import fnmatch
|
1518 |
+
|
1519 |
###################################################################################
|
1520 |
#
|
1521 |
# Original TMIDI Tegridy helper functions
|
|
|
3724 |
chord.sort(key = lambda x: x[4], reverse=True)
|
3725 |
return chord
|
3726 |
|
3727 |
+
def adjust_score_velocities(score,
|
3728 |
+
max_velocity,
|
3729 |
+
adj_per_channel=False,
|
3730 |
+
adj_in_place=True
|
3731 |
+
):
|
3732 |
+
|
3733 |
+
if adj_in_place:
|
3734 |
+
buf = score
|
3735 |
+
|
3736 |
+
else:
|
3737 |
+
buf = copy.deepcopy(score)
|
3738 |
+
|
3739 |
+
notes = [evt for evt in buf if evt[0] == 'note']
|
3740 |
+
|
3741 |
+
if not notes:
|
3742 |
+
return buf
|
3743 |
+
|
3744 |
+
if adj_per_channel:
|
3745 |
+
channel_max = {}
|
3746 |
+
|
3747 |
+
for _, _, _, ch, _, vel, _ in notes:
|
3748 |
+
channel_max[ch] = max(channel_max.get(ch, 0), vel)
|
3749 |
|
3750 |
+
channel_factor = {
|
3751 |
+
ch: (max_velocity / vmax if vmax > 0 else 1.0)
|
3752 |
+
for ch, vmax in channel_max.items()
|
3753 |
+
}
|
3754 |
+
|
3755 |
+
for evt in buf:
|
3756 |
+
if evt[0] == 'note':
|
3757 |
+
ch = evt[3]
|
3758 |
+
factor = channel_factor.get(ch, 1.0)
|
3759 |
+
new_vel = int(evt[5] * factor)
|
3760 |
+
evt[5] = max(1, min(127, new_vel))
|
3761 |
|
3762 |
else:
|
3763 |
+
global_max = max(vel for _, _, _, _, _, vel, _ in notes)
|
3764 |
+
factor = max_velocity / global_max if global_max > 0 else 1.0
|
3765 |
+
|
3766 |
+
for evt in buf:
|
3767 |
+
if evt[0] == 'note':
|
3768 |
+
new_vel = int(evt[5] * factor)
|
3769 |
+
evt[5] = max(1, min(127, new_vel))
|
3770 |
+
|
3771 |
+
if not adj_in_place:
|
3772 |
+
return buf
|
3773 |
|
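Usage sketch for the new adjust_score_velocities() (illustrative only; the sample events below are hypothetical and follow the ['note', start, dur, channel, pitch, velocity, patch] layout the function unpacks):

    # hypothetical enhanced-score events: ['note', start, dur, channel, pitch, velocity, patch]
    score = [['note',   0, 100, 0, 60,  70, 0],
             ['note',   0, 100, 9, 36,  50, 128],
             ['note', 100, 100, 0, 64,  90, 0]]

    adjust_score_velocities(score, 120)  # default adj_in_place=True: mutates score, returns None

    scaled = adjust_score_velocities(score, 120,
                                     adj_per_channel=True,   # scale each channel to its own max
                                     adj_in_place=False)     # work on a deep copy and return it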
3774 |
def chordify_score(score,
|
3775 |
return_choridfied_score=True,
|
|
|
3901 |
else:
|
3902 |
return None
|
3903 |
|
3904 |
+
def fix_monophonic_score_durations(monophonic_score,
|
3905 |
+
min_notes_gap=1,
|
3906 |
+
min_notes_dur=1
|
3907 |
+
):
|
3908 |
|
3909 |
fixed_score = []
|
3910 |
|
|
|
3916 |
nmt = monophonic_score[i+1][1]
|
3917 |
|
3918 |
if note[1]+note[2] >= nmt:
|
3919 |
+
note_dur = max(1, nmt-note[1]-min_notes_gap)
|
3920 |
else:
|
3921 |
note_dur = note[2]
|
3922 |
|
3923 |
new_note = [note[0], note[1], note_dur] + note[3:]
|
3924 |
+
|
3925 |
+
if new_note[2] >= min_notes_dur:
|
3926 |
+
fixed_score.append(new_note)
|
3927 |
+
|
3928 |
+
if monophonic_score[-1][2] >= min_notes_dur:
|
3929 |
+
fixed_score.append(monophonic_score[-1])
|
3930 |
|
3931 |
elif type(monophonic_score[0][0]) == int:
|
3932 |
|
|
|
3936 |
nmt = monophonic_score[i+1][0]
|
3937 |
|
3938 |
if note[0]+note[1] >= nmt:
|
3939 |
+
note_dur = max(1, nmt-note[0]-min_notes_gap)
|
3940 |
else:
|
3941 |
note_dur = note[1]
|
3942 |
+
|
3943 |
new_note = [note[0], note_dur] + note[2:]
|
3944 |
+
|
3945 |
+
if new_note[1] >= min_notes_dur:
|
3946 |
+
fixed_score.append(new_note)
|
3947 |
+
|
3948 |
+
if monophonic_score[-1][1] >= min_notes_dur:
|
3949 |
+
fixed_score.append(monophonic_score[-1])
|
3950 |
|
3951 |
return fixed_score
|
3952 |
|
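Usage sketch for the updated fix_monophonic_score_durations() signature (illustrative; the overlapping melody below is hypothetical):

    mono = [['note',   0, 200, 0, 60, 100, 0],
            ['note', 150, 200, 0, 62, 100, 0],   # starts before the previous note ends
            ['note', 350, 200, 0, 64, 100, 0]]

    fixed = fix_monophonic_score_durations(mono, min_notes_gap=1, min_notes_dur=1)
    # overlapping notes are trimmed to end min_notes_gap ticks before the next note;
    # notes shorter than min_notes_dur are dropped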
|
|
5062 |
|
5063 |
###################################################################################
|
5064 |
|
5065 |
+
def patch_enhanced_score_notes(escore_notes,
|
5066 |
+
default_patch=0,
|
5067 |
+
reserved_patch=-1,
|
5068 |
+
reserved_patch_channel=-1,
|
5069 |
+
drums_patch=9,
|
5070 |
+
verbose=False
|
5071 |
+
):
|
5072 |
|
5073 |
+
#===========================================================================
|
5074 |
+
|
5075 |
+
enhanced_score_notes = copy.deepcopy(escore_notes)
|
5076 |
+
|
5077 |
+
#===========================================================================
|
5078 |
|
5079 |
enhanced_score_notes_with_patch_changes = []
|
5080 |
|
5081 |
patches = [-1] * 16
|
5082 |
|
5083 |
+
if -1 < reserved_patch < 128 and -1 < reserved_patch_channel < 128:
|
5084 |
+
patches[reserved_patch_channel] = reserved_patch
|
5085 |
+
|
5086 |
overflow_idx = -1
|
5087 |
|
5088 |
for idx, e in enumerate(enhanced_score_notes):
|
5089 |
+
if e[0] == 'note':
|
5090 |
+
if e[3] != 9:
|
5091 |
+
if -1 < reserved_patch < 128 and -1 < reserved_patch_channel < 128:
|
5092 |
+
if e[6] == reserved_patch:
|
5093 |
+
e[3] = reserved_patch_channel
|
5094 |
+
|
5095 |
+
if patches[e[3]] == -1:
|
5096 |
+
patches[e[3]] = e[6]
|
5097 |
+
|
5098 |
+
else:
|
5099 |
+
if patches[e[3]] != e[6]:
|
5100 |
+
if e[6] in patches:
|
5101 |
+
e[3] = patches.index(e[6])
|
5102 |
+
|
5103 |
+
else:
|
5104 |
+
if -1 in patches:
|
5105 |
+
patches[patches.index(-1)] = e[6]
|
5106 |
+
|
5107 |
+
else:
|
5108 |
+
overflow_idx = idx
|
5109 |
+
break
|
5110 |
+
|
5111 |
+
enhanced_score_notes_with_patch_changes.append(e)
|
5112 |
|
5113 |
#===========================================================================
|
5114 |
|
5115 |
overflow_patches = []
|
5116 |
+
overflow_channels = [-1] * 16
|
5117 |
+
overflow_channels[9] = drums_patch
|
5118 |
+
|
5119 |
+
if -1 < reserved_patch < 128 and -1 < reserved_patch_channel < 128:
|
5120 |
+
overflow_channels[reserved_patch_channel] = reserved_patch
|
5121 |
|
5122 |
if overflow_idx != -1:
|
5123 |
+
for idx, e in enumerate(enhanced_score_notes[overflow_idx:]):
|
5124 |
+
if e[0] == 'note':
|
5125 |
+
if e[3] != 9:
|
5126 |
+
if e[6] not in overflow_channels:
|
5127 |
+
|
5128 |
+
if -1 in overflow_channels:
|
5129 |
+
free_chan = overflow_channels.index(-1)
|
5130 |
+
overflow_channels[free_chan] = e[6]
|
5131 |
+
e[3] = free_chan
|
5132 |
+
|
5133 |
+
enhanced_score_notes_with_patch_changes.append(['patch_change', e[1], e[3], e[6]])
|
5134 |
+
|
5135 |
+
overflow_patches.append(e[6])
|
5136 |
+
|
5137 |
+
else:
|
5138 |
+
overflow_channels = [-1] * 16
|
5139 |
+
overflow_channels[9] = drums_patch
|
5140 |
+
|
5141 |
+
if -1 < reserved_patch < 128 and -1 < reserved_patch_channel < 128:
|
5142 |
+
overflow_channels[reserved_patch_channel] = reserved_patch
|
5143 |
+
e[3] = reserved_patch_channel
|
5144 |
+
|
5145 |
+
if e[6] != reserved_patch:
|
5146 |
+
|
5147 |
+
free_chan = overflow_channels.index(-1)
|
5148 |
+
e[3] = free_chan
|
5149 |
+
|
5150 |
+
overflow_channels[e[3]] = e[6]
|
5151 |
+
|
5152 |
+
enhanced_score_notes_with_patch_changes.append(['patch_change', e[1], e[3], e[6]])
|
5153 |
+
|
5154 |
+
overflow_patches.append(e[6])
|
5155 |
+
|
5156 |
+
else:
|
5157 |
+
e[3] = overflow_channels.index(e[6])
|
5158 |
|
5159 |
+
enhanced_score_notes_with_patch_changes.append(e)
|
5160 |
|
5161 |
#===========================================================================
|
5162 |
|
|
|
5166 |
|
5167 |
#===========================================================================
|
5168 |
|
5169 |
+
overflow_patches = ordered_set(overflow_patches)
|
5170 |
+
|
5171 |
+
#===========================================================================
|
5172 |
+
|
5173 |
if verbose:
|
5174 |
print('=' * 70)
|
5175 |
+
print('Main composition patches')
|
5176 |
print('=' * 70)
|
5177 |
for c, p in enumerate(patches):
|
5178 |
print('Cha', str(c).zfill(2), '---', str(p).zfill(3), Number2patch[p])
|
|
|
5185 |
print(str(p).zfill(3), Number2patch[p])
|
5186 |
print('=' * 70)
|
5187 |
|
5188 |
+
#===========================================================================
|
5189 |
+
|
5190 |
return enhanced_score_notes_with_patch_changes, patches, overflow_patches
|
5191 |
|
5192 |
###################################################################################
|
|
|
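Usage sketch for the rewritten patch_enhanced_score_notes() (illustrative; escore_notes is assumed to be an enhanced score produced elsewhere in the module):

    patched, patches, overflow = patch_enhanced_score_notes(escore_notes,
                                                            reserved_patch=40,         # keep this patch...
                                                            reserved_patch_channel=3,  # ...pinned to channel 3
                                                            verbose=True)
    # patches is a 16-slot channel -> program map; channel 9 (drums) is never reassigned;
    # overflow lists the programs that did not fit into the 16 channels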
11282 |
|
11283 |
###################################################################################
|
11284 |
|
11285 |
+
def multiprocessing_wrapper(function, data_list, verbose=True):
|
11286 |
|
11287 |
with multiprocessing.Pool() as pool:
|
11288 |
|
11289 |
results = []
|
11290 |
|
11291 |
+
for result in tqdm.tqdm(pool.imap_unordered(function, data_list),
|
11292 |
+
total=len(data_list),
|
11293 |
+
disable=not verbose
|
11294 |
+
):
|
11295 |
+
|
11296 |
results.append(result)
|
11297 |
|
11298 |
return results
|
|
|
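Usage sketch for multiprocessing_wrapper() with the new verbose flag (illustrative). Because it uses multiprocessing.Pool, the mapped function must be a picklable top-level callable and results come back unordered:

    def square(x):
        return x * x

    if __name__ == '__main__':
        results = multiprocessing_wrapper(square, list(range(1000)), verbose=True)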
11441 |
|
11442 |
###################################################################################
|
11443 |
|
11444 |
+
def system_cpus_utilization(return_dict=False):
|
11445 |
+
|
11446 |
+
if return_dict:
|
11447 |
+
return {'num_cpus': psutil.cpu_count(),
|
11448 |
+
'cpus_util': psutil.cpu_percent()
|
11449 |
+
}
|
11450 |
+
|
11451 |
+
else:
|
11452 |
+
print('Number of CPUs:', psutil.cpu_count())
|
11453 |
+
print('CPUs utilization:', psutil.cpu_percent())
|
11454 |
+
|
11455 |
+
###################################################################################
|
11456 |
+
|
11457 |
def create_files_list(datasets_paths=['./'],
|
11458 |
files_exts=['.mid', '.midi', '.kar', '.MID', '.MIDI', '.KAR'],
|
11459 |
+
max_num_files_per_dir=-1,
|
11460 |
+
randomize_dir_files=False,
|
11461 |
+
max_total_files=-1,
|
11462 |
randomize_files_list=True,
|
11463 |
+
check_for_dupes=False,
|
11464 |
+
use_md5_hashes=False,
|
11465 |
+
return_dupes=False,
|
11466 |
verbose=True
|
11467 |
):
|
11468 |
+
|
11469 |
if verbose:
|
11470 |
print('=' * 70)
|
11471 |
print('Searching for files...')
|
11472 |
print('This may take a while on a large dataset in particular...')
|
11473 |
print('=' * 70)
|
11474 |
|
|
|
|
|
11475 |
files_exts = tuple(files_exts)
|
11476 |
|
11477 |
+
filez_set = defaultdict(None)
|
11478 |
+
dupes_list = []
|
11479 |
+
|
11480 |
+
for dataset_addr in datasets_paths:
|
11481 |
+
|
11482 |
+
print('=' * 70)
|
11483 |
+
print('Processing', dataset_addr)
|
11484 |
+
print('=' * 70)
|
11485 |
+
|
11486 |
+
for dirpath, dirnames, filenames in tqdm.tqdm(os.walk(dataset_addr), disable=not verbose):
|
11487 |
+
|
11488 |
+
if randomize_dir_files:
|
11489 |
+
random.shuffle(filenames)
|
11490 |
+
|
11491 |
+
if max_num_files_per_dir > 0:
|
11492 |
+
max_num_files = max_num_files_per_dir
|
11493 |
+
|
11494 |
+
else:
|
11495 |
+
max_num_files = len(filenames)
|
11496 |
+
|
11497 |
+
for file in filenames[:max_num_files]:
|
11498 |
+
if file.endswith(files_exts):
|
11499 |
+
if check_for_dupes:
|
11500 |
+
|
11501 |
+
if use_md5_hashes:
|
11502 |
+
md5_hash = hashlib.md5(open(os.path.join(dirpath, file), 'rb').read()).hexdigest()
|
11503 |
+
|
11504 |
+
if md5_hash not in filez_set:
|
11505 |
+
filez_set[md5_hash] = os.path.join(dirpath, file)
|
11506 |
+
|
11507 |
+
else:
|
11508 |
+
dupes_list.append(os.path.join(dirpath, file))
|
11509 |
+
|
11510 |
+
else:
|
11511 |
+
if file not in filez_set:
|
11512 |
+
filez_set[file] = os.path.join(dirpath, file)
|
11513 |
+
|
11514 |
+
else:
|
11515 |
+
dupes_list.append(os.path.join(dirpath, file))
|
11516 |
+
else:
|
11517 |
+
fpath = os.path.join(dirpath, file)
|
11518 |
+
filez_set[fpath] = fpath
|
11519 |
+
|
11520 |
+
filez = list(filez_set.values())
|
11521 |
|
11522 |
if verbose:
|
11523 |
print('Done!')
|
|
|
11537 |
|
11538 |
if verbose:
|
11539 |
print('Found', len(filez), 'files.')
|
11540 |
+
print('Skipped', len(dupes_list), 'duplicate files.')
|
11541 |
print('=' * 70)
|
11542 |
|
11543 |
else:
|
|
|
11545 |
print('Could not find any files...')
|
11546 |
print('Please check dataset dirs and files extensions...')
|
11547 |
print('=' * 70)
|
11548 |
+
|
11549 |
+
if max_total_files > 0:
|
11550 |
+
if return_dupes:
|
11551 |
+
return filez[:max_total_files], dupes_list
|
11552 |
|
11553 |
+
else:
|
11554 |
+
return filez[:max_total_files]
|
11555 |
+
|
11556 |
+
else:
|
11557 |
+
if return_dupes:
|
11558 |
+
return filez, dupes_list
|
11559 |
+
|
11560 |
+
else:
|
11561 |
+
return filez
|
11562 |
|
11563 |
###################################################################################
|
11564 |
|
|
|
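Usage sketch for the extended create_files_list() (illustrative; './Dataset/' is a hypothetical path):

    midi_files = create_files_list(datasets_paths=['./Dataset/'],
                                   max_num_files_per_dir=1000,   # cap files taken from each dir
                                   check_for_dupes=True,
                                   use_md5_hashes=True,          # dedupe by file content, not name
                                   verbose=True)

    midi_files, dupes = create_files_list(datasets_paths=['./Dataset/'],
                                          check_for_dupes=True,
                                          return_dupes=True)     # also return the skipped duplicates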
12380 |
use_full_chords=False
|
12381 |
):
|
12382 |
|
12383 |
+
if use_full_chords:
|
12384 |
+
CHORDS = ALL_CHORDS_FULL
|
12385 |
+
|
12386 |
+
else:
|
12387 |
+
CHORDS = ALL_CHORDS_SORTED
|
12388 |
+
|
12389 |
max_patch = max(0, min(128, max_patch))
|
12390 |
|
12391 |
escore_notes = [e for e in escore_notes if e[6] <= max_patch]
|
|
|
12397 |
sig = []
|
12398 |
dsig = []
|
12399 |
|
12400 |
+
drums_offset = len(CHORDS) + 128
|
12401 |
|
12402 |
bad_chords_counter = 0
|
12403 |
|
|
|
12414 |
tones_chord = sorted(set([p % 12 for p in pitches]))
|
12415 |
|
12416 |
try:
|
12417 |
+
sig_token = CHORDS.index(tones_chord) + 128
|
12418 |
except:
|
12419 |
checked_tones_chord = check_and_fix_tones_chord(tones_chord, use_full_chords=use_full_chords)
|
12420 |
+
sig_token = CHORDS.index(checked_tones_chord) + 128
|
12421 |
bad_chords_counter += 1
|
12422 |
|
12423 |
elif len(pitches) == 1:
|
|
|
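The token arithmetic in this hunk implies the following signature layout (an inference from the code above, not stated in the commit):

    # 0..127                      -> single MIDI pitches
    # 128..127+len(CHORDS)        -> tones-chord tokens: CHORDS.index(tones_chord) + 128
    # drums_offset = len(CHORDS) + 128 shifts drum pitches above the chord token range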
12628 |
e[1] = mchord[0][1]
|
12629 |
|
12630 |
if sort_drums_last:
|
12631 |
+
mchord.sort(key=lambda x: (-x[4], x[6]) if x[6] != 128 else (x[6], -x[4]))
|
12632 |
|
12633 |
else:
|
12634 |
+
mchord.sort(key=lambda x: (-x[4], x[6]))
|
12635 |
|
12636 |
return mchord
|
12637 |
|
|
|
12943 |
|
12944 |
return melody_chords
|
12945 |
|
12946 |
+
###################################################################################
|
12947 |
+
|
12948 |
+
def merge_counts(data, return_lists=True):
|
12949 |
+
|
12950 |
+
merged = defaultdict(int)
|
12951 |
+
|
12952 |
+
for value, count in data:
|
12953 |
+
merged[value] += count
|
12954 |
+
|
12955 |
+
if return_lists:
|
12956 |
+
return [[k, v] for k, v in merged.items()]
|
12957 |
+
|
12958 |
+
else:
|
12959 |
+
return list(merged.items())
|
12960 |
+
|
12961 |
+
###################################################################################
|
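Worked example for merge_counts() (values here are arbitrary):

    merge_counts([[60, 3], [64, 1], [60, 2]])                      # -> [[60, 5], [64, 1]]
    merge_counts([[60, 3], [64, 1], [60, 2]], return_lists=False)  # -> [(60, 5), (64, 1)]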
12962 |
+
|
12963 |
+
def convert_escore_notes_pitches_chords_signature(signature, convert_to_full_chords=True):
|
12964 |
+
|
12965 |
+
if convert_to_full_chords:
|
12966 |
+
SRC_CHORDS = ALL_CHORDS_SORTED
|
12967 |
+
TRG_CHORDS = ALL_CHORDS_FULL
|
12968 |
+
|
12969 |
+
else:
|
12970 |
+
SRC_CHORDS = ALL_CHORDS_FULL
|
12971 |
+
TRG_CHORDS = ALL_CHORDS_SORTED
|
12972 |
+
|
12973 |
+
cdiff = len(TRG_CHORDS) - len(SRC_CHORDS)
|
12974 |
+
|
12975 |
+
pitches_counts = [c for c in signature if -1 < c[0] < 128]
|
12976 |
+
chords_counts = [c for c in signature if 127 < c[0] < len(SRC_CHORDS)+128]
|
12977 |
+
drums_counts = [[c[0]+cdiff, c[1]] for c in signature if len(SRC_CHORDS)+127 < c[0] < len(SRC_CHORDS)+256]
|
12978 |
+
bad_chords_count = [c for c in signature if c[0] == -1]
|
12979 |
+
|
12980 |
+
new_chords_counts = []
|
12981 |
+
|
12982 |
+
for c in chords_counts:
|
12983 |
+
tones_chord = SRC_CHORDS[c[0]-128]
|
12984 |
+
|
12985 |
+
if tones_chord not in TRG_CHORDS:
|
12986 |
+
tones_chord = check_and_fix_tones_chord(tones_chord, use_full_chords=convert_to_full_chords)
|
12987 |
+
bad_chords_count[0][1] += 1
|
12988 |
+
|
12989 |
+
new_chords_counts.append([TRG_CHORDS.index(tones_chord)+128, c[1]])
|
12990 |
+
|
12991 |
+
return pitches_counts + merge_counts(new_chords_counts) + drums_counts + bad_chords_count
|
12992 |
+
|
12993 |
+
###################################################################################
|
12994 |
+
|
12995 |
+
def convert_bytes_in_nested_list(lst, encoding='utf-8', errors='ignore'):
|
12996 |
+
|
12997 |
+
new_list = []
|
12998 |
+
|
12999 |
+
for item in lst:
|
13000 |
+
if isinstance(item, list):
|
13001 |
+
new_list.append(convert_bytes_in_nested_list(item))
|
13002 |
+
|
13003 |
+
elif isinstance(item, bytes):
|
13004 |
+
new_list.append(item.decode(encoding, errors=errors))
|
13005 |
+
|
13006 |
+
else:
|
13007 |
+
new_list.append(item)
|
13008 |
+
|
13009 |
+
return new_list
|
13010 |
+
|
13011 |
+
###################################################################################
|
13012 |
+
|
13013 |
+
def mult_pitches(pitches, min_oct=4, max_oct=6):
|
13014 |
+
|
13015 |
+
tones_chord = sorted(set([p % 12 for p in pitches]))
|
13016 |
+
|
13017 |
+
mult_ptcs = []
|
13018 |
+
|
13019 |
+
for t in tones_chord:
|
13020 |
+
for i in range(min_oct, max_oct):
|
13021 |
+
mult_ptcs.append((i*12)+t)
|
13022 |
+
|
13023 |
+
return mult_ptcs
|
13024 |
+
|
13025 |
+
###################################################################################
|
13026 |
+
|
13027 |
+
def find_next(pitches, cur_ptc):
|
13028 |
+
|
13029 |
+
i = 0
|
13030 |
+
|
13031 |
+
for i, p in enumerate(pitches):
|
13032 |
+
if p != cur_ptc:
|
13033 |
+
break
|
13034 |
+
|
13035 |
+
return i
|
13036 |
+
|
13037 |
+
###################################################################################
|
13038 |
+
|
13039 |
+
def ordered_groups_unsorted(data, key_index):
|
13040 |
+
|
13041 |
+
def keyfunc(sublist):
|
13042 |
+
return sublist[key_index]
|
13043 |
+
|
13044 |
+
groups = []
|
13045 |
+
|
13046 |
+
for key, group in groupby(data, key=keyfunc):
|
13047 |
+
groups.append((key, list(group)))
|
13048 |
+
|
13049 |
+
return groups
|
13050 |
+
|
13051 |
+
###################################################################################
|
13052 |
+
|
13053 |
+
def ordered_groups(data, ptc_idx, pat_idx):
|
13054 |
+
|
13055 |
+
groups = OrderedDict()
|
13056 |
+
|
13057 |
+
for sublist in data:
|
13058 |
+
key = tuple([sublist[ptc_idx], sublist[pat_idx]])
|
13059 |
+
|
13060 |
+
if key not in groups:
|
13061 |
+
groups[key] = []
|
13062 |
+
|
13063 |
+
groups[key].append(sublist)
|
13064 |
+
|
13065 |
+
return list(groups.items())
|
13066 |
+
|
13067 |
+
###################################################################################
|
13068 |
+
|
13069 |
+
def merge_melody_notes(escore_notes, pitches_idx=4, max_dur=255, last_dur=128):
|
13070 |
+
|
13071 |
+
groups = ordered_groups_unsorted(escore_notes, pitches_idx)
|
13072 |
+
|
13073 |
+
merged_melody_notes = []
|
13074 |
+
|
13075 |
+
for i, (k, g) in enumerate(groups[:-1]):
|
13076 |
+
|
13077 |
+
if len(g) == 1:
|
13078 |
+
merged_melody_notes.extend(g)
|
13079 |
+
|
13080 |
+
else:
|
13081 |
+
dur = min(max_dur, groups[i+1][1][0][1] - g[0][1])
|
13082 |
+
|
13083 |
+
merged_melody_notes.append(['note',
|
13084 |
+
g[0][1],
|
13085 |
+
dur,
|
13086 |
+
g[0][3],
|
13087 |
+
g[0][4],
|
13088 |
+
g[0][5],
|
13089 |
+
g[0][6]
|
13090 |
+
])
|
13091 |
+
|
13092 |
+
merged_melody_notes.append(['note',
|
13093 |
+
groups[-1][1][0][1],
|
13094 |
+
last_dur,
|
13095 |
+
groups[-1][1][0][3],
|
13096 |
+
groups[-1][1][0][4],
|
13097 |
+
groups[-1][1][0][5],
|
13098 |
+
groups[-1][1][0][6]
|
13099 |
+
])
|
13100 |
+
|
13101 |
+
return merged_melody_notes
|
13102 |
+
|
13103 |
+
###################################################################################
|
13104 |
+
|
13105 |
+
def add_expressive_melody_to_enhanced_score_notes(escore_notes,
|
13106 |
+
melody_start_chord=0,
|
13107 |
+
melody_prime_pitch=60,
|
13108 |
+
melody_step=1,
|
13109 |
+
melody_channel=3,
|
13110 |
+
melody_patch=40,
|
13111 |
+
melody_notes_max_duration=255,
|
13112 |
+
melody_last_note_dur=128,
|
13113 |
+
melody_clip_max_min_durs=[],
|
13114 |
+
melody_max_velocity=120,
|
13115 |
+
acc_max_velocity=90,
|
13116 |
+
return_melody=False
|
13117 |
+
):
|
13118 |
+
|
13119 |
+
|
13120 |
+
score = copy.deepcopy(escore_notes)
|
13121 |
+
|
13122 |
+
adjust_score_velocities(score, acc_max_velocity)
|
13123 |
+
|
13124 |
+
cscore = chordify_score([1000, score])
|
13125 |
+
|
13126 |
+
melody_pitches = [melody_prime_pitch]
|
13127 |
+
|
13128 |
+
for i, c in enumerate(cscore[melody_start_chord:]):
|
13129 |
+
|
13130 |
+
if i % melody_step == 0:
|
13131 |
+
|
13132 |
+
pitches = [e[4] for e in c if e[3] != 9]
|
13133 |
+
|
13134 |
+
if pitches:
|
13135 |
+
cptc = find_closest_value(mult_pitches(pitches), melody_pitches[-1])[0]
|
13136 |
+
melody_pitches.append(cptc)
|
13137 |
+
|
13138 |
+
song_f = []
|
13139 |
+
mel_f = []
|
13140 |
+
|
13141 |
+
idx = 1
|
13142 |
+
|
13143 |
+
for i, c in enumerate(cscore[:-melody_step]):
|
13144 |
+
pitches = [e[4] for e in c if e[3] != 9]
|
13145 |
+
|
13146 |
+
if pitches and i >= melody_start_chord and i % melody_step == 0:
|
13147 |
+
dur = min(cscore[i+melody_step][0][1] - c[0][1], melody_notes_max_duration)
|
13148 |
+
|
13149 |
+
mel_f.append(['note',
|
13150 |
+
c[0][1],
|
13151 |
+
dur,
|
13152 |
+
melody_channel,
|
13153 |
+
60+(melody_pitches[idx] % 24),
|
13154 |
+
100 + ((melody_pitches[idx] % 12) * 2),
|
13155 |
+
melody_patch
|
13156 |
+
])
|
13157 |
+
idx += 1
|
13158 |
+
|
13159 |
+
song_f.extend(c)
|
13160 |
+
|
13161 |
+
song_f.extend(flatten(cscore[-melody_step:]))
|
13162 |
+
|
13163 |
+
if len(melody_clip_max_min_durs) == 2:
|
13164 |
+
for e in mel_f:
|
13165 |
+
if e[2] >= melody_clip_max_min_durs[0]:
|
13166 |
+
e[2] = melody_clip_max_min_durs[1]
|
13167 |
+
|
13168 |
+
adjust_score_velocities(mel_f, melody_max_velocity)
|
13169 |
+
|
13170 |
+
merged_melody_notes = merge_melody_notes(mel_f,
|
13171 |
+
max_dur=melody_notes_max_duration,
|
13172 |
+
last_dur=melody_last_note_dur
|
13173 |
+
)
|
13174 |
+
|
13175 |
+
song_f = sorted(merged_melody_notes + song_f,
|
13176 |
+
key=lambda x: x[1]
|
13177 |
+
)
|
13178 |
+
|
13179 |
+
if return_melody:
|
13180 |
+
return mel_f
|
13181 |
+
|
13182 |
+
else:
|
13183 |
+
return song_f
|
13184 |
+
|
13185 |
+
###################################################################################
|
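Usage sketch for add_expressive_melody_to_enhanced_score_notes() (illustrative; escore_notes is assumed to be an enhanced score):

    song = add_expressive_melody_to_enhanced_score_notes(escore_notes,
                                                         melody_channel=3,
                                                         melody_patch=40,         # violin-like melody voice
                                                         melody_max_velocity=120,
                                                         acc_max_velocity=90)

    melody_only = add_expressive_melody_to_enhanced_score_notes(escore_notes, return_melody=True)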
13186 |
+
|
13187 |
+
def list_md5_hash(ints_list):
|
13188 |
+
|
13189 |
+
arr = array('H', ints_list)
|
13190 |
+
binary_data = arr.tobytes()
|
13191 |
+
|
13192 |
+
return hashlib.md5(binary_data).hexdigest()
|
13193 |
+
|
13194 |
+
###################################################################################
|
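Note on list_md5_hash(): the array('H', ...) packing requires every value to fit an unsigned 16-bit integer (0..65535). A brief example:

    list_md5_hash([60, 64, 67, 72])   # -> 32-character hex digest, stable for the same token list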
13195 |
+
|
13196 |
+
def fix_escore_notes_durations(escore_notes,
|
13197 |
+
min_notes_gap=1,
|
13198 |
+
min_notes_dur=1,
|
13199 |
+
times_idx=1,
|
13200 |
+
durs_idx=2,
|
13201 |
+
channels_idx = 3,
|
13202 |
+
pitches_idx=4,
|
13203 |
+
patches_idx=6
|
13204 |
+
):
|
13205 |
+
|
13206 |
+
notes = [e for e in escore_notes if e[channels_idx] != 9]
|
13207 |
+
drums = [e for e in escore_notes if e[channels_idx] == 9]
|
13208 |
+
|
13209 |
+
escore_groups = ordered_groups(notes, pitches_idx, patches_idx)
|
13210 |
+
|
13211 |
+
merged_score = []
|
13212 |
+
|
13213 |
+
for k, g in escore_groups:
|
13214 |
+
if len(g) > 2:
|
13215 |
+
fg = fix_monophonic_score_durations(g,
|
13216 |
+
min_notes_gap=min_notes_gap,
|
13217 |
+
min_notes_dur=min_notes_dur
|
13218 |
+
)
|
13219 |
+
merged_score.extend(fg)
|
13220 |
+
|
13221 |
+
elif len(g) == 2:
|
13222 |
+
|
13223 |
+
if g[0][times_idx]+g[0][durs_idx] >= g[1][times_idx]:
|
13224 |
+
g[0][durs_idx] = max(1, g[1][times_idx] - g[0][times_idx] - min_notes_gap)
|
13225 |
+
|
13226 |
+
merged_score.extend(g)
|
13227 |
+
|
13228 |
+
else:
|
13229 |
+
merged_score.extend(g)
|
13230 |
+
|
13231 |
+
return sorted(merged_score + drums, key=lambda x: x[times_idx])
|
13232 |
+
|
13233 |
+
###################################################################################
|
13234 |
+
|
13235 |
+
def create_nested_chords_tree(chords_list):
|
13236 |
+
|
13237 |
+
tree = {}
|
13238 |
+
|
13239 |
+
for chord in chords_list:
|
13240 |
+
|
13241 |
+
node = tree
|
13242 |
+
|
13243 |
+
for semitone in chord:
|
13244 |
+
if semitone not in node:
|
13245 |
+
node[semitone] = {}
|
13246 |
+
|
13247 |
+
node = node[semitone]
|
13248 |
+
|
13249 |
+
node.setdefault(-1, []).append(chord)
|
13250 |
+
|
13251 |
+
return tree
|
13252 |
+
|
13253 |
+
###################################################################################
|
13254 |
+
|
13255 |
+
def get_chords_with_prefix(nested_chords_tree, prefix):
|
13256 |
+
|
13257 |
+
node = nested_chords_tree
|
13258 |
+
|
13259 |
+
for semitone in prefix:
|
13260 |
+
if semitone in node:
|
13261 |
+
node = node[semitone]
|
13262 |
+
|
13263 |
+
else:
|
13264 |
+
return []
|
13265 |
+
|
13266 |
+
collected_chords = []
|
13267 |
+
|
13268 |
+
def recursive_collect(subnode):
|
13269 |
+
if -1 in subnode:
|
13270 |
+
collected_chords.extend(subnode[-1])
|
13271 |
+
|
13272 |
+
for key, child in subnode.items():
|
13273 |
+
if key != -1:
|
13274 |
+
recursive_collect(child)
|
13275 |
+
|
13276 |
+
recursive_collect(node)
|
13277 |
+
|
13278 |
+
return collected_chords
|
13279 |
+
|
13280 |
+
###################################################################################
|
13281 |
+
|
13282 |
+
def get_chords_by_semitones(chords_list, chord_semitones):
|
13283 |
+
|
13284 |
+
query_set = set(chord_semitones)
|
13285 |
+
results = []
|
13286 |
+
|
13287 |
+
for chord in chords_list:
|
13288 |
+
|
13289 |
+
chord_set = set(chord)
|
13290 |
+
|
13291 |
+
if query_set.issubset(chord_set):
|
13292 |
+
results.append(sorted(set(chord)))
|
13293 |
+
|
13294 |
+
return results
|
13295 |
+
|
13296 |
+
###################################################################################
|
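Usage sketch tying the three new chord-lookup helpers together (illustrative; ALL_CHORDS_FULL is the module's list of sorted pitch-class chords):

    tree = create_nested_chords_tree(ALL_CHORDS_FULL)

    chords_from_0_4   = get_chords_with_prefix(tree, [0, 4])                   # chords whose first tones are 0, 4
    chords_with_c_e_g = get_chords_by_semitones(ALL_CHORDS_FULL, [0, 4, 7])    # chords containing {0, 4, 7}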
13297 |
+
|
13298 |
+
def remove_duplicate_pitches_from_escore_notes(escore_notes,
|
13299 |
+
pitches_idx=4,
|
13300 |
+
patches_idx=6,
|
13301 |
+
return_dupes_count=False
|
13302 |
+
):
|
13303 |
+
|
13304 |
+
cscore = chordify_score([1000, escore_notes])
|
13305 |
+
|
13306 |
+
new_escore = []
|
13307 |
+
|
13308 |
+
bp_count = 0
|
13309 |
+
|
13310 |
+
for c in cscore:
|
13311 |
+
|
13312 |
+
cho = []
|
13313 |
+
seen = []
|
13314 |
+
|
13315 |
+
for cc in c:
|
13316 |
+
if [cc[pitches_idx], cc[patches_idx]] not in seen:
|
13317 |
+
cho.append(cc)
|
13318 |
+
seen.append([cc[pitches_idx], cc[patches_idx]])
|
13319 |
+
|
13320 |
+
else:
|
13321 |
+
bp_count += 1
|
13322 |
+
|
13323 |
+
new_escore.extend(cho)
|
13324 |
+
|
13325 |
+
if return_dupes_count:
|
13326 |
+
return bp_count
|
13327 |
+
|
13328 |
+
else:
|
13329 |
+
return new_escore
|
13330 |
+
|
13331 |
+
###################################################################################
|
13332 |
+
|
13333 |
+
def chunks_shuffle(lst,
|
13334 |
+
min_len=1,
|
13335 |
+
max_len=3,
|
13336 |
+
seed=None
|
13337 |
+
):
|
13338 |
+
|
13339 |
+
rnd = random.Random(seed)
|
13340 |
+
chunks = []
|
13341 |
+
i, n = 0, len(lst)
|
13342 |
+
|
13343 |
+
while i < n:
|
13344 |
+
size = rnd.randint(min_len, max_len)
|
13345 |
+
size = min(size, n - i)
|
13346 |
+
chunks.append(lst[i : i + size])
|
13347 |
+
i += size
|
13348 |
+
|
13349 |
+
rnd.shuffle(chunks)
|
13350 |
+
|
13351 |
+
flattened = []
|
13352 |
+
for chunk in chunks:
|
13353 |
+
flattened.extend(chunk)
|
13354 |
+
|
13355 |
+
return flattened
|
13356 |
+
|
13357 |
+
###################################################################################
|
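Usage sketch for chunks_shuffle() (illustrative):

    shuffled = chunks_shuffle(list(range(10)), min_len=2, max_len=3, seed=42)
    # the list is cut into contiguous chunks of 2-3 items and only the chunk order is shuffled,
    # so ordering inside each chunk is preserved; a fixed seed makes the result reproducible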
13358 |
+
|
13359 |
+
def convert_bytes_in_nested_list(lst,
|
13360 |
+
encoding='utf-8',
|
13361 |
+
errors='ignore',
|
13362 |
+
return_changed_events_count=False
|
13363 |
+
):
|
13364 |
+
|
13365 |
+
new_list = []
|
13366 |
+
|
13367 |
+
ce_count = 0
|
13368 |
+
|
13369 |
+
for item in lst:
|
13370 |
+
if isinstance(item, list):
|
13371 |
+
new_list.append(convert_bytes_in_nested_list(item))
|
13372 |
+
|
13373 |
+
elif isinstance(item, bytes):
|
13374 |
+
new_list.append(item.decode(encoding, errors=errors))
|
13375 |
+
ce_count += 1
|
13376 |
+
|
13377 |
+
else:
|
13378 |
+
new_list.append(item)
|
13379 |
+
|
13380 |
+
if return_changed_events_count:
|
13381 |
+
return new_list, ce_count
|
13382 |
+
|
13383 |
+
else:
|
13384 |
+
return new_list
|
13385 |
+
|
13386 |
+
###################################################################################
|
13387 |
+
|
13388 |
+
def find_deepest_midi_dirs(roots,
|
13389 |
+
marker_file="midi_score.mid",
|
13390 |
+
suffixes=None,
|
13391 |
+
randomize=False,
|
13392 |
+
seed=None,
|
13393 |
+
verbose=False
|
13394 |
+
):
|
13395 |
+
|
13396 |
+
try:
|
13397 |
+
iter(roots)
|
13398 |
+
if isinstance(roots, (str, Path)):
|
13399 |
+
root_list = [roots]
|
13400 |
+
else:
|
13401 |
+
root_list = list(roots)
|
13402 |
+
|
13403 |
+
except TypeError:
|
13404 |
+
root_list = [roots]
|
13405 |
+
|
13406 |
+
if isinstance(marker_file, (list, tuple)):
|
13407 |
+
patterns = [p.lower() for p in marker_file if p]
|
13408 |
+
|
13409 |
+
else:
|
13410 |
+
patterns = [marker_file.lower()] if marker_file else []
|
13411 |
+
|
13412 |
+
allowed = {s.lower() for s in (suffixes or ['.mid', '.midi', '.kar'])}
|
13413 |
+
|
13414 |
+
if verbose:
|
13415 |
+
print("Settings:")
|
13416 |
+
print(" Roots:", [str(r) for r in root_list])
|
13417 |
+
print(" Marker patterns:", patterns or "<no marker filter>")
|
13418 |
+
print(" Allowed suffixes:", allowed)
|
13419 |
+
print(f" Randomize={randomize}, Seed={seed}")
|
13420 |
+
|
13421 |
+
results = defaultdict(list)
|
13422 |
+
rng = random.Random(seed)
|
13423 |
+
|
13424 |
+
for root in root_list:
|
13425 |
+
|
13426 |
+
root_path = Path(root)
|
13427 |
+
|
13428 |
+
if not root_path.is_dir():
|
13429 |
+
print(f"Warning: '{root_path}' is not a valid directory, skipping.")
|
13430 |
+
continue
|
13431 |
+
|
13432 |
+
if verbose:
|
13433 |
+
print(f"\nScanning root: {str(root_path)}")
|
13434 |
+
|
13435 |
+
all_dirs = list(root_path.rglob("*"))
|
13436 |
+
dirs_iter = tqdm.tqdm(all_dirs, desc=f"Dirs in {root_path.name}", disable=not verbose)
|
13437 |
+
|
13438 |
+
for dirpath in dirs_iter:
|
13439 |
+
if not dirpath.is_dir():
|
13440 |
+
continue
|
13441 |
+
|
13442 |
+
children = list(dirpath.iterdir())
|
13443 |
+
if any(child.is_dir() for child in children):
|
13444 |
+
if verbose:
|
13445 |
+
print(f"Skipping non-leaf: {str(dirpath)}")
|
13446 |
+
continue
|
13447 |
+
|
13448 |
+
files = [f for f in children if f.is_file()]
|
13449 |
+
names = [f.name.lower() for f in files]
|
13450 |
+
|
13451 |
+
if patterns:
|
13452 |
+
matched = any(fnmatch(name, pat) for name in names for pat in patterns)
|
13453 |
+
if not matched:
|
13454 |
+
if verbose:
|
13455 |
+
print(f"No marker in: {str(dirpath)}")
|
13456 |
+
continue
|
13457 |
+
|
13458 |
+
if verbose:
|
13459 |
+
print(f"Marker found in: {str(dirpath)}")
|
13460 |
+
|
13461 |
+
else:
|
13462 |
+
if verbose:
|
13463 |
+
print(f"Including leaf (no marker): {str(dirpath)}")
|
13464 |
+
|
13465 |
+
for f in files:
|
13466 |
+
if f.suffix.lower() in allowed:
|
13467 |
+
results[str(dirpath)].append(str(f))
|
13468 |
+
|
13469 |
+
if verbose:
|
13470 |
+
print(f" Collected: {f.name}")
|
13471 |
+
|
13472 |
+
all_leaves = list(results.keys())
|
13473 |
+
if randomize:
|
13474 |
+
if verbose:
|
13475 |
+
print("\nShuffling leaf directories")
|
13476 |
+
|
13477 |
+
rng.shuffle(all_leaves)
|
13478 |
+
|
13479 |
+
else:
|
13480 |
+
all_leaves.sort()
|
13481 |
+
|
13482 |
+
final_dict = {}
|
13483 |
+
|
13484 |
+
for leaf in all_leaves:
|
13485 |
+
file_list = results[leaf][:]
|
13486 |
+
if randomize:
|
13487 |
+
if verbose:
|
13488 |
+
print(f"Shuffling files in: {leaf}")
|
13489 |
+
|
13490 |
+
rng.shuffle(file_list)
|
13491 |
+
|
13492 |
+
else:
|
13493 |
+
file_list.sort()
|
13494 |
+
|
13495 |
+
final_dict[leaf] = file_list
|
13496 |
+
|
13497 |
+
if verbose:
|
13498 |
+
print("\nScan complete. Found directories:")
|
13499 |
+
for d, fl in final_dict.items():
|
13500 |
+
print(f" {d} -> {len(fl)} files")
|
13501 |
+
|
13502 |
+
return final_dict
|
13503 |
+
|
13504 |
+
###################################################################################
|
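Usage sketch for find_deepest_midi_dirs() (illustrative; './MIDI-Dataset/' is a hypothetical root):

    leaf_dirs = find_deepest_midi_dirs(['./MIDI-Dataset/'],
                                       marker_file='*.mid',   # fnmatch pattern(s) a leaf dir must contain
                                       randomize=True,
                                       seed=0,
                                       verbose=False)
    # returns {leaf_dir_path: [midi_file_paths]} for leaf directories only (dirs with no subdirectories)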
13505 |
+
|
13506 |
+
PERCUSSION_GROUPS = {
|
13507 |
+
|
13508 |
+
1: { # Bass Drums
|
13509 |
+
35: 'Acoustic Bass Drum',
|
13510 |
+
36: 'Bass Drum 1',
|
13511 |
+
},
|
13512 |
+
2: { # Stick
|
13513 |
+
37: 'Side Stick',
|
13514 |
+
},
|
13515 |
+
3: { # Snares
|
13516 |
+
38: 'Acoustic Snare',
|
13517 |
+
40: 'Electric Snare',
|
13518 |
+
},
|
13519 |
+
4: { # Claps
|
13520 |
+
39: 'Hand Clap',
|
13521 |
+
},
|
13522 |
+
5: { # Floor Toms
|
13523 |
+
41: 'Low Floor Tom',
|
13524 |
+
43: 'High Floor Tom',
|
13525 |
+
},
|
13526 |
+
6: { # Hi-Hats
|
13527 |
+
42: 'Closed Hi-Hat',
|
13528 |
+
44: 'Pedal Hi-Hat',
|
13529 |
+
46: 'Open Hi-Hat',
|
13530 |
+
},
|
13531 |
+
7: { # Toms
|
13532 |
+
45: 'Low Tom',
|
13533 |
+
47: 'Low-Mid Tom',
|
13534 |
+
48: 'Hi-Mid Tom',
|
13535 |
+
50: 'High Tom',
|
13536 |
+
},
|
13537 |
+
8: { # Cymbals
|
13538 |
+
49: 'Crash Cymbal 1',
|
13539 |
+
51: 'Ride Cymbal 1',
|
13540 |
+
52: 'Chinese Cymbal',
|
13541 |
+
55: 'Splash Cymbal',
|
13542 |
+
57: 'Crash Cymbal 2',
|
13543 |
+
59: 'Ride Cymbal 2',
|
13544 |
+
},
|
13545 |
+
9: { # Bells
|
13546 |
+
53: 'Ride Bell',
|
13547 |
+
},
|
13548 |
+
10: { # Tambourine
|
13549 |
+
54: 'Tambourine',
|
13550 |
+
},
|
13551 |
+
11: { # Cowbell
|
13552 |
+
56: 'Cowbell',
|
13553 |
+
},
|
13554 |
+
12: { # Vibraslap
|
13555 |
+
58: 'Vibraslap',
|
13556 |
+
},
|
13557 |
+
13: { # Bongos
|
13558 |
+
60: 'Hi Bongo',
|
13559 |
+
61: 'Low Bongo',
|
13560 |
+
},
|
13561 |
+
14: { # Congas
|
13562 |
+
62: 'Mute Hi Conga',
|
13563 |
+
63: 'Open Hi Conga',
|
13564 |
+
64: 'Low Conga',
|
13565 |
+
},
|
13566 |
+
15: { # Timbales
|
13567 |
+
65: 'High Timbale',
|
13568 |
+
66: 'Low Timbale',
|
13569 |
+
},
|
13570 |
+
16: { # Agogô
|
13571 |
+
67: 'High Agogo',
|
13572 |
+
68: 'Low Agogo',
|
13573 |
+
},
|
13574 |
+
17: { # Cabasa
|
13575 |
+
69: 'Cabasa',
|
13576 |
+
},
|
13577 |
+
18: { # Maracas
|
13578 |
+
70: 'Maracas',
|
13579 |
+
},
|
13580 |
+
19: { # Whistles
|
13581 |
+
71: 'Short Whistle',
|
13582 |
+
72: 'Long Whistle',
|
13583 |
+
},
|
13584 |
+
20: { # Guiros
|
13585 |
+
73: 'Short Guiro',
|
13586 |
+
74: 'Long Guiro',
|
13587 |
+
},
|
13588 |
+
21: { # Claves
|
13589 |
+
75: 'Claves',
|
13590 |
+
},
|
13591 |
+
22: { # Wood Blocks
|
13592 |
+
76: 'Hi Wood Block',
|
13593 |
+
77: 'Low Wood Block',
|
13594 |
+
},
|
13595 |
+
23: { # Cuica
|
13596 |
+
78: 'Mute Cuica',
|
13597 |
+
79: 'Open Cuica',
|
13598 |
+
},
|
13599 |
+
24: { # Triangles
|
13600 |
+
80: 'Mute Triangle',
|
13601 |
+
81: 'Open Triangle',
|
13602 |
+
},
|
13603 |
+
}
|
13604 |
+
|
13605 |
+
###################################################################################
|
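A small derived lookup for the new PERCUSSION_GROUPS table (illustrative):

    # reverse map: General MIDI drum pitch -> percussion group number
    PITCH_TO_PERC_GROUP = {pitch: group
                           for group, pitches in PERCUSSION_GROUPS.items()
                           for pitch in pitches}

    PITCH_TO_PERC_GROUP[42]   # -> 6 (Hi-Hats)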
13606 |
+
|
13607 |
+
print('Module loaded!')
|
13608 |
+
print('=' * 70)
|
13609 |
+
print('Enjoy! :)')
|
13610 |
+
print('=' * 70)
|
13611 |
+
|
13612 |
###################################################################################
|
13613 |
# This is the end of the TMIDI X Python module
|
13614 |
###################################################################################
|