markdown (stringlengths 0–1.02M) | code (stringlengths 0–832k) | output (stringlengths 0–1.02M) | license (stringlengths 3–36) | path (stringlengths 6–265) | repo_name (stringlengths 6–127)
---|---|---|---|---|---
Synthesis | synth_id_list = file_id_list[-dur_test_file_number:]
input_lab_file_list = orig_lab_file_list[-dur_test_file_number:]
synth_dir = os.path.join(exp_dir, 'synth')
if not os.path.exists(synth_dir):
os.makedirs(synth_dir)
wav_dir = os.path.join(synth_dir, 'wav')
if not os.path.exists(wav_dir):
os.makedirs(wav_dir)
synth_inter_dir = os.path.join(synth_dir, 'inter')
if not os.path.exists(synth_inter_dir):
os.makedirs(synth_inter_dir)
synth_dur_lab_norm_dir = os.path.join(synth_inter_dir, 'dur_lab_norm')
if not os.path.exists(synth_dur_lab_norm_dir):
os.makedirs(synth_dur_lab_norm_dir)
synth_dur_cmp_pred_dir = os.path.join(synth_inter_dir, 'dur_cmp_pred')
if not os.path.exists(synth_dur_cmp_pred_dir):
os.makedirs(synth_dur_cmp_pred_dir)
synth_dur_lab_file_list = gen_file_list(dur_lab_dir, synth_id_list, 'labbin')
synth_dur_lab_norm_file_list = gen_file_list(synth_dur_lab_norm_dir, synth_id_list, 'labbin')
synth_dur_cmp_pred_file_list = gen_file_list(synth_dur_cmp_pred_dir, synth_id_list, 'cmp')
orig_lab_file_list = get_file_list_of_dir(lab_dir)
file_id_list = get_file_id_list(orig_lab_file_list)
dur_lab_file_list = gen_file_list(dur_lab_dir, file_id_list, 'labbin')
dur_lab_no_silence_file_list = gen_file_list(dur_lab_no_silence_dir, file_id_list, 'labbin')
dur_lab_no_silence_norm_file_list = gen_file_list(dur_lab_no_silence_norm_dir, file_id_list, 'labbin')
dur_dur_file_list = gen_file_list(dur_dur_dir, file_id_list, 'dur')
dur_cmp_file_list = gen_file_list(dur_cmp_dir, file_id_list, 'cmp')
dur_cmp_no_silence_file_list = gen_file_list(dur_cmp_no_silence_dir, file_id_list, 'cmp')
dur_cmp_no_silence_norm_file_list = gen_file_list(dur_cmp_no_silence_norm_dir, file_id_list, 'cmp')
synth_acou_lab_norm_dir = os.path.join(synth_inter_dir, 'acou_lab_norm')
if not os.path.exists(synth_acou_lab_norm_dir):
os.makedirs(synth_acou_lab_norm_dir)
synth_acou_cmp_pred_dir = os.path.join(synth_inter_dir, 'acou_cmp_pred')
if not os.path.exists(synth_acou_cmp_pred_dir):
os.makedirs(synth_acou_cmp_pred_dir)
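# note (from the paths above): synth_acou_lab_file_list and
# synth_acou_lab_norm_file_list below will point at the same files, so the
# acoustic label normalisation later in this notebook effectively happens in place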
synth_acou_lab_file_list = gen_file_list(synth_acou_lab_norm_dir, synth_id_list, 'labbin')
synth_acou_lab_norm_file_list = gen_file_list(synth_acou_lab_norm_dir, synth_id_list, 'labbin')
synth_acou_cmp_pred_file_list = gen_file_list(synth_acou_cmp_pred_dir, synth_id_list, 'cmp')
| _____no_output_____ | Apache-2.0 | egs/singing_synthesis/s3/run.ipynb | YongliangHe/SingingVoiceSynthesis |
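gen_file_list and get_file_id_list are helper functions defined outside this notebook. A minimal sketch of what gen_file_list plausibly does, shown only for readability (an assumption, not the project's actual implementation):

import os

def gen_file_list(directory, file_id_list, extension):
    # build one '<directory>/<file_id>.<extension>' path per utterance id
    return [os.path.join(directory, '%s.%s' % (file_id, extension))
            for file_id in file_id_list]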
Normalize label files for duration model (silence not removed) | synth_dur_lab_normaliser = MinMaxNormalisation(feature_dimension = dur_lab_dim, min_value = 0.01, max_value = 0.99)
synth_dur_lab_normaliser.load_min_max_values(dur_lab_norm_file)
synth_dur_lab_normaliser.normalise_data(synth_dur_lab_file_list, synth_dur_lab_norm_file_list)
tmp1, num1 = io_funcs.load_binary_file_frame(synth_dur_lab_norm_file_list[0], 368)
tmp2, num2 = io_funcs.load_binary_file_frame(synth_dur_lab_file_list[0], 368)
print(synth_dur_lab_norm_file_list[0])
print('num1: ', str(num1))
print('num2: ', str(num2))
# print(tmp1[0: 10, :])
# print(synth_dur_lab_norm_file_list[0]) | /home/yongliang/third_party/merlin/egs/singing_synthesis/s3/exp/synth/inter/dur_lab_norm/nitech_jp_song070_f001_070.labbin
num1: 55
num2: 55
| Apache-2.0 | egs/singing_synthesis/s3/run.ipynb | YongliangHe/SingingVoiceSynthesis |
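The MinMaxNormalisation step above rescales every label dimension into the range [0.01, 0.99]. A short NumPy sketch of that mapping, illustrative only (Merlin's MinMaxNormalisation computes and stores the per-dimension statistics itself; data_min/data_max here stand in for the values loaded from dur_lab_norm_file):

import numpy as np

def min_max_normalise(x, data_min, data_max, min_value=0.01, max_value=0.99):
    # x: (num_frames, num_dims); data_min/data_max: per-dimension statistics
    scale = (max_value - min_value) / (data_max - data_min)
    return min_value + (x - data_min) * scale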
Predict durations | synth_duration_model = DurationModel(dur_lab_dim, dur_cmp_dim)
synth_duration_model.load_state_dict(torch.load(dur_nn_mdl_file))
synth_duration_model.eval()
lab, num_frame = io_funcs.load_binary_file_frame(synth_dur_lab_norm_file_list[0], 368)
lab = torch.from_numpy(lab)
lab = lab[None, :, :]
dur_cmp_pred = synth_duration_model(lab)
dur_cmp_pred = dur_cmp_pred.detach().numpy()[0]
dur_cmp_pred | _____no_output_____ | Apache-2.0 | egs/singing_synthesis/s3/run.ipynb | YongliangHe/SingingVoiceSynthesis |
Denormalization | fid = open(dur_cmp_norm_file, 'rb')
dur_cmp_norm_info = np.fromfile(fid, dtype=np.float32)
fid.close()
dur_cmp_norm_info = dur_cmp_norm_info.reshape(2, -1)
dur_cmp_mean = dur_cmp_norm_info[0, ]
dur_cmp_std = dur_cmp_norm_info[1, ]
print(synth_dur_cmp_pred_file_list[0])
io_funcs.array_to_binary_file(dur_cmp_pred, synth_dur_cmp_pred_file_list[0])
print(dur_cmp_mean)
print(dur_cmp_std)
synth_dur_denormaliser = MeanVarianceNorm(feature_dimension=dur_cmp_dim)
synth_dur_denormaliser.feature_denormalisation(synth_dur_cmp_pred_file_list, synth_dur_cmp_pred_file_list, dur_cmp_mean, dur_cmp_std)
dur_cmp_pred, _ = io_funcs.load_binary_file_frame(synth_dur_cmp_pred_file_list[0], 5)
print(dur_cmp_pred[:10, :]) | [[ 5.551061 15.752278 14.200991 10.206416 5.2901797]
[ 5.5533843 15.87799 14.0966015 10.179635 5.295764 ]
[ 5.550715 15.884184 14.2144 10.24537 5.2878685]
[ 5.5525837 15.890511 14.132585 10.232053 5.256593 ]
[ 5.543806 15.852673 14.130717 10.249583 5.27523 ]
[ 5.546036 15.958624 14.043604 10.331711 5.2914195]
[ 5.55692 16.027588 14.147421 10.241272 5.3083267]
[ 5.5535684 15.856915 14.245941 10.315426 5.292195 ]
[ 5.5677176 15.877155 14.226478 10.230915 5.242901 ]
[ 5.556892 15.876377 14.209519 10.219336 5.262225 ]]
| Apache-2.0 | egs/singing_synthesis/s3/run.ipynb | YongliangHe/SingingVoiceSynthesis |
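feature_denormalisation above inverts the mean/variance (z-score) normalisation applied during training. The underlying math, as a hedged one-function sketch rather than the library's code:

def mean_variance_denormalise(x_norm, mean, std):
    # per-dimension inverse of z-scoring: x = x_norm * std + mean
    return x_norm * std + mean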
Change original label files with newly predicted durations | from frontend.parameter_generation import ParameterGeneration
from frontend.label_modifier import HTSLabelModification
synth_dur_extention_dict = {'dur': '.dur'}
synth_dur_out_dimension_dict = {'dur': 5}
synth_dur_cmp_dim = 5
synth_dur_list = [os.path.splitext(synth_dur_cmp_pred_file_list[0])[0] + synth_dur_extention_dict['dur']]
synth_lab_list = [os.path.splitext(synth_dur_cmp_pred_file_list[0])[0] + '.lab']
print(synth_dur_list)
print(synth_lab_list)
synth_decomposer = ParameterGeneration(['mgc', 'bap', 'lf0'])
synth_decomposer.duration_decomposition(synth_dur_cmp_pred_file_list, synth_dur_cmp_dim, synth_dur_out_dimension_dict, synth_dur_extention_dict)
synth_label_modifier = HTSLabelModification(silence_pattern = silence_pattern)
synth_label_modifier.modify_duration_labels(input_lab_file_list, synth_dur_list, synth_lab_list) | ['/home/yongliang/third_party/merlin/egs/singing_synthesis/s3/exp/synth/inter/dur_cmp_pred/nitech_jp_song070_f001_070.dur']
['/home/yongliang/third_party/merlin/egs/singing_synthesis/s3/exp/synth/inter/dur_cmp_pred/nitech_jp_song070_f001_070.lab']
| Apache-2.0 | egs/singing_synthesis/s3/run.ipynb | YongliangHe/SingingVoiceSynthesis |
Normalize label files for acoustic model (silence not removed) | synth_acou_lab_normaliser = HTSLabelNormalisation(question, add_frame_features=True, subphone_feats='full')
synth_acou_lab_normaliser.perform_normalisation(synth_lab_list, synth_acou_lab_file_list)
synth_acou_lab_normaliser = MinMaxNormalisation(feature_dimension = acou_lab_dim, min_value = 0.01, max_value = 0.99)
synth_acou_lab_normaliser.load_min_max_values(acou_lab_norm_file)
synth_acou_lab_normaliser.normalise_data(synth_acou_lab_file_list, synth_acou_lab_norm_file_list) | _____no_output_____ | Apache-2.0 | egs/singing_synthesis/s3/run.ipynb | YongliangHe/SingingVoiceSynthesis |
Predict acoustic features | synth_acoustic_model = DurationModel(acou_lab_dim, acou_cmp_dim)
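# note: the acoustic predictor reuses the DurationModel network class,
# simply sized with the acoustic input/output dimensions (377 -> 187)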
synth_acoustic_model.load_state_dict(torch.load(acou_nn_mdl_file))
synth_acoustic_model.eval()
lab, num_frame = io_funcs.load_binary_file_frame(synth_acou_lab_norm_file_list[0], 377)
lab = torch.from_numpy(lab)
lab = lab[None, :, :]
acou_cmp_pred = synth_acoustic_model(lab)
acou_cmp_pred = acou_cmp_pred.detach().numpy()[0]
acou_cmp_pred.shape | _____no_output_____ | Apache-2.0 | egs/singing_synthesis/s3/run.ipynb | YongliangHe/SingingVoiceSynthesis |
Denormalization | fid = open(acou_cmp_norm_file, 'rb')
acou_cmp_norm_info = np.fromfile(fid, dtype=np.float32)
fid.close()
acou_cmp_norm_info = acou_cmp_norm_info.reshape(2, -1)
acou_cmp_mean = acou_cmp_norm_info[0, ]
acou_cmp_std = acou_cmp_norm_info[1, ]
print(synth_acou_cmp_pred_file_list[0])
io_funcs.array_to_binary_file(acou_cmp_pred, synth_acou_cmp_pred_file_list[0])
print(acou_cmp_mean)
print(acou_cmp_std)
synth_acou_denormaliser = MeanVarianceNorm(feature_dimension=acou_cmp_dim)
synth_acou_denormaliser.feature_denormalisation(synth_acou_cmp_pred_file_list, synth_acou_cmp_pred_file_list, acou_cmp_mean, acou_cmp_std)
dur_cmp_pred, _ = io_funcs.load_binary_file_frame(synth_acou_cmp_pred_file_list[0], 187)
print(dur_cmp_pred[:10, :10])
synth_acou_extention_dict = {'lf0': '.lf0', 'mgc': '.mgc', 'bap': '.bap'}
synth_acou_out_dimension_dict = {'lf0': 3, 'mgc': 180, 'bap': 3, 'vuv': 1}
synth_acou_cmp_dim = 187
# synth_dur_list = [os.path.splitext(synth_dur_cmp_pred_file_list[0])[0] + synth_dur_extention_dict['dur']]
# synth_lab_list = [os.path.splitext(synth_dur_cmp_pred_file_list[0])[0] + '.lab']
# print(synth_dur_list)
# print(synth_lab_list)
synth_decomposer = ParameterGeneration(['mgc', 'bap', 'lf0'])
synth_decomposer.acoustic_decomposition(synth_acou_cmp_pred_file_list, synth_acou_cmp_dim, synth_acou_out_dimension_dict, synth_acou_extention_dict, acou_variance_file_dict,True)
## copy features to wav
for file in synth_acou_cmp_pred_file_list:
base = os.path.splitext(file)[0]
for ext in (['.mgc', '.bap', '.lf0']):
feat_file = base + ext
copy(feat_file, wav_dir) | _____no_output_____ | Apache-2.0 | egs/singing_synthesis/s3/run.ipynb | YongliangHe/SingingVoiceSynthesis |
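The 187 acoustic dimensions decomposed above correspond to the streams in synth_acou_out_dimension_dict: 180 (mgc) + 3 (lf0) + 3 (bap) + 1 (vuv) = 187. A hedged slicing sketch, assuming the mgc/lf0/vuv/bap stream ordering suggested by the norm-file name used in the TEST section below; the actual ordering and the parameter generation from delta features are handled inside ParameterGeneration:

def split_cmp(cmp_frames):
    # cmp_frames: (num_frames, 187); the stream order here is an assumption
    return {'mgc': cmp_frames[:, 0:180],
            'lf0': cmp_frames[:, 180:183],
            'vuv': cmp_frames[:, 183:184],
            'bap': cmp_frames[:, 184:187]}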
Synthesize wav | def run_process(args,log=True):
logger = logging.getLogger("subprocess")
# a convenience function instead of calling subprocess directly
# this is so that we can do some logging and catch exceptions
# we don't always want debug logging, even when logging level is DEBUG
# especially if calling a lot of external functions
# so we can disable it by force, where necessary
if log:
logger.debug('%s' % args)
try:
# the following is only available in later versions of Python
# rval = subprocess.check_output(args)
# bufsize=-1 enables buffering and may improve performance compared to the unbuffered case
p = subprocess.Popen(args, bufsize=-1, shell=True,
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
close_fds=True, env=os.environ)
# better to use communicate() than read() and write() - this avoids deadlocks
(stdoutdata, stderrdata) = p.communicate()
if p.returncode != 0:
# for critical things, we always log, even if log==False
logger.critical('exit status %d' % p.returncode )
logger.critical(' for command: %s' % args )
logger.critical(' stderr: %s' % stderrdata )
logger.critical(' stdout: %s' % stdoutdata )
raise OSError
return (stdoutdata, stderrdata)
except subprocess.CalledProcessError as e:
# not sure under what circumstances this exception would be raised in Python 2.6
logger.critical('exit status %d' % e.returncode )
logger.critical(' for command: %s' % args )
# not sure if there is an 'output' attribute under 2.6 ? still need to test this...
logger.critical(' output: %s' % e.output )
raise
except ValueError:
logger.critical('ValueError for %s' % args )
raise
except OSError:
logger.critical('OSError for %s' % args )
raise
except KeyboardInterrupt:
logger.critical('KeyboardInterrupt during %s' % args )
try:
# try to kill the subprocess, if it exists
p.kill()
except UnboundLocalError:
# this means that p was undefined at the moment of the keyboard interrupt
# (and we do nothing)
pass
raise KeyboardInterrupt
import pickle
with open('/home/yongliang/third_party/merlin/egs/singing_synthesis/s3/exp/synth/test/cfg.pkl', 'rb') as f:
cfg = pickle.load(f)
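# note: the SPTK and WORLD dictionaries of tool paths used below are assumed
# to be defined elsewhere (e.g. in earlier cells or derived from this cfg)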
def wavgen_straight_type_vocoder(gen_dir, file_id_list, logger):
'''
Waveform generation with STRAIGHT or WORLD vocoders.
(whose acoustic parameters are: mgc, bap, and lf0)
'''
pf_coef = 1.4
fw_coef = 0.58
co_coef = 511
fl_coef = 1024
mgc_dim = 60
fw_alpha = 0.58
sr = 16000
fl = 1024
counter=1
max_counter = len(file_id_list)
for filename in file_id_list:
logger.info('creating waveform for %4d of %4d: %s' % (counter,max_counter,filename) )
counter=counter+1
base = filename
files = {'sp' : base + '.sp',
'mgc' : base + '.mgc',
'f0' : base + '.f0',
'lf0' : base + '.lf0',
'ap' : base + '.ap',
'bap' : base + '.bap',
'wav' : base + '.wav'}
mgc_file_name = files['mgc']
bap_file_name = files['bap']
cur_dir = os.getcwd()
os.chdir(gen_dir)
mgc_file_name = files['mgc']+'_p_mgc'
post_filter(files['mgc'], mgc_file_name, mgc_dim, pf_coef, fw_coef, co_coef, fl_coef, gen_dir, SPTK)
run_process('{sopr} -magic -1.0E+10 -EXP -MAGIC 0.0 {lf0} | {x2x} +fd > {f0}'.format(sopr=SPTK['SOPR'], lf0=files['lf0'], x2x=SPTK['X2X'], f0=files['f0']))
run_process('{sopr} -c 0 {bap} | {x2x} +fd > {ap}'.format(sopr=SPTK['SOPR'],bap=files['bap'],x2x=SPTK['X2X'],ap=files['ap']))
run_process('{mgc2sp} -a {alpha} -g 0 -m {order} -l {fl} -o 2 {mgc} | {sopr} -d 32768.0 -P | {x2x} +fd > {sp}'
.format(mgc2sp=SPTK['MGC2SP'], alpha=fw_alpha, order=mgc_dim-1, fl=fl, mgc=mgc_file_name, sopr=SPTK['SOPR'], x2x=SPTK['X2X'], sp=files['sp']))
run_process('{synworld} {fl} {sr} {f0} {sp} {ap} {wav}'
.format(synworld=WORLD['SYNTHESIS'], fl=fl, sr=sr, f0=files['f0'], sp=files['sp'], ap=files['ap'], wav=files['wav']))
# run_process('rm -f {ap} {sp} {f0}'.format(ap=files['ap'],sp=files['sp'],f0=files['f0']))
os.chdir(cur_dir)
def post_filter(mgc_file_in, mgc_file_out, mgc_dim, pf_coef, fw_coef, co_coef, fl_coef, gen_dir, SPTK):
line = "echo 1 1 "
for i in range(2, mgc_dim):
line = line + str(pf_coef) + " "
run_process('{line} | {x2x} +af > {weight}'
.format(line=line, x2x=SPTK['X2X'], weight=os.path.join(gen_dir, 'weight')))
run_process('{freqt} -m {order} -a {fw} -M {co} -A 0 < {mgc} | {c2acr} -m {co} -M 0 -l {fl} > {base_r0}'
.format(freqt=SPTK['FREQT'], order=mgc_dim-1, fw=fw_coef, co=co_coef, mgc=mgc_file_in, c2acr=SPTK['C2ACR'], fl=fl_coef, base_r0=mgc_file_in+'_r0'))
run_process('{vopr} -m -n {order} < {mgc} {weight} | {freqt} -m {order} -a {fw} -M {co} -A 0 | {c2acr} -m {co} -M 0 -l {fl} > {base_p_r0}'
.format(vopr=SPTK['VOPR'], order=mgc_dim-1, mgc=mgc_file_in, weight=os.path.join(gen_dir, 'weight'),
freqt=SPTK['FREQT'], fw=fw_coef, co=co_coef,
c2acr=SPTK['C2ACR'], fl=fl_coef, base_p_r0=mgc_file_in+'_p_r0'))
run_process('{vopr} -m -n {order} < {mgc} {weight} | {mc2b} -m {order} -a {fw} | {bcp} -n {order} -s 0 -e 0 > {base_b0}'
.format(vopr=SPTK['VOPR'], order=mgc_dim-1, mgc=mgc_file_in, weight=os.path.join(gen_dir, 'weight'),
mc2b=SPTK['MC2B'], fw=fw_coef,
bcp=SPTK['BCP'], base_b0=mgc_file_in+'_b0'))
run_process('{vopr} -d < {base_r0} {base_p_r0} | {sopr} -LN -d 2 | {vopr} -a {base_b0} > {base_p_b0}'
.format(vopr=SPTK['VOPR'], base_r0=mgc_file_in+'_r0', base_p_r0=mgc_file_in+'_p_r0',
sopr=SPTK['SOPR'],
base_b0=mgc_file_in+'_b0', base_p_b0=mgc_file_in+'_p_b0'))
run_process('{vopr} -m -n {order} < {mgc} {weight} | {mc2b} -m {order} -a {fw} | {bcp} -n {order} -s 1 -e {order} | {merge} -n {order2} -s 0 -N 0 {base_p_b0} | {b2mc} -m {order} -a {fw} > {base_p_mgc}'
.format(vopr=SPTK['VOPR'], order=mgc_dim-1, mgc=mgc_file_in, weight=os.path.join(gen_dir, 'weight'),
mc2b=SPTK['MC2B'], fw=fw_coef,
bcp=SPTK['BCP'],
merge=SPTK['MERGE'], order2=mgc_dim-2, base_p_b0=mgc_file_in+'_p_b0',
b2mc=SPTK['B2MC'], base_p_mgc=mgc_file_out))
return
## copy features to wav
for file in synth_acou_cmp_pred_file_list:
base = os.path.splitext(file)[0]
for ext in (['.mgc', '.bap', '.lf0']):
feat_file = base + ext
copy(feat_file, wav_dir)
logger = logging.getLogger("wav_generation")
wavgen_straight_type_vocoder(wav_dir, synth_id_list, logger)
print(wav_dir)
print(synth_id_list) | /home/yongliang/third_party/merlin/egs/singing_synthesis/s3/exp/synth/wav
['nitech_jp_song070_f001_070']
| Apache-2.0 | egs/singing_synthesis/s3/run.ipynb | YongliangHe/SingingVoiceSynthesis |
TEST | my_cmp_norm_info_file = '/home/yongliang/third_party/merlin/egs/singing_synthesis/s3/exp/acoustic_model/inter/cmp_norm_187.dat'
ml_cmp_norm_info_file = '/home/yongliang/third_party/merlin/egs/singing_synthesis/s1/experiments/acoustic_model/inter_module/norm_info__mgc_lf0_vuv_bap_187_MVN.dat'
fid = open(my_cmp_norm_info_file, 'rb')
my_cmp_norm_info = np.fromfile(fid, dtype=np.float32)
fid.close()
my_cmp_norm_info = my_cmp_norm_info.reshape(2, -1)
my_cmp_mean = my_cmp_norm_info[0, ]
my_cmp_std = my_cmp_norm_info[1, ]
fid = open(ml_cmp_norm_info_file, 'rb')
ml_cmp_norm_info = np.fromfile(fid, dtype=np.float32)
fid.close()
ml_cmp_norm_info = ml_cmp_norm_info.reshape(2, -1)
ml_cmp_mean = ml_cmp_norm_info[0, ]
ml_cmp_std = ml_cmp_norm_info[1, ]
# .all() collapses each array to a single boolean, so comparing the two
# results does not test element-wise equality; use np.allclose instead
print(np.allclose(my_cmp_mean, ml_cmp_mean))
print(np.allclose(my_cmp_std, ml_cmp_std))
my_cmp_no_silence_norm_file = acou_cmp_no_silence_norm_file_list[0]
ml_cmp_no_silence_norm_file = '/home/yongliang/third_party/merlin/egs/singing_synthesis/s1/experiments/acoustic_model/inter_module/nn_norm_mgc_lf0_vuv_bap_187/nitech_jp_song070_f001_003.cmp'
my_cmp_no_silence_norm, my_n_frame = io_funcs.load_binary_file_frame(my_cmp_no_silence_norm_file, 187)
ml_cmp_no_silence_norm, ml_n_frame = io_funcs.load_binary_file_frame(ml_cmp_no_silence_norm_file, 187)
print(my_n_frame == ml_n_frame)
print(np.allclose(my_cmp_no_silence_norm, ml_cmp_no_silence_norm))  # element-wise comparison (was .all() == .all())
my_lab_no_silence_norm_file = acou_lab_no_silence_norm_file_list[0]
ml_lab_no_silence_norm_file = '/home/yongliang/third_party/merlin/egs/singing_synthesis/s1/experiments/acoustic_model/inter_module/nn_no_silence_lab_norm_377/nitech_jp_song070_f001_003.lab'
my_lab_no_silence_norm, my_n_frame = io_funcs.load_binary_file_frame(my_lab_no_silence_norm_file, 377)
ml_lab_no_silence_norm, ml_n_frame = io_funcs.load_binary_file_frame(ml_lab_no_silence_norm_file, 377)
print(my_n_frame == ml_n_frame)
print(np.allclose(my_lab_no_silence_norm, ml_lab_no_silence_norm))  # element-wise comparison (was .all() == .all())
test_cmp_no_silence_norm_file = '/home/yongliang/third_party/merlin/egs/singing_synthesis/s3/exp/acoustic_model/inter/cmp_no_silence_norm_187/nitech_jp_song070_f001_003.cmp'
test_cmp_file_list = [test_cmp_no_silence_norm_file]
test_cmp_no_silence_pred_file = '/home/yongliang/third_party/merlin/egs/singing_synthesis/s3/exp/synth/test/nitech_jp_song070_f001_003.cmp'
test_denorm_file_list = [test_cmp_no_silence_pred_file]
synth_acou_denormaliser = MeanVarianceNorm(feature_dimension=acou_cmp_dim)
synth_acou_denormaliser.feature_denormalisation(test_cmp_file_list, test_denorm_file_list, acou_cmp_mean, acou_cmp_std)
synth_decomposer.acoustic_decomposition(test_denorm_file_list, synth_acou_cmp_dim, synth_acou_out_dimension_dict, synth_acou_extention_dict, acou_variance_file_dict,True)
wavgen_straight_type_vocoder('/home/yongliang/third_party/merlin/egs/singing_synthesis/s3/exp/synth/test/', ['nitech_jp_song070_f001_003'], logger)
f0_file = os.path.join(wav_dir, 'nitech_jp_song070_f001_070.f0')
mgc_file = '/home/yongliang/third_party/merlin/egs/singing_synthesis/s3/exp/synth/inter/acou_cmp_pred/nitech_jp_song070_f001_070.mgc'
f0, n_frame = io_funcs.load_binary_file_frame(f0_file, 1)
mgc, n_frame2 = io_funcs.load_binary_file_frame(mgc_file, 60)
print(n_frame)
print(n_frame2) | _____no_output_____ | Apache-2.0 | egs/singing_synthesis/s3/run.ipynb | YongliangHe/SingingVoiceSynthesis |
Regiment Introduction: Special thanks to http://chrisalbon.com/ for sharing the dataset and materials. Step 1. Import the necessary libraries | import pandas as pd | _____no_output_____ | BSD-3-Clause | 03_Grouping/Regiment/Exercises_solutions.ipynb | fung991159/pandas_exercise |
Step 2. Create the DataFrame with the following values: | raw_data = {'regiment': ['Nighthawks', 'Nighthawks', 'Nighthawks', 'Nighthawks', 'Dragoons', 'Dragoons', 'Dragoons', 'Dragoons', 'Scouts', 'Scouts', 'Scouts', 'Scouts'],
'company': ['1st', '1st', '2nd', '2nd', '1st', '1st', '2nd', '2nd','1st', '1st', '2nd', '2nd'],
'name': ['Miller', 'Jacobson', 'Ali', 'Milner', 'Cooze', 'Jacon', 'Ryaner', 'Sone', 'Sloan', 'Piger', 'Riani', 'Ali'],
'preTestScore': [4, 24, 31, 2, 3, 4, 24, 31, 2, 3, 2, 3],
'postTestScore': [25, 94, 57, 62, 70, 25, 94, 57, 62, 70, 62, 70]} | _____no_output_____ | BSD-3-Clause | 03_Grouping/Regiment/Exercises_solutions.ipynb | fung991159/pandas_exercise |
Step 3. Assign it to a variable called regiment. Don't forget to name each column | regiment = pd.DataFrame(raw_data, columns = raw_data.keys())
regiment | _____no_output_____ | BSD-3-Clause | 03_Grouping/Regiment/Exercises_solutions.ipynb | fung991159/pandas_exercise |
Step 4. What is the mean preTestScore from the regiment Nighthawks? | regiment[regiment['regiment'] == 'Nighthawks'].groupby('regiment').mean() | _____no_output_____ | BSD-3-Clause | 03_Grouping/Regiment/Exercises_solutions.ipynb | fung991159/pandas_exercise |
Step 5. Present general statistics by company | regiment.groupby('company').describe() | _____no_output_____ | BSD-3-Clause | 03_Grouping/Regiment/Exercises_solutions.ipynb | fung991159/pandas_exercise |
Step 6. What is the mean of each company's preTestScore? | regiment.groupby('company').preTestScore.mean() | _____no_output_____ | BSD-3-Clause | 03_Grouping/Regiment/Exercises_solutions.ipynb | fung991159/pandas_exercise |
Step 7. Present the mean preTestScores grouped by regiment and company | regiment.groupby(['regiment', 'company']).preTestScore.mean() | _____no_output_____ | BSD-3-Clause | 03_Grouping/Regiment/Exercises_solutions.ipynb | fung991159/pandas_exercise |
Step 8. Present the mean preTestScores grouped by regiment and company without hierarchical indexing | regiment.groupby(['regiment', 'company']).preTestScore.mean().unstack() | _____no_output_____ | BSD-3-Clause | 03_Grouping/Regiment/Exercises_solutions.ipynb | fung991159/pandas_exercise |
Step 9. Group the entire dataframe by regiment and company | regiment.groupby(['regiment', 'company']).mean() | _____no_output_____ | BSD-3-Clause | 03_Grouping/Regiment/Exercises_solutions.ipynb | fung991159/pandas_exercise |
Step 10. What is the number of observations in each regiment and company? | regiment.groupby(['company', 'regiment']).size() | _____no_output_____ | BSD-3-Clause | 03_Grouping/Regiment/Exercises_solutions.ipynb | fung991159/pandas_exercise |
Step 11. Iterate over a group and print the name and the whole data from the regiment | # Group the dataframe by regiment, and for each regiment,
for name, group in regiment.groupby('regiment'):
# print the name of the regiment
print(name)
# print the data of that regiment
print(group) | Dragoons
regiment company name preTestScore postTestScore
4 Dragoons 1st Cooze 3 70
5 Dragoons 1st Jacon 4 25
6 Dragoons 2nd Ryaner 24 94
7 Dragoons 2nd Sone 31 57
Nighthawks
regiment company name preTestScore postTestScore
0 Nighthawks 1st Miller 4 25
1 Nighthawks 1st Jacobson 24 94
2 Nighthawks 2nd Ali 31 57
3 Nighthawks 2nd Milner 2 62
Scouts
regiment company name preTestScore postTestScore
8 Scouts 1st Sloan 2 62
9 Scouts 1st Piger 3 70
10 Scouts 2nd Riani 2 62
11 Scouts 2nd Ali 3 70
| BSD-3-Clause | 03_Grouping/Regiment/Exercises_solutions.ipynb | fung991159/pandas_exercise |
Introduction | import os
import requests
import pandas as pd
from paths import * | _____no_output_____ | MIT | test/1_get_infos.ipynb | gaemapiracicaba/norma_pl_251-21 |
Function | # Read the CSV file with the municipality names
df = pd.read_csv(
os.path.join(input_path, 'tab_pl251.csv'),
)
# Drop the column
df.drop(['municipio_nome'], axis=1, inplace=True)
print(list(set(df['unidade'])))
df
# Read the CSV file with the municipality names
df_mun = pd.read_csv(
'https://raw.githubusercontent.com/michelmetran/sp/main/data/tabs/tab_municipio_nome.csv',
usecols=['id_municipio', 'municipio_nome']
)
# Merge
df = pd.merge(
df_mun,
df,
how='left',
left_on='id_municipio',
right_on='id_municipio'
)
# Results
df.head()
# Write the table
df.to_csv(
os.path.join(tabs_path, 'tab_municipio_pl251.csv'),
index=False,
) | _____no_output_____ | MIT | test/1_get_infos.ipynb | gaemapiracicaba/norma_pl_251-21 |
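For readers new to pd.merge, a tiny self-contained left-join example mirroring the call above (toy data, purely illustrative):

import pandas as pd

left = pd.DataFrame({'id_municipio': [1, 2, 3], 'municipio_nome': ['A', 'B', 'C']})
right = pd.DataFrame({'id_municipio': [1, 3], 'unidade': ['X', 'Y']})
# how='left' keeps every row of `left`; unmatched rows get NaN in `unidade`
print(pd.merge(left, right, how='left', on='id_municipio'))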
Tests (Chapt 11 conditions: seed 42, elu, learning rate = 0.01, he init, RGB normalization, BN, momentum = 0.9, AdamOpt, 5 layers, 100 neurons per layer, 1000 epochs, batch size 20):
- With Chapt 11 conditions & 2 outputs: 49.70%
- Without BN: 49.80%
- Without BN or RGB normalization: 50.00%
- Without normalization and with Glorot Normal init (instead of He init): 50.00%
- With He init and learning rate = 0.05: 50.00%
- With He init, RGB normalization, and learning rate = 0.05: 54.40%
- With BN again: 50.00%
- Without BN and with 1140 outputs: 50.20%
- Same as Chapt 11 with GradientDescent instead of AdamOpt and without BN: 58.50%
- With learning rate = 0.05: 59.20%
- Same as Chapt 11 with GradientDescent and momentum = 0.8: 58.90%
- With batch size 5: 64.20%
- Chapt 11 + GD + batch size 5 + 3 layers instead of 5: 66.20% | with tf.Session() as sess:
saver.restore(sess, "./mini_project_final.ckpt") # or better, use save_path
X_new_scaled = X_test[:20]
Z = logits.eval(feed_dict={X: X_new_scaled})
y_pred = np.argmax(Z, axis=1)
from tensorflow_graph_in_jupyter import show_graph
show_graph(tf.get_default_graph()) | _____no_output_____ | MIT | mini_project.ipynb | prathusb/TensorFlow_NNs |
"Working with NumPy"> "Looking at Bangor preciptiation data using only NumPy and matplotlib."- toc: false- badges: true- comments: true- author: Antonio Jurlina- categories: [learning, python] | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
os.chdir('/Users/antoniojurlina/Projects/learning_python/data/')
csv = "BangorPrecip.csv"
bangorprecip = pd.read_csv(csv, index_col=0)
months = bangorprecip.index.to_numpy()
years = bangorprecip.columns.to_numpy()
bangorprecip = bangorprecip.to_numpy()
print(bangorprecip.shape)
bangorprecip | (12, 10)
| Apache-2.0 | _notebooks/2021-02-07-working-with-numpy.ipynb | antoniojurlina/portfolio |
**1. What was the total cumulative precipitation over the ten years?** | total_precip = np.sum(bangorprecip)
print("Total cumulative precipitation over the ten years was", total_precip, "inches.") | Total cumulative precipitation over the ten years was 425.26 inches.
| Apache-2.0 | _notebooks/2021-02-07-working-with-numpy.ipynb | antoniojurlina/portfolio |
**2. What was the driest year?** | yearly_totals = bangorprecip.sum(0)
precip = float(yearly_totals[yearly_totals == yearly_totals.min()])
year = int(years[yearly_totals == yearly_totals.min()])
print("The driest year was", year, "with a total of", precip, "inches of precipitation.")
| The driest year was 2016 with a total of 34.35 inches of precipitation.
| Apache-2.0 | _notebooks/2021-02-07-working-with-numpy.ipynb | antoniojurlina/portfolio |
**3. What are the yearly precipitation means?** | averages = bangorprecip.mean(0)
%matplotlib inline
plt.style.use('ggplot')
plt.bar(years, averages)
plt.title("Average yearly precipitation")
plt.ylabel("Inches")
plt.show() | _____no_output_____ | Apache-2.0 | _notebooks/2021-02-07-working-with-numpy.ipynb | antoniojurlina/portfolio |
**4. What are the monthly min, mean, and max values over the ten years?** | mins = bangorprecip.min(1)
means = bangorprecip.mean(1)
maxs = bangorprecip.max(1)
%matplotlib inline
plt.style.use('ggplot')
plt.bar(months, mins, alpha = 0.8)
plt.bar(months, means, alpha = 0.6)
plt.bar(months, maxs, alpha = 0.4)
plt.title("Monthly precipitation")
plt.ylabel("Inches")
plt.legend(["min", "mean", "max"])
plt.show() | _____no_output_____ | Apache-2.0 | _notebooks/2021-02-07-working-with-numpy.ipynb | antoniojurlina/portfolio |
**5. What was the smallest monthly precipitation value and in which month and year did this occur?** | yearly_mins = bangorprecip.min(0)
monthly_mins = bangorprecip.min(1)
year = int(years[yearly_mins == yearly_mins.min()])
month = int(months[monthly_mins == monthly_mins.min()])
min_precip = bangorprecip.min(1).min()
print("The smallest monthly precipitation was ", min_precip,
" inches and it occured during ", month,"/",year, ".", sep = "") | The smallest monthly precipitation was 0.58 inches and it occured during 7/2012.
| Apache-2.0 | _notebooks/2021-02-07-working-with-numpy.ipynb | antoniojurlina/portfolio |
**6. How many months had precipitation amounts greater than 5 inches?** | answer = np.sum(bangorprecip > 5)
print(answer, "months had precipitation amounts greater than 5 inches.") | 26 months had precipitation amounts greater than 5 inches.
| Apache-2.0 | _notebooks/2021-02-07-working-with-numpy.ipynb | antoniojurlina/portfolio |
**7. How many months had precipitation greater than zero and less than 1.5 inches? What were these values and in what months and years did they occur?** | answer = np.logical_and(bangorprecip > 0, bangorprecip < 1.5)
print(np.sum(answer), "months had precipitation greater than 0 and less than 1.5 inches.")
print("")
for count, val in enumerate(years):
    # mask of months with 0 < precipitation < 1.5 inches in this year
    mask = (bangorprecip[:, count] > 0) & (bangorprecip[:, count] < 1.5)
    month = months[mask]
    values = bangorprecip[:, count][mask]  # fixed: previously read column 2 for every year
    if sum(values) != 0:
        print("In", years[count], ", month(s)", month,
              "had rainfalls of", values, ", respectively.")
In 2012 , month(s) [ 3 7 11] had rainfalls of [1.4 0.58 1.13] , respectively.
In 2013 , month(s) [ 1 10] had rainfalls of [1.95 6.96] , respectively.
In 2014 , month(s) [9] had rainfalls of [6.33] , respectively.
In 2015 , month(s) [3 7] had rainfalls of [1.4 0.58] , respectively.
In 2016 , month(s) [9] had rainfalls of [6.33] , respectively.
| Apache-2.0 | _notebooks/2021-02-07-working-with-numpy.ipynb | antoniojurlina/portfolio |
**8. How different were monthly precipitation values in 2019 from 2018?** | nineteen = np.concatenate(bangorprecip[:,years == '2019'])
eighteen = np.concatenate(bangorprecip[:,years == '2018'])
%matplotlib inline
plt.style.use('ggplot')
plt.bar(months, nineteen, alpha = 0.7)
plt.bar(months, eighteen, alpha = 0.7)
plt.title("Monthly precipitation (2018 vs. 2019)")
plt.ylabel("Inches")
plt.legend(["2019", "2018"])
plt.show() | _____no_output_____ | Apache-2.0 | _notebooks/2021-02-07-working-with-numpy.ipynb | antoniojurlina/portfolio |
**9. Create a heatmap of the 12 x 10 array** | %matplotlib inline
plt.style.use('ggplot')
imgplot = plt.imshow(bangorprecip, extent=[2010,2019,12,1], aspect='auto', cmap='viridis')
plt.colorbar();
| _____no_output_____ | Apache-2.0 | _notebooks/2021-02-07-working-with-numpy.ipynb | antoniojurlina/portfolio |
class Student:
    def __init__(self, name, student_number, age, school, course):
        self.name = name
        self.student_number = student_number
        self.age = age
        self.school = school
        self.course = course

    def myself(self):
        print("My Name is", self.name, self.age, "years old.", "My Student Number is", self.student_number, ".")
        print("I'm taking", self.course, "at", self.school)

S = Student("Nicole Shaira A. Tabligan", 202150371, 19, "Adamson University", "Bachelor of Science in Computer Engineering")
S.myself() | My Name is Nicole Shaira A. Tabligan 19 years old. My Student Number is 202150371 .
I'm taking Bachelor of Science in Computer Engineering at Adamson University
| Apache-2.0 | Prelim_Exam.ipynb | NicoleShairaTabligan/OOP-58002 |
|
This notebook begins with an example of using the Diagram Generator to generate diagrams for optical nonlinear spectroscopy, starting with 2D photon echo. We then move on to the fluorescence-detected analogue of 2D photon echo as a counterpoint, followed by further examples. The examples included in this notebook, in order of appearance, are:
1. Traditional 2D photon echo (2DPE)
2. Fluorescence-detected 2DPE (or any action detection method)
3. Transient absorption (TA)
4. 5th-order correction to TA in the pump amplitude
5. 5th-order correction to TA in the probe amplitude
6. Exciton-exciton interaction 2D spectroscopy
7. 2DPE for IR vibrational spectroscopy

1. 2DPE Generic case | # initialize the module
tdpe = DG()
# DiagramAutomation needs to know the phase-matching/-cycling condition
# 2DPE example
tdpe.set_phase_discrimination([(0,1),(1,0),(1,0)])
# Set the pulse durations
t0 = np.linspace(-1,1,num=11)
t1 = np.linspace(-2,2,num=21)
t2 = np.linspace(-2,2,num=11)
tlo = np.linspace(-3,3,num=31)
# set the pulse durations of each pulse
# the local oscillator does not impact diagram generation, but is still required at this time
tdpe.efield_times = [t0,t1,t2,tlo]
# using a list of pulse arrival times, we can generate the diagrams that contribute for
# that set of arrival times
# note the arrival time of the local oscillator is irrelevant, but needed by the code at this time
# here we choose for the local oscillator to "arrive" simultaneously with the 3rd pulse
time_ordered_diagrams = tdpe.get_diagrams([0,100,200,200])
time_ordered_diagrams
#display the diagrams for visual inspection (takes a few seconds to render)
tdpe.display_diagrams(time_ordered_diagrams)
all_diagrams = tdpe.get_diagrams([0,1,2,2])
print('There are ',len(all_diagrams),' diagrams in total')
# Check in this folder after running this cell to see 16 individual diagrams saved as pdf files
tdpe_diagrams_folder = 'TDPE_all_diagrams'
os.makedirs(tdpe_diagrams_folder,exist_ok=True)
# rendering and saving the diagrams takes a few seconds
tdpe.save_diagrams(all_diagrams,folder_name=tdpe_diagrams_folder) | _____no_output_____ | MIT | DiagramGeneratorExample.ipynb | gharib85/ufss |
To play with different cases where only some of the pulses overlap, uncomment and execute any of the following: | #ab_overlap = tdpe.get_diagrams([0,1,6,6])
#bc_overlap = tdpe.get_diagrams([0,4,6,6])
#ab_bc_overlap = tdpe.get_diagrams([0,3,6,6]) | _____no_output_____ | MIT | DiagramGeneratorExample.ipynb | gharib85/ufss |
And uncomment the following for the case you want to see | #tdpe.display_diagrams(ab_overlap) #<--- change the argument of display diagrams to the case you have uncommented and executed | _____no_output_____ | MIT | DiagramGeneratorExample.ipynb | gharib85/ufss |
Time-ordered example for only one electronic excited state. If the system under study has only one excited electronic state, then the excited-state absorption process cannot take place. This is captured by setting the attribute 'maximum_manifold' (default value $\infty$) as follows | tdpe.maximum_manifold = 1
time_ordered_diagrams = tdpe.get_diagrams([0,100,200,200])
tdpe.display_diagrams(time_ordered_diagrams) | _____no_output_____ | MIT | DiagramGeneratorExample.ipynb | gharib85/ufss |
Note that even for the case of a single electronic excitation, if there is a significant electronic relaxation rate, 'maximum_manifold' should not be set to 1, but left at the default value $\infty$. 2. Action-detected 2DPE | tdfs = DG(detection_type='fluorescence')
tdfs.set_phase_discrimination([(0,1),(1,0),(1,0),(0,1)])
t3 = np.linspace(-2.5,2.5,num=25)
tdfs.efield_times = [t0,t1,t2,t3]
time_ordered_diagrams = tdfs.get_diagrams([0,100,200,300])
tdfs.display_diagrams(time_ordered_diagrams)
# and all possibly relevant diagrams can be generated by setting the pulse delays so that all pulses overlap
all_diagrams = tdfs.get_diagrams([0,1,2,2])
# Check in this folder to see 16 individual diagrams
tdfs_diagrams_folder = 'TDFS_all_diagrams'
os.makedirs(tdfs_diagrams_folder,exist_ok=True)
tdfs.save_diagrams(all_diagrams,folder_name=tdfs_diagrams_folder) | _____no_output_____ | MIT | DiagramGeneratorExample.ipynb | gharib85/ufss |
To play with different cases where only some of the pulses overlap, uncomment and execute any of the following: | #ab_overlap = tdfs.get_diagrams([0,1,6,12])
#bc_overlap = tdfs.get_diagrams([0,5,5,12])
#cd_overlap = tdfs.get_diagrams([0,5,10,12])
#ab_bc_overlap = tdfs.get_diagrams([0,3,6,12])
#ab_cd_overlap = tdfs.get_diagrams([0,1,10,12])
# and so on | _____no_output_____ | MIT | DiagramGeneratorExample.ipynb | gharib85/ufss |
And uncomment the following for the case you want to see | #tdfs.display_diagrams(ab_overlap) #<--- change the argument of display diagrams to the case you have uncommented and executed | _____no_output_____ | MIT | DiagramGeneratorExample.ipynb | gharib85/ufss |
TA | ta = DG()
ta.set_phase_discrimination([(1,1),(1,0)])
pump_interval = t0
probe_interval = t1
ta.efield_times = [t0,t1] | _____no_output_____ | MIT | DiagramGeneratorExample.ipynb | gharib85/ufss |
TA 5th-order corrections Higher order in pump amplitude | ta5order_pump = DG()
ta5order_pump.set_phase_discrimination([(2,2),(1,0)])
ta5order_pump.efield_times = [t0,t1]
# Time-ordered diagrams
ta5order_pump.get_diagrams([0,100,100]) | _____no_output_____ | MIT | DiagramGeneratorExample.ipynb | gharib85/ufss |
Higher order in probe amplitude | ta5order_probe = DG()
ta5order_probe.set_phase_discrimination([(1,1),(2,1)])
ta5order_probe.efield_times = [t0,t1]
ta5order_probe.get_diagrams([0,100,100]) | _____no_output_____ | MIT | DiagramGeneratorExample.ipynb | gharib85/ufss |
EEI2D | eei2d = DG()
eei2d.set_phase_discrimination([(0,2),(2,0),(1,0)])
eei2d.efield_times = [t0,t1,t2,tlo]
eei2d.get_diagrams([0,100,200,300]) | _____no_output_____ | MIT | DiagramGeneratorExample.ipynb | gharib85/ufss |
2DPE for IR vibrational spectroscopy For IR vibrational spectroscopy, 'maximum_manifold' should be left at its default of $\infty$. In addition, 'minimum_manifold' should be set to a negative number. This is because, outside of the zero-temperature limit, the initial state of the system is a Boltzmann distribution over vibrational occupation states. The $n=1$ vibrational state can be de-excited once, the $n=2$ vibrational state can be de-excited twice, and so on. Depending on the ratio $k_BT/\hbar\omega$, where $\omega$ is the vibrational frequency, the initial distribution will contain appreciable weight in the first $n$ vibrational ladder states. This information should be used in setting 'minimum_manifold'. Here are two examples: | tdpe.maximum_manifold = np.inf
tdpe.minimum_manifold = -1
tdpe.display_diagrams(tdpe.get_diagrams([0,100,200,200]))
# or
tdpe.maximum_manifold = np.inf
tdpe.minimum_manifold = -2
tdpe.display_diagrams(tdpe.get_diagrams([0,100,200,200])) | _____no_output_____ | MIT | DiagramGeneratorExample.ipynb | gharib85/ufss |
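As a quick check on the Boltzmann argument above, the relative populations of a harmonic vibrational ladder can be estimated directly. This helper is an illustration for choosing 'minimum_manifold', not part of ufss:

import numpy as np

def boltzmann_populations(num_levels, kT_over_hbar_omega):
    # normalised weights exp(-n*hbar*omega/kT) for n = 0..num_levels-1
    n = np.arange(num_levels)
    w = np.exp(-n / kT_over_hbar_omega)
    return w / w.sum()

# e.g. at kT = hbar*omega, boltzmann_populations(5, 1.0) puts roughly 23% and 9%
# of the population in n=1 and n=2, suggesting minimum_manifold = -2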
Supplemental Tables. This Jupyter notebook reproduces a number of Supplemental Tables that are not included in any of the other notebooks. | %reload_ext autoreload
%autoreload 2
%matplotlib inline
import sys
sys.path.append('../src')
from io import StringIO
import numpy as np
import pandas as pd | _____no_output_____ | MIT | notebooks/A. Supplementary tables.ipynb | jrderuiter/imfusion-analyses |
Supplementary Table S2 - ILC insertions. Overview of all insertions identified by IM-Fusion in the ILC dataset. | insertion_column_map = {
'transposon_anchor': 'feature_anchor',
'id': 'insertion_id',
'seqname': 'chromosome',
'orientation': 'gene_orientation'
}
col_order = ['insertion_id', 'sample', 'chromosome', 'position', 'strand',
'support', 'support_junction', 'support_spanning',
'feature_name','feature_type', 'feature_anchor', 'feature_strand',
'ffpm', 'ffpm_junction', 'ffpm_spanning',
'gene_id', 'gene_name', 'gene_strand', 'gene_orientation',
'novel_transcript']
insertions_sb = (
pd.read_csv('../data/processed/sb/star/insertions.txt', sep='\t')
.rename(columns=insertion_column_map)[col_order]
.rename(columns=lambda c: c.replace('_', ' ').capitalize()))
insertions_sb.to_excel('../reports/supplemental/tables/table_s2_insertions_sb.xlsx', index=False) | _____no_output_____ | MIT | notebooks/A. Supplementary tables.ipynb | jrderuiter/imfusion-analyses |
Supplementary Table S3 - ILC CTGs. Overview of the CTGs identified by IM-Fusion in the ILC dataset. | ctgs = pd.read_csv('../data/processed/sb/star/ctgs.txt', sep='\t')
ctg_overview = (ctgs
.assign(de_direction=lambda df: df['de_direction'].map({-1: 'down', 1: 'up'}))
.drop(['de_test', 'gene_id'], axis=1)
.rename(columns={
'gene_name': 'Gene',
'p_value': 'CTG p-value',
'q_value': 'CTG q-value',
'n_samples': 'Num. samples',
'de_pvalue': 'DE p-value',
'de_direction': 'DE direction'
}))
ctg_overview.head() | _____no_output_____ | MIT | notebooks/A. Supplementary tables.ipynb | jrderuiter/imfusion-analyses |
Supplementary Table S5 - B-ALL insertions. Overview of all insertions identified by IM-Fusion in the B-ALL dataset. | insertions_sanger = (
pd.read_csv('../data/processed/sanger/star/insertions.txt', sep='\t')
.rename(columns=insertion_column_map)[col_order]
.rename(columns=lambda c: c.replace('_', ' ').capitalize()))
insertions_sanger.to_excel('../reports/supplemental/tables/table_s5_insertions_sanger.xlsx', index=False) | _____no_output_____ | MIT | notebooks/A. Supplementary tables.ipynb | jrderuiter/imfusion-analyses |
 **Matlotlib**: Matplotlib is a comprehensive library for creating static, animated, and interactive visualizations in Python.Website: https://matplotlib.org/GitHub: https://github.com/matplotlib/matplotlib In the previous notebook, we saw some basic examples of plotting and visualization in the context of learning `numpy`. In this notebook, we dive much deeper. The goal is to understand how `matplotlib` represents figures internally. | from matplotlib import pyplot as plt
%matplotlib inline | _____no_output_____ | MIT | Lab_Notebooks/S1_3_Matplotlib.ipynb | tbeucler/2022_ML_Earth_Env_Sci |
Figure and Axes The *figure* is the highest level of organization of `matplotlib` objects. If we want, we can create a figure explicitly. | fig = plt.figure()
fig = plt.figure(figsize=(13, 5))
fig = plt.figure()
ax = fig.add_axes([0, 0, 1, 1])
fig = plt.figure()
ax = fig.add_axes([0, 0, 0.5, 1])
fig = plt.figure()
ax1 = fig.add_axes([0, 0, 0.5, 1])
ax2 = fig.add_axes([0.6, 0, 0.3, 0.5], facecolor='g') | _____no_output_____ | MIT | Lab_Notebooks/S1_3_Matplotlib.ipynb | tbeucler/2022_ML_Earth_Env_Sci |
Subplots Subplot syntax is one way to specify the creation of multiple axes. | fig = plt.figure()
axes = fig.subplots(nrows=2, ncols=3)
fig = plt.figure(figsize=(12, 6))
axes = fig.subplots(nrows=2, ncols=3)
axes | _____no_output_____ | MIT | Lab_Notebooks/S1_3_Matplotlib.ipynb | tbeucler/2022_ML_Earth_Env_Sci |
There is a shorthand for doing this all at once, **which is our recommended way to create new figures!** | fig, ax = plt.subplots()
ax
fig, axes = plt.subplots(ncols=2, figsize=(8, 4), subplot_kw={'facecolor': 'g'})
axes | _____no_output_____ | MIT | Lab_Notebooks/S1_3_Matplotlib.ipynb | tbeucler/2022_ML_Earth_Env_Sci |
Drawing into Axes All plots are drawn into axes. It is easiest to understand how matplotlib works if you use the [object-oriented](https://matplotlib.org/faq/usage_faq.htmlcoding-styles) style. | # create some data to plot
import numpy as np
x = np.linspace(-np.pi, np.pi, 100)
y = np.cos(x)
z = np.sin(6*x)
fig, ax = plt.subplots()
ax.plot(x, y) | _____no_output_____ | MIT | Lab_Notebooks/S1_3_Matplotlib.ipynb | tbeucler/2022_ML_Earth_Env_Sci |
This does the same thing as | plt.plot(x, y) | _____no_output_____ | MIT | Lab_Notebooks/S1_3_Matplotlib.ipynb | tbeucler/2022_ML_Earth_Env_Sci |
This starts to matter when we have multiple axes to worry about. | fig, axes = plt.subplots(figsize=(8, 4), ncols=2)
ax0, ax1 = axes
ax0.plot(x, y)
ax1.plot(x, z) | _____no_output_____ | MIT | Lab_Notebooks/S1_3_Matplotlib.ipynb | tbeucler/2022_ML_Earth_Env_Sci |
Labeling Plots | fig, axes = plt.subplots(figsize=(8, 4), ncols=2)
ax0, ax1 = axes
ax0.plot(x, y)
ax0.set_xlabel('x')
ax0.set_ylabel('y')
ax0.set_title('x vs. y')
ax1.plot(x, z)
ax1.set_xlabel('x')
ax1.set_ylabel('z')
ax1.set_title('x vs. z')
# squeeze everything in
plt.tight_layout() | _____no_output_____ | MIT | Lab_Notebooks/S1_3_Matplotlib.ipynb | tbeucler/2022_ML_Earth_Env_Sci |
Customizing Line Plots | fig, ax = plt.subplots()
ax.plot(x, y, x, z) | _____no_output_____ | MIT | Lab_Notebooks/S1_3_Matplotlib.ipynb | tbeucler/2022_ML_Earth_Env_Sci |
It’s simple to switch axes | fig, ax = plt.subplots()
ax.plot(y, x, z, x) | _____no_output_____ | MIT | Lab_Notebooks/S1_3_Matplotlib.ipynb | tbeucler/2022_ML_Earth_Env_Sci |
A “parametric” graph: | fig, ax = plt.subplots()
ax.plot(y, z) | _____no_output_____ | MIT | Lab_Notebooks/S1_3_Matplotlib.ipynb | tbeucler/2022_ML_Earth_Env_Sci |
Line Styles | fig, axes = plt.subplots(figsize=(16, 5), ncols=3)
axes[0].plot(x, y, linestyle='dashed')
axes[0].plot(x, z, linestyle='--')
axes[1].plot(x, y, linestyle='dotted')
axes[1].plot(x, z, linestyle=':')
axes[2].plot(x, y, linestyle='dashdot', linewidth=5)
axes[2].plot(x, z, linestyle='-.', linewidth=0.5) | _____no_output_____ | MIT | Lab_Notebooks/S1_3_Matplotlib.ipynb | tbeucler/2022_ML_Earth_Env_Sci |
Colors As described in the [colors documentation](https://matplotlib.org/2.0.2/api/colors_api.html), there are some special codes for commonly used colors:* b: blue* g: green* r: red* c: cyan* m: magenta* y: yellow* k: black* w: white | fig, ax = plt.subplots()
ax.plot(x, y, color='k')
ax.plot(x, z, color='r') | _____no_output_____ | MIT | Lab_Notebooks/S1_3_Matplotlib.ipynb | tbeucler/2022_ML_Earth_Env_Sci |
Other ways to specify colors: | fig, axes = plt.subplots(figsize=(16, 5), ncols=3)
# grayscale
axes[0].plot(x, y, color='0.8')
axes[0].plot(x, z, color='0.2')
# RGB tuple
axes[1].plot(x, y, color=(1, 0, 0.7))
axes[1].plot(x, z, color=(0, 0.4, 0.3))
# HTML hex code
axes[2].plot(x, y, color='#00dcba')
axes[2].plot(x, z, color='#b029ee') | _____no_output_____ | MIT | Lab_Notebooks/S1_3_Matplotlib.ipynb | tbeucler/2022_ML_Earth_Env_Sci |
There is a default color cycle built into `matplotlib`. | plt.rcParams['axes.prop_cycle']
fig, ax = plt.subplots(figsize=(12, 10))
for factor in np.linspace(0.2, 1, 11):
ax.plot(x, factor*y) | _____no_output_____ | MIT | Lab_Notebooks/S1_3_Matplotlib.ipynb | tbeucler/2022_ML_Earth_Env_Sci |
Markers There are [lots of different markers](https://matplotlib.org/api/markers_api.html) availabile in matplotlib! | fig, axes = plt.subplots(figsize=(12, 5), ncols=2)
axes[0].plot(x[:20], y[:20], marker='.')
axes[0].plot(x[:20], z[:20], marker='o')
axes[1].plot(x[:20], z[:20], marker='^',
markersize=10, markerfacecolor='r',
markeredgecolor='k') | _____no_output_____ | MIT | Lab_Notebooks/S1_3_Matplotlib.ipynb | tbeucler/2022_ML_Earth_Env_Sci |
Label, Ticks, and Gridlines | fig, ax = plt.subplots(figsize=(12, 7))
ax.plot(x, y)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_title(r'A complicated math function: $f(x) = \cos(x)$')
ax.set_xticks(np.pi * np.array([-1, 0, 1]))
ax.set_xticklabels([r'$-\pi$', '0', r'$\pi$'])
ax.set_yticks([-1, 0, 1])
ax.set_yticks(np.arange(-1, 1.1, 0.2), minor=True)
#ax.set_xticks(np.arange(-3, 3.1, 0.2), minor=True)
ax.grid(which='minor', linestyle='--')
ax.grid(which='major', linewidth=2) | _____no_output_____ | MIT | Lab_Notebooks/S1_3_Matplotlib.ipynb | tbeucler/2022_ML_Earth_Env_Sci |
Axis Limits | fig, ax = plt.subplots()
ax.plot(x, y, x, z)
ax.set_xlim(-5, 5)
ax.set_ylim(-3, 3) | _____no_output_____ | MIT | Lab_Notebooks/S1_3_Matplotlib.ipynb | tbeucler/2022_ML_Earth_Env_Sci |
Text Annotations | fig, ax = plt.subplots()
ax.plot(x, y)
ax.text(-3, 0.3, 'hello world')
ax.annotate('the maximum', xy=(0, 1),
xytext=(0, 0), arrowprops={'facecolor': 'k'}) | _____no_output_____ | MIT | Lab_Notebooks/S1_3_Matplotlib.ipynb | tbeucler/2022_ML_Earth_Env_Sci |
Other 1D Plots Scatter Plots | fig, ax = plt.subplots()
splot = ax.scatter(y, z, c=x, s=(100*z**2 + 5))
fig.colorbar(splot) | _____no_output_____ | MIT | Lab_Notebooks/S1_3_Matplotlib.ipynb | tbeucler/2022_ML_Earth_Env_Sci |
Bar Plots | labels = ['first', 'second', 'third']
values = [10, 5, 30]
fig, axes = plt.subplots(figsize=(10, 5), ncols=2)
axes[0].bar(labels, values)
axes[1].barh(labels, values) | _____no_output_____ | MIT | Lab_Notebooks/S1_3_Matplotlib.ipynb | tbeucler/2022_ML_Earth_Env_Sci |
2D Plotting Methods imshow | x1d = np.linspace(-2*np.pi, 2*np.pi, 100)
y1d = np.linspace(-np.pi, np.pi, 50)
xx, yy = np.meshgrid(x1d, y1d)
f = np.cos(xx) * np.sin(yy)
print(f.shape)
fig, ax = plt.subplots(figsize=(12,4), ncols=2)
ax[0].imshow(f)
ax[1].imshow(f, origin='bottom') | _____no_output_____ | MIT | Lab_Notebooks/S1_3_Matplotlib.ipynb | tbeucler/2022_ML_Earth_Env_Sci |
pcolormesh | fig, ax = plt.subplots(ncols=2, figsize=(12, 5))
pc0 = ax[0].pcolormesh(x1d, y1d, f)
pc1 = ax[1].pcolormesh(xx, yy, f)
fig.colorbar(pc0, ax=ax[0])
fig.colorbar(pc1, ax=ax[1])
x_sm, y_sm, f_sm = xx[:10, :10], yy[:10, :10], f[:10, :10]
fig, ax = plt.subplots(figsize=(12,5), ncols=2)
# last row and column ignored!
ax[0].pcolormesh(x_sm, y_sm, f_sm, edgecolors='k')
# same!
ax[1].pcolormesh(x_sm, y_sm, f_sm[:-1, :-1], edgecolors='k')
y_distorted = y_sm*(1 + 0.1*np.cos(6*x_sm))
plt.figure(figsize=(12,6))
plt.pcolormesh(x_sm, y_distorted, f_sm[:-1, :-1], edgecolors='w')
plt.scatter(x_sm, y_distorted, c='k') | _____no_output_____ | MIT | Lab_Notebooks/S1_3_Matplotlib.ipynb | tbeucler/2022_ML_Earth_Env_Sci |
contour / contourf | fig, ax = plt.subplots(figsize=(12, 5), ncols=2)
# same thing!
ax[0].contour(x1d, y1d, f)
ax[1].contour(xx, yy, f)
fig, ax = plt.subplots(figsize=(12, 5), ncols=2)
c0 = ax[0].contour(xx, yy, f, 5)
c1 = ax[1].contour(xx, yy, f, 20)
plt.clabel(c0, fmt='%2.1f')
plt.colorbar(c1, ax=ax[1])
fig, ax = plt.subplots(figsize=(12, 5), ncols=2)
clevels = np.arange(-1, 1, 0.2) + 0.1
cf0 = ax[0].contourf(xx, yy, f, clevels, cmap='RdBu_r', extend='both')
cf1 = ax[1].contourf(xx, yy, f, clevels, cmap='inferno', extend='both')
fig.colorbar(cf0, ax=ax[0])
fig.colorbar(cf1, ax=ax[1]) | _____no_output_____ | MIT | Lab_Notebooks/S1_3_Matplotlib.ipynb | tbeucler/2022_ML_Earth_Env_Sci |
quiver | u = -np.cos(xx) * np.cos(yy)
v = -np.sin(xx) * np.sin(yy)
fig, ax = plt.subplots(figsize=(12, 7))
ax.contour(xx, yy, f, clevels, cmap='RdBu_r', extend='both', zorder=0)
ax.quiver(xx[::4, ::4], yy[::4, ::4],
u[::4, ::4], v[::4, ::4], zorder=1) | _____no_output_____ | MIT | Lab_Notebooks/S1_3_Matplotlib.ipynb | tbeucler/2022_ML_Earth_Env_Sci |
streamplot | fig, ax = plt.subplots(figsize=(12, 7))
ax.streamplot(xx, yy, u, v, density=2, color=(u**2 + v**2)) | _____no_output_____ | MIT | Lab_Notebooks/S1_3_Matplotlib.ipynb | tbeucler/2022_ML_Earth_Env_Sci |
Exercise 3: Replicating Plots using `Matplotlib` and `Numpy` The goal here is to replicate the figures you see as closely as possible. Note that the data in *Part I* is hosted online and updated automatically - your figures may not look exactly the same!In order to get some data, you will have to run the code in the cells below. There is no need to focus on how this code exactly works. In the end, it will give you some `numpy` arrays, which you will use in your plots. This exercise should be done using **only `numpy` and `matplotlib`**. Part I: Line and Contour Plots to Visualize Global Temperature Data The temperature data are from the [NCEP/NCAR atmospheric reanalysis 1](https://psl.noaa.gov/data/gridded/data.ncep.reanalysis.html). | import xarray as xr
ds_url = 'http://iridl.ldeo.columbia.edu/SOURCES/.NOAA/.NCEP-NCAR/.CDAS-1/.MONTHLY/.Diagnostic/.surface/.temp/dods'
ds = xr.open_dataset(ds_url, decode_times=False)
#########################################################
#### BELOW ARE THE VARIABLES YOU SHOULD USE IN THE PLOTS
#### (numpy arrays)
#### NO XARRAY ALLOWED :)
#########################################################
temp = ds.temp[-1].values - 273.15
lon = ds.X.values
lat = ds.Y.values | _____no_output_____ | MIT | Lab_Notebooks/S1_3_Matplotlib.ipynb | tbeucler/2022_ML_Earth_Env_Sci |
Below is the figure to replicate using the `numpy` variables `temp`, `lon`, and `lat`.Hint 1: Zonal-mean is synonymous with longitudinal-mean, i.e. the mean must be taken along the `axis` corresponding to `lon`.Hint 2: To create subplots of different sizes, consider reading the [`plt.subplots` documentation](https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.subplots.html).Hint 3: For the left subplot, check out the [2D Plotting Methods section](2D_Plotting_Methods).Hint 4: For the right subplot, check out the [Label, Ticks, and Gridlines subsection](Label).Hint 5: Don't spend too too much time making your figure perfect as there is still a lot of ground to cover in the next notebooks 😀  | # Replicate the figure here | _____no_output_____ | MIT | Lab_Notebooks/S1_3_Matplotlib.ipynb | tbeucler/2022_ML_Earth_Env_Sci |
Part II: Scatter Plots to Visualize Earthquake Data Here, we will make a map plot of earthquakes from a USGS catalog of historic large earthquakes. Color the earthquakes by `log10(depth)` and adjust the marker size to be `magnitude/100` | import pooch
fname = pooch.retrieve(
"https://unils-my.sharepoint.com/:u:/g/personal/tom_beucler_unil_ch/EW1bnM3elHpAtjb1KtiEw0wB9Pl5w_FwrCvVRlnilXHCtg?download=1",
known_hash='22b9f7045bf90fb99e14b95b24c81da3c52a0b4c79acf95d72fbe3a257001dbb',
processor=pooch.Unzip()
)[0]
earthquakes = np.genfromtxt(fname, delimiter='\t')
depth = earthquakes[:, 8]
magnitude = earthquakes[:, 9]
latitude = earthquakes[:, 20]
longitude = earthquakes[:, 21] | _____no_output_____ | MIT | Lab_Notebooks/S1_3_Matplotlib.ipynb | tbeucler/2022_ML_Earth_Env_Sci |
Below is the figure to replicate using the `numpy` variables `earthquake`, `depth`, `magnitude`, `latitude`, and `longitude`.Hint: Check out the [Scatter Plots subsection](Scatter) and consider reading the documentation for [`plt.scatter`](https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.scatter.html) and [`plt.colorbar`](https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.colorbar.html).  | # Replicate the figure here | _____no_output_____ | MIT | Lab_Notebooks/S1_3_Matplotlib.ipynb | tbeucler/2022_ML_Earth_Env_Sci |
Data Preparation Settings/Functions. Read in settings and functions. | libraries <- c('here', 'missForest', 'stringr', 'imputeMissings', 'regclass'
,'purrr','DescTools')
suppressWarnings(lapply(libraries, require, character.only = TRUE))
suppressWarnings(source(here::here('Stock Estimation', 'settings.R'))) | _____no_output_____ | MIT | Stock Estimation/notebooks/3.0_data_preparation.ipynb | ndysle1/R-Projects |
DataRead in the final data set from the data preparation notebook. | data <- fread(paste0(dir$final_data,'combined_financial.csv')) | _____no_output_____ | MIT | Stock Estimation/notebooks/3.0_data_preparation.ipynb | ndysle1/R-Projects |
More Cleaning Duplicates | #Checking for duplicated columns
dups <- data[ , which(duplicated(t(data)))]
dups <- names(dups)
dups
#Removing any duplicated columns after verifying them
data <- data %>% dplyr::select(-c(dups))
dim(data)
#Looking for missing values & evaluating list of variable names
na <- apply(is.na(data),2,sum)
max(na)
# NOTE: The following code has been commented out due to the length of its output.
#print(na)
head(sort(na, decreasing = TRUE), n=25)
#Merging and dropping duplicated variable names
#NOTE: Portions of the following code have been commented out due to the length of its output.
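#Name_Changer is defined in the sourced settings.R (not shown here); it
#presumably merges each pair of duplicated variables into a single column
#and drops the redundant name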
#view(data[, c("Payout Ratio", "payoutRatio")])
data <- Name_Changer(dat=data,x='Payout Ratio',y='payoutRatio')
#view(data[, c('interestCoverage', 'Interest Coverage')])
data <- Name_Changer(dat=data,x='Interest Coverage',y='interestCoverage')
#view(data[, c('netProfitMargin', 'Net Profit Margin')])
data <- Name_Changer(dat=data,x='Net Profit Margin',y='netProfitMargin')
#view(data[, c('PE ratio', 'priceEarningsRatio')])
data <- Name_Changer(dat=data,x='PE ratio',y='priceEarningsRatio')
#view(data[, c('priceToFreeCashFlowsRatio', 'PFCF ratio')])
data <- Name_Changer(dat=data,x='PFCF ratio',y='priceToFreeCashFlowsRatio')
#view(data[, c('priceToOperatingCashFlowsRatio', 'POCF ratio')])
data <- Name_Changer(dat=data,x='POCF ratio',y='priceToOperatingCashFlowsRatio')
#view(data[, c('priceToSalesRatio', 'Price to Sales Ratio')])
data <- Name_Changer(dat=data,x='Price to Sales Ratio',y='priceToSalesRatio')
#view(data[, c('Days Payables Outstanding', 'daysOfPayablesOutstanding')])
data <- Name_Changer(dat=data,x='Days Payables Outstanding',y='daysOfPayablesOutstanding')
#view(data[, c('Free Cash Flow per Share', 'freeCashFlowPerShare')])
data <- Name_Changer(dat=data,x='Free Cash Flow per Share',y='freeCashFlowPerShare')
#view(data[, c('ROE', 'returnOnEquity')])
data <- Name_Changer(dat=data,x='ROE',y='returnOnEquity')
#view(data[, c('priceToBookRatio', 'PTB ratio')])
data <- Name_Changer(dat=data,x='PTB ratio',y='priceToBookRatio')
#view(data[, c('priceBookValueRatio', 'PB ratio')])
data <- Name_Changer(dat=data,x='PB ratio',y='priceBookValueRatio')
#view(data[, c('operatingCashFlowPerShare', 'Operating Cash Flow per Share')])
data <- Name_Changer(dat=data,x='Operating Cash Flow per Share',y='operatingCashFlowPerShare')
#view(data[, c('Cash per Share', 'cashPerShare')])
data <- Name_Changer(dat=data,x='Cash per Share',y='cashPerShare')
dim(data) | _____no_output_____ | MIT | Stock Estimation/notebooks/3.0_data_preparation.ipynb | ndysle1/R-Projects |
Variable Names | #Checking variable names
names(data)
data <- setDT(data)
#Changing all names to lower case and replacing spaces with "_"
#Amending various features to make more compatible models
names(data) <- str_trim(names(data), side = "both")
names(data) <- str_to_lower(names(data), locale = "en")
names(data) <- str_replace_all(names(data), " ", "_")
names(data) <- str_replace_all(names(data), "-", "")
names(data) <- str_replace_all(names(data), "&", ".")
names(data) <- str_replace_all(names(data), "\\(", "")
names(data) <- str_replace_all(names(data), "\\)", "")
names(data) <- str_replace_all(names(data), "3y", "three_yr")
names(data) <- str_replace_all(names(data), "5y", "five_yr")
names(data) <- str_replace_all(names(data), "10y", "ten_yr")
names(data) <- str_replace_all(names(data), "\\\\", "")
names(data) <- str_replace_all(names(data), "////", "_")
names(data) <- str_replace_all(names(data), ",", "")
names(data) <- str_replace_all(names(data), "_._", "_")
names(data) <- str_replace_all(names(data), "/", "_")
setnames(data, 'eps', 'earnings_per_share')
names(data) | _____no_output_____ | MIT | Stock Estimation/notebooks/3.0_data_preparation.ipynb | ndysle1/R-Projects |
Categorical Encoding | #Categorical Encoding
data[, sector := as.factor(sector)]
data[, sector_num := as.numeric(sector)]
#Reordering data to put "sector" with "sector_num"
data <- data %>%
dplyr::select('stock','nextyr_price_var','class','year','sector','sector_num', everything()) %>%
setDT() | _____no_output_____ | MIT | Stock Estimation/notebooks/3.0_data_preparation.ipynb | ndysle1/R-Projects |
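#Note: sector_num encodes sector as arbitrary integers, which implies an
#ordering; if a model should not assume one, a hedged alternative
#(illustrative only, not used in this pipeline) is one-hot encoding:
sector_dummies <- model.matrix(~ sector - 1, data = data)
head(sector_dummies)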
Missing Data | na <- apply(is.na(data),2,sum)
#print(na)
max(na)
#sort(na, decreasing = TRUE)
head(sort(na, decreasing = TRUE), n=25)
summary(na)
#Checking how many rows are complete
sum(complete.cases(data))
#Checking for NA across rows
data$na <- rowSums(is.na(data))
max(data$na)
head(sort(data$na, decreasing = TRUE),n = 20)
summary(data$na)
#Found that 50 missing values per row was a good cut-off for dropping rows
drop <- data %>%
  filter(na > 50)
dim(drop)
data <- data %>%
filter(na <= 50)
data <- dplyr::select(data, -c(na))
#Re-checking the NAs across columns
na <- apply(is.na(data),2,sum)
max(na)
# NOTE: The following code has been commented out due to the length of its output.
#print(na)
#sort(na, decreasing = TRUE)
head(sort(na, decreasing = TRUE), n=25)
#Keeping only columns with less than ~15 percent missing
perc <- apply(data,2,Perc_Missing)
max(perc)
# NOTE: The following code has been commented out due to the length of its output.
#print(perc)
#sort(perc, decreasing = TRUE)
head(sort(perc, decreasing = TRUE), n=25)
#Choosing to only keep variables with less than 15% missing data
data <- data[, which(apply(data,2,Perc_Missing) < 15.0)] | _____no_output_____ | MIT | Stock Estimation/notebooks/3.0_data_preparation.ipynb | ndysle1/R-Projects |
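#Perc_Missing comes from settings.R; a minimal sketch of the assumed
#definition, consistent with the 15-percent threshold used above:
Perc_Missing_sketch <- function(x) 100 * sum(is.na(x)) / length(x)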
Multicollinearity/Linear Dependence/Winsorization | #Splitting datasets
data <- setDT(data)
data2 <- select(data, c('stock','nextyr_price_var','sector'))
data <- select(data, -c('stock','nextyr_price_var','sector'))
#Converting class to a factor
data <- data[, class := as.factor(class)]
#Run regression to identify linearly dependent variables
set.seed(123)
glm <- suppressWarnings(glm(class~., family = binomial
, data = data))
#Find the linearly dependent variables
vars <- attributes(alias(glm)$Complete)$dimnames[[1]]
vars
# Remove the linearly dependent variables
remove <- match(vars,names(data))
remove
dim(data)
data <- select(data, -c(remove))
dim(data)
#Re-run regression without linearly dependent variables
set.seed(123)
glm <- suppressWarnings(glm(class~., family = binomial
, data = data))
#NOTE: This section of the code will take some time to run.
#The function VIF_Check runs a regression and removes the max
#VIF, repeating this process until all VIFs are below threshold
#Removing variables with VIFs above 5
data <- VIF_Check(dat=data, threshold=5)
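#A minimal sketch of the loop VIF_Check presumably implements (assumption;
#the real definition lives in settings.R): refit, drop the worst offender,
#repeat until every VIF is under the threshold.
VIF_Check_sketch <- function(dat, threshold = 5) {
  repeat {
    fit <- suppressWarnings(glm(class ~ ., family = binomial, data = dat))
    v <- regclass::VIF(fit)
    if (max(v) < threshold) break
    dat <- dplyr::select(dat, -dplyr::all_of(names(which.max(v))))
  }
  dat
}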
#Re-combine data
data <- cbind(data2,data) %>% setDT()
dim(data)
#Re-run regression after VIF-based removal; remaining warnings motivate winsorization
set.seed(123)
glm <- suppressWarnings(glm(class~., family = binomial(link = "logit")
, data = data[, -c('stock','nextyr_price_var','sector')], control = list(maxit = 100)))
#Split data for winsorization
data2 <- select(data, c('class','year','sector_num','stock'
,'nextyr_price_var','sector'))
data <- select(data, -c('class','year','sector_num','stock'
,'nextyr_price_var','sector'))
#Winsorize each column at the 5th and 95th percentiles
data <- map_df(data, ~Winsorize(., probs=c(0.05,0.95),na.rm=TRUE))
#Recombine datasets
data <- cbind(data2,data) %>% setDT()
dim(data)
#Re-run regression (warnings no longer suppressed) to confirm the issue is fixed
#No warning message occurs this time
set.seed(123)
glm <- glm(class~., family = binomial(link = "logit")
, data = data[, -c('stock','nextyr_price_var','sector')], control = list(maxit = 100))
summary(glm) | _____no_output_____ | MIT | Stock Estimation/notebooks/3.0_data_preparation.ipynb | ndysle1/R-Projects |
Imputing Missing Values | #Imputation will be implemented if necessary in the modeling notebook | _____no_output_____ | MIT | Stock Estimation/notebooks/3.0_data_preparation.ipynb | ndysle1/R-Projects |
Uniformity | #Implementing scaling in the modeling notebook | _____no_output_____ | MIT | Stock Estimation/notebooks/3.0_data_preparation.ipynb | ndysle1/R-Projects |
Additional Cleaning | #No additional cleaning was performed in this notebook | _____no_output_____ | MIT | Stock Estimation/notebooks/3.0_data_preparation.ipynb | ndysle1/R-Projects |
Save the Modeling Dataset | fwrite(data, paste0(dir$final_data,'clean_financial.csv')) | _____no_output_____ | MIT | Stock Estimation/notebooks/3.0_data_preparation.ipynb | ndysle1/R-Projects |
Pandas Data Visualization | import numpy as np
import pandas as pd
%matplotlib inline
pd.read_csv('',index_col=0) #the first column of the csv is now used as the row index (key per row)
#the plot styling is changed (red bars)
#stacked = True -> values are stacked on top of one another
#Line plot
s=df1['C']*100 #the plotted points are drawn larger | _____no_output_____ | MIT | Pandas/Pandas Daten Visualisierung.ipynb | florianfricke/data_science_jupyter_notebooks |
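A self-contained sketch of the sizing trick above (df1 here is a hypothetical DataFrame, since the original read_csv path was left blank):

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

df1 = pd.DataFrame(np.random.rand(50, 3), columns=['A', 'B', 'C'])
# marker area scales with column C, so rows with larger C draw bigger points
df1.plot.scatter(x='A', y='B', s=df1['C'] * 100)
plt.show()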
Plotly is a visualization library -> 3D diagrams are possible. Cufflinks connects Plotly with Pandas. Both must be installed; they cannot be installed with Anaconda -> install from the terminal: `pip install plotly` `pip install cufflinks` | import seaborn as sns
df = pd.read_csv('tips.csv')
df.head()
sns.violinplot(x='day', y='total_bill', data=df)
sns.violinplot | _____no_output_____ | MIT | Pandas/Pandas Daten Visualisierung.ipynb | florianfricke/data_science_jupyter_notebooks |
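Following up on the Plotly/Cufflinks note above, a minimal usage sketch (assuming both packages are installed; iplot mirrors the pandas plot API):

import numpy as np
import pandas as pd
import cufflinks as cf

cf.go_offline()  # render Plotly figures offline inside the notebook
df3d = pd.DataFrame(np.random.randn(100, 3), columns=['A', 'B', 'C'])
df3d.iplot(kind='scatter3d', x='A', y='B', z='C', mode='markers')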
INCIDENCE MATRIX: decomposing the incidence matrix and plotting node features (W) | inci = nx.incidence_matrix(G).todense()
print(inci.shape)
print(inci) | (30, 154)
[[ 1. 1. 1. ..., 0. 0. 0.]
[ 1. 0. 0. ..., 0. 0. 0.]
[ 0. 1. 0. ..., 0. 0. 0.]
...,
[ 0. 0. 0. ..., 1. 1. 0.]
[ 0. 0. 0. ..., 1. 0. 1.]
[ 0. 0. 0. ..., 0. 1. 1.]]
| MIT | incidence-mat-exp.ipynb | supriya-pandhre/incidence-mat-exp |
NMF Decomposition | from sklearn.decomposition import NMF
model = NMF(n_components=2,init='random', random_state=0)
W = model.fit_transform(inci)
H = model.components_
err = model.reconstruction_err_
it = model.n_iter_
print(err)
print(it)
print(W.shape)
print(H.shape)
# print(W[0])
# print(H[:,0]) | 16.3736251866
89
(30, 2)
(2, 154)
| MIT | incidence-mat-exp.ipynb | supriya-pandhre/incidence-mat-exp |
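As a sanity check, the reported reconstruction error can be recomputed by hand; with NMF's default Frobenius loss, reconstruction_err_ is the Frobenius norm of the residual:

import numpy as np

manual_err = np.linalg.norm(inci - W.dot(H), 'fro')
print(manual_err)  # should match model.reconstruction_err_ (~16.37)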
NMF displaying learned nodes | # displaying learned nodes
import matplotlib
import numpy as np
fig = plt.figure(figsize=(10,10))
colors=['green','hotpink','yellow']#, 'cyan','red','purple']
svd = fig.add_subplot(1,1,1)
svd.scatter(W[:, 0], W[:, 1],c=np.array(list(partition.values())),marker='o',s=[50,50],cmap=matplotlib.colors.ListedColormap(colors))
svd.title.set_text("W-nodes")
plt.show() | _____no_output_____ | MIT | incidence-mat-exp.ipynb | supriya-pandhre/incidence-mat-exp |
NMF displaying learned edge vectors (H) | #color edges
edges = G.edges()
ed_label = []
for ed in edges:
if partition[ed[0]]==partition[ed[1]] and partition[ed[0]]==0:
ed_label.append(0)
elif partition[ed[0]]==partition[ed[1]] and partition[ed[0]]==1:
ed_label.append(1)
elif partition[ed[0]]==partition[ed[1]] and partition[ed[0]]==2:
ed_label.append(2)
elif partition[ed[0]]==0 and partition[ed[1]]==1:
ed_label.append(3)
elif partition[ed[0]]==1 and partition[ed[1]]==2:
ed_label.append(4)
elif partition[ed[0]]==0 and partition[ed[1]]==2:
ed_label.append(5)
print(len(edges))
print(len(ed_label))
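The if/elif chain above only labels an edge when its endpoints arrive in a fixed community order; a sketch of an order-insensitive alternative keyed on the unordered community pair:

# map unordered community pairs to the same six color labels
pair_to_label = {frozenset({0}): 0, frozenset({1}): 1, frozenset({2}): 2,
                 frozenset({0, 1}): 3, frozenset({1, 2}): 4, frozenset({0, 2}): 5}
ed_label = [pair_to_label[frozenset({partition[u], partition[v]})]
            for u, v in G.edges()]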
# displaying learned edge vectors(H)
import matplotlib
import numpy as np
fig = plt.figure(figsize=(10,10))
# 0-0 1-1 2-2 0-1 1-2 0-2
colors=['green','hotpink','yellow', 'cyan','red','purple']
svd = fig.add_subplot(1,1,1)
H1 = np.transpose(H)
svd.scatter(H1[:, 0], H1[:, 1],c=np.array(ed_label),s=[50,50],cmap=matplotlib.colors.ListedColormap(colors))
svd.title.set_text("W-edges")
plt.show()
# PCA and t-SNE for node features (W)
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from sklearn.preprocessing import normalize
fig = plt.figure(figsize=(10,10))
colors=['green','hotpink','yellow', 'cyan','red','purple']
# W1 = normalize(W)
tsne = fig.add_subplot(1,2,1)
X_tsne = TSNE(n_components=2, perplexity=10).fit_transform(W)  # perplexity must be < n_samples (only 30 nodes here); 40 is rejected by newer scikit-learn
tsne.scatter(X_tsne[:, 0], X_tsne[:, 1], c=np.array(list(partition.values())),s=[50,50],cmap=matplotlib.colors.ListedColormap(colors))
tsne.title.set_text("t-SNE")
pca = fig.add_subplot(1,2,2)
X_pca = PCA(n_components=2).fit_transform(W)
pca.scatter(X_pca[:, 0], X_pca[:, 1], c=np.array(list(partition.values())), s=[50, 50], cmap=matplotlib.colors.ListedColormap(colors))
pca.title.set_text("PCA")
plt.show()
# PCA and t-SNE for edge features (H)
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from sklearn.preprocessing import normalize
fig = plt.figure(figsize=(10,10))
colors=['green','hotpink','yellow', 'cyan','red','purple']
H1 = np.transpose(H)
# H1 = normalize(H1)
tsne = fig.add_subplot(1,2,1)
X_tsne = TSNE(n_components=2, perplexity=40).fit_transform(H1)
tsne.scatter(X_tsne[:, 0], X_tsne[:, 1], c=np.array(ed_label),s=[50,50],cmap=matplotlib.colors.ListedColormap(colors))
tsne.title.set_text("t-SNE")
pca = fig.add_subplot(1,2,2)
X_pca = PCA(n_components=2).fit_transform(H1)
pca.scatter(X_pca[:, 0], X_pca[:, 1], c=np.array(ed_label), s=[50, 50], cmap=matplotlib.colors.ListedColormap(colors))
pca.title.set_text("PCA")
plt.show() | _____no_output_____ | MIT | incidence-mat-exp.ipynb | supriya-pandhre/incidence-mat-exp |
SVD decomposition of the incidence matrix | # SVD decomposition
ui,si,vi = np.linalg.svd(inci)
print(ui.shape)
# u=np.around(u,decimals=5)
# print(ui)
print(si.shape)
# s=np.around(s)
# print(si)
print(vi.shape)
# v=np.around(v,decimals=5)
# print(vi) | (30, 30)
(30,)
(154, 154)
| MIT | incidence-mat-exp.ipynb | supriya-pandhre/incidence-mat-exp |
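A short sketch of how the SVD factors relate back to the matrix: keeping only the top-k singular values gives the best rank-k approximation (np.linalg.svd returns vi already transposed):

import numpy as np

k = 2
inci_k = ui[:, :k] @ np.diag(si[:k]) @ vi[:k, :]
print(np.linalg.norm(inci - inci_k, 'fro'))  # rank-2 approximation error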
SVD features of nodes decomposed from incidence matrix | import matplotlib
import numpy as np
fig = plt.figure(figsize=(10,10))
colors=['green','hotpink','yellow', 'cyan','red','purple']
svd = fig.add_subplot(1,1,1)
print(len(list(partition.values())))
print(ui[:,0].shape)
svd.scatter([ui[:, 0]], [ui[:, 1]],c=np.array(list(partition.values())),s=[50,50],cmap=matplotlib.colors.ListedColormap(colors))
svd.title.set_text("U-nodes")
plt.show() | 30
(30, 1)
| MIT | incidence-mat-exp.ipynb | supriya-pandhre/incidence-mat-exp |