markdown (stringlengths 0-1.02M) | code (stringlengths 0-832k) | output (stringlengths 0-1.02M) | license (stringlengths 3-36) | path (stringlengths 6-265) | repo_name (stringlengths 6-127)
---|---|---|---|---|---
No Water | # imports needed by the analysis cells shown below
import collections
import pickle
from pathlib import Path
import numpy as np
import matplotlib.pyplot as plt

no_water_loops = collections.defaultdict(list)
no_water_path = Path('no_water')
for f in no_water_path.iterdir():
    if 'loops_hist' in f.name:
        k = float(f.name.split('_')[3])
        with f.open('rb') as iv:
            d = pickle.load(iv)
            no_water_loops[k].extend(d[-1]) | _____no_output_____ | Unlicense | paper/analysis/Loops.ipynb | jkrajniak/paper-cg-md-simulations-of-polymerization-with-forward-and-backward-reactions |
With water | water_loops = collections.defaultdict(list)
water_path = Path('with_water')
for f in water_path.iterdir():
if 'loops_hist' in f.name:
k = float(f.name.split('_')[3])
with f.open('rb') as iv:
d = pickle.load(iv)
water_loops[k].extend(d[-1]) | _____no_output_____ | Unlicense | paper/analysis/Loops.ipynb | jkrajniak/paper-cg-md-simulations-of-polymerization-with-forward-and-backward-reactions |
Water rev | water_rev_loops = collections.defaultdict(list)
water_rev_path = Path('with_water_rev')
for f in water_rev_path.iterdir():
if 'loops_hist' in f.name and 'rt1' in f.name:
k = float(f.name.split('_')[3])
kr = float(f.name.split('_')[4])
if kr != 0.001:
continue
print(k,kr)
with f.open('rb') as iv:
d = pickle.load(iv)
water_rev_loops[k].extend(d[-1])
print(k, d[-1])
%matplotlib inline
plt.rcParams['figure.figsize'] = (8, 6)
markers = {0.001: '*', 0.01: 'h', 0.1: 'X'}
for k in no_water_loops:
if no_water_loops[k]:
n, x = np.histogram(no_water_loops[k], density=False, bins='auto')
        n = np.asarray(n, float)  # the deprecated np.float alias is replaced by the builtin float
n[n <= 0.0001] = np.nan
plt.plot(x[:-1], n, 'h', linestyle='None', label='no water k={}'.format(k))
for k in water_loops:
if water_loops[k]:
n, x = np.histogram(water_loops[k], density=False, bins='auto')
        n = np.asarray(n, float)
n[n <= 0.0001] = np.nan
plt.plot(x[:-1], n, 'd', linestyle='None', label='with water k={}'.format(k))
for k in water_rev_loops:
if water_rev_loops[k]:
n, x = np.histogram(water_rev_loops[k], density=False, bins='auto')
        n = np.asarray(n, float)
n[n <= 0.0001] = np.nan
plt.plot(x[:-1], n, markers[k], linestyle='None', label='with water $k_f={}$ $k_r=0.001$ (hydrolysis)'.format(k))
plt.legend()
plt.xlabel('loop size (monomers)')
plt.ylabel('num. of structures')
plt.savefig('hist_loops.pdf', dpi=200, bbox_inches='tight') | _____no_output_____ | Unlicense | paper/analysis/Loops.ipynb | jkrajniak/paper-cg-md-simulations-of-polymerization-with-forward-and-backward-reactions |
Transformations & Transformation Tuning Parameters. Define the transformations we want to do; some transformations will have parameters (e.g. base of the log transform (or no transform), type of scaling, whether or not to add column combinations (e.g. age * hours-per-week)). Below is the pipeline for capital-gain/loss. We want to tune whether or not we should log transform. We need to do this after imputing but before scaling, so it needs to be its own pipeline. | cap_gain_loss_pipeline = Pipeline([
('selector', DataFrameSelector(attribute_names=['capital-gain', 'capital-loss'])),
('imputer', Imputer()),
    # tune Log transformation base (or no transformation); update: tuned - chose base e
('custom_transform', CustomLogTransform(base=math.e)),
# tune "net gain" (have to do it after log transform; log of <=0 doesn't exist)
('custom_cap_gain_minus_loss', CombineCapitalGainLossTransform(combine=True)),
# tune MinMax vs StandardScaler; we chose MinMax; update: tuned - chose MinMax
('custom_scaler', ChooserTransform(base_transformer=MinMaxScaler())),
]) | _____no_output_____ | MIT | data_scientist_nanodegree/projects/p1_charityml/custom/Logistic_RandomizedSearch_sklearn.ipynb | shane-kercheval/udacity |
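`DataFrameSelector`, `CustomLogTransform`, `CombineCapitalGainLossTransform` and `ChooserTransform` are custom transformers defined earlier in the notebook and not shown here. As a rough sketch of what a tunable log-transform step could look like (an assumption for illustration, not the notebook's actual implementation):

import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin

class LogTransformSketch(BaseEstimator, TransformerMixin):
    """Hypothetical stand-in: applies a log(1 + x) transform in the chosen base, or passes data through when base is None."""
    def __init__(self, base=None):
        self.base = base

    def fit(self, X, y=None):
        return self

    def transform(self, X):
        if self.base is None:
            return X
        # log1p keeps zero-valued capital gains/losses defined
        return np.log1p(X) / np.log(self.base)

Exposing `base` as a constructor argument is what makes such a step tunable through parameter names like `custom_transform__base` in the search grid further below.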
Below is the pipeline for the rest of numeric features: | num_pipeline = Pipeline([
('selector', DataFrameSelector(attribute_names=['age', 'education-num', 'hours-per-week'])),
('imputer', Imputer()),
# tune age * hours-per-week; update: tuned -chose not to include
#('combine_agehours', CombineAgeHoursTransform()),
# tune MinMax vs StandardScaler; update: tuned - chose MinMax
('custom_scaler', ChooserTransform(base_transformer=MinMaxScaler())),
]) | _____no_output_____ | MIT | data_scientist_nanodegree/projects/p1_charityml/custom/Logistic_RandomizedSearch_sklearn.ipynb | shane-kercheval/udacity |
Pipeline that simply gets the categorical/encoded columns from the previous transformation (which used `oo-learning`) | append_categoricals = Pipeline([
('append_cats', DataFrameSelector(attribute_names=one_hot_transformer.encoded_columns)) # already encoded
]) | _____no_output_____ | MIT | data_scientist_nanodegree/projects/p1_charityml/custom/Logistic_RandomizedSearch_sklearn.ipynb | shane-kercheval/udacity |
Below is the pipeline for combining all of the other pipelines | # combine pipelines
transformations_pipeline = FeatureUnion(transformer_list=[
("cap_gain_loss_pipeline", cap_gain_loss_pipeline),
("num_pipeline", num_pipeline),
("cat_pipeline", append_categoricals),
]) | _____no_output_____ | MIT | data_scientist_nanodegree/projects/p1_charityml/custom/Logistic_RandomizedSearch_sklearn.ipynb | shane-kercheval/udacity |
Choose the transformations to tune, below. Below calculates a standard value for `scale_pos_weight` based on the recommendation from http://xgboost.readthedocs.io/en/latest/parameter.html > Control the balance of positive and negative weights, useful for unbalanced classes. A typical value to consider: sum(negative cases) / sum(positive cases) (a short sketch of this computation follows the pipeline below). | model = LogisticRegression(random_state=42,
#penalty='l2',
C=1.0,
)
full_pipeline = Pipeline([
('preparation', transformations_pipeline),
#('pca_chooser', ChooserTransform()), # PCA option lost; didn't include
#('feature_selection', TopFeatureSelector(feature_importances, k)),
('model', model)
]) | _____no_output_____ | MIT | data_scientist_nanodegree/projects/p1_charityml/custom/Logistic_RandomizedSearch_sklearn.ipynb | shane-kercheval/udacity |
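The class-balance heuristic quoted above is straightforward to compute; a small self-contained sketch (the labels below are made up purely for illustration):

import numpy as np
y_example = np.array([0, 0, 0, 1, 0, 1])  # hypothetical 0/1 labels
scale_pos_weight = (y_example == 0).sum() / (y_example == 1).sum()
print(scale_pos_weight)  # sum(negative cases) / sum(positive cases) -> 2.0

For `LogisticRegression`, the closest equivalent knob is its `class_weight` parameter.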
Tuning strategy according to https://www.analyticsvidhya.com/blog/2016/03/complete-guide-parameter-tuning-xgboost-with-codes-python/ | from scipy.stats import randint, uniform, expon
model_param_dict = {
# 1st
'model__penalty': ['l1', 'l2'],
'model__C': uniform(0.001, 100),
}
# actual hyper-parameters/options to tune for transformations.
transformation_parameters = {
#'preparation__num_pipeline__imputer__strategy': ['mean', 'median', 'most_frequent'], # tune strategy
#'pca_chooser__base_transformer': [PCA(n_components=0.95, random_state=42), None], # PCA vs not
#'preparation__cap_gain_loss_pipeline__custom_transform__base': [None, math.e], # Log transform (base e) or not
#'preparation__cap_gain_loss_pipeline__custom_scaler__base_transformer': [MinMaxScaler(), StandardScaler()],
#'preparation__num_pipeline__custom_scaler__base_transformer': [MinMaxScaler(), StandardScaler()],
#'preparation__num_pipeline__combine_agehours__combine': [True, False],
#'preparation__cap_gain_loss_pipeline__custom_cap_gain_minus_loss__combine': [True, False]
}
param_grid = {**transformation_parameters, **model_param_dict}
param_grid
# def binary_roc_auc(y_true, y_score):
# return roc_auc_score(y_true=y_true,
# # binary makes it so it converts the "scores" to predictions
# y_score=[1 if x > 0.5 else 0 for x in y_score])
scorer = make_scorer(roc_auc_score, greater_is_better=True)
y = transformed_data[target_variable].apply(lambda x: 1 if x == positive_class else 0)
transformed_data[target_variable].values[0:10]
y[0:10]
print('Starting....')
time_start = time.time()
from sklearn.model_selection import RandomizedSearchCV, RepeatedKFold  # RepeatedKFold is used in the cv argument below
grid_search = RandomizedSearchCV(estimator=full_pipeline,
param_distributions=param_grid,
n_iter=50,
cv=RepeatedKFold(n_splits=5, n_repeats=2),
scoring=scorer,
return_train_score=True,
n_jobs=-1,
verbose=2)
grid_search.fit(transformed_data.drop(columns=target_variable), y)
time_end = time.time()
print('Time: {}m'.format(round((time_end-time_start)/60, 1)))
results_df = pd.concat([pd.DataFrame({'mean_score': grid_search.cv_results_["mean_test_score"],
'st_dev_score': grid_search.cv_results_["std_test_score"]}),
pd.DataFrame(grid_search.cv_results_["params"])],
axis=1)
results_df.sort_values(by=['mean_score'], ascending=False).head(10)
grid_search.best_score_, grid_search.best_params_
rescaled_means = MinMaxScaler(feature_range=(100, 1000)).fit_transform(results_df['mean_score'].values.reshape(-1, 1))
rescaled_means = rescaled_means.flatten() # reshape back to array
#rescaled_means
def compare_two_parameters(x_label, y_label):
x = results_df[x_label]
y = results_df[y_label]
plt.scatter(x,y,c=rescaled_means, s=rescaled_means, alpha=0.5)
plt.xlabel(x_label)
plt.ylabel(y_label)
index_of_best = np.argmax(rescaled_means)
plt.scatter(x[index_of_best], y[index_of_best], marker= 'x', s=200, color='red')
x_label = 'model__C'
y_label = 'model__penalty'
compare_two_parameters(x_label, y_label) | _____no_output_____ | MIT | data_scientist_nanodegree/projects/p1_charityml/custom/Logistic_RandomizedSearch_sklearn.ipynb | shane-kercheval/udacity |
Downloading the Excel files | from datetime import datetime,timedelta
Inicio_programa=datetime.now() | _____no_output_____ | MIT | Fase_2.ipynb | rodrigotesone1997/Limpieza_Datos_Municipales |
Import the packages | import os
import tabula
import re
from openpyxl import Workbook,load_workbook
import pandas as pd
import shutil
from natsort import natsorted
path_local="/home/rodrigo/Scrapper_Sueldos_Municipales" | _____no_output_____ | MIT | Fase_2.ipynb | rodrigotesone1997/Limpieza_Datos_Municipales |
Folder verification | path_folder_pdf=path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/pdf"
lista_folder_pdf=natsorted(os.listdir(path_folder_pdf))
# keep only the files whose names contain "Sueldos"
# (a list comprehension avoids removing items from the list while iterating over it)
lista_folder_pdf=[i for i in lista_folder_pdf if re.search("Sueldos",i) is not None]
lista_folder_pdf
index_carpeta_a_modificar=-1
folder_pdf_a_convertir=lista_folder_pdf[index_carpeta_a_modificar] | _____no_output_____ | MIT | Fase_2.ipynb | rodrigotesone1997/Limpieza_Datos_Municipales |
Create the folder where the raw Excel files will be saved | path_folder_pdf_a_convertir=path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/pdf/"+folder_pdf_a_convertir
Lista_pdf_nuevos=natsorted(os.listdir(path_folder_pdf_a_convertir))
Lista_nombres_secretarias=[re.sub("\.pdf","",i) for i in Lista_pdf_nuevos]
try :
os.mkdir(path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/XLSX/Excels_proceso_limpieza")
for secretaria in Lista_nombres_secretarias:
os.mkdir(path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/XLSX/Excels_proceso_limpieza"+"/"+secretaria)
except :
pass
for ubicacion_secretaria,nombre_secretaria in enumerate(Lista_pdf_nuevos):
variable = True
n=0
df = tabula.read_pdf(path_folder_pdf_a_convertir+"/"+nombre_secretaria, pages = "all")
while (variable == True):
try :
df[n].to_excel(path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/XLSX/Excels_proceso_limpieza"+"/"+Lista_nombres_secretarias[ubicacion_secretaria]+"/"+Lista_nombres_secretarias[ubicacion_secretaria]+"_imperfecto_"+str(n)+".xlsx")
n+=1
except :
variable=False
carpeta_especifica=re.sub("_Sueldos","",folder_pdf_a_convertir)
carpeta_especifica | _____no_output_____ | MIT | Fase_2.ipynb | rodrigotesone1997/Limpieza_Datos_Municipales |
Arranging the dirty Excel files | # I don't like these 2 names
path_carpetas_excels_sucios=path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/XLSX/Excels_proceso_limpieza"
carpetas_excels_sucios=natsorted(os.listdir(path_carpetas_excels_sucios))
for carpeta_secretaria in carpetas_excels_sucios:
path_excels_sin_modificar= path_carpetas_excels_sucios+"/"+ carpeta_secretaria
archivos_pdf=natsorted(os.listdir(path_excels_sin_modificar))
n=0
for nombre_archivo_pdf in archivos_pdf:
wb = load_workbook(filename=path_excels_sin_modificar+"/"+nombre_archivo_pdf)
sheet= wb.active
sheet.delete_cols(1)
celda_1=sheet.cell(row=1, column=1).value
celda_2=sheet.cell(row=1, column=2).value
if re.search("[a-z]",celda_1) is not None :
os.remove(path_excels_sin_modificar+"/"+nombre_archivo_pdf)
n+=1
elif re.search("[0-9]",celda_1) is not None and re.search("Unnamed: 0",celda_2) is not None:
inicio=re.search("[0-9]",celda_1).span()[0]
nombre=celda_1[:inicio]
numero=celda_1[inicio:]
sheet["A1"] = nombre
sheet["B1"] = numero
sheet.insert_rows(1)
sheet["A1"],sheet["B1"],sheet["C1"],sheet["D1"],sheet["E1"],sheet["F1"]="apellido_y_nombre","sueldo_bruto","sueldo_neto","costo_laboral","planta","funcion"
wb.save(filename=path_excels_sin_modificar+"/"+nombre_archivo_pdf)
os.rename(path_excels_sin_modificar+"/"+nombre_archivo_pdf , path_excels_sin_modificar+"/"+carpeta_secretaria+"_perfecto_"+str(n)+".xlsx")
n+=1
else :
sheet.insert_rows(1)
sheet["A1"],sheet["B1"],sheet["C1"],sheet["D1"],sheet["E1"],sheet["F1"]="apellido_y_nombre","sueldo_bruto","sueldo_neto","costo_laboral","planta","funcion"
wb.save(filename=path_excels_sin_modificar+"/"+nombre_archivo_pdf)
os.rename(path_excels_sin_modificar+"/"+nombre_archivo_pdf,path_excels_sin_modificar+"/"+carpeta_secretaria+"_perfecto_"+str(n)+".xlsx")
n+=1 | _____no_output_____ | MIT | Fase_2.ipynb | rodrigotesone1997/Limpieza_Datos_Municipales |
Create the folder where the Excel files are stored | path_carpeta_sueldos_nuevos=path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/XLSX/"+carpeta_especifica+"_Sueldos"
try:
os.mkdir(path_carpeta_sueldos_nuevos)
except:
pass
nombres_secciones_secretarias=natsorted(os.listdir(path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/secciones"))
#print(nombres_secciones_secretarias)
for secretaria in nombres_secciones_secretarias:
try:
secretaria_sin_extension=re.sub(".txt","",secretaria)
os.mkdir(os.path.join(path_carpeta_sueldos_nuevos,secretaria_sin_extension))
except:
pass
path_secretarias=os.path.join(path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/secciones",secretaria)
with open(path_secretarias,"r", encoding='utf8') as f:
secciones=f.readlines()
for seccion in secciones:
try:
seccion_sin_extension=re.sub(".txt","",seccion)
os.mkdir(os.path.join(os.path.join(path_carpeta_sueldos_nuevos,secretaria_sin_extension),seccion_sin_extension))
except Exception as e:
print(e) | _____no_output_____ | MIT | Fase_2.ipynb | rodrigotesone1997/Limpieza_Datos_Municipales |
Re-arranging the Excel files into each subfolder | lista_secretarias_sin_excepciones=["salario_ambiente",
"salario_control",
"salario_deporte_turismo",
"salario_hacienda",
"salario_ilar",
"salario_modernizacion",
"salario_movilidad",
"salario_obras",
"salario_salud",
"salario_spv"]
for secretaria in lista_secretarias_sin_excepciones:
path_limpieza_secretaria=path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/XLSX/Excels_proceso_limpieza/"+secretaria
nombre_ultimo_archivo_secretaria=re.sub(".xlsx","",natsorted(os.listdir(path_limpieza_secretaria))[-1])
lista_valores=[*range(int(re.findall(r'\d+',nombre_ultimo_archivo_secretaria)[0])+1)]
path_secretaria=path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/XLSX/"+carpeta_especifica+"_Sueldos/"+secretaria
lista_secretaria=natsorted(os.listdir(path_secretaria))
parametro_de_referencia=0
numero_archivo=0
for archivo_para_localizar in natsorted(os.listdir(path_limpieza_secretaria)):
archivo_para_extraer_numero=re.sub(".xlsx","",archivo_para_localizar)
numero=re.search("[0-999]",archivo_para_extraer_numero)
inicio_del_numero=numero.span()[0]
numero_extraido=int(archivo_para_extraer_numero[inicio_del_numero:])
if numero_extraido == lista_valores[parametro_de_referencia]:
shutil.move(os.path.join(path_limpieza_secretaria,archivo_para_localizar),os.path.join(path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba",archivo_para_localizar))
else:
for k in natsorted(os.listdir(path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba")):
shutil.move(os.path.join(path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba",k),os.path.join(path_secretaria,lista_secretaria[numero_archivo]))
numero_archivo+=1
#print("Aca deberia ir una funcion que mande todos los excel recogidos dentro de la carpeta\n")
shutil.move(os.path.join(path_limpieza_secretaria,archivo_para_localizar),os.path.join(path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba",archivo_para_localizar))
parametro_de_referencia+=1
parametro_de_referencia+=1
lista_ultimos_archivos=natsorted(os.listdir(path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba"))
for archivo in lista_ultimos_archivos:
shutil.move(os.path.join(path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba",archivo),os.path.join(path_secretaria,lista_secretaria[-1])) | _____no_output_____ | MIT | Fase_2.ipynb | rodrigotesone1997/Limpieza_Datos_Municipales |
Handling exceptions: Salario Cultura | path_limpieza_secretaria=path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/XLSX/Excels_proceso_limpieza/salario_cultura"
path_secretaria=path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/XLSX/"+carpeta_especifica+"_Sueldos/salario_cultura"
lista_secretaria=natsorted(os.listdir(path_secretaria))
shutil.move(os.path.join(path_limpieza_secretaria,"salario_cultura_perfecto_0.xlsx"),os.path.join(os.path.join(path_secretaria,lista_secretaria[0]),"salario_cultura_perfecto_0.xlsx"))
nombre_ultimo_archivo_secretaria=re.sub(".xlsx","",natsorted(os.listdir(path_limpieza_secretaria))[-1])
lista_valores=[*range(1,int(re.findall(r'\d+',nombre_ultimo_archivo_secretaria)[0])+1)]
parametro_de_referencia=0
numero_archivo=1
for archivo_para_localizar in natsorted(os.listdir(path_limpieza_secretaria)):
archivo_para_extraer_numero=re.sub(".xlsx","",archivo_para_localizar)
numero=re.search("[0-999]",archivo_para_extraer_numero)
inicio_del_numero=numero.span()[0]
numero_extraido=int(archivo_para_extraer_numero[inicio_del_numero:])
if numero_extraido == lista_valores[parametro_de_referencia]:
shutil.move(os.path.join(path_limpieza_secretaria,archivo_para_localizar),os.path.join(path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba",archivo_para_localizar))
else:
for k in natsorted(os.listdir(path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba")):
shutil.move(os.path.join(path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba",k),os.path.join(path_secretaria,lista_secretaria[numero_archivo]))
numero_archivo+=1
#print("Aca deberia ir una funcion que mande todos los excel recogidos dentro de la carpeta\n")
shutil.move(os.path.join(path_limpieza_secretaria,archivo_para_localizar),os.path.join(path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba",archivo_para_localizar))
parametro_de_referencia+=1
parametro_de_referencia+=1
#try:
# lista_ultimos_archivos=natsorted(os.listdir(path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba"))
# for archivo in lista_ultimos_archivos:
# shutil.move(os.path.join(path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba",archivo),os.path.join(path_secretaria,lista_secretaria[-1]))
#except:
# pass
# For the 2021_01 case
#shutil.move(path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba/salario_cultura_perfecto_92.xlsx",os.path.join(path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/XLSX/"+carpeta_especifica+"_Sueldos/salario_cultura/40_TEATRO_LA_COMEDIA","salario_cultura_perfecto_92.xlsx"))
#try:
# lista_ultimos_archivos=natsorted(os.listdir(path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba"))
# for archivo in lista_ultimos_archivos:
# shutil.move(os.path.join(path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba",archivo),os.path.join(path_secretaria,lista_secretaria[-2]))
#except:
# pass
# For the 2021_04 case
shutil.move(path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba/salario_cultura_perfecto_95.xlsx",os.path.join(path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/XLSX/"+carpeta_especifica+"_Sueldos/salario_cultura/"+lista_secretaria[-1],"salario_cultura_perfecto_95.xlsx"))
try:
lista_ultimos_archivos=natsorted(os.listdir(path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba"))
for archivo in lista_ultimos_archivos:
shutil.move(os.path.join(path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba",archivo),os.path.join(path_secretaria,lista_secretaria[-2]))
except:
pass | _____no_output_____ | MIT | Fase_2.ipynb | rodrigotesone1997/Limpieza_Datos_Municipales |
Salario Desarrollo Economico | path_limpieza_secretaria=path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/XLSX/Excels_proceso_limpieza/salario_desarrollo_economico"
path_secretaria=path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/XLSX/"+carpeta_especifica+"_Sueldos/salario_desarrollo_economico"
lista_secretaria=natsorted(os.listdir(path_secretaria))
shutil.move(os.path.join(path_limpieza_secretaria,"salario_desarrollo_economico_perfecto_0.xlsx"),os.path.join(os.path.join(path_secretaria,lista_secretaria[0]),"salario_desarrollo_economico_perfecto_0.xlsx"))
shutil.move(os.path.join(path_limpieza_secretaria,"salario_desarrollo_economico_perfecto_1.xlsx"),os.path.join(os.path.join(path_secretaria,lista_secretaria[1]),"salario_desarrollo_economico_perfecto_1.xlsx"))
nombre_ultimo_archivo_secretaria=re.sub(".xlsx","",natsorted(os.listdir(path_limpieza_secretaria))[-1])
lista_valores=[*range(2,int(re.findall(r'\d+',nombre_ultimo_archivo_secretaria)[0])+1)]
parametro_de_referencia=0
numero_archivo=1
for archivo_para_localizar in natsorted(os.listdir(path_limpieza_secretaria)):
archivo_para_extraer_numero=re.sub(".xlsx","",archivo_para_localizar)
numero=re.search("[0-999]",archivo_para_extraer_numero)
inicio_del_numero=numero.span()[0]
numero_extraido=int(archivo_para_extraer_numero[inicio_del_numero:])
if numero_extraido == lista_valores[parametro_de_referencia]:
shutil.move(os.path.join(path_limpieza_secretaria,archivo_para_localizar),os.path.join(path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba",archivo_para_localizar))
else:
for k in natsorted(os.listdir(path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba")):
shutil.move(os.path.join(path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba",k),os.path.join(path_secretaria,lista_secretaria[numero_archivo]))
numero_archivo+=1
#print("Aca deberia ir una funcion que mande todos los excel recogidos dentro de la carpeta\n")
shutil.move(os.path.join(path_limpieza_secretaria,archivo_para_localizar),os.path.join(path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba",archivo_para_localizar))
parametro_de_referencia+=1
parametro_de_referencia+=1
try:
lista_ultimos_archivos=natsorted(os.listdir(path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba"))
for archivo in lista_ultimos_archivos:
shutil.move(os.path.join(path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba",archivo),os.path.join(path_secretaria,lista_secretaria[-1]))
except:
pass | _____no_output_____ | MIT | Fase_2.ipynb | rodrigotesone1997/Limpieza_Datos_Municipales |
Salario desarrollo humano | path_limpieza_secretaria=path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/XLSX/Excels_proceso_limpieza/salario_desarrollo_humano"
path_secretaria=path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/XLSX/"+carpeta_especifica+"_Sueldos/salario_desarrollo_humano"
lista_secretaria=natsorted(os.listdir(path_secretaria))
shutil.move(os.path.join(path_limpieza_secretaria,"salario_desarrollo_humano_perfecto_0.xlsx"),os.path.join(os.path.join(path_secretaria,lista_secretaria[0]),"salario_desarrollo_humano_perfecto_0.xlsx"))
shutil.move(os.path.join(path_limpieza_secretaria,"salario_desarrollo_humano_perfecto_1.xlsx"),os.path.join(os.path.join(path_secretaria,lista_secretaria[1]),"salario_desarrollo_humano_perfecto_1.xlsx"))
nombre_ultimo_archivo_secretaria=re.sub(".xlsx","",natsorted(os.listdir(path_limpieza_secretaria))[-1])
lista_valores=[*range(2,int(re.findall(r'\d+',nombre_ultimo_archivo_secretaria)[0])+1)]
parametro_de_referencia=0
numero_archivo=2
for archivo_para_localizar in natsorted(os.listdir(path_limpieza_secretaria)):
archivo_para_extraer_numero=re.sub(".xlsx","",archivo_para_localizar)
numero=re.search("[0-999]",archivo_para_extraer_numero)
inicio_del_numero=numero.span()[0]
numero_extraido=int(archivo_para_extraer_numero[inicio_del_numero:])
if numero_extraido == lista_valores[parametro_de_referencia]:
shutil.move(os.path.join(path_limpieza_secretaria,archivo_para_localizar),os.path.join(path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba",archivo_para_localizar))
else:
for k in natsorted(os.listdir(path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba")):
shutil.move(os.path.join(path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba",k),os.path.join(path_secretaria,lista_secretaria[numero_archivo]))
numero_archivo+=1
#print("Aca deberia ir una funcion que mande todos los excel recogidos dentro de la carpeta\n")
shutil.move(os.path.join(path_limpieza_secretaria,archivo_para_localizar),os.path.join(path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba",archivo_para_localizar))
parametro_de_referencia+=1
parametro_de_referencia+=1
try:
lista_ultimos_archivos=natsorted(os.listdir(path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba"))
for archivo in lista_ultimos_archivos:
shutil.move(os.path.join(path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba",archivo),os.path.join(path_secretaria,lista_secretaria[-1]))
except:
pass | _____no_output_____ | MIT | Fase_2.ipynb | rodrigotesone1997/Limpieza_Datos_Municipales |
Salario Genero | path_limpieza_secretaria=path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/XLSX/Excels_proceso_limpieza/salario_genero_ddhh"
path_secretaria=path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/XLSX/"+carpeta_especifica+"_Sueldos/salario_genero_ddhh"
lista_secretaria=natsorted(os.listdir(path_secretaria))
shutil.move(os.path.join(path_limpieza_secretaria,"salario_genero_ddhh_perfecto_0.xlsx"),os.path.join(os.path.join(path_secretaria,lista_secretaria[0]),"salario_genero_ddhh_perfecto_0.xlsx"))
nombre_ultimo_archivo_secretaria=re.sub(".xlsx","",natsorted(os.listdir(path_limpieza_secretaria))[-1])
lista_valores=[*range(1,int(re.findall(r'\d+',nombre_ultimo_archivo_secretaria)[0])+1)]
parametro_de_referencia=0
numero_archivo=1
for archivo_para_localizar in natsorted(os.listdir(path_limpieza_secretaria)):
archivo_para_extraer_numero=re.sub(".xlsx","",archivo_para_localizar)
numero=re.search("[0-999]",archivo_para_extraer_numero)
inicio_del_numero=numero.span()[0]
numero_extraido=int(archivo_para_extraer_numero[inicio_del_numero:])
if numero_extraido == lista_valores[parametro_de_referencia]:
shutil.move(os.path.join(path_limpieza_secretaria,archivo_para_localizar),os.path.join(path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba",archivo_para_localizar))
else:
for k in natsorted(os.listdir(path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba")):
shutil.move(os.path.join(path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba",k),os.path.join(path_secretaria,lista_secretaria[numero_archivo]))
numero_archivo+=1
#print("Aca deberia ir una funcion que mande todos los excel recogidos dentro de la carpeta\n")
shutil.move(os.path.join(path_limpieza_secretaria,archivo_para_localizar),os.path.join(path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba",archivo_para_localizar))
parametro_de_referencia+=1
parametro_de_referencia+=1
try:
lista_ultimos_archivos=natsorted(os.listdir(path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba"))
for archivo in lista_ultimos_archivos:
shutil.move(os.path.join(path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba",archivo),os.path.join(path_secretaria,lista_secretaria[-1]))
except:
pass | _____no_output_____ | MIT | Fase_2.ipynb | rodrigotesone1997/Limpieza_Datos_Municipales |
Salario Gobierno | path_limpieza_secretaria=path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/XLSX/Excels_proceso_limpieza/salario_gobierno"
path_secretaria=path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/XLSX/"+carpeta_especifica+"_Sueldos/salario_gobierno"
lista_secretaria=natsorted(os.listdir(path_secretaria))
shutil.move(os.path.join(path_limpieza_secretaria,"salario_gobierno_perfecto_0.xlsx"),os.path.join(os.path.join(path_secretaria,lista_secretaria[0]),"salario_gobierno_perfecto_0.xlsx"))
shutil.move(os.path.join(path_limpieza_secretaria,"salario_gobierno_perfecto_1.xlsx"),os.path.join(os.path.join(path_secretaria,lista_secretaria[1]),"salario_gobierno_perfecto_1.xlsx"))
nombre_ultimo_archivo_secretaria=re.sub(".xlsx","",natsorted(os.listdir(path_limpieza_secretaria))[-1])
lista_valores=[*range(2,int(re.findall(r'\d+',nombre_ultimo_archivo_secretaria)[0])+1)]
parametro_de_referencia=0
numero_archivo=2
for archivo_para_localizar in natsorted(os.listdir(path_limpieza_secretaria)):
archivo_para_extraer_numero=re.sub(".xlsx","",archivo_para_localizar)
numero=re.search("[0-999]",archivo_para_extraer_numero)
inicio_del_numero=numero.span()[0]
numero_extraido=int(archivo_para_extraer_numero[inicio_del_numero:])
if numero_extraido == lista_valores[parametro_de_referencia]:
shutil.move(os.path.join(path_limpieza_secretaria,archivo_para_localizar),os.path.join(path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba",archivo_para_localizar))
else:
for k in natsorted(os.listdir(path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba")):
shutil.move(os.path.join(path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba",k),os.path.join(path_secretaria,lista_secretaria[numero_archivo]))
numero_archivo+=1
#print("Aca deberia ir una funcion que mande todos los excel recogidos dentro de la carpeta\n")
shutil.move(os.path.join(path_limpieza_secretaria,archivo_para_localizar),os.path.join(path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba",archivo_para_localizar))
parametro_de_referencia+=1
parametro_de_referencia+=1
try:
lista_ultimos_archivos=natsorted(os.listdir(path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba"))
for archivo in lista_ultimos_archivos:
shutil.move(os.path.join(path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba",archivo),os.path.join(path_secretaria,lista_secretaria[-1]))
except:
pass | _____no_output_____ | MIT | Fase_2.ipynb | rodrigotesone1997/Limpieza_Datos_Municipales |
Salario intendencia | path_limpieza_secretaria=path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/XLSX/Excels_proceso_limpieza/salario_intendencia"
path_secretaria=path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/XLSX/"+carpeta_especifica+"_Sueldos/salario_intendencia"
lista_secretaria=natsorted(os.listdir(path_secretaria))
shutil.move(os.path.join(path_limpieza_secretaria,"salario_intendencia_perfecto_0.xlsx"),os.path.join(os.path.join(path_secretaria,lista_secretaria[0]),"salario_intendencia_perfecto_0.xlsx"))
shutil.move(os.path.join(path_limpieza_secretaria,"salario_intendencia_perfecto_1.xlsx"),os.path.join(os.path.join(path_secretaria,lista_secretaria[1]),"salario_intendencia_perfecto_1.xlsx"))
nombre_ultimo_archivo_secretaria=re.sub(".xlsx","",natsorted(os.listdir(path_limpieza_secretaria))[-1])
lista_valores=[*range(3,int(re.findall(r'\d+',nombre_ultimo_archivo_secretaria)[0])+1)]
parametro_de_referencia=0
numero_archivo=2
for archivo_para_localizar in natsorted(os.listdir(path_limpieza_secretaria)):
archivo_para_extraer_numero=re.sub(".xlsx","",archivo_para_localizar)
numero=re.search("[0-999]",archivo_para_extraer_numero)
inicio_del_numero=numero.span()[0]
numero_extraido=int(archivo_para_extraer_numero[inicio_del_numero:])
if numero_extraido == lista_valores[parametro_de_referencia]:
shutil.move(os.path.join(path_limpieza_secretaria,archivo_para_localizar),os.path.join(path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba",archivo_para_localizar))
else:
for k in natsorted(os.listdir(path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba")):
shutil.move(os.path.join(path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba",k),os.path.join(path_secretaria,lista_secretaria[numero_archivo]))
numero_archivo+=1
#print("Aca deberia ir una funcion que mande todos los excel recogidos dentro de la carpeta\n")
shutil.move(os.path.join(path_limpieza_secretaria,archivo_para_localizar),os.path.join(path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba",archivo_para_localizar))
parametro_de_referencia+=1
parametro_de_referencia+=1
try:
lista_ultimos_archivos=natsorted(os.listdir(path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba"))
for archivo in lista_ultimos_archivos:
shutil.move(os.path.join(path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba",archivo),os.path.join(path_secretaria,lista_secretaria[-1]))
except:
pass | _____no_output_____ | MIT | Fase_2.ipynb | rodrigotesone1997/Limpieza_Datos_Municipales |
Salario Planeamiento | path_limpieza_secretaria=path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/XLSX/Excels_proceso_limpieza/salario_planeamiento"
path_secretaria=path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/XLSX/"+carpeta_especifica+"_Sueldos/salario_planeamiento"
lista_secretaria=natsorted(os.listdir(path_secretaria))
shutil.move(os.path.join(path_limpieza_secretaria,"salario_planeamiento_perfecto_0.xlsx"),os.path.join(os.path.join(path_secretaria,lista_secretaria[0]),"salario_planeamiento_perfecto_0.xlsx"))
shutil.move(os.path.join(path_limpieza_secretaria,"salario_planeamiento_perfecto_1.xlsx"),os.path.join(os.path.join(path_secretaria,lista_secretaria[1]),"salario_planeamiento_perfecto_1.xlsx"))
shutil.move(os.path.join(path_limpieza_secretaria,"salario_planeamiento_perfecto_2.xlsx"),os.path.join(os.path.join(path_secretaria,lista_secretaria[2]),"salario_planeamiento_perfecto_2.xlsx"))
nombre_ultimo_archivo_secretaria=re.sub(".xlsx","",natsorted(os.listdir(path_limpieza_secretaria))[-1])
lista_valores=[*range(4,int(re.findall(r'\d+',nombre_ultimo_archivo_secretaria)[0])+1)]
# For the 2020_09 case
#lista_valores=[*range(3,int(re.findall(r'\d+',nombre_ultimo_archivo_secretaria)[0])+1)]
parametro_de_referencia=0
numero_archivo=3
for archivo_para_localizar in natsorted(os.listdir(path_limpieza_secretaria)):
archivo_para_extraer_numero=re.sub(".xlsx","",archivo_para_localizar)
numero=re.search("[0-999]",archivo_para_extraer_numero)
inicio_del_numero=numero.span()[0]
numero_extraido=int(archivo_para_extraer_numero[inicio_del_numero:])
if numero_extraido == lista_valores[parametro_de_referencia]:
shutil.move(os.path.join(path_limpieza_secretaria,archivo_para_localizar),os.path.join(path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba",archivo_para_localizar))
else:
for k in natsorted(os.listdir(path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba")):
shutil.move(os.path.join(path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba",k),os.path.join(path_secretaria,lista_secretaria[numero_archivo]))
numero_archivo+=1
#print("Aca deberia ir una funcion que mande todos los excel recogidos dentro de la carpeta\n")
shutil.move(os.path.join(path_limpieza_secretaria,archivo_para_localizar),os.path.join(path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba",archivo_para_localizar))
parametro_de_referencia+=1
parametro_de_referencia+=1
try:
lista_ultimos_archivos=natsorted(os.listdir(path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba"))
for archivo in lista_ultimos_archivos:
shutil.move(os.path.join(path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba",archivo),os.path.join(path_secretaria,lista_secretaria[-1]))
except:
pass | _____no_output_____ | MIT | Fase_2.ipynb | rodrigotesone1997/Limpieza_Datos_Municipales |
Delete the "Excels_proceso_limpieza" folder | shutil.rmtree(path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/XLSX/Excels_proceso_limpieza") | _____no_output_____ | MIT | Fase_2.ipynb | rodrigotesone1997/Limpieza_Datos_Municipales |
Create the final files | path_folder_carpeta=path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/XLSX/"+carpeta_especifica+"_Sueldos"
lista_secretarias=natsorted(os.listdir(path_folder_carpeta))
path_carpeta_prueba=path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/carpeta_prueba"
for secretaria in lista_secretarias:
path_folder_secretaria=os.path.join(path_folder_carpeta,secretaria)
lista_secciones_secretaria=natsorted(os.listdir(path_folder_secretaria))
for seccion in lista_secciones_secretaria:
path_folder_secciones=os.path.join(path_folder_secretaria,seccion)
lista_archivos_secciones=natsorted(os.listdir(path_folder_secciones))
path_primer_archivo_seccion=os.path.join(path_folder_secciones, lista_archivos_secciones[0])
path_archivo_final=path_folder_secciones+".xlsx"
if len(lista_archivos_secciones) == 1:
shutil.move(path_primer_archivo_seccion,path_archivo_final)
else:
shutil.move(path_primer_archivo_seccion,path_carpeta_prueba)
for archivo in lista_archivos_secciones[1:]:
shutil.move(os.path.join(path_folder_secciones,archivo),path_carpeta_prueba)
lista_archivos_carpeta_prueba=natsorted(os.listdir(path_carpeta_prueba))
path_df_1=os.path.join(path_carpeta_prueba,lista_archivos_carpeta_prueba[0])
path_df_2=os.path.join(path_carpeta_prueba,lista_archivos_carpeta_prueba[1])
df_1=pd.read_excel(path_df_1)
df_2=pd.read_excel(path_df_2)
df_final=pd.concat([df_1,df_2],ignore_index=True)
os.remove(path_df_1)
os.remove(path_df_2)
df_final.to_excel(path_df_1,index=False,header=True)
path_ubicacion_archivo_final=os.path.join(path_carpeta_prueba,lista_archivos_carpeta_prueba[0])
shutil.move(path_ubicacion_archivo_final,path_archivo_final) | _____no_output_____ | MIT | Fase_2.ipynb | rodrigotesone1997/Limpieza_Datos_Municipales |
Delete the folders | for secretarias in os.listdir(path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/XLSX/"+carpeta_especifica+"_Sueldos"):
for archivos_secretaria in os.listdir(os.path.join(path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/XLSX/"+carpeta_especifica+"_Sueldos",secretarias)):
try:
os.rmdir(os.path.join(os.path.join(path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/XLSX/"+carpeta_especifica+"_Sueldos",secretarias),archivos_secretaria))
except:
pass | _____no_output_____ | MIT | Fase_2.ipynb | rodrigotesone1997/Limpieza_Datos_Municipales |
Review column 1 of every Excel file and move the numbers to the second column | path=path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/XLSX/"+carpeta_especifica+"_Sueldos"
lista_secretarias=natsorted(os.listdir(path))
for secretaria in lista_secretarias:
path_secretaria=os.path.join(path,secretaria)
lista_archivos=natsorted(os.listdir(path_secretaria))
for archivo in lista_archivos:
path_archivo=os.path.join(path_secretaria,archivo)
df=pd.read_excel(path_archivo)
final=df.shape[0]
workbook= load_workbook(filename=path_archivo)
sheet= workbook.active
for fila in range(1,final+2):
celda=sheet.cell(row=fila, column=1).value
try:
inicio_numero=re.search("[0-9]",celda).span()[0]
nombre=celda[:inicio_numero]
numero=celda[inicio_numero:]
sheet["A"+str(k)] = nombre
sheet["B"+str(k)] = numero
except:
pass
workbook.save(filename=path_archivo) | _____no_output_____ | MIT | Fase_2.ipynb | rodrigotesone1997/Limpieza_Datos_Municipales |
Replace commas with periods | path=path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/XLSX/"+carpeta_especifica+"_Sueldos"
for secretaria in natsorted(os.listdir(path)):
lista_secretaria=natsorted(os.listdir(os.path.join(path,secretaria)))
for archivo in lista_secretaria:
path_archivo=os.path.join(os.path.join(path,secretaria),archivo)
df=pd.read_excel(path_archivo)
final=df.shape[0]
workbook= load_workbook(filename=path_archivo)
sheet= workbook.active
for fila in range(1,final+2):
celda_1=sheet.cell(row=fila, column=2).value
celda_2=sheet.cell(row=fila, column=3).value
celda_3=sheet.cell(row=fila, column=4).value
try:
sheet["B"+str(k)]=re.sub(",",".",celda_1)
sheet["C"+str(k)]=re.sub(",",".",celda_2)
sheet["D"+str(k)]=re.sub(",",".",celda_3)
except:
pass
workbook.save(filename=path_archivo) | _____no_output_____ | MIT | Fase_2.ipynb | rodrigotesone1997/Limpieza_Datos_Municipales |
This checks for extra digits that randomly show up after the 2 decimal places | path=path_local+"/Gasto_Publico_Argentino_files/Salarios_Rosario/XLSX/"+carpeta_especifica+"_Sueldos"
lista_secretarias=natsorted(os.listdir(path))
for columna in [2,3,4]:
for secretaria in lista_secretarias:
lista_archivos=natsorted(os.listdir(os.path.join(path,secretaria)))
for archivo in lista_archivos:
path_archivo=os.path.join(os.path.join(path,secretaria),archivo)
df=pd.read_excel(path_archivo)
final=df.shape[0]
workbook= load_workbook(filename=path_archivo)
sheet= workbook.active
for fila in range(1,final+2):
celda=str(sheet.cell(row=fila, column=columna).value)
                if len(re.findall(r"\.",celda))>1:
                    # keep only two digits after the first decimal point,
                    # writing back into the column currently being checked
                    idx_punto=re.search(r"\.",celda).span()[1]
                    sheet.cell(row=fila, column=columna).value=celda[:idx_punto+2]
else:
pass
workbook.save(filename=path_archivo) | _____no_output_____ | MIT | Fase_2.ipynb | rodrigotesone1997/Limpieza_Datos_Municipales |
Measure how long the program took (approximately) | Finalizacion_programa=datetime.now()
tiempo=Finalizacion_programa-Inicio_programa
tiempo_medido=str(timedelta(seconds=tiempo.seconds))[2:]
tiempo_medido | _____no_output_____ | MIT | Fase_2.ipynb | rodrigotesone1997/Limpieza_Datos_Municipales |
Lasso and Elastic Net for Sparse Signals. Estimates Lasso and Elastic-Net regression models on a manually generated sparse signal corrupted with an additive noise. Estimated coefficients are compared with the ground-truth. | print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import r2_score
# #############################################################################
# Generate some sparse data to play with
np.random.seed(42)
n_samples, n_features = 50, 100
X = np.random.randn(n_samples, n_features)
# Decreasing coef w. alternated signs for visualization
idx = np.arange(n_features)
coef = (-1) ** idx * np.exp(-idx / 10)
coef[10:] = 0 # sparsify coef
y = np.dot(X, coef)
# Add noise
y += 0.01 * np.random.normal(size=n_samples)
# Split data in train set and test set
n_samples = X.shape[0]
X_train, y_train = X[:n_samples // 2], y[:n_samples // 2]
X_test, y_test = X[n_samples // 2:], y[n_samples // 2:]
# #############################################################################
# Lasso
from sklearn.linear_model import Lasso
alpha = 0.1
lasso = Lasso(alpha=alpha)
y_pred_lasso = lasso.fit(X_train, y_train).predict(X_test)
r2_score_lasso = r2_score(y_test, y_pred_lasso)
print(lasso)
print("r^2 on test data : %f" % r2_score_lasso)
# #############################################################################
# ElasticNet
from sklearn.linear_model import ElasticNet
enet = ElasticNet(alpha=alpha, l1_ratio=0.7)
y_pred_enet = enet.fit(X_train, y_train).predict(X_test)
r2_score_enet = r2_score(y_test, y_pred_enet)
print(enet)
print("r^2 on test data : %f" % r2_score_enet)
m, s, _ = plt.stem(np.where(enet.coef_)[0], enet.coef_[enet.coef_ != 0],
markerfmt='x', label='Elastic net coefficients',
use_line_collection=True)
plt.setp([m, s], color="#2ca02c")
m, s, _ = plt.stem(np.where(lasso.coef_)[0], lasso.coef_[lasso.coef_ != 0],
markerfmt='x', label='Lasso coefficients',
use_line_collection=True)
plt.setp([m, s], color='#ff7f0e')
plt.stem(np.where(coef)[0], coef[coef != 0], label='true coefficients',
markerfmt='bx', use_line_collection=True)
plt.legend(loc='best')
plt.title("Lasso $R^2$: %.3f, Elastic Net $R^2$: %.3f"
% (r2_score_lasso, r2_score_enet))
plt.show() | _____no_output_____ | MIT | sklearn/sklearn learning/demonstration/auto_examples_jupyter/linear_model/plot_lasso_and_elasticnet.ipynb | wangyendt/deeplearning_models |
Campus Recruitment. In this notebook we will try to answer these questions:
* Which factor influenced a candidate in getting placed?
* Does percentage matter for one to get placed?
* Which degree specialization is most in demand by corporates?
* Play with the data, conducting all statistical tests.
At the end we'll use a DecisionTreeClassifier to predict whether a student will be placed or not based on the given data. Data Import & Information | import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
df = pd.read_csv("../input/factors-affecting-campus-placement/Placement_Data_Full_Class.csv")
df.head()
df.describe()
df.info() | <class 'pandas.core.frame.DataFrame'>
RangeIndex: 215 entries, 0 to 214
Data columns (total 15 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 sl_no 215 non-null int64
1 gender 215 non-null object
2 ssc_p 215 non-null float64
3 ssc_b 215 non-null object
4 hsc_p 215 non-null float64
5 hsc_b 215 non-null object
6 hsc_s 215 non-null object
7 degree_p 215 non-null float64
8 degree_t 215 non-null object
9 workex 215 non-null object
10 etest_p 215 non-null float64
11 specialisation 215 non-null object
12 mba_p 215 non-null float64
13 status 215 non-null object
14 salary 148 non-null float64
dtypes: float64(6), int64(1), object(8)
memory usage: 25.3+ KB
| Apache-2.0 | campus-recruitment-97-score.ipynb | theyazilimci/KaggleProject |
We can observe some null values in the salary column | df.isna().any()
df['salary'].mean()
df.groupby('degree_t')['salary'].mean() | _____no_output_____ | Apache-2.0 | campus-recruitment-97-score.ipynb | theyazilimci/KaggleProject |
Students in Science and Tech earn more money than others | df.groupby('gender')['salary'].mean() | _____no_output_____ | Apache-2.0 | campus-recruitment-97-score.ipynb | theyazilimci/KaggleProject |
Males earn more money than females. Plot data | sns.barplot(x='gender',y='salary',data=df,palette="Blues_d")
sns.barplot(x='degree_t',y='salary',data=df,palette="Blues_d")
sns.countplot(x='gender',data=df,palette="Blues_d")
sns.barplot(x="workex",y="salary",data=df,palette="Blues_d")
sns.barplot(x="status",y="degree_p",data=df,palette="Blues_d") | _____no_output_____ | Apache-2.0 | campus-recruitment-97-score.ipynb | theyazilimci/KaggleProject |
Placed candidates have higher degree percentages than those not placed | sns.barplot(x="workex",y="degree_p",data=df,palette="Blues_d")
sns.barplot(x="gender",y="degree_p",data=df,palette="Blues_d") | _____no_output_____ | Apache-2.0 | campus-recruitment-97-score.ipynb | theyazilimci/KaggleProject |
Females have better degree percentages than males | sns.barplot(x="ssc_b",y="salary",data=df,palette="Blues_d")
sns.barplot(x="specialisation",y="salary",data=df,palette="Blues_d") | _____no_output_____ | Apache-2.0 | campus-recruitment-97-score.ipynb | theyazilimci/KaggleProject |
Global plot | sns.pairplot(data=df,palette="Blues_d") | _____no_output_____ | Apache-2.0 | campus-recruitment-97-score.ipynb | theyazilimci/KaggleProject |
We Can see the correlation (linearity) between different columns | df.groupby('gender')['status'].value_counts() | _____no_output_____ | Apache-2.0 | campus-recruitment-97-score.ipynb | theyazilimci/KaggleProject |
Males are recruited more than females, even though we saw above that females got better marks than males | # Check the correlation
sns.heatmap(df.corr(),cmap="Blues") | _____no_output_____ | Apache-2.0 | campus-recruitment-97-score.ipynb | theyazilimci/KaggleProject |
If we look at the salary, we can say that neither the degree nor the gender has a real impact on a student's salary | sns.kdeplot(df['salary'])
# we get only categorical data
cols = df.columns
num_cols = df._get_numeric_data().columns
num_cols
categorical_col = list(set(cols) - set(num_cols))
for i in categorical_col:
plt.figure()
sns.stripplot(x=i, y="salary",hue='gender',data=df, palette="Set1", dodge=True)
df.groupby('degree_t').count()
df.groupby('degree_t')['status'].value_counts() | _____no_output_____ | Apache-2.0 | campus-recruitment-97-score.ipynb | theyazilimci/KaggleProject |
Students in Commerce get placed more than the other degree types. Prediction with a DecisionTreeClassifier | from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import confusion_matrix
df.columns
labelEncode = LabelEncoder()
for i in categorical_col:
df[i] = labelEncode.fit_transform(df[i])
df
# people not placed don't have a salary so we fill it with the mean
df = df.fillna(df.mean())
df.isna().sum()
X = df.drop(['sl_no','status'],axis=1)
y = df['status']
x_train,x_test,y_train,y_test = train_test_split(X,y,test_size=0.2,random_state=42)
len(x_test),len(y_test) | _____no_output_____ | Apache-2.0 | campus-recruitment-97-score.ipynb | theyazilimci/KaggleProject |
Build DecisionTreeClassifier | decisionTree = DecisionTreeClassifier()
decisionTree.fit(x_train,y_train)
decisionTree.score(x_test,y_test)
y_pred = decisionTree.predict(x_test)
y_pred
len(y_test)
y_test.head()
y_test = y_test.tolist()
number_error = 0
for i in range(len(y_test)):
if y_test[i] != y_pred[i]:
number_error += 1
number_error
print('confusion matrix: \n',confusion_matrix(y_pred,y_test),'\n') | confusion matrix:
[[12 1]
[ 0 30]]
| Apache-2.0 | campus-recruitment-97-score.ipynb | theyazilimci/KaggleProject |
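The confusion matrix above can also be summarized as per-class precision and recall; a short sketch reusing the same `y_test` and `y_pred` (an illustrative addition, not part of the original notebook):

from sklearn.metrics import accuracy_score, classification_report

# summarize the predictions already computed above
print(accuracy_score(y_test, y_pred))
print(classification_report(y_test, y_pred))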
Distributions | from empiricaldist import Pmf | _____no_output_____ | MIT | notebooks/chap03me.ipynb | mdominguez2010/ThinkBayes2 |
Probability Mass Functions (PMF) - for discrete outcomes (e.g. heads or tails) - maps each possible outcome to its probability | # Representing the outcome of a coin toss
coin = Pmf()
coin['heads'] = 1/2
coin['tails'] = 1/2
coin
# Create a probability mass function for a series of die outcomes
die = Pmf.from_seq([1, 2, 3, 4, 5, 6])
die | _____no_output_____ | MIT | notebooks/chap03me.ipynb | mdominguez2010/ThinkBayes2 |
All outcomes in the sequence appear once, so they all have the same probability, 1/6 | letters = Pmf.from_seq(list('Mississippi'))
letters | _____no_output_____ | MIT | notebooks/chap03me.ipynb | mdominguez2010/ThinkBayes2 |
'M' appears once, so probability = 1/11 = 0.0909. 'i' appears 4 times, so probability = 4/11 = 0.3636. And so on... | letters['s']
# Avoid KeyError
try:
letters['t']
except KeyError as e:
print("Please choose a letter contained in the Pmf") | Please choose a letter contained in the Pmf
| MIT | notebooks/chap03me.ipynb | mdominguez2010/ThinkBayes2 |
You can also call a Pmf as if it were a function | letters('s') | _____no_output_____ | MIT | notebooks/chap03me.ipynb | mdominguez2010/ThinkBayes2 |
Calling a quantity that does not exist in the distribution will yield a 0, not an error | letters('t') | _____no_output_____ | MIT | notebooks/chap03me.ipynb | mdominguez2010/ThinkBayes2 |
You can also call the Pmf with several quantities at once | die([1,4,6]) | _____no_output_____ | MIT | notebooks/chap03me.ipynb | mdominguez2010/ThinkBayes2 |
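The quantities and probabilities can also be pulled out as arrays; a brief sketch with the same `die` Pmf (assuming the `qs`/`ps` attributes of `empiricaldist`):

# quantities and their probabilities as plain arrays
print(die.qs)        # [1 2 3 4 5 6]
print(die.ps)        # six equal probabilities of 1/6
print(die.ps.sum())  # the probabilities sum to 1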
Step 0: This analysis research question will require several different datasets:
1. The `RAW_us_confirmed_cases.csv` file from the [Kaggle repository](https://www.kaggle.com/antgoldbloom/covid19-data-from-john-hopkins-university?select=RAW_us_confirmed_cases.csv) of Johns Hopkins University COVID-19 data.
2. The [CDC dataset](https://data.cdc.gov/Policy-Surveillance/U-S-State-and-Territorial-Public-Mask-Mandates-Fro/62d6-pm5i) of masking mandates by county.
3. The New York Times [mask compliance survey data](https://github.com/nytimes/covid-19-data/tree/master/mask-use).
The majority of this data is by US County by Day. The mask compliance is a single-shot estimator that gives you a compliance estimate for every County in the US. You should carefully review the data descriptions that accompany these datasets. They each have some interesting caveats. As well, some of them are explicit with regard to the way you should interpret missing data. Lastly, you have been assigned a specific US County for analysis. You are NOT analyzing the entire dataset. You have been assigned one US County that forms the basis for your individual analysis. You can find your individual US County assignment from this Google spreadsheet. Setup - specify county of interest. Assigned Montgomery County in Maryland. | # imports used by the cells below
import os
import requests
import pandas as pd
from zipfile import ZipFile
# setup- county of interest
# Montgomery, MD
state = 'Maryland'
st = 'MD'
county = 'Montgomery' | _____no_output_____ | MIT | A4-Common-Analysis.ipynb | emi90/data-512-a4 |
Get Cases data | # Unzip cases file
os.chdir('data_raw')
with ZipFile('RAW_us_confirmed_cases.csv.zip') as zipfiles:
zipfiles.extractall()
os.chdir('..')
os.getcwd()
# load cases data
raw = pd.read_csv('data_raw/RAW_us_confirmed_cases.csv')
raw.columns | _____no_output_____ | MIT | A4-Common-Analysis.ipynb | emi90/data-512-a4 |
Get Mask policy data | # get mask policy data
# example of the raw endpoint: 'https://data.cdc.gov/resource/62d6-pm5i.json?state_tribe_territory=TX&county'
base_url = 'https://data.cdc.gov/resource/62d6-pm5i.json?state_tribe_territory={st}&county_name={county}'
params = {
'st' : st,
'county' : county + ' County'
}
mask_url = base_url.format(**params)
masks_json = requests.get(mask_url).json()
masks_df = pd.DataFrame.from_dict(masks_json)
masks_df.shape | _____no_output_____ | MIT | A4-Common-Analysis.ipynb | emi90/data-512-a4 |
Get mask compliance data | # get mask compliance data
compliance_url = 'https://raw.githubusercontent.com/nytimes/covid-19-data/master/mask-use/mask-use-by-county.csv'
compliance_df = pd.read_csv(compliance_url)
compliance_df.head() | _____no_output_____ | MIT | A4-Common-Analysis.ipynb | emi90/data-512-a4 |
Filter based on given county | # filter
cases_filter = raw.loc[(raw.Province_State==state) & (raw.Admin2==county)].copy().reset_index(drop=True)
fips_county = int('24' + masks_df.fips_county[0])  # Maryland's state FIPS prefix (24) plus the 3-digit county code gives the 5-digit county FIPS
compliance_filter = compliance_df.loc[compliance_df.COUNTYFP==fips_county].copy().reset_index(drop=True) | _____no_output_____ | MIT | A4-Common-Analysis.ipynb | emi90/data-512-a4 |
Step 1: Analyze The common question that you are to answer is: - How did masking policies change the progression of confirmed COVID-19 cases from February 1, 2020 through October 15, 2021? Answering this question can be a little tricky - and it will be useful for you all (whole class) to discuss this on Slack. We will also spend some time in class on this discussion. Some of the issues that you probably should consider when conducting your analysis include:
1. What needs to be cleaned and standardized over the three datasets?
2. There is a delay between the time of infection and the time a case is confirmed. Many factors may contribute to such delay. People may not show symptoms right away after infection. It may take a few days for the testing results to become available especially during the early period of the pandemic. Should we model the delay?
3. Masking may simply make it longer to get infected or it may prevent some percentage of infection. How should we consider the effect of a mask?
4. The research question is about how a time series changes. The infection time series is a set of slopes. Therefore the question is about a derivative function. That is, you want to answer a question about the change in slope over time. How can we test the difference in the derivative function?
5. Masking survey data shows probability of compliance in several categories. How can we model different proportions for population compliance?
6. Masking policies varied in their implementation (e.g., size of “crowd” required, different situations, restaurants, bars, clubs ...). How should I handle things when my County implemented two different policies at different times?
7. The County I was assigned did not implement a masking policy! What is a reasonable way to answer this question? That is, how might I model “voluntary” masking?
8. Vaccinations probably impacted the apparent effectiveness of masks. How should we account for different vaccination rates in different populations within the same County?
We note that we did not enumerate all potential issues that you may want to discuss. Further, there are some better and worse ways to handle these questions. We are not looking for the one right answer. We are looking for reasonable solutions. There are aspects of this problem that are very hard to model - and so you will probably want to make simplifying assumptions. For example, you might decide to ignore the impacts of vaccinations or consider pre-vaccine availability as one time series and post-vaccine availability as a totally different time series. Define additional datapoints that are initially given | # additional data
pop = 1062061 # given
sq_mi = 491.25 # given
sq_km = 1272.34 # given | _____no_output_____ | MIT | A4-Common-Analysis.ipynb | emi90/data-512-a4 |
Cases dataset | # cases have dates in columns- transpose to have each entry as rows
# also convert dates to datetime
cases = cases_filter[cases_filter.columns[11:]].T.reset_index().rename(columns={'index':'date', 0:'cases'})
cases.date = pd.to_datetime(cases.date)
#quick visualization
cases.plot(x='date', y='cases')
# cases per population
cases['pct_pop'] = cases.cases/pop
# same, just different units
cases.plot(x='date', y='pct_pop') | _____no_output_____ | MIT | A4-Common-Analysis.ipynb | emi90/data-512-a4 |
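One of the issues raised in Step 1 is the delay between infection and case confirmation. A minimal sketch of one way to represent it is below; the 10-day lag is an assumed illustrative value, not something estimated from this data. | # Sketch: approximate infection dates by shifting confirmed-case dates back by an assumed lag.
# The 10-day value is an assumption for illustration only.
assumed_lag_days = 10
cases['approx_infection_date'] = cases.date - pd.Timedelta(days=assumed_lag_days)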
Masks dataset | # look at data types
for col in masks_df.columns:
print('Data type for column {} is {}'.format(col, type(masks_df[col][0])))
# convert data types
# dates to datetime
# order_code to int
masks_df.date = pd.to_datetime(masks_df.date)
masks_df.order_code = masks_df.order_code.astype(int)
# quick visualization
masks_df.plot(x='date', y='order_code') | _____no_output_____ | MIT | A4-Common-Analysis.ipynb | emi90/data-512-a4 |
Mask compliance dataset | # also setup as columns --> transpose to show each entry as rows
# also show as magnitude
mask_comp = compliance_filter.drop(columns=['COUNTYFP']).T.reset_index().rename(columns={'index':'response',0:'pct_pop'})
mask_comp['pop'] = mask_comp.pct_pop * pop
mask_comp | _____no_output_____ | MIT | A4-Common-Analysis.ipynb | emi90/data-512-a4 |
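Step 1 also asks how to model the different compliance proportions from the masking survey. One hedged option, sketched below, is to collapse the five response categories into a single effective masking rate; the category weights are illustrative assumptions, not values from the survey. | # Illustrative weights per survey response category (assumed, not from the data).
compliance_weights = {'NEVER': 0.0, 'RARELY': 0.25, 'SOMETIMES': 0.5, 'FREQUENTLY': 0.75, 'ALWAYS': 1.0}
effective_masking_rate = sum(compliance_weights.get(r, 0.0) * p for r, p in zip(mask_comp.response, mask_comp.pct_pop))
print('Effective masking rate: {:.2%}'.format(effective_masking_rate))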
Merging datasets | # helper function
def case_per_pop(df, subset):
"""
Helper function to join mask compliance population % with cases dataset
df: pd.DataFrame
Cases dataframe
subset: str
String of mask compliance category
Returns: new pd.DataFrame with columns added for population given mask compliance response
"""
subset_pop = mask_comp.loc[mask_comp.response==subset]['pop'].values[0]
df[subset.lower()] = subset_pop
return df
# apply helper function- populate columns with mask compliance population
cases_mask = cases.copy()
for resp in mask_comp.response:
cases_mask = case_per_pop(cases_mask, resp)
# additional merge- merge case + compliance with mask order code
cases_merge = cases_mask.merge(masks_df[['order_code', 'date']], on='date') | _____no_output_____ | MIT | A4-Common-Analysis.ipynb | emi90/data-512-a4 |
Getting daily cases | # get daily cases- daily difference between total cases
cases_merge['daily_cases'] = cases_merge.cases - cases_merge.cases.shift(1)
# quick visualization
cases_merge.plot(x='date', y='daily_cases') | _____no_output_____ | MIT | A4-Common-Analysis.ipynb | emi90/data-512-a4 |
Get rolling average | # Helper function
def get_rolling_avg(df, days):
col_name = 'rolling_avg_' + str(days)
df[col_name] = df.daily_cases.rolling(days).mean()
return df
# get rolling average- 7 day
cases_merge = get_rolling_avg(cases_merge, 7)
# get rolling average- 14 day
cases_merge = get_rolling_avg(cases_merge, 14)
# quick visualization - 7 day
cases_merge.plot(x='date', y='rolling_avg_7')
# quick visualization - 14 day
cases_merge.plot(x='date', y='rolling_avg_14') | _____no_output_____ | MIT | A4-Common-Analysis.ipynb | emi90/data-512-a4 |
Getting change in daily cases | # helper function
def get_pct_chg(df, days):
col_name = 'pct_chg_' + str(days) + 'D_avg'
pct_col = 'rolling_avg_' + str(days)
df[col_name] = df[pct_col].pct_change(1)
return df
# get pct change- 7 days RA
cases_merge = get_pct_chg(cases_merge, 7)
# get pct change- 14 days RA
cases_merge = get_pct_chg(cases_merge, 14)
# get pct change- daily
cases_merge['pct_chg_daily'] = cases_merge.daily_cases.pct_change(1)
# quick visualization 7D pct chg
cases_merge.plot(x='date', y='pct_chg_7D_avg')
# quick visualization 14D pct chg
cases_merge.plot(x='date', y='pct_chg_14D_avg')
# quick visualization daily pct chg
cases_merge.plot(x='date', y='pct_chg_daily')
# recap- what do we have so far
cases_merge.head() | _____no_output_____ | MIT | A4-Common-Analysis.ipynb | emi90/data-512-a4 |
Save to file | # save cleaned dataset to file
cases_merge.to_csv('data_clean/cases_clean.csv', index=False) | _____no_output_____ | MIT | A4-Common-Analysis.ipynb | emi90/data-512-a4 |
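Step 1 also asks how to test whether the derivative function (the daily growth rate) differs between masking regimes. A hedged sketch is below: Welch's t-test on the smoothed growth rate, grouped by order_code, is an illustrative choice rather than the analysis adopted here, and it assumes order_code separates mandate from no-mandate days. | from scipy import stats

# Illustrative significance check on the 7-day smoothed growth rate by mask-order regime.
groups = [g['pct_chg_7D_avg'].dropna() for _, g in cases_merge.groupby('order_code')]
if len(groups) == 2:
    t_stat, p_val = stats.ttest_ind(groups[0], groups[1], equal_var=False)
    print('Welch t-test on 7D growth rate: t = {:.2f}, p = {:.4f}'.format(t_stat, p_val))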
Step 2: Visualize In this step we want you to create a graph that visualizes how the course of the disease was changed by masking policies. For your county, you should create a time series showing the changes in the derivative function of the rate of infection. Your graph should indicate days where masking policies were in effect (or not) and whether the difference in the derivative function was significant. Optionally, you can add a second time series that shows the actual rate of infection. | # read from file
cases = pd.read_csv('data_clean/cases_clean.csv')
cases.head()
# helper function- get masked array
def get_ma(var, order):
return np.ma.masked_where(cases.order_code==order, cases[var])
# get variables
x = pd.to_datetime(cases.date)
# get masked arrays (for different masking orders)
daily_mask = get_ma('daily_cases', 2)
daily_no_mask = get_ma('daily_cases', 1)
total_mask = get_ma('cases', 2)
total_no_mask = get_ma('cases', 1)
roll7_avg_mask = get_ma('rolling_avg_7', 2)
roll7_avg_no_mask = get_ma('rolling_avg_7', 1)
roll14_avg_mask = get_ma('rolling_avg_14', 2)
roll14_avg_no_mask = get_ma('rolling_avg_14', 1)
fig = plt.figure(figsize=(20, 10), facecolor='white')
ax = fig.add_subplot()
ax.set_title('Daily COVID-19 Cases in Montgomery County, MD \nFeb 1 2020 - Oct 15 2021')
# plot daily cases
ax.bar(x, daily_mask, alpha=0.8, linewidth=2, color='lightblue',
label='Daily Cases-Mask Mandate (LHS)')
ax.bar(x, daily_no_mask, alpha=0.5, color='silver', label='Daily Cases-No Mask Mandate (LHS)')
# plot rolling avg 7D
ax.plot(x, roll7_avg_mask, alpha=0.8, linewidth=2, color='crimson',
label='7D Rolling Avg of Cases-Mask Mandate (LHS)')
ax.plot(x, roll7_avg_no_mask, alpha=0.5, color='crimson', linestyle='dashed',
label='7D Rolling Avg of Cases-No Mask Mandate (LHS)')
# plot rolling avg 14D
ax.plot(x, roll14_avg_mask, alpha=0.8, linewidth=2, color='darkblue',
label='14D Rolling Avg of Cases-Mask Mandate (LHS)')
ax.plot(x, roll14_avg_no_mask, alpha=0.5, color='darkblue', linestyle='dashed',
label='14D Rolling Avg of Cases-No Mask Mandate (LHS)')
# set labels
ax.set_xlabel('Date')
ax.set_ylabel('Number of Positive Cases')
plt.legend(loc='upper left')
ax2 = ax.twinx()
ax2.plot(x, total_mask, c='darkgreen', alpha=0.6, linewidth=5, label='Total Cases-Mask Mandate (RHS)')
ax2.plot(x, total_no_mask, c='darkgreen', alpha=0.3, linewidth=3,
linestyle='dashed', label='Total Cases-No Mask Mandate (RHS)')
ax2.set_ylabel('Number of Positive Cases')
plt.legend(loc='upper right')
plt.savefig('visualization/plot.png', facecolor=fig.get_facecolor(), bbox_inches='tight') | C:\ProgramData\Anaconda3\lib\site-packages\numpy\lib\stride_tricks.py:256: UserWarning: Warning: converting a masked element to nan.
args = [np.array(_m, copy=False, subok=subok) for _m in args]
| MIT | A4-Common-Analysis.ipynb | emi90/data-512-a4 |
Gaussian Bayes classifier In this assignment we will use a Gaussian Bayes classifier to classify our data points. Import packages | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.stats import multivariate_normal
from sklearn.metrics import classification_report
from matplotlib import cm | _____no_output_____ | MIT | 07-Assignment/Bayesian_Classifier_Assignment3_ML.ipynb | hritik5102/SHALA2020 |
Load training data Our data has 2D features $x_1, x_2$. Data from the two classes are in $\texttt{class1_train}$ and $\texttt{class2_train}$ respectively. Each file has two columns corresponding to the 2D features. | class1_train = pd.read_csv('https://raw.githubusercontent.com/shala2020/shala2020.github.io/master/Lecture_Materials/Assignments/MachineLearning/L3/class1_train').to_numpy()
class2_train = pd.read_csv('https://raw.githubusercontent.com/shala2020/shala2020.github.io/master/Lecture_Materials/Assignments/MachineLearning/L3/class2_train').to_numpy()
class1_train[:10]
class1_train.shape
class2_train.shape | _____no_output_____ | MIT | 07-Assignment/Bayesian_Classifier_Assignment3_ML.ipynb | hritik5102/SHALA2020 |
Visualize training dataGenerate 2D scatter plot of the training data. Plot the points from class 1 in red and the points from class 2 in blue. | import seaborn as sns
classes = ['class-1','class-2']
# plot each class in its own colour; class 2 points come from class2_train
plt.scatter(class1_train[:, 0], class1_train[:, 1], c="red", alpha=0.6, edgecolors='none', label=classes[0])
plt.scatter(class2_train[:, 0], class2_train[:, 1], c="blue", alpha=0.6, label=classes[1])
plt.legend(loc='best', fontsize=16)
plt.xlabel('$x_1$')
plt.ylabel('$x_2$')
| /usr/local/lib/python3.6/dist-packages/statsmodels/tools/_testing.py:19: FutureWarning: pandas.util.testing is deprecated. Use the functions in the public API at pandas.testing instead.
import pandas.util.testing as tm
| MIT | 07-Assignment/Bayesian_Classifier_Assignment3_ML.ipynb | hritik5102/SHALA2020 |
Maximum likelihood estimate of parametersWe will model the likelihood, $P(\mathbf{x}|C_1)$ and $P(\mathbf{x}|C_2)$ as $\mathcal{N}(\mathbf{\mu_1},\Sigma_1)$ and $\mathcal{N}(\mathbf{\mu_2},\Sigma_2)$ respectively. The prior probability of the classes are called, $P(C_1)=\pi_1$ and $P(C_2)=\pi_2$.The maximum likelihood estimate of the parameters as follows:\begin{align*}\pi_k &= \frac{\sum_{i=1}^N \mathbb{1}(t^i=k)}{N}\\\mathbf{\mu_k} &= \frac{\sum_{i=1}^N \mathbb{1}(t^i=k)\mathbf{x}^i}{\sum_{i=1}^N \mathbb{1}(t^i=k)}\\\Sigma_k &= \frac{\sum_{i=1}^N \mathbb{1}(t^i=k)(\mathbf{x}^i-\mathbf{\mu_k})(\mathbf{x}^i-\mathbf{\mu_k})^T}{\sum_{i=1}^N \mathbb{1}(t^i=k)}\\\end{align*}Here, $t^i$ is the target or class of $i^{th}$ sample. $\mathbb{1}(t^i=k)$ is 1 if $t^i=k$ and 0 otherwise.Compute maximum likelihood values estimates of $\pi_1$, $\mu_1$, $\Sigma_1$ and $\pi_2$, $\mu_2$, $\Sigma_2$ Also print these values $pi$ = `Prior` $mu$ and $sigma$ = `Likelihood` | def calculate_pi_1():
num = class1_train.shape[0]
deno = class1_train.shape[0] + class2_train.shape[0]
return num/deno
def calculate_pi_2():
num = class2_train.shape[0]
deno = class1_train.shape[0] + class2_train.shape[0]
return num/deno
def calculate_mu_1():
return class1_train.mean(axis=0)
def calculate_mu_2():
return class2_train.mean(axis=0)
def calculate_cov_1():
x = class1_train
print(x.shape)
mu = x.mean(axis=0)
x_norm = x-mu
x_transpose = x_norm.transpose()
return np.cov(x_transpose)
def calculate_cov_2():
x = class2_train
print(x.shape)
mu = x.mean(axis=0)
x_norm = x-mu
x_transpose = x_norm.transpose()
return np.cov(x_transpose)
print( 'pi_1 : {} and pi_2 : {}'.format(calculate_pi_1(),calculate_pi_2()))
print( 'mu_1 : {} and mu_2 : {}'.format(calculate_mu_1(),calculate_mu_2()))
print( 'sigma_1 : \n{} \n sigma_2 : \n{}'.format(calculate_cov_1(),calculate_cov_2()))
## Another way to get Pi , mu and sigma
pi1 = len(class1_train)/(len(class1_train)+len(class2_train))
pi2 = len(class2_train)/(len(class1_train)+len(class2_train))
mu1 = class1_train.mean(axis=0)
mu2 = class2_train.mean(axis=0)
sig1 = np.cov(class1_train,rowvar=False)
sig2 = np.cov(class2_train,rowvar=False)
print("Pi-1 {} and Pi-2 {}".format(pi1,pi2))
print("mu-1 {} and mu-2 {}".format(mu1,mu2))
print("sig-1 {} and sig-2 {}".format(sig1,sig2))
| Pi-1 0.8040201005025126 and Pi-2 0.19597989949748743
mu-1 [0.96998989 1.02894917] and mu-2 [-1.02482819 -0.91492055]
sig-1 [[0.96127884 0.07824879]
[0.07824879 0.82105102]] and sig-2 [[1.1978678 0.48182629]
[0.48182629 0.93767199]]
| MIT | 07-Assignment/Bayesian_Classifier_Assignment3_ML.ipynb | hritik5102/SHALA2020 |
Visualize the likelihoodNow that you have the parameters, let us visualize how the likelihood looks like.1. Use $\texttt{np.mgrid}$ to generate points uniformly spaced in -5 to 5 along 2 axes1. Use $\texttt{multivariate_normal.pdf}$ to get compute the Gaussian likelihood for each class 1. Use $\texttt{plot_surface}$ to plot the likelihood of each class.1. Use $\texttt{contourf}$ to plot the likelihood of each class. You may find the code in the lecture notebook helpful. For the plots, use $\texttt{cmap=cm.Reds}$ for class 1 and $\texttt{cmap=cm.Blues}$ for class 2. Use $\texttt{alpha=0.5}$ to overlay both plots together. | from matplotlib import cm
x,y = np.mgrid[-5:5:.01, -5:5:.01]
pos = np.empty(x.shape + (2,))
pos[:, :, 0] = x; pos[:, :, 1] = y
mu1 = calculate_mu_1()
mu2 = calculate_mu_2()
cov1 = calculate_cov_1()
cov2 = calculate_cov_2()
rv1 = multivariate_normal(mean = mu1, cov = cov1)
rv2 = multivariate_normal(mean = mu2, cov = cov2)
fig = plt.figure(figsize=(20,10))
ax = fig.add_subplot(121, projection='3d')
plt.xlabel('x')
plt.ylabel('y')
ax.plot_surface(x,y,rv1.pdf(pos), cmap=cm.Reds,alpha=0.5)
ax.plot_surface(x,y,rv2.pdf(pos), cmap=cm.Blues,alpha=0.5)
plt.subplot(122)
plt.contourf(x, y, rv1.pdf(pos), cmap=cm.Reds,alpha=0.5)
plt.contourf(x, y, rv2.pdf(pos), cmap=cm.Blues,alpha=0.5)
plt.colorbar()
plt.xlabel('x')
plt.ylabel('y') | (160, 2)
(39, 2)
| MIT | 07-Assignment/Bayesian_Classifier_Assignment3_ML.ipynb | hritik5102/SHALA2020 |
Visualize the posteriorUse the prior and the likelihood you've computed to obtain the posterior distribution for each class.Like in the case of the likelihood above, make same similar surface and contour plots for the posterior. | likelihood1 = rv1.pdf(pos)
likelihood2 = rv2.pdf(pos)
p1 = (likelihood1 * pi1)/(likelihood1*pi1+likelihood2*pi2)
p2 = (likelihood2 * pi2)/(likelihood1*pi1+likelihood2*pi2)
x, y = np.mgrid[-5:5:.01, -5:5:.01]
pos = np.empty(x.shape + (2,))
pos[:, :, 0] = x; pos[:, :, 1] = y
fig = plt.figure(figsize=(20,10))
ax = fig.add_subplot(131, projection='3d')
plt.xlabel('x')
plt.ylabel('y')
ax.plot_surface(x,y,p1, cmap=cm.Reds,alpha=0.5)
ax.plot_surface(x,y,p2, cmap=cm.Blues,alpha=0.5)
plt.subplot(132)
plt.contourf(x,y,p1,cmap=cm.Reds,alpha=0.5)
plt.contourf(x,y,p2,cmap=cm.Blues,alpha=0.5)
plt.xlabel('x')
plt.ylabel('y') | _____no_output_____ | MIT | 07-Assignment/Bayesian_Classifier_Assignment3_ML.ipynb | hritik5102/SHALA2020 |
Decision boundary1. Decision boundary can be obtained by $P(C_2|x)>P(C_1|x)$ in python. Use $\texttt{contourf}$ to plot the decision boundary. Use $\texttt{cmap=cm.Blues}$ and $\texttt{alpha=0.5}$1. Also overlay the scatter plot of train data points from the 2 classes on the same plot. Use red color for class 1 and blue color for class 2 | des = p2>p1
plt.contourf(x,y,p1,cmap=cm.Reds,alpha=0.5)
plt.contourf(x,y,p2,cmap=cm.Blues,alpha=0.5)
plt.contourf(x,y,des,cmap=cm.Greens,alpha=0.3)
plt.xlabel('x')
plt.ylabel('y')
plt.scatter(class1_train[:,0],class1_train[:,1],marker='*',color='red')
plt.scatter(class2_train[:,0],class2_train[:,1],marker='+',color='blue') | _____no_output_____ | MIT | 07-Assignment/Bayesian_Classifier_Assignment3_ML.ipynb | hritik5102/SHALA2020 |
Test DataNow let's use our trained model to classify test data points1. $\texttt{test_data}$ contains the $x1,x2$ features of different data points1. $\texttt{test_label}$ contains the true class of the data points. 0 means class 1. 1 means class 2. 1. Classify the test points based on whichever class has higher posterior probability for each data point1. Use $\texttt{classification_report}$ to test the classification performance | test = pd.read_csv('https://raw.githubusercontent.com/shala2020/shala2020.github.io/master/Lecture_Materials/Assignments/MachineLearning/L3/test').to_numpy()
test_data, test_label = test[:,:2], test[:,2]
test_data
## likelihood
l1 = rv1.pdf(test_data)
l2 = rv2.pdf(test_data)
##Posterior
p1_test= (l1*pi1)/(l1*pi1+l2*pi2)
p2_test= (l2*pi2)/(l1*pi1+l2*pi2)
## Descision bundory
test_data_predict=p2_test>p1_test
test_data_predict
test_data_predict = np.where(test_data_predict==True,1,0)
test_data_predict
from sklearn.metrics import classification_report,accuracy_score
print(accuracy_score(test_label,test_data_predict))
print(classification_report(test_label,test_data_predict))
| _____no_output_____ | MIT | 07-Assignment/Bayesian_Classifier_Assignment3_ML.ipynb | hritik5102/SHALA2020 |
Data Wrangling & Cleaning | # import the library
%matplotlib inline
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# convert scientific notation to decimals
pd.set_option('display.float_format', lambda x: '%.2f' % x) | _____no_output_____ | MIT | data-wrangling-analysis.ipynb | Clercy/proj_bedbugs |
Load & Merge the data | df_listing = pd.read_csv('data/kc_house_data.csv')
df_walking_score = pd.read_csv('data/walking_score.csv')
df_income = pd.read_csv('data/ZIP-3.csv') | _____no_output_____ | MIT | data-wrangling-analysis.ipynb | Clercy/proj_bedbugs |
Summarizing your data for inspection | print('Listings')
print(df_listing.columns)
print(df_listing.head())
print(df_listing.describe())
print('')
print('Walking Score')
# TODO: print the columns, head and describe for the Walking Score dataframe
print('')
print('Income')
# TODO: print the columns, head and describe for the Income dataframe | _____no_output_____ | MIT | data-wrangling-analysis.ipynb | Clercy/proj_bedbugs |
Fixing column name | df_income.columns = ['zipcode', 'median_income', 'mean_income', 'population'] | _____no_output_____ | MIT | data-wrangling-analysis.ipynb | Clercy/proj_bedbugs |
Converting data types | df_listing['date'] = pd.to_datetime(df_listing['date'])
df_income['median_income'] = df_income['median_income'].str.replace(',', '').astype(float)
df_income['mean_income'] = df_income['mean_income'].str.replace(',', '').astype(float)
df_income.head()
# TODO: Convert the data type of the population column
df_income | _____no_output_____ | MIT | data-wrangling-analysis.ipynb | Clercy/proj_bedbugs |
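A possible completion of the TODO above, assuming the population column holds comma-formatted strings like the income columns did (worth checking against the raw file before relying on it): | # Assumes population values are comma-formatted strings, like the income columns.
df_income['population'] = df_income['population'].str.replace(',', '').astype(float)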
Dealing with missing valuesHow to deal with the missing values? Should we remove the rows or fill the gap with a value? | # Number of missing values by columns
print(df_listing.isnull().sum())
print('')
print(df_walking_score.isnull().sum())
print('')
print(df_income.isnull().sum())
# select all the rows with missing values
df_walking_score[df_walking_score.isnull().any(axis=1)]
# select all the rows with missing values
df_income[df_income.isnull().any(axis=1)]
# TODO: Create a strategy to handle the missing values on the Walking Score and Income dataframes | _____no_output_____ | MIT | data-wrangling-analysis.ipynb | Clercy/proj_bedbugs |
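One reasonable strategy for the TODO above, sketched as an illustration rather than the project's final choice: fill missing numeric values in each dataframe with the column medians. | # Illustrative strategy only: median-fill missing numeric values.
num_cols = df_walking_score.select_dtypes(include=[np.number]).columns
df_walking_score[num_cols] = df_walking_score[num_cols].fillna(df_walking_score[num_cols].median())
num_cols = df_income.select_dtypes(include=[np.number]).columns
df_income[num_cols] = df_income[num_cols].fillna(df_income[num_cols].median())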
Removing outliersSome algorithms are very sensitive to outliers. Considering the number of bedrooms, should we remove houses with an extreme number of bedrooms? How many bedrooms are too many? (Suggestion: as a rule of thumb, three standard deviations from the mean is a good measure to identify outliers). | # bedrooms
print(df_listing['bedrooms'].value_counts())
print('mean', np.mean(df_listing['bedrooms']))
print('std', np.std(df_listing['bedrooms']))
plt.hist(df_listing['bedrooms'], bins=20)
plt.show()
# TODO: Remove the outlier houses considering the number of bedrooms
# Dealing with outliers
houses_to_remove = []
# remove based on zipcode and price
for zipcode in df_listing['zipcode'].unique():
df_zipcode = df_listing[df_listing['zipcode']==zipcode]
m = np.mean(df_zipcode['price'])
s = np.std(df_zipcode['price'])
houses_to_remove = houses_to_remove + list(df_zipcode[df_zipcode['price']>m+3.0*s].index)
print('')
print('# houses to remove', len(houses_to_remove))
df_listing = df_listing[~df_listing.index.isin(houses_to_remove)] | _____no_output_____ | MIT | data-wrangling-analysis.ipynb | Clercy/proj_bedbugs |
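A hedged sketch for the earlier bedrooms TODO, applying the three-standard-deviation rule of thumb mentioned above: | # Remove houses whose bedroom count is more than three standard deviations from the mean.
m = np.mean(df_listing['bedrooms'])
s = np.std(df_listing['bedrooms'])
df_listing = df_listing[np.abs(df_listing['bedrooms'] - m) <= 3 * s]
print('# houses remaining', len(df_listing))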
Merging Data Sets | df_merge = df_listing.copy()
df_merge = df_merge.merge(df_walking_score, on='zipcode', how='left')
df_merge = df_merge.merge(df_income, on='zipcode', how='left')
print('Total # houses', len(df_merge)) | _____no_output_____ | MIT | data-wrangling-analysis.ipynb | Clercy/proj_bedbugs |
Saving the processed file | df_merge.to_csv('data/house_pricing.csv', index=False) | _____no_output_____ | MIT | data-wrangling-analysis.ipynb | Clercy/proj_bedbugs |
Standalone Convergence Checker for the numerical vKdV solver KISSME bottom - KISSME stratificationGetting more realistic now. Using the real KISSME stratification. Still linear non hydrostaticStill using an offshore 'blank' zone with initial conditions not boundary conditions | import xarray as xr
from iwaves.kdv.kdvimex import KdVImEx#from_netcdf
from iwaves.kdv.vkdv import vKdV
from iwaves.kdv.solve import solve_kdv
from iwaves.utils.plot import vKdV_plot
import iwaves.utils.initial_conditions as ics
import iwaves.utils.boundary_conditions as bcs
import pandas as pd
import numpy as np
from scipy.interpolate import PchipInterpolator as pchip
from scipy.interpolate import interp1d
import scipy.signal
import matplotlib.pyplot as plt
%matplotlib inline
from matplotlib import rcParams
# Set font sizes
rcParams['font.family'] = 'sans-serif'
rcParams['font.sans-serif'] = ['Bitstream Vera Sans']
rcParams['font.serif'] = ['Bitstream Vera Sans']
rcParams["font.size"] = "14"
rcParams['axes.labelsize']='large'
# CONSTANTS FOR WHOLE NOTEBOOK
d = 252.5
L_d = 3.0e5
Nz = 100
bathy_params = [L_d/2, 60000, d, d/2]
runtime = 4.5*86400.
runtime = 1.5*86400.
runtime = 2*86400.
nonlinear = True
nonhydrostatic = True
a0 = 0.
a_bc_left = 35
kb_start = 1000
drag_param = 0.0006
Wn = 1/200 # Filtering both in and outside of the KdV module.
def get_kissme_h(dx, kb_start, start_depth=650, sponge_start_depth=150):
"""
kb_start is where the KISSME bathy starts - will be constant depth before that.
dx is the depth to interpolate to.
sponge_start_depth is the depth at which the sponge boundary will kick in.
"""
data = pd.read_csv(r'..\..\..\..\03_CODE\GA BATHY CODES [and data]\xyz for chapter 3.txt', names=['x', 'y', 'h', 'z'])
x = data['x'].values.tolist()
y = data['y'].values.tolist()
z = data['z'].values.tolist()
x.reverse()
y.reverse()
z.reverse()
x = np.array(x)
y = np.array(y)
z = np.array(z)
delta_x = x - x[0]
delta_y = y - y[0]
horz_km = 110*np.sqrt((delta_x**2+(delta_y*np.cos(np.pi*y/180))**2))
##########
## OUTPUTS
x = 1000*horz_km
h = -z
#########
## LIMIT
ind = np.where(abs(h-start_depth)==min(abs(h-start_depth)))[0][0]
x = x[ind:-1]
h = h[ind:-1]
x = x-x[0]+kb_start
############
## ADD START
print(np.max(x))
print(dx)
xi = np.arange(-2*dx, L_d+dx, dx)
F = interp1d(x, h, bounds_error=False, fill_value='extrapolate')
hi = F(xi)
ind = np.where(xi<kb_start)[0]
hi[ind] = h[0]
ind = np.where(xi>max(x))[0]
hi[ind] = h[-1]
#########
## FILTER
b, a = scipy.signal.butter(4, Wn)
hi = scipy.signal.filtfilt(b, a, hi)
#########
## SPONGE
ind = np.where(abs(hi-sponge_start_depth)==min(abs(hi-sponge_start_depth)))[0][0]
spongedist = xi[-1] - xi[ind]
return xi, hi, spongedist
def get_rho_kissme_apr3():
ncfile = r'\\drive.irds.uwa.edu.au\CEME-BBLE-001\KISSME\Processed data\Moorings\Mooring-Temp-SP250_2short.nc'
ncfile = r'./data/Mooring-Temp-SP250_2short.nc'
ncfile = r'C:\Users\AZulberti\Dropbox\University\PhD\2016_Andrew_Zulberti_2\02 WRITING\3 - Energy dissipation/data/Mooring-Temp-SP250_2short.nc'
# April 4
ti_i = 3390 - 200
ti_f = 3420 + 200
# Apr 3
ti_i = 1000
ti_f = 1500
ti_m = int(np.mean((ti_i, ti_f)))
ti_wave = np.arange(ti_i, ti_f)
ti_full = np.arange(ti_i-200, ti_f+200)
ds = xr.open_dataset(ncfile)
rho_opt = 'hat'
# rho_opt = 'star'
if rho_opt == 'hat':
rho = ds.dens_hat.values[:, ti_m]
elif rho_opt == 'star':
rho = ds.dens_star.values[:, ti_m]
z = ds.height_star.values
rho = rho[::-1]
z = z[::-1]
z = z-max(z)
return z, rho
# Functions
def run_kdv(args):
"""
Main function for generating different soliton scenarios
"""
rho_params, bathy_params, a0, L_d, mode, nu_H, dx, runtime, dt, Lw, solver = args
####################################################
# Inputs
mode = 0
Nz = 100
ntout = 1800.0
# z = np.linspace(0, -d, Nz)
# rhoz = ics.rho_double_tanh_rayson(rho_params,z)
z, rhoz = get_rho_kissme_apr3()
dz = np.abs(z[1]-z[0])
if solver == 'vkdv':
# h = 0*x+d
# h = ics.depth_tanh2(bathy_params, x)
x, h, spongedist = get_kissme_h(dx, kb_start)
pass
elif solver == 'imex':
x = np.arange(-2*dx,L_d+dx,dx)
h = None
spongedist = 0
kdvargs = dict(\
verbose=False,\
a0=a0,\
Lw=Lw,\
mode=mode,
dt=dt,\
nu_H=nu_H,\
ekdv=False,\
wavefunc=ics.eta_fullsine,\
#L_d = L_d,
x=x,\
Nsubset=10,
nonlinear=nonlinear,
nonhydrostatic=nonhydrostatic,
spongedist=spongedist,
drag_param = drag_param
)
###
# THIS WAS COPIED FROM THE KdV VERSION. IT INITIALISES EACH vKdV 3 TIMES - QUITE SLOW.
###
ii=0
#rhoz = single_tanh_rho(
# z, pp['rho0'][ii], pp['drho1'][ii], pp['z1'][ii], pp['h1'][ii])
######
## Call the vKdV run function
mykdv, Bda = solve_kdv(rhoz, z, runtime,\
solver=solver, h=h, ntout=ntout, outfile=None, a_bc_left=a_bc_left, Wn=Wn, **kdvargs)
print('Done with dx={} and dt={}'.format(dx, dt))
return mykdv, Bda
#betas = [1023.7, 1.12, 105, 52, 155, 43] # ~April 5
#betas = [1023.5, 1.22, 67, 55, 157, 52] # ~March 1
betas_w = [1023.8229810318612,
0.9865506702797462,
143.5428700089361,
46.1265812512485,
136.66278860120943,
41.57014327398592] # 15 July 2016
betas_s =[1023.6834358117951,
1.2249066117658955,
156.78804559089772,
53.66835548728355,
73.14183287436342,
40.21031777315428] # 1st April 2017
mode =0
nu_H = 0
# Going to make Lw an input for the vKdV as it will really speed things up.
dx = 100
dt = 10
dx = 10
# x = np.arange(-2*dx,L_d+dx,dx)
# h = ics.depth_tanh2(bathy_params, x) # Intended bathy profile
x, h, spongedist = get_kissme_h(50, kb_start)
z = np.linspace(0, -d, Nz)
rhoz_s = ics.rho_double_tanh_rayson(betas_s, z)
Lw_s = ics.get_Lw(rhoz_s, z, z0=max(h), mode=0)
print(Lw_s)
print(spongedist)
dxs = [800, 400, 200, 100, 50] # Note this is not necessary, it is set by the KISSME bathy currently.
dxs = [50] # Note this is not necessary, it is set by the KISSME bathy currently.
dt = 4.
all_vkdv_dx_s = []
all_kdvimex_dx_s = []
for dx in dxs:
print(' ')
print('Running dx={}'.format(dx))
print(' ')
mykdv, B = run_kdv( (betas_s, bathy_params, a0, L_d, mode, nu_H, dx, runtime, dt, Lw_s, 'vkdv'))
all_vkdv_dx_s.append(mykdv)
print(' ')
print('Completed dx={}'.format(dx))
print(' ')
|
Running dx=50
178852.84114035743
50
Calculating eigenfunctions...
0.0 % complete...
5.0 % complete...
10.0 % complete...
15.0 % complete...
20.0 % complete...
25.0 % complete...
30.0 % complete...
35.0 % complete...
40.0 % complete...
45.0 % complete...
50.0 % complete...
55.0 % complete...
60.0 % complete...
65.0 % complete...
70.0 % complete...
75.0 % complete...
80.0 % complete...
85.0 % complete...
90.0 % complete...
95.0 % complete...
100.0 % complete...
Calculating nonlinear structure functions...
0.0 % complete...
5.0 % complete...
10.0 % complete...
15.0 % complete...
20.0 % complete...
25.0 % complete...
30.0 % complete...
35.0 % complete...
40.0 % complete...
45.0 % complete...
50.0 % complete...
55.0 % complete...
60.0 % complete...
65.0 % complete...
70.0 % complete...
75.0 % complete...
80.0 % complete...
85.0 % complete...
90.0 % complete...
95.0 % complete...
100.0 % complete...
Calculating buoyancy coefficients...
Done with dx=50 and dt=4.0
Completed dx=50
| BSD-2-Clause | tests/.ipynb_checkpoints/standalone_vkdv_convergence KISSME bathy REAL BVP-checkpoint.ipynb | iosonobert/iwaves |
Just double check that vKdV used the correct bathy | x, h, spongedist = get_kissme_h(50, kb_start)
# h = 0*x+d
plt.figure(figsize=(9,5))
plt.plot(x, h, 'b', label='Intended bathy', linewidth=2)
plt.plot(all_vkdv_dx_s[-1].x, all_vkdv_dx_s[-1].h, 'r--', label='Actual vKdV bathy')
plt.ylabel('h (m)')
plt.xlabel('x (m)')
plt.title('vKdV bathy')
plt.legend()
plt.show()
import importlib, iwaves
importlib.reload(iwaves.utils.plot)
from iwaves.utils.plot import vKdV_plot
f = vKdV_plot(all_vkdv_dx_s[-1])
f.savefig('Draft for ch3.png')
full_lims = (0, 230000)
zoom_lims_vkdv = (50000, 100000)
zoom_lims_vkdv2 = (110000, 160000)
zoom_lims_y = (-70, 40)
###########################
##### KISSME
plt.figure(figsize=(12,5))
ax=plt.subplot(131)
for mykdv in all_vkdv_dx_s:
plt.plot(mykdv.x, mykdv.B, label=mykdv.dx_s)
plt.xlim(full_lims)
plt.ylim(zoom_lims_y)
plt.ylabel('A (m)')
plt.xlabel('x (m)')
plt.title('KISSME vKdV full')
plt.grid()
plt.legend()
ax=plt.subplot(132)
for mykdv in all_vkdv_dx_s:
plt.plot(mykdv.x, mykdv.B, label=mykdv.dx_s)
plt.xlim(zoom_lims_vkdv)
plt.ylim(zoom_lims_y)
plt.xlabel('x (m)')
plt.title('KISSME vKdV zoom')
plt.grid()
ax=plt.subplot(133)
for mykdv in all_vkdv_dx_s:
plt.plot(mykdv.x, mykdv.B, label=mykdv.dx_s)
plt.xlim(zoom_lims_vkdv2)
plt.ylim(zoom_lims_y)
plt.xlabel('x (m)')
plt.title('KISSME vKdV zoom 2')
plt.grid()
# Compute the errors
X = np.arange(0,L_d, 10.)
nx = X.shape[0]
ndx = len(dxs)
def get_rms_error(mykdv, nd):
solns = np.zeros((nd, nx))
for ii, mykdv in enumerate(mykdv):
Fx = pchip(mykdv.x, mykdv.B)
solns[ii,:] = Fx(X)
# Compute the error between each solution
#err = np.diff(solns, axis=0)
err = solns - solns[-1,:]
err_rms = np.linalg.norm(err, ord=2, axis=1) # L2-norm
#err_rms_w = np.sqrt(np.mean(err**2,axis=1))
return err_rms
err_rms_vkdv_s = get_rms_error(all_vkdv_dx_s, ndx)
err_rms_kdvimex_s = get_rms_error(all_kdvimex_dx_s, ndx)
def make_dx_convergence_plot(kdv_s, err_s, tit):
plt.figure(figsize=(9,8))
plt.loglog(dxs[:-1],err_s[:-1],'ko')
plt.xlim(2e1,2e3)
plt.ylim(1e-1,5e3)
plt.grid(b=True)
x0 = np.array([50,100.])
plt.plot(x0, 100/x0[0]**2*x0**2, 'k--')
plt.plot(x0, 100/x0[0]**1*x0**1, 'k:')
plt.ylabel('L2-norm Error [m]')
plt.xlabel('$\Delta x$ [m]')
plt.title(tit)
alpha_s = -2*kdv_s[0].c1*kdv_s[0].r10
beta_s = -1*kdv_s[0].r01
print(type(alpha_s))
if not type(alpha_s) == np.float64:
plt.legend((r'$\alpha$ = (%3.4f,%3.4f), $\beta$ = (%3.4f,%3.4f)'%(min(alpha_s), max(alpha_s), min(beta_s), max(beta_s)),
), loc='lower right')
else:
plt.legend((r'$\alpha$ = (%3.4f), $\beta$ = (%3.4f)'%(alpha_s, beta_s),), loc='lower right')
make_dx_convergence_plot(all_kdvimex_dx_s, err_rms_kdvimex_s, 'IMEX')
make_dx_convergence_plot(all_vkdv_dx_s, err_rms_vkdv_s, 'vKdV')
# Delta t comparison
dts = [20,10.,5,2.5,1.25,0.6,0.3]
dx = 50.
all_vkdv_dt_w = []
all_vkdv_dt_s = []
all_kdvimex_dt_w = []
all_kdvimex_dt_s = []
for dt in dts:
print(' ')
print('Running dt={}'.format(dt))
print(' ')
mykdv, B = run_kdv( (betas_s, bathy_params, a0, L_d, mode, nu_H, dx, runtime, dt, Lw_s, 'imex'))
all_kdvimex_dt_s.append(mykdv)
mykdv, B = run_kdv( (betas_s, bathy_params, a0, L_d, mode, nu_H, dx, runtime, dt, Lw_s, 'vkdv'))
all_vkdv_dt_s.append(mykdv)
print(' ')
print('Completed dt={}'.format(dt))
print(' ')
###########################
##### SUMMER
full_lims = (0, 230000)
zoom_lims_imex = (150000, 230000)
zoom_lims_vkdv = (150000, 230000)
zoom_lims_y = (-30, 30)
###########################
##### IMEX
plt.figure(figsize=(12,5))
ax=plt.subplot(121)
for mykdv in all_kdvimex_dt_s:
plt.plot(mykdv.x, mykdv.B, label=mykdv.dt_s)
plt.ylabel('A (m)')
plt.xlabel('x (m)')
plt.title('Summer IMEX full')
plt.xlim((full_lims))
plt.grid()
ax=plt.subplot(122)
for mykdv in all_kdvimex_dt_s:
plt.plot(mykdv.x, mykdv.B, label=mykdv.dt_s)
plt.ylim(zoom_lims_y)
plt.xlim((zoom_lims_imex))
plt.xlabel('x (m)')
plt.title('Summer IMEX zoom')
plt.grid()
###########################
##### vKdV
plt.figure(figsize=(12,5))
ax=plt.subplot(121)
for mykdv in all_vkdv_dt_s:
plt.plot(mykdv.x, mykdv.B, label=mykdv.dt_s)
plt.ylabel('A (m)')
plt.xlabel('x (m)')
plt.title('Summer vKdV full')
plt.xlim((full_lims))
plt.grid()
ax=plt.subplot(122)
for mykdv in all_vkdv_dt_s:
plt.plot(mykdv.x, mykdv.B, label=mykdv.dt_s)
plt.ylim(zoom_lims_y)
plt.xlim((zoom_lims_vkdv))
plt.xlabel('x (m)')
plt.title('Summer vKdV zoom')
plt.grid()
plt.show()
ndt = len(dts)
err_rms_vkdv_dt_s = get_rms_error(all_vkdv_dt_s, ndt)
err_rms_kdvimex_dt_s = get_rms_error(all_kdvimex_dt_s, ndt)
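# (Added illustrative check, not in the original notebook.) Estimate the observed
# temporal convergence order p from err ~ C * dt**p, skipping the finest-dt
# reference run whose error is zero by construction.
p_obs = np.polyfit(np.log(dts[:-1]), np.log(err_rms_vkdv_dt_s[:-1]), 1)[0]
print('Observed vKdV temporal convergence order: {:.2f}'.format(p_obs))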
def make_dt_convergence_plot(kdv_s, err_s, tit):
plt.figure(figsize=(9,8))
plt.loglog(dts[:-1],err_s[:-1],'kd', markersize=6)
plt.xlim(0,0.5e2)
plt.ylim(1e-2,1e3)
plt.grid(b=True)
x0 = np.array([5,20])
plt.plot(x0, 10/x0[0]**2*x0**2, 'k--')
plt.plot(x0, 10/x0[0]**1*x0**1, 'k:')
#plt.ylabel('L2-norm Error [m]')
plt.xlabel('$\Delta t$ [s]')
plt.title(tit)
plt.text(0.05,0.95,'(b)',transform=ax.transAxes)
alpha_s = -2*kdv_s[0].c1*kdv_s[0].r10
beta_s = -1*kdv_s[0].r01
if not type(alpha_s) == np.float64:
plt.legend((r'$\alpha$ = (%3.4f,%3.4f), $\beta$ = (%3.0f,%3.0f)'%(min(alpha_s), max(alpha_s), min(beta_s), max(beta_s)),
), loc='lower right')
else:
plt.legend((r'$\alpha$ = (%3.4f), $\beta$ = (%3.0f)'%(alpha_s, beta_s),
), loc='lower right')
plt.savefig('../FIGURES/vkdv_convergence_dxdt.png',dpi=150)
plt.savefig('../FIGURES/vkdv_convergence_dxdt.pdf',dpi=150)
make_dt_convergence_plot(all_kdvimex_dt_s, err_rms_kdvimex_dt_s, 'KdV IMEX')
make_dt_convergence_plot(all_vkdv_dt_s, err_rms_vkdv_dt_s, 'vKdV')
| <ipython-input-91-3b71a6ddff0e>:5: UserWarning: Attempted to set non-positive left xlim on a log-scaled axis.
Invalid limit will be ignored.
plt.xlim(0,0.5e2)
<ipython-input-91-3b71a6ddff0e>:5: UserWarning: Attempted to set non-positive left xlim on a log-scaled axis.
Invalid limit will be ignored.
plt.xlim(0,0.5e2)
| BSD-2-Clause | tests/.ipynb_checkpoints/standalone_vkdv_convergence KISSME bathy REAL BVP-checkpoint.ipynb | iosonobert/iwaves |
Copyright 2021 The TF-Agents Authors. | #@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. | _____no_output_____ | Apache-2.0 | site/zh-cn/agents/tutorials/6_reinforce_tutorial.ipynb | RedContritio/docs-l10n |
REINFORCE agent View on TensorFlow.org Run in Google Colab View source on GitHub Download notebook Introduction This example shows how to train a [REINFORCE](http://www-anw.cs.umass.edu/~barto/courses/cs687/williams92simple.pdf) agent on the Cartpole environment using the TF-Agents library, similar to the [DQN tutorial](1_dqn_tutorial.ipynb). We will walk you through all the components of a reinforcement learning (RL) pipeline for training, evaluation and data collection. Setup If you haven't installed the following dependencies, run: | !sudo apt-get update
!sudo apt-get install -y xvfb ffmpeg freeglut3-dev
!pip install 'imageio==2.4.0'
!pip install pyvirtualdisplay
!pip install tf-agents[reverb]
!pip install pyglet xvfbwrapper
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import base64
import imageio
import IPython
import matplotlib.pyplot as plt
import numpy as np
import PIL.Image
import pyvirtualdisplay
import reverb
import tensorflow as tf
from tf_agents.agents.reinforce import reinforce_agent
from tf_agents.drivers import py_driver
from tf_agents.environments import suite_gym
from tf_agents.environments import tf_py_environment
from tf_agents.networks import actor_distribution_network
from tf_agents.policies import py_tf_eager_policy
from tf_agents.replay_buffers import reverb_replay_buffer
from tf_agents.replay_buffers import reverb_utils
from tf_agents.specs import tensor_spec
from tf_agents.trajectories import trajectory
from tf_agents.utils import common
# Set up a virtual display for rendering OpenAI gym environments.
display = pyvirtualdisplay.Display(visible=0, size=(1400, 900)).start() | _____no_output_____ | Apache-2.0 | site/zh-cn/agents/tutorials/6_reinforce_tutorial.ipynb | RedContritio/docs-l10n |
Hyperparameters | env_name = "CartPole-v0" # @param {type:"string"}
num_iterations = 250 # @param {type:"integer"}
collect_episodes_per_iteration = 2 # @param {type:"integer"}
replay_buffer_capacity = 2000 # @param {type:"integer"}
fc_layer_params = (100,)
learning_rate = 1e-3 # @param {type:"number"}
log_interval = 25 # @param {type:"integer"}
num_eval_episodes = 10 # @param {type:"integer"}
eval_interval = 50 # @param {type:"integer"} | _____no_output_____ | Apache-2.0 | site/zh-cn/agents/tutorials/6_reinforce_tutorial.ipynb | RedContritio/docs-l10n |
Environment RL environments describe the task or problem to be solved. In TF-Agents, standard environments are easy to create using `suites`. We provide different `suites` for loading environments from sources such as OpenAI Gym, Atari, DM Control and so on, given just a string environment name. Now let's load the CartPole environment from the OpenAI Gym suite. | env = suite_gym.load(env_name)
我们可以渲染此环境以查看其形式:小车上连接一条自由摆动的长杆。目标是向右或向左移动小车,使长杆保持朝上。 | #@test {"skip": true}
env.reset()
PIL.Image.fromarray(env.render()) | _____no_output_____ | Apache-2.0 | site/zh-cn/agents/tutorials/6_reinforce_tutorial.ipynb | RedContritio/docs-l10n |
在该环境中,`time_step = environment.step(action)` 语句用于执行 `action`。返回的 `TimeStep` 元组包含该操作在环境中的下一个观测值和奖励。环境中的 `time_step_spec()` 和 `action_spec()` 方法分别返回 `time_step` 和 `action` 的规范(类型、形状、边界)。 | print('Observation Spec:')
print(env.time_step_spec().observation)
print('Action Spec:')
print(env.action_spec()) | _____no_output_____ | Apache-2.0 | site/zh-cn/agents/tutorials/6_reinforce_tutorial.ipynb | RedContritio/docs-l10n |
我们可以看到,该观测值是一个包含 4 个浮点数的数组:小车的位置和速度,长杆的角度位置和速度。由于只有两个操作(向左或向右移动),因此,`action_spec` 是一个标量,其中 0 表示“向左移动”,1 表示“向右移动”。 | time_step = env.reset()
print('Time step:')
print(time_step)
action = np.array(1, dtype=np.int32)
next_time_step = env.step(action)
print('Next time step:')
print(next_time_step) | _____no_output_____ | Apache-2.0 | site/zh-cn/agents/tutorials/6_reinforce_tutorial.ipynb | RedContritio/docs-l10n |
通常,我们会创建两个环境:一个用于训练,另一个用于评估。大部分环境都是使用纯 Python 语言编写的,但是使用 `TFPyEnvironment` 包装器可轻松将其转换至 TensorFlow 环境。原始环境的 API 使用 NumPy 数组,但凭借 `TFPyEnvironment`,这些数组可以与 `Tensors` 相互转换,从而更轻松地与 TensorFlow 策略和代理交互。 | train_py_env = suite_gym.load(env_name)
eval_py_env = suite_gym.load(env_name)
train_env = tf_py_environment.TFPyEnvironment(train_py_env)
eval_env = tf_py_environment.TFPyEnvironment(eval_py_env) | _____no_output_____ | Apache-2.0 | site/zh-cn/agents/tutorials/6_reinforce_tutorial.ipynb | RedContritio/docs-l10n |
Agent The algorithm we use to solve an RL problem is represented as an `Agent`. In addition to the REINFORCE agent, TF-Agents provides standard implementations of a variety of `Agents` such as [DQN](https://storage.googleapis.com/deepmind-media/dqn/DQNNaturePaper.pdf), [DDPG](https://arxiv.org/pdf/1509.02971.pdf), [TD3](https://arxiv.org/pdf/1802.09477.pdf), [PPO](https://arxiv.org/abs/1707.06347) and [SAC](https://arxiv.org/abs/1801.01290). To create a REINFORCE agent, we first need an `Actor Network` that can learn to predict the action given an observation from the environment. We can easily create an `Actor Network` using the specs of the observations and actions. We can also specify the layers in the network; here that is the `fc_layer_params` argument, set to a tuple of `ints` giving the size of each hidden layer (see the Hyperparameters section above). | actor_net = actor_distribution_network.ActorDistributionNetwork(
train_env.observation_spec(),
train_env.action_spec(),
fc_layer_params=fc_layer_params) | _____no_output_____ | Apache-2.0 | site/zh-cn/agents/tutorials/6_reinforce_tutorial.ipynb | RedContritio/docs-l10n |
We also need an `optimizer` to train the network we just created, and a `train_step_counter` variable to keep track of how many times the network has been updated. | optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)
train_step_counter = tf.Variable(0)
tf_agent = reinforce_agent.ReinforceAgent(
train_env.time_step_spec(),
train_env.action_spec(),
actor_network=actor_net,
optimizer=optimizer,
normalize_returns=True,
train_step_counter=train_step_counter)
tf_agent.initialize() | _____no_output_____ | Apache-2.0 | site/zh-cn/agents/tutorials/6_reinforce_tutorial.ipynb | RedContritio/docs-l10n |
Policies In TF-Agents, policies represent the standard notion of policies in RL: given a `time_step`, produce an action or a distribution over actions. The main method is `policy_step = policy.step(time_step)`, where `policy_step` is a named tuple `PolicyStep(action, state, info)`. `policy_step.action` is the `action` to be applied to the environment, `state` represents the state for stateful (RNN) policies, and `info` may contain auxiliary information such as log probabilities of the actions. Agents contain two policies: the main policy used for evaluation/deployment (agent.policy) and another policy used for data collection (agent.collect_policy). | eval_policy = tf_agent.policy
collect_policy = tf_agent.collect_policy | _____no_output_____ | Apache-2.0 | site/zh-cn/agents/tutorials/6_reinforce_tutorial.ipynb | RedContritio/docs-l10n |
Metrics and Evaluation The most common metric used to evaluate a policy is the average return. The return is the sum of rewards obtained while running a policy in an environment for an episode, and we usually average this over a few episodes. The code to compute the average return metric is below. | #@test {"skip": true}
def compute_avg_return(environment, policy, num_episodes=10):
total_return = 0.0
for _ in range(num_episodes):
time_step = environment.reset()
episode_return = 0.0
while not time_step.is_last():
action_step = policy.action(time_step)
time_step = environment.step(action_step.action)
episode_return += time_step.reward
total_return += episode_return
avg_return = total_return / num_episodes
return avg_return.numpy()[0]
# Please also see the metrics module for standard implementations of different
# metrics. | _____no_output_____ | Apache-2.0 | site/zh-cn/agents/tutorials/6_reinforce_tutorial.ipynb | RedContritio/docs-l10n |
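As a hedged alternative to the hand-rolled function above (not part of the original tutorial code), the metrics module mentioned in the comment can compute the same average return when paired with a driver, roughly as follows: | # Sketch only: average return via the built-in metric and an episode driver.
from tf_agents.drivers import dynamic_episode_driver
from tf_agents.metrics import tf_metrics

avg_return_metric = tf_metrics.AverageReturnMetric(buffer_size=num_eval_episodes)
metric_driver = dynamic_episode_driver.DynamicEpisodeDriver(
    eval_env, tf_agent.policy, observers=[avg_return_metric],
    num_episodes=num_eval_episodes)
metric_driver.run()
print('Average Return:', avg_return_metric.result().numpy())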
Replay Buffer In order to keep track of the data collected from the environment, we will use [Reverb](https://deepmind.com/research/open-source/Reverb), an efficient, extensible and easy-to-use replay system by Deepmind. It stores experience data as we collect trajectories and is consumed during training. This replay buffer is constructed using specs describing the tensors that are to be stored, which can be obtained from the agent using `tf_agent.collect_data_spec`. | table_name = 'uniform_table'
replay_buffer_signature = tensor_spec.from_spec(
tf_agent.collect_data_spec)
replay_buffer_signature = tensor_spec.add_outer_dim(
replay_buffer_signature)
table = reverb.Table(
table_name,
max_size=replay_buffer_capacity,
sampler=reverb.selectors.Uniform(),
remover=reverb.selectors.Fifo(),
rate_limiter=reverb.rate_limiters.MinSize(1),
signature=replay_buffer_signature)
reverb_server = reverb.Server([table])
replay_buffer = reverb_replay_buffer.ReverbReplayBuffer(
tf_agent.collect_data_spec,
table_name=table_name,
sequence_length=None,
local_server=reverb_server)
rb_observer = reverb_utils.ReverbAddEpisodeObserver(
replay_buffer.py_client,
table_name,
replay_buffer_capacity
) | _____no_output_____ | Apache-2.0 | site/zh-cn/agents/tutorials/6_reinforce_tutorial.ipynb | RedContritio/docs-l10n |
For most agents, `collect_data_spec` is a `Trajectory` named tuple containing the observation, action, reward, etc. Data Collection As REINFORCE learns from whole episodes, we define a function to collect an episode using the given data collection policy and save the data (observations, actions, rewards, etc.) as trajectories in the replay buffer. Here we are using 'PyDriver' to run the experience collecting loop. You can learn more about TF-Agents drivers in our [driver tutorial](https://tensorflow.google.cn/agents/tutorials/4_drivers_tutorial). | #@test {"skip": true}
def collect_episode(environment, policy, num_episodes):
driver = py_driver.PyDriver(
environment,
py_tf_eager_policy.PyTFEagerPolicy(
policy, use_tf_function=True),
[rb_observer],
max_episodes=num_episodes)
initial_time_step = environment.reset()
driver.run(initial_time_step) | _____no_output_____ | Apache-2.0 | site/zh-cn/agents/tutorials/6_reinforce_tutorial.ipynb | RedContritio/docs-l10n |
Training the agent The training loop includes both collecting data from the environment and optimizing the agent's networks. Along the way, we will occasionally evaluate the agent's policy to see how we are doing. The following will take roughly 3 minutes to run. | #@test {"skip": true}
try:
%%time
except:
pass
# (Optional) Optimize by wrapping some of the code in a graph using TF function.
tf_agent.train = common.function(tf_agent.train)
# Reset the train step
tf_agent.train_step_counter.assign(0)
# Evaluate the agent's policy once before training.
avg_return = compute_avg_return(eval_env, tf_agent.policy, num_eval_episodes)
returns = [avg_return]
for _ in range(num_iterations):
# Collect a few episodes using collect_policy and save to the replay buffer.
collect_episode(
train_py_env, tf_agent.collect_policy, collect_episodes_per_iteration)
# Use data from the buffer and update the agent's network.
iterator = iter(replay_buffer.as_dataset(sample_batch_size=1))
trajectories, _ = next(iterator)
train_loss = tf_agent.train(experience=trajectories)
replay_buffer.clear()
step = tf_agent.train_step_counter.numpy()
if step % log_interval == 0:
print('step = {0}: loss = {1}'.format(step, train_loss.loss))
if step % eval_interval == 0:
avg_return = compute_avg_return(eval_env, tf_agent.policy, num_eval_episodes)
print('step = {0}: Average Return = {1}'.format(step, avg_return))
returns.append(avg_return) | _____no_output_____ | Apache-2.0 | site/zh-cn/agents/tutorials/6_reinforce_tutorial.ipynb | RedContritio/docs-l10n |
Visualization Plots We can plot return versus global steps to see the performance of our agent. In `Cartpole-v0`, the environment gives a reward of +1 for every time step the pole stays up, and since the maximum number of steps is 200, the maximum possible return is also 200. | #@test {"skip": true}
steps = range(0, num_iterations + 1, eval_interval)
plt.plot(steps, returns)
plt.ylabel('Average Return')
plt.xlabel('Step')
plt.ylim(top=250) | _____no_output_____ | Apache-2.0 | site/zh-cn/agents/tutorials/6_reinforce_tutorial.ipynb | RedContritio/docs-l10n |