# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Population Density of Mexican States and Municipalities # # This Notebook downloads Geopandas GeoDataFrames for States (admin1) and Municipalities (admin2) derived from the 2020 Mexican Census: [INEGI](https://www.inegi.org.mx/temas/mg/). # # For details on how these dataframes were created, see the [mexican-boundaries](https://github.com/sbl-sdsc/mexico-boundaries) GitHub project. # # It also uses the variables of the dataframe obtained in the [Week 3 analysis](Week3States.ipynb). # # Due to the poor clustering results, this part of the project will remain pending until the data being used is better suited to the clustering method or a better understanding of clustering is achieved. # + from io import BytesIO from urllib.request import urlopen import pandas as pd import geopandas as gpd import matplotlib.pyplot as plt import ipywidgets as widgets import numpy as np from sklearn.cluster import DBSCAN from sklearn import metrics from sklearn.preprocessing import StandardScaler, RobustScaler, MinMaxScaler # - pd.options.display.max_rows = None # display all rows pd.options.display.max_columns = None # display all columns # ## Boundaries of Mexican Municipalities # Read boundary polygons for Mexican municipalities from the parquet file admin2_url = 'https://raw.githubusercontent.com/sbl-sdsc/mexico-boundaries/main/data/mexico_admin2.parquet' resp = urlopen(admin2_url) admin2 = gpd.read_parquet(BytesIO(resp.read())) # Check the coordinate reference system admin2.crs admin2['CVE_MUNI'] = admin2['CVE_ENT'] + admin2['CVE_MUN'] admin2.head() admin2.plot(); # ## Map of Population by Municipality # Get the Week 3 analysis data file var_admin2 = pd.read_csv('../../data/week3analyzesMunicipalities.csv') var_admin2.head() # Add 5-digit municipality code column (example: convert 5035 -> 05035) var_admin2['CVE_MUNI'] = var_admin2['cve_ent'].apply(lambda i: f'{i:05d}') var_admin2.head() # Merge the geo dataframe with the population dataframe using the common CVE_MUNI column df_admin2 = admin2.merge(var_admin2, on='CVE_MUNI') df_admin2.head() # The columns that are not needed for this analysis are excluded a2 = df_admin2.iloc[:,7:].copy() a2.head() # Only the data of interest is selected a2 = a2[['case_rate', 'death_rate', 'pct_mental_problem', 'pct_no_problems','pct_pop_obesity', 'population/sqkm']].copy() # The data is normalized # + #std_scaler = StandardScaler() std_scaler = RobustScaler() #std_scaler = MinMaxScaler() std_scaler # fit and transform the data X = pd.DataFrame(std_scaler.fit_transform(a2)) X.head(10) # - # The clustering is performed using the DBSCAN method # + # Compute DBSCAN db = DBSCAN(eps=0.5, min_samples=5).fit(X) core_samples_mask = np.zeros_like(db.labels_, dtype=bool) core_samples_mask[db.core_sample_indices_] = True labels = db.labels_ #with np.printoptions(threshold=np.inf): # print(labels) # Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0) n_noise_ = list(labels).count(-1) print('Estimated number of clusters: %d' % n_clusters_) print('Estimated number of noise points: %d' % n_noise_) print("Silhouette Coefficient: %0.3f" % metrics.silhouette_score(X, labels)) # - df_labels = pd.DataFrame(labels, columns=['cluster']) df2 = pd.concat([df_admin2, df_labels], axis=1) df2.head() title = 'Population Density Clusters for Municipalities in Mexico' ax1 = df2.plot(column='cluster', # cmap='OrRd', # color maps: https://matplotlib.org/stable/tutorials/colors/colormaps.html cmap='Set1', legend=True, legend_kwds={'label': 'Cluster Number', 'orientation': 'horizontal'}, figsize=(16, 11)); ax1.set_title(title, fontsize=15); # + # try Plotly with KDE density plot # bubble maps (see Ebola example with time series): # https://plotly.com/python/bubble-maps/
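# The notebook above notes that the clustering results were poor and the work is on hold. As a hedged sketch (not part of the original analysis), one way to continue is to sweep DBSCAN's `eps` and `min_samples` over the scaled features `X` and compare cluster count, noise count, and silhouette score for each combination; the grid values below are illustrative assumptions, not tuned settings.
# +
import numpy as np
from sklearn.cluster import DBSCAN
from sklearn import metrics

def sweep_dbscan(X, eps_values, min_samples_values):
    """Fit DBSCAN for each (eps, min_samples) pair and report basic quality metrics."""
    results = []
    for eps in eps_values:
        for min_samples in min_samples_values:
            labels = DBSCAN(eps=eps, min_samples=min_samples).fit_predict(X)
            n_clusters = len(set(labels)) - (1 if -1 in labels else 0)
            n_noise = int(np.sum(labels == -1))
            # The silhouette score is only defined when there are at least 2 clusters.
            score = metrics.silhouette_score(X, labels) if n_clusters >= 2 else float('nan')
            results.append((eps, min_samples, n_clusters, n_noise, score))
    return results

for eps, min_samples, n_clusters, n_noise, score in sweep_dbscan(X, [0.3, 0.5, 0.7, 1.0], [5, 10, 20]):
    print(f'eps={eps:.2f} min_samples={min_samples:3d} clusters={n_clusters:4d} noise={n_noise:5d} silhouette={score:.3f}')
# -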
notebooks/dev/Week6.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] tags=[] # # Example: CanvasXpress histogram Chart No. 2 # # This example page demonstrates how to, using the Python package, create a chart that matches the CanvasXpress online example located at: # # https://www.canvasxpress.org/examples/histogram-2.html # # This example is generated using the reproducible JSON obtained from the above page and the `canvasxpress.util.generator.generate_canvasxpress_code_from_json_file()` function. # # Everything required for the chart to render is included in the code below. Simply run the code block. # + from canvasxpress.canvas import CanvasXpress from canvasxpress.js.collection import CXEvents from canvasxpress.render.jupyter import CXNoteBook cx = CanvasXpress( render_to="histogram2", data={ "x": { "Description": [ "Survival time in days" ] }, "z": { "Organ": [ "Stomach", "Stomach", "Stomach", "Stomach", "Stomach", "Stomach", "Stomach", "Stomach", "Stomach", "Stomach", "Stomach", "Stomach", "Stomach", "Bronchus", "Bronchus", "Bronchus", "Bronchus", "Bronchus", "Bronchus", "Bronchus", "Bronchus", "Bronchus", "Bronchus", "Bronchus", "Bronchus", "Bronchus", "Bronchus", "Bronchus", "Bronchus", "Bronchus", "Colon", "Colon", "Colon", "Colon", "Colon", "Colon", "Colon", "Colon", "Colon", "Colon", "Colon", "Colon", "Colon", "Colon", "Colon", "Colon", "Colon", "Ovary", "Ovary", "Ovary", "Ovary", "Ovary", "Ovary", "Breast", "Breast", "Breast", "Breast", "Breast", "Breast", "Breast", "Breast", "Breast", "Breast", "Breast" ] }, "y": { "smps": [ "Survival" ], "vars": [ "s1", "s2", "s3", "s4", "s5", "s6", "s7", "s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15", "s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23", "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31", "s32", "s33", "s34", "s35", "s36", "s37", "s38", "s39", "s40", "s41", "s42", "s43", "s44", "s45", "s46", "s47", "s48", "s49", "s50", "s51", "s52", "s53", "s54", "s55", "s56", "s57", "s58", "s59", "s60", "s61", "s62", "s63", "s64" ], "data": [ [ 124 ], [ 42 ], [ 25 ], [ 45 ], [ 412 ], [ 51 ], [ 1112 ], [ 46 ], [ 103 ], [ 876 ], [ 146 ], [ 340 ], [ 396 ], [ 81 ], [ 461 ], [ 20 ], [ 450 ], [ 246 ], [ 166 ], [ 63 ], [ 64 ], [ 155 ], [ 859 ], [ 151 ], [ 166 ], [ 37 ], [ 223 ], [ 138 ], [ 72 ], [ 245 ], [ 248 ], [ 377 ], [ 189 ], [ 1843 ], [ 180 ], [ 537 ], [ 519 ], [ 455 ], [ 406 ], [ 365 ], [ 942 ], [ 776 ], [ 372 ], [ 163 ], [ 101 ], [ 20 ], [ 283 ], [ 1234 ], [ 89 ], [ 201 ], [ 356 ], [ 2970 ], [ 456 ], [ 1235 ], [ 24 ], [ 1581 ], [ 1166 ], [ 40 ], [ 727 ], [ 3808 ], [ 791 ], [ 1804 ], [ 3460 ], [ 719 ] ] }, "m": { "Name": "Cancer Survival", "Description": "Patients with advanced cancers of the stomach, bronchus, colon, ovary or breast were treated with ascorbate. The purpose of the study was to determine if the survival times differ with respect to the organ affected by the cancer.", "Reference": "<NAME>. and <NAME>. (1978) Supplemental ascorbate in the supportive treatment of cancer: re-evaluation of prolongation of survival times in terminal human cancer. Proceedings of the National Academy of Science USA, 75. Also found in: <NAME>. (1986) Multivariate Statistical Methods: A Primer, New York: Chapman & Hall, 11. Also found in: <NAME>., et al. (1994) A Handbook of Small Data Sets, London: Chapman & Hall, 255." 
} }, config={ "axisTitleFontStyle": "italic", "citation": "<NAME>. and <NAME>. (1978). Proceedings of the National Academy of Science USA, 75.", "graphType": "Scatter2D", "histogramBins": 10, "showTransition": False, "theme": "CanvasXpress", "title": "Patients with advanced cancers of the stomach,bronchus, colon, ovary or breast treated with ascorbate.", "xAxisTitle": "Survival (days)", "yAxisTitle": "Number of Subjects" }, width=613, height=613, events=CXEvents(), after_render=[ [ "createHistogram", [ False, None, None ] ] ], other_init_params={ "version": 35, "events": False, "info": False, "afterRenderInit": False, "noValidate": True } ) display = CXNoteBook(cx) display.render(output_file="histogram_2.html")
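# The markdown above says this cell was generated from the chart's reproducible JSON using `canvasxpress.util.generator.generate_canvasxpress_code_from_json_file()`. A hedged sketch of that step follows; it assumes the JSON from https://www.canvasxpress.org/examples/histogram-2.html has been saved locally as `histogram-2.json` and that the generator returns the generated Python source as a string. The exact signature is not shown in this notebook, so treat this as illustrative only.
from canvasxpress.util.generator import generate_canvasxpress_code_from_json_file

# Regenerate the example code from the downloaded reproducible JSON (the local path is an assumption).
generated_code = generate_canvasxpress_code_from_json_file("histogram-2.json")
print(generated_code)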
tutorials/notebook/cx_site_chart_examples/histogram_2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- from PIL import Image from numpy import * from pylab import * import os import pickle import sift import imagesearch imagesearch = reload(imagesearch) with open('caltech_imlist.pkl', 'rb') as f: imlist = pickle.load(f) featlist = pickle.load(f) nbr_images = len(imlist) with open('vocabulary.pkl', 'rb') as f: voc = pickle.load(f) src = imagesearch.Searcher('test.db', voc) imshow(Image.open(imlist[920])) title(imlist[920]) axis('off') show() imagesearch = reload(imagesearch) src = imagesearch.Searcher('test.db', voc) print 'try a query...' print src.query(imlist[0]) imagesearch = reload(imagesearch) src = imagesearch.Searcher('test.db', voc) res = [w[1] for w in src.query(imlist[920])[:10]] print res imagesearch.plot_results(src, res) import imagesearchE imagesearchE = reload(imagesearchE) srcE = imagesearchE.Searcher('test.db', voc) nbr_results = 10 res = [w[1] for w in srcE.query(imlist[920])[:nbr_results]] print res imagesearchE.plot_results(srcE, res) import imagesearchE imagesearchE = reload(imagesearchE) src2 = imagesearchE.Searcher2('test.db', voc) nbr_results = 10 res = [w[1] for w in src2.query(imlist[920])[:nbr_results]] print res imagesearchE.plot_results(src2, res) print int(1.0)
Chapter-7/CV Book Ch 7 Exercise 1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Wine # ### Introduction: # # This exercise is an adaptation of the UCI Wine dataset. # The only purpose is to practice deleting data with pandas. # # ### Step 1. Import the necessary libraries import pandas as pd import numpy as np # ### Step 2. Import the dataset from this [address](https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data). # ### Step 3. Assign it to a variable called wine # + url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data' wine = pd.read_csv(url) wine.head() # - # ### Step 4. Delete the first, fourth, seventh, ninth, eleventh, thirteenth and fourteenth columns # + wine = wine.drop(wine.columns[[0,3,6,8,11,12,13]], axis = 1) wine.head() # - # ### Step 5. Assign the columns as below: # # The attributes are (donated by <NAME>, riclea '@' anchem.unige.it): # 1) alcohol # 2) malic_acid # 3) alcalinity_of_ash # 4) magnesium # 5) flavanoids # 6) proanthocyanins # 7) hue wine.columns = ['alcohol', 'malic_acid', 'alcalinity_of_ash', 'magnesium', 'flavanoids', 'proanthocyanins', 'hue'] wine.head() # ### Step 6. Set the values of the first 3 rows from alcohol as NaN wine.iloc[0:3, 0] = np.nan wine.head() # ### Step 7. Now set the value of the rows 3 and 4 of magnesium as NaN wine.iloc[2:4, 3] = np.nan wine.head() # ### Step 8. Fill the value of NaN with the number 10 in alcohol and 100 in magnesium # + wine.alcohol.fillna(10, inplace = True) wine.magnesium.fillna(100, inplace = True) wine.head() # - # ### Step 9. Count the number of missing values wine.isnull().sum() # ### Step 10. Create an array of 10 random numbers up to 10 random = np.random.randint(10, size = 10) random # ### Step 11. Use the random numbers you generated as an index and assign NaN to each of those cells. wine.alcohol[random] = np.nan wine.head(10) # ### Step 12. How many missing values do we have? wine.isnull().sum() # ### Step 13. Delete the rows that contain missing values wine = wine.dropna(axis = 0, how = "any") wine.head() # ### Step 14. Print only the non-null values in alcohol mask = wine.alcohol.notnull() mask wine.alcohol[mask] # ### Step 15. Reset the index, so it starts with 0 again wine = wine.reset_index(drop = True) wine.head() # ### BONUS: Create your own question and answer it.
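# A hedged side note, not part of the original exercise: the Step 11 assignment `wine.alcohol[random] = np.nan` uses chained indexing, which can trigger a SettingWithCopyWarning and may not write back in newer pandas versions. An equivalent, more idiomatic form writes through `.loc` with the column label (at that point the index is still the default RangeIndex, so the random integers act as row labels):
# Assign NaN to the randomly chosen rows of the alcohol column via .loc
wine.loc[random, 'alcohol'] = np.nan
wine.head(10)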
10_Deleting/Wine/Exercises_code_and_solutions.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Data cleaning and merging # ## We had a very large GeoJSON file (23 MB) # # ![image.png](attachment:image.png) # ### Running this file really bogged down the system. I found <mapshaper.com> # ### This site allows you to lower the resolution and merge borders of a file to shrink it. # # http://www.icdzn.com/misc/mapshaper.gif # ## The new file is only 600 KB # # ![image.png](attachment:image.png) # ### The next step was to append the related data from a .csv file into the GeoJSON. # ### The output was logged to see what matched up and what did not. # ### Instances like 'United States' vs 'United States of America' had to be cleaned in order for the data to match. # # ![image.png](attachment:image.png) # ## LifeX was added as a property of each feature. For each matching feature we added a list of years and life expectancies. # # ![image.png](attachment:image.png) # ### A Flask app was created but not used, since we decided to move the charts to Tableau because of time constraints. This included a BMI calculator. # # ![image.png](attachment:image.png) # ## A choropleth was created in Leaflet but was later moved to Tableau. # # ![image.png](attachment:image.png)
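# A minimal sketch of the merge step described above, not the original script. The file names, the country-name property, and the alias table are assumptions for illustration; the real project logged unmatched names and cleaned cases like 'United States' vs 'United States of America'.
# +
import json
import pandas as pd

# Hypothetical inputs: the shrunken GeoJSON from mapshaper and a life-expectancy CSV
# with columns 'country', 'year', 'life_expectancy'.
with open('countries_small.geojson') as f:
    geo = json.load(f)
life = pd.read_csv('life_expectancy.csv')

# Map CSV spellings onto the GeoJSON spellings that would otherwise fail to match.
aliases = {'United States': 'United States of America'}
life['country'] = life['country'].replace(aliases)

# Group the CSV rows by country so each feature gets a list of year / life-expectancy pairs.
by_country = {
    name: grp[['year', 'life_expectancy']].to_dict('records')
    for name, grp in life.groupby('country')
}

matched, unmatched = 0, []
for feature in geo['features']:
    name = feature['properties'].get('name')
    if name in by_country:
        feature['properties']['LifeX'] = by_country[name]
        matched += 1
    else:
        unmatched.append(name)

# Log what matched and what did not, as described above.
print(f'matched {matched} features; unmatched: {unmatched}')

with open('countries_with_lifex.geojson', 'w') as f:
    json.dump(geo, f)
# -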
charts/JeffMD.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pickle import pandas as pd import xgboost as xgb import numpy as np from sklearn.model_selection import GridSearchCV from sklearn.model_selection import KFold from sklearn.model_selection import train_test_split import matplotlib.pyplot as plt from sklearn.metrics import r2_score from sklearn.metrics import mean_squared_error from sklearn.preprocessing import StandardScaler from sklearn.utils.class_weight import compute_sample_weight from sklearn.preprocessing import QuantileTransformer # # Cross-validation with hyperparameter tuning data = pd.read_csv(r"hk_eui_sample_github.csv",index_col=0,encoding='ISO-8859-1') y = data['kWh_m2_ann'].reset_index(drop=True) log_y = np.log10(y) log_y_category = pd.cut(log_y, bins=5) log_y_weight = compute_sample_weight('balanced',log_y_category) #Weight adjustment for i in range(len(log_y)): if log_y[i]<=1.2 or log_y[i]>2.25: log_y_weight[i] = log_y_weight[i]*32 elif log_y[i]>1.70 and log_y[i]<1.90: log_y_weight[i] = log_y_weight[i]/16 else: log_y_weight[i] = log_y_weight[i]*6 X = data[['log_hotel_kern','log_parking_ke','log_food_kerne','log_office_ker','log_retail_ker','DNI_HK','HKG_wind_s','tsd_mask','log_area', 'No_of_Stor','Year_Built','retro_before','retro_after','log_mtr_dist','is_commercial']].reset_index(drop=True) data_matrix = xgb.DMatrix(data=X, label=log_y, weight=log_y_weight) from typing import Tuple def r2(predt: np.ndarray, dtrain: xgb.DMatrix) -> Tuple[str, float]: y = dtrain.get_label() r2_value = r2_score(y, predt) return "r2", -r2_value from hyperopt import STATUS_OK, Trials, fmin, hp, tpe space = {'max_depth': hp.quniform("max_depth",3,10,1), 'learning_rate': hp.uniform("learning_rate", 0.1, 2), 'colsample_bytree': hp.uniform("colsample_bytree", 0.5, 1), 'min_child_weight': hp.quniform("min_child_weight", 1, 20, 1), 'reg_alpha': hp.quniform("reg_alpha", 0, 100,1), 'reg_lambda': hp.uniform("reg_lambda", 0, 2), 'subsample': hp.uniform("subsample",0.5, 1), 'min_split_loss': hp.uniform("min_split_loss", 0, 9), 'rate_drop': hp.uniform("rate_drop", 0, 1)} def fobjective(space): param_dict_tunning = {'max_depth': int(space['max_depth']), 'learning_rate': space['learning_rate'], 'colsample_bytree': space['colsample_bytree'], 'min_child_weight': int(space['min_child_weight']), 'reg_alpha': int(space['reg_alpha']), 'reg_lambda': space['reg_lambda'], 'subsample': space['subsample'], 'min_split_loss': space['min_split_loss'], 'booster': 'dart', 'rate_drop': space['rate_drop'], 'objective': 'reg:squarederror' } xgb_cv_result = xgb.cv(dtrain=data_matrix, params=param_dict_tunning, nfold=5, early_stopping_rounds=30, as_pandas=True, num_boost_round = 200, seed=8000, feval=r2) print ("rmse:", (xgb_cv_result["test-rmse-mean"]).tail(1).iloc[0]) print ("r2:", (xgb_cv_result["test-r2-mean"]).tail(1).iloc[0]) return {"loss": (xgb_cv_result["test-r2-mean"]).tail(1).iloc[0], "status": STATUS_OK} trials = Trials() best_hyperparams = fmin(fn=fobjective, space=space, algo=tpe.suggest, max_evals=200, trials=trials) best_hyperparams param_dict_tunning = {'objective': 'reg:squarederror', 'eval_metric': 'rmse', 'max_depth': int(best_hyperparams['max_depth']), 'reg_alpha': best_hyperparams['reg_alpha'],'reg_lambda':best_hyperparams['reg_lambda'], 'min_child_weight': best_hyperparams['min_child_weight'], 'colsample_bytree': 
best_hyperparams['colsample_bytree'], 'learning_rate': best_hyperparams['learning_rate'], 'subsample':best_hyperparams['subsample'], 'min_split_loss': best_hyperparams['min_split_loss'],'rate_drop':best_hyperparams['rate_drop'],'booster': 'dart'} xgb_cv_result = xgb.cv(dtrain=data_matrix, params=param_dict_tunning, nfold=5, early_stopping_rounds=30, as_pandas=True, num_boost_round = 200, seed=8000, feval=r2) xgb_cv_result xgb_model = None for train, test in KFold(n_splits=5, shuffle=True, random_state=8000).split(X, log_y): input_matrix = xgb.DMatrix(data=X.loc[train], label=log_y.loc[train], weight=log_y_weight[train]) test_matrix = xgb.DMatrix(data=X.loc[test], label=log_y.loc[test], weight=log_y_weight[test]) watchlist = [(test_matrix, 'eval'), (input_matrix, 'train')] xgb_model = xgb.train(params=param_dict_tunning, dtrain=input_matrix, num_boost_round=200, evals=watchlist, feval=r2, xgb_model=None, verbose_eval=False, early_stopping_rounds=30) log_y_pred = xgb_model.predict(test_matrix) log_y_true = log_y.loc[test].values accuracy_num = 0 for i in range(len(log_y_pred)): if log_y_true[i] > log_eui_median: if log_y_pred[i] > log_eui_median: accuracy_num = accuracy_num + 1 if log_y_true[i] <= log_eui_median: if log_y_pred[i] <= log_eui_median: accuracy_num = accuracy_num + 1 rmse_test = mean_squared_error(log_y.iloc[test], log_y_pred, squared=False) r2_test = r2_score(log_y.loc[test],log_y_pred) accuarcy_test = "{:.2%}".format(accuracy_num/len(log_y_pred)) print ('RMSE: %.4f'%rmse_test) print ('R2: %.4f'%r2_test) print ('Accuracy: ', accuarcy_test) log_y_pred = xgb_model.predict(test_matrix) # + fig, ax = plt.subplots(1,1,figsize=(6,6)) ax.scatter(log_y.iloc[test], log_y_pred, color='b', s=10) ax.plot([-10,10],[-10,10], color='k', linewidth=1) ax.set_xlim([0.5,3]) ax.set_ylim([0.5,3]) ax.tick_params(axis='both', which='major', labelsize=22) ax.set_title("XGBoost_test:R2=0.31", fontsize=22) ax.set_xlabel("log10_EUI_observation", fontsize=22) ax.set_ylabel("log10_EUI_prediction", fontsize=22) ax.xaxis.set_major_locator(plt.MaxNLocator(5)) ax.yaxis.set_major_locator(plt.MaxNLocator(5)) plt.subplots_adjust(left=0.2,right=0.9) plt.show() plt.close() # - # # Shaply Interpretation import shap y_pred_shap = xgb_model.predict(data_matrix,pred_contribs=True) shap_value_dep = y_pred_shap[:,:-1] X_name = ['log10_hotel_ker','log10_parking_ker','log10_food_ker','log10_office_ker','log10_retail_ker', 'DNI','wind_speed_10m','CDH','log10_area','No_of_Stor','year_built', 'retro_before_audit','retro_after_audit','log10_mtr_dist','is_commercial_land_use'] X_shap = [] for i in range(len(X_name)): X_shap.append(abs(shap_value_dep[:,i]).mean()) X_mean_shap = pd.DataFrame({'X_name':X_name, 'X_shap':X_shap}) shap.dependence_plot("HKG_wind_s",shap_value_dep, X, interaction_index='log_area')
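# `log_eui_median` is used in the K-fold loop above but never defined in this notebook. A hedged guess, based on how it is used, is that it is the median of the log10-transformed EUI target; the definition and helper below are an assumption for illustration, not the authors' code.
# +
# Assumed definition: threshold at the median of the log10 EUI target.
log_eui_median = log_y.median()

def above_below_median_accuracy(y_true, y_pred, threshold):
    """Fraction of samples whose prediction lands on the same side of `threshold` as the observation."""
    hits = sum((t > threshold) == (p > threshold) for t, p in zip(y_true, y_pred))
    return hits / len(y_true)

# Same quantity the loop above reports as 'Accuracy', computed for the last fold's predictions.
print('Accuracy:', '{:.2%}'.format(above_below_median_accuracy(log_y_true, log_y_pred, log_eui_median)))
# -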
xgb_hk_commerical_eui_estimator_github.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: engcomp # language: python # name: engcomp # --- # + [markdown] slideshow={"slide_type": "slide"} # # 03 Fundamentals # - # - Conditions # - Branching # - Loops # - Functions # - Objects # - Classes # + [markdown] slideshow={"slide_type": "slide"} # # Conditions # + [markdown] slideshow={"slide_type": "slide"} # <p align="center"> # <img src="../images/03_Fundamentals/01_Conditions/01_comparison_False_1.png", width=800, height=600>| # </p> # + [markdown] slideshow={"slide_type": "slide"} # <p align="center"> # <img src="../images/03_Fundamentals/01_Conditions/02_comparison_False_2.png", width=800, height=600>| # </p> # + [markdown] slideshow={"slide_type": "slide"} # <p align="center"> # <img src="../images/03_Fundamentals/01_Conditions/03_comparison_True_1.png", width=800, height=600>| # </p> # + [markdown] slideshow={"slide_type": "slide"} # <p align="center"> # <img src="../images/03_Fundamentals/01_Conditions/04_comparison_True_2.png", width=800, height=600>| # </p> # + [markdown] slideshow={"slide_type": "slide"} # <p align="center"> # <img src="../images/03_Fundamentals/01_Conditions/05_equality_True.png", width=800, height=600>| # </p> # + [markdown] slideshow={"slide_type": "slide"} # <p align="center"> # <img src="../images/03_Fundamentals/01_Conditions/06_equality_False.png", width=800, height=600>| # </p> # + [markdown] slideshow={"slide_type": "slide"} # <p align="center"> # <img src="../images/03_Fundamentals/01_Conditions/07_inequality_test.png", width=800, height=600>| # </p> # + slideshow={"slide_type": "slide"} a = 6 a # + slideshow={"slide_type": "slide"} a == 7 # + slideshow={"slide_type": "slide"} a == 6 # + slideshow={"slide_type": "slide"} i = 101 i > 100 # + slideshow={"slide_type": "slide"} i = 99 i > 100 # + slideshow={"slide_type": "slide"} "AC/DC" == "<NAME>" # + slideshow={"slide_type": "slide"} "AC/DC" != "<NAME>" # + [markdown] slideshow={"slide_type": "slide"} # ## Branching # + [markdown] slideshow={"slide_type": "slide"} # <p align="center"> # <img src="../images/03_Fundamentals/02_Branching/01_if_False.png", width=800, height=600>| # </p> # + [markdown] slideshow={"slide_type": "slide"} # <p align="center"> # <img src="../images/03_Fundamentals/02_Branching/02_if_True.png", width=800, height=600>| # </p> # + [markdown] slideshow={"slide_type": "slide"} # <p align="center"> # <img src="../images/03_Fundamentals/02_Branching/03_else_False.png", width=800, height=600>| # </p> # + [markdown] slideshow={"slide_type": "slide"} # <p align="center"> # <img src="../images/03_Fundamentals/02_Branching/04_else_True.png", width=800, height=600>| # </p> # + [markdown] slideshow={"slide_type": "slide"} # <p align="center"> # <img src="../images/03_Fundamentals/02_Branching/05_elif_True.png", width=800, height=600>| # </p> # + [markdown] slideshow={"slide_type": "slide"} # <p align="center"> # <img src="../images/03_Fundamentals/02_Branching/06_not_1.png", width=800, height=600>| # </p> # + [markdown] slideshow={"slide_type": "slide"} # <p align="center"> # <img src="../images/03_Fundamentals/02_Branching/07_not_2.png", width=800, height=600>| # </p> # + [markdown] slideshow={"slide_type": "slide"} # <p align="center"> # <img src="../images/03_Fundamentals/02_Branching/08_or_1.png", width=800, height=600>| # </p> # + [markdown] slideshow={"slide_type": "slide"} # <p align="center"> # <img 
src="../images/03_Fundamentals/02_Branching/09_or_2.png", width=800, height=600>| # </p> # + [markdown] slideshow={"slide_type": "slide"} # <p align="center"> # <img src="../images/03_Fundamentals/02_Branching/10_and_1.png", width=800, height=600>| # </p> # + [markdown] slideshow={"slide_type": "slide"} # <p align="center"> # <img src="../images/03_Fundamentals/02_Branching/11_and_2.png", width=800, height=600>| # </p> # + slideshow={"slide_type": "slide"} age=17 if age>=18: print("Enter") print("Move") # + slideshow={"slide_type": "subslide"} age=17 if age>=18: print("Enter") else: print("Meat Loaf") print("Move") # + slideshow={"slide_type": "subslide"} age=18 if age>18: print("Enter") elif age==18: print("Pink Floyd") else: print("Meat Loaf") print("Move") # + [markdown] slideshow={"slide_type": "slide"} # ## Logic Operators # + slideshow={"slide_type": "slide"} not (True) # + slideshow={"slide_type": "slide"} not (False) # + slideshow={"slide_type": "slide"} A = False B = True A or B # + slideshow={"slide_type": "slide"} A = False B = False A or B # + slideshow={"slide_type": "slide"} album_year = 1990 if (album_year<1980) or (album_year>1989): print("This 70's or 90's") else: print("This 80's") # + slideshow={"slide_type": "slide"} A = False B = True A and B # + slideshow={"slide_type": "slide"} A = True B = True A and B # + slideshow={"slide_type": "slide"} album_year = 1983 if (album_year>1979) and (album_year<1990): print("This 80's") # + [markdown] slideshow={"slide_type": "slide"} # # Loops # + [markdown] slideshow={"slide_type": "slide"} # ## range # + [markdown] slideshow={"slide_type": "slide"} # <p align="center"> # <img src="../images/03_Fundamentals/03_Loops/01_range_1.png", width=800, height=600>| # </p> # + [markdown] slideshow={"slide_type": "slide"} # <p align="center"> # <img src="../images/03_Fundamentals/03_Loops/02_range_2.png", width=800, height=600>| # </p> # + [markdown] slideshow={"slide_type": "slide"} # <p align="center"> # <img src="../images/03_Fundamentals/03_Loops/03_range_3.png", width=800, height=600>| # </p> # + slideshow={"slide_type": "slide"} range(3) # + slideshow={"slide_type": "slide"} list(range(3)) # + slideshow={"slide_type": "slide"} list(range(10,15)) # + [markdown] slideshow={"slide_type": "slide"} # ## for loops # + [markdown] slideshow={"slide_type": "slide"} # <p align="center"> # <img src="../images/03_Fundamentals/03_Loops/04_for_1.png", width=800, height=600>| # </p> # + [markdown] slideshow={"slide_type": "slide"} # <p align="center"> # <img src="../images/03_Fundamentals/03_Loops/05_for_2.png", width=800, height=600>| # </p> # + [markdown] slideshow={"slide_type": "slide"} # <p align="center"> # <img src="../images/03_Fundamentals/03_Loops/06_for_3.png", width=800, height=600>| # </p> # + [markdown] slideshow={"slide_type": "slide"} # <p align="center"> # <img src="../images/03_Fundamentals/03_Loops/07_for_4.png", width=800, height=600>| # </p> # + [markdown] slideshow={"slide_type": "slide"} # <p align="center"> # <img src="../images/03_Fundamentals/03_Loops/08_for_5.png", width=800, height=600>| # </p> # + [markdown] slideshow={"slide_type": "slide"} # <p align="center"> # <img src="../images/03_Fundamentals/03_Loops/09_for_6.png", width=800, height=600>| # </p> # + [markdown] slideshow={"slide_type": "slide"} # <p align="center"> # <img src="../images/03_Fundamentals/03_Loops/10_for_7.png", width=800, height=600>| # </p> # + [markdown] slideshow={"slide_type": "slide"} # <p align="center"> # <img 
src="../images/03_Fundamentals/03_Loops/11_for_enumerate_1.png", width=800, height=600>| # </p> # + [markdown] slideshow={"slide_type": "slide"} # <p align="center"> # <img src="../images/03_Fundamentals/03_Loops/12_for_enumerate_2.png", width=800, height=600>| # </p> # + slideshow={"slide_type": "slide"} squares_indexes = range(5) list(squares_indexes) # + slideshow={"slide_type": "subslide"} squares = ['red','yellow','green','purple','blue'] squares # + slideshow={"slide_type": "subslide"} print(f'Before squares {squares}') for i in range(5): print(f'Before square {i} is {squares[i]}') squares[i]="white" print(f'After square {i} is {squares[i]}') print(f'After squares {squares}') # + slideshow={"slide_type": "slide"} squares = ['red','yellow','green'] squares # + slideshow={"slide_type": "subslide"} for square in squares: print(square) # + slideshow={"slide_type": "subslide"} for i,square in enumerate(squares): print(f'index {i},square {square}') # + [markdown] slideshow={"slide_type": "slide"} # ## while loop # + [markdown] slideshow={"slide_type": "slide"} # <p align="center"> # <img src="../images/03_Fundamentals/03_Loops/13_while_1.png", width=800, height=600>| # </p> # + [markdown] slideshow={"slide_type": "slide"} # <p align="center"> # <img src="../images/03_Fundamentals/03_Loops/14_while_2.png", width=800, height=600>| # </p> # + [markdown] slideshow={"slide_type": "slide"} # <p align="center"> # <img src="../images/03_Fundamentals/03_Loops/15_while_3.png", width=800, height=600>| # </p> # + [markdown] slideshow={"slide_type": "slide"} # <p align="center"> # <img src="../images/03_Fundamentals/03_Loops/16_while_4.png", width=800, height=600>| # </p> # + [markdown] slideshow={"slide_type": "slide"} # <p align="center"> # <img src="../images/03_Fundamentals/03_Loops/17_while_5.png", width=800, height=600>| # </p> # + slideshow={"slide_type": "slide"} squares = ['orange','orange','purple','blue'] squares # + slideshow={"slide_type": "subslide"} newsquares =[] newsquares # + slideshow={"slide_type": "subslide"} i=0 i # + slideshow={"slide_type": "subslide"} while squares[i]=='orange': newsquares.append(squares[i]) i+=1 newsquares # + [markdown] slideshow={"slide_type": "slide"} # # Functions # + [markdown] slideshow={"slide_type": "slide"} # <p align="center"> # <img src="../images/03_Fundamentals/04_Functions/01_function_input.png", width=800, height=600>| # </p> # + [markdown] slideshow={"slide_type": "slide"} # <p align="center"> # <img src="../images/03_Fundamentals/04_Functions/02_function_output.png", width=800, height=600>| # </p> # + [markdown] slideshow={"slide_type": "slide"} # <p align="center"> # <img src="../images/03_Fundamentals/04_Functions/03_blocks_of_code_1.png", width=800, height=600>| # </p> # + [markdown] slideshow={"slide_type": "slide"} # <p align="center"> # <img src="../images/03_Fundamentals/04_Functions/04_blocks_of_code_2.png", width=800, height=600>| # </p> # + [markdown] slideshow={"slide_type": "slide"} # <p align="center"> # <img src="../images/03_Fundamentals/04_Functions/05_blocks_of_code_3.png", width=800, height=600>| # </p> # + slideshow={"slide_type": "slide"} def function(a): """add 1 to a""" b = a + 1 print(f'a + 1 = {b}') return b # + slideshow={"slide_type": "subslide"} function(3) # + slideshow={"slide_type": "subslide"} def f1(input): """add 1 to input""" output=input+1 return output # + slideshow={"slide_type": "subslide"} def f2(input): """add 2 to input""" output=input+2 return output # + slideshow={"slide_type": "subslide"} f1(1) 
f2(f1(1)) f2(f2(f1(1))) f1(f2(f2(f1(1)))) # + [markdown] slideshow={"slide_type": "slide"} # ## Built-in Functions # + [markdown] slideshow={"slide_type": "slide"} # <p align="center"> # <img src="../images/03_Fundamentals/04_Functions/07_built_in_len.png", width=800, height=600>| # </p> # + [markdown] slideshow={"slide_type": "slide"} # <p align="center"> # <img src="../images/03_Fundamentals/04_Functions/08_built_in_sum.png", width=800, height=600>| # </p> # + slideshow={"slide_type": "slide"} album_ratings = [10.0,8.5,9.5] album_ratings # + slideshow={"slide_type": "subslide"} Length=len(album_ratings) Length # + slideshow={"slide_type": "subslide"} Sum=sum(album_ratings) Sum # + [markdown] slideshow={"slide_type": "slide"} # ## Sorted vs Sort # + [markdown] slideshow={"slide_type": "slide"} # <p align="center"> # <img src="../images/03_Fundamentals/04_Functions/09_sorted.png", width=800, height=600>| # </p> # + [markdown] slideshow={"slide_type": "slide"} # <p align="center"> # <img src="../images/03_Fundamentals/04_Functions/10_sort.png", width=800, height=600>| # </p> # + slideshow={"slide_type": "slide"} print(f'Before album_ratings {album_ratings}') sorted_album_ratings=sorted(album_ratings) print(f'sorted_album_ratings {sorted_album_ratings}') print(f'After album_ratings {album_ratings}') # + slideshow={"slide_type": "slide"} print(f'Before album_ratings {album_ratings}') album_ratings.sort() print(f'After album_ratings {album_ratings}') # + [markdown] slideshow={"slide_type": "slide"} # ## Making Functions # + [markdown] slideshow={"slide_type": "slide"} # <p align="center"> # <img src="../images/03_Fundamentals/04_Functions/11_define_call_function.png", width=800, height=600>| # </p> # + [markdown] slideshow={"slide_type": "slide"} # <p align="center"> # <img src="../images/03_Fundamentals/04_Functions/12_docstring.png", width=800, height=600>| # </p> # + [markdown] slideshow={"slide_type": "slide"} # <p align="center"> # <img src="../images/03_Fundamentals/04_Functions/14_multiply_integer_string.png", width=800, height=600>| # </p> # + [markdown] slideshow={"slide_type": "slide"} # <p align="center"> # <img src="../images/03_Fundamentals/04_Functions/15_return_None.png", width=800, height=600>| # </p> # + [markdown] slideshow={"slide_type": "slide"} # <p align="center"> # <img src="../images/03_Fundamentals/04_Functions/16_nowork.png", width=800, height=600>| # </p> # + [markdown] slideshow={"slide_type": "slide"} # <p align="center"> # <img src="../images/03_Fundamentals/04_Functions/17_perform_more_tasks.png", width=800, height=600>| # </p> # + [markdown] slideshow={"slide_type": "slide"} # <p align="center"> # <img src="../images/03_Fundamentals/04_Functions/18_loops_in_functions.png", width=800, height=600>| # </p> # + slideshow={"slide_type": "slide"} def add1(a): """ add 1 to a """ b=a+1 return b # + slideshow={"slide_type": "subslide"} help(add1) # + slideshow={"slide_type": "subslide"} add1(5) # + slideshow={"slide_type": "subslide"} c=add1(10) c # + slideshow={"slide_type": "subslide"} def Mult(a,b): c=a*b return c # + slideshow={"slide_type": "subslide"} Mult(2,3) # + slideshow={"slide_type": "subslide"} Mult(2,'<NAME> ') # + slideshow={"slide_type": "subslide"} def MJ(): print('<NAME>') # + slideshow={"slide_type": "subslide"} MJ() # + slideshow={"slide_type": "subslide"} def NoWork(): pass # + slideshow={"slide_type": "subslide"} NoWork() # + slideshow={"slide_type": "subslide"} print(NoWork()) # + slideshow={"slide_type": "subslide"} def NoWork(): pass return None 
# + slideshow={"slide_type": "subslide"} NoWork() # + slideshow={"slide_type": "subslide"} print(NoWork()) # + slideshow={"slide_type": "slide"} def add1(a): b=a+1 print(f'{a} plus 1 equals {b}') return b # + slideshow={"slide_type": "subslide"} add1(2) # + slideshow={"slide_type": "slide"} def printStuff(Stuff): for i,s in enumerate(Stuff): print(f'Album {i} Rating is {s}') # + slideshow={"slide_type": "subslide"} album_ratings # + slideshow={"slide_type": "subslide"} printStuff(album_ratings) # + [markdown] slideshow={"slide_type": "slide"} # ## Collecting arguments # + [markdown] slideshow={"slide_type": "slide"} # <p align="center"> # <img src="../images/03_Fundamentals/04_Functions/19_collect_arguments.png", width=800, height=600>| # </p> # + slideshow={"slide_type": "slide"} def ArtistNames(*names): for name in names: print(f'Name {name}') # + slideshow={"slide_type": "subslide"} ArtistNames("<NAME>","AC/DC","<NAME>") # + slideshow={"slide_type": "subslide"} ArtistNames("<NAME>","AC/DC") # + [markdown] slideshow={"slide_type": "slide"} # ## Scope # + [markdown] slideshow={"slide_type": "slide"} # <p align="center"> # <img src="../images/03_Fundamentals/04_Functions/20_global_variable_scope.png", width=800, height=600>| # </p> # + [markdown] slideshow={"slide_type": "slide"} # <p align="center"> # <img src="../images/03_Fundamentals/04_Functions/21_local_variable_scope.png", width=800, height=600>| # </p> # + [markdown] slideshow={"slide_type": "slide"} # <p align="center"> # <img src="../images/03_Fundamentals/04_Functions/22_variable_not_defined_within_function.png", width=800, height=600>| # </p> # + [markdown] slideshow={"slide_type": "slide"} # <p align="center"> # <img src="../images/03_Fundamentals/04_Functions/23_global_keyword.png", width=800, height=600>| # </p> # + [markdown] slideshow={"slide_type": "slide"} # ### Global Scope # + slideshow={"slide_type": "slide"} def AddDC(y): x =y+"DC" print(f'Local x {x}') return x x="AC" print(f'Global x {x}') z=AddDC(x) print(f'Global z {x}') # + [markdown] slideshow={"slide_type": "slide"} # ### Local Variables # + slideshow={"slide_type": "slide"} def Thriller(): Date=1982 return Date Thriller() # + slideshow={"slide_type": "slide"} # Date # NameError: name 'Date' is not defined # + slideshow={"slide_type": "slide"} Date = 2017 # + slideshow={"slide_type": "slide"} print(Thriller()) # + slideshow={"slide_type": "slide"} print(Date) # + slideshow={"slide_type": "slide"} def ACDC(y): print(f'Rating {Rating}') return Rating+y # + slideshow={"slide_type": "slide"} Rating=9 Rating # + slideshow={"slide_type": "slide"} z=ACDC(1) print(f'z {z}') # + slideshow={"slide_type": "slide"} print(f'Rating {Rating}') # + slideshow={"slide_type": "slide"} def PinkFloyd(): global ClaimedSales ClaimedSales = '45 million' return ClaimedSales # + slideshow={"slide_type": "slide"} PinkFloyd() # + slideshow={"slide_type": "slide"} print(f'ClaimedSales {ClaimedSales}') # + slideshow={"slide_type": "slide"} def type_of_album(artist, album, year_released): print(artist, album, year_released) if year_released > 1980: return "Modern" else: return "Oldie" x = type_of_album("<NAME>", "Thriller", 1980) print(x) # + [markdown] slideshow={"slide_type": "slide"} # # Objects # + [markdown] slideshow={"slide_type": "slide"} # <p align="center"> # <img src="../images/03_Fundamentals/05_Objects/01_objects.png", width=800, height=600>| # </p> # + [markdown] slideshow={"slide_type": "slide"} # <p align="center"> # <img 
src="../images/03_Fundamentals/05_Objects/02_int_objects.png", width=800, height=600>| # </p> # + [markdown] slideshow={"slide_type": "slide"} # <p align="center"> # <img src="../images/03_Fundamentals/05_Objects/03_type_of_object.png", width=800, height=600>| # </p> # + [markdown] slideshow={"slide_type": "slide"} # <p align="center"> # <img src="../images/03_Fundamentals/05_Objects/04_methods_1.png", width=800, height=600>| # </p> # + [markdown] slideshow={"slide_type": "slide"} # <p align="center"> # <img src="../images/03_Fundamentals/05_Objects/05_methods_2.png", width=800, height=600>| # </p> # + [markdown] slideshow={"slide_type": "slide"} # <p align="center"> # <img src="../images/03_Fundamentals/05_Objects/06_methods_3.png", width=800, height=600>| # </p> # + slideshow={"slide_type": "slide"} type([1,34,3]) # + slideshow={"slide_type": "subslide"} type(1) # + slideshow={"slide_type": "subslide"} type("yellow") # + slideshow={"slide_type": "subslide"} type({"dog":1,"cat":2}) # + slideshow={"slide_type": "subslide"} Ratings = [10,9,6,5] Ratings # + slideshow={"slide_type": "subslide"} Ratings.sort() Ratings # + slideshow={"slide_type": "subslide"} Ratings.reverse() Ratings # + [markdown] slideshow={"slide_type": "slide"} # # Classes # + [markdown] slideshow={"slide_type": "slide"} # <p align="center"> # <img src="../images/03_Fundamentals/06_Classes/01_class.png", width=800, height=600>| # </p> # + [markdown] slideshow={"slide_type": "slide"} # <p align="center"> # <img src="../images/03_Fundamentals/06_Classes/02_attributes.png", width=800, height=600>| # </p> # + [markdown] slideshow={"slide_type": "slide"} # <p align="center"> # <img src="../images/03_Fundamentals/06_Classes/03_create_class.png", width=800, height=600>| # </p> # + [markdown] slideshow={"slide_type": "slide"} # <p align="center"> # <img src="../images/03_Fundamentals/06_Classes/04_Circle_Rectangle.png", width=800, height=600>| # </p> # + [markdown] slideshow={"slide_type": "slide"} # <p align="center"> # <img src="../images/03_Fundamentals/06_Classes/05_Circle_objects.png", width=800, height=600>| # </p> # + [markdown] slideshow={"slide_type": "slide"} # <p align="center"> # <img src="../images/03_Fundamentals/06_Classes/06_Rectangle_objects.png", width=800, height=600>| # </p> # + [markdown] slideshow={"slide_type": "slide"} # <p align="center"> # <img src="../images/03_Fundamentals/06_Classes/07_class_instances.png", width=800, height=600>| # </p> # + [markdown] slideshow={"slide_type": "slide"} # <p align="center"> # <img src="../images/03_Fundamentals/06_Classes/08_create_Circle_class.png", width=800, height=600>| # </p> # + [markdown] slideshow={"slide_type": "slide"} # <p align="center"> # <img src="../images/03_Fundamentals/06_Classes/09_class_constructor.png", width=800, height=600>| # </p> # + [markdown] slideshow={"slide_type": "slide"} # <p align="center"> # <img src="../images/03_Fundamentals/06_Classes/10_create_class_Circle_instance_1.png", width=800, height=600>| # </p> # + [markdown] slideshow={"slide_type": "slide"} # <p align="center"> # <img src="../images/03_Fundamentals/06_Classes/11_create_class_Circle_instance_2.png", width=800, height=600>| # </p> # + [markdown] slideshow={"slide_type": "slide"} # <p align="center"> # <img src="../images/03_Fundamentals/06_Classes/12_create_class_Circle_instance_3.png", width=800, height=600>| # </p> # + [markdown] slideshow={"slide_type": "slide"} # <p align="center"> # <img src="../images/03_Fundamentals/06_Classes/13_attribute_value.png", width=800, 
height=600>| # </p> # + [markdown] slideshow={"slide_type": "slide"} # <p align="center"> # <img src="../images/03_Fundamentals/06_Classes/14_change_attribute_value.png", width=800, height=600>| # </p> # + [markdown] slideshow={"slide_type": "slide"} # <p align="center"> # <img src="../images/03_Fundamentals/06_Classes/15_change_circle_radius_method.png", width=800, height=600>| # </p> # + [markdown] slideshow={"slide_type": "slide"} # ## Define Classes # + # Class Circle # Data Attributes radius,color class Circle(object): pass # + slideshow={"slide_type": "slide"} # Object 1: instance of type Circle # Data Attributes # radius=4 # color='red' # + # Object 2: instance of type Circle # Data Attributes # radius=2 # color='green' # + slideshow={"slide_type": "slide"} # Class Rectangle # Data Attributes width,height,color class Rectangle(object): pass # + slideshow={"slide_type": "slide"} # Object 1: instance of type Rectangle # Data Attributes # widrh=2 # height=2 # color='blue' # + # Object 2: instance of type Rectangle # Data Attributes # widrh=3 # height=1 # color='yellow' # + slideshow={"slide_type": "slide"} class Circle(object): def __init__(self,radius,color): self.radius = radius self.color = color # + slideshow={"slide_type": "slide"} class Rectangle(object): def __init__(self,height,width,color): self.height = height self.width = width self.color = color # + slideshow={"slide_type": "slide"} RedCircle = Circle(10,"red") print(f'RedCircle radius {RedCircle.radius}') print(f'RedCircle color {RedCircle.color}') # + slideshow={"slide_type": "subslide"} C1 = Circle(10,"blue") print(f'C1 radius {C1.radius}') print(f'C1 color {C1.color}') # + slideshow={"slide_type": "subslide"} C1.color = 'yellow' print(f'C1 radius {C1.radius}') print(f'C1 color {C1.color}') # + slideshow={"slide_type": "subslide"} C1.radius = 25 C1.color = 'green' print(f'C1 radius {C1.radius}') print(f'C1 color {C1.color}') # + [markdown] slideshow={"slide_type": "slide"} # ## Methods # + [markdown] slideshow={"slide_type": "slide"} # <p align="center"> # <img src="../images/03_Fundamentals/06_Classes/16_add_radius.png", width=800, height=600>| # </p> # + [markdown] slideshow={"slide_type": "slide"} # <p align="center"> # <img src="../images/03_Fundamentals/06_Classes/17_call_add_radius_method.png", width=800, height=600>| # </p> # + [markdown] slideshow={"slide_type": "slide"} # <p align="center"> # <img src="../images/03_Fundamentals/06_Classes/18_change_object_radius_value.png", width=800, height=600>| # </p> # + [markdown] slideshow={"slide_type": "slide"} # <p align="center"> # <img src="../images/03_Fundamentals/06_Classes/19_default_parameters_values.png", width=800, height=600>| # </p> # + [markdown] slideshow={"slide_type": "slide"} # <p align="center"> # <img src="../images/03_Fundamentals/06_Classes/20_class_Circle_objects.png", width=800, height=600>| # </p> # + [markdown] slideshow={"slide_type": "slide"} # <p align="center"> # <img src="../images/03_Fundamentals/06_Classes/21_dir_function.png", width=800, height=600>| # </p> # + slideshow={"slide_type": "slide"} # Method add_radius to change Circle size # + slideshow={"slide_type": "slide"} class Circle(object): def __init__(self,radius,color): self.radius = radius self.color = color def add_radius(self,r): self.radius = self.radius + r return self.radius def change_color(self,c): self.color = c return self.color def draw_circle(): pass # + slideshow={"slide_type": "slide"} C1=Circle(2,'red') print(f'C1 radius {C1.radius}') print(f'C1 color {C1.color}') # 
+ slideshow={"slide_type": "subslide"} C1.add_radius(8) print(f'C1 radius {C1.radius}') print(f'C1 color {C1.color}') # + slideshow={"slide_type": "subslide"} C1.change_color('blue') print(f'C1 radius {C1.radius}') print(f'C1 color {C1.color}') # + slideshow={"slide_type": "slide"} dir(Circle) # + slideshow={"slide_type": "slide"} import matplotlib.pyplot as plt # %matplotlib inline # + slideshow={"slide_type": "slide"} class Circle(object): def __init__(self,radius,color): self.radius = radius self.color = color def add_radius(self,r): self.radius = self.radius + r return self.radius def change_color(self,c): self.color = c return self.color def draw_circle(self): plt.gca().add_patch(plt.Circle((0, 0), radius=self.radius, fc=self.color)) plt.axis('scaled') plt.show() # + slideshow={"slide_type": "slide"} RedCircle = Circle(1,'red') print(f'RedCircle radius {RedCircle.radius}') print(f'RedCircle color {RedCircle.color}') # + slideshow={"slide_type": "slide"} RedCircle.draw_circle() # + slideshow={"slide_type": "slide"} # Create a new Rectangle class for creating a rectangle object class Rectangle(object): # Constructor def __init__(self, width=2, height=3, color='r'): self.height = height self.width = width self.color = color # Method def draw_rectangle(self): plt.gca().add_patch(plt.Rectangle((0, 0), self.width, self.height ,fc=self.color)) plt.axis('scaled') plt.show() # + slideshow={"slide_type": "slide"} SkinnyBlueRectangle = Rectangle(2, 10, 'blue') print(f'SkinnyBlueRectangle height {SkinnyBlueRectangle.height}') print(f'SkinnyBlueRectangle width {SkinnyBlueRectangle.width}') print(f'SkinnyBlueRectangle color {SkinnyBlueRectangle.color}') # + slideshow={"slide_type": "slide"} SkinnyBlueRectangle.draw_rectangle()
notebooks/03_Fundamentals.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # RBM training $ L=100$ - Dataset with 29 different temperatures # + import numpy as np import pandas as pd import matplotlib.pyplot as plt from scipy import stats from sklearn.model_selection import train_test_split import seaborn as sns import torch import torch.nn as nn torch.manual_seed(12) import sys sys.path.append('../modules') from rbm import RBM from mc_ising2d import IsingMC L = 100 # - # ## Loading Ising data # # The first step is to load the Ising data generated by the Monte Carlo procedure. The file `L100_Ising2D_MC.pkl` was constructed considering a square lattice $L\times L$, with $L=100$. There are 1000 spin configurations for each of 29 different temperatures $ T/J = 1, 1.1, 1.2, ..., 3.5 $ and $ T/J = 2.259, 2.269, 2.279$. ising_data = pd.read_pickle('../data/L100_Ising2D_MC.pkl') ising_data.head() ising_data['temp'].value_counts() # In the thermodynamic limit $( L^2 \rightarrow \infty )$, the isotropic Ising model on a square lattice was analytically solved by [Lars Onsager](https://en.wikipedia.org/wiki/Lars_Onsager) in [1944](https://journals.aps.org/pr/abstract/10.1103/PhysRev.65.117). In this limit, the model shows spontaneous magnetization for $ T < T_c$, with $T_c$ given by # # $$ \frac{T_c}{J} = \frac{2}{\log\left(1 + \sqrt{ 2}\right)} \approx 2.269185 \;. $$ # # With the class `IsingMC` we can check some thermodynamic quantities. IsingMC(L= 100).plot_thermodynamics(spin_MC= ising_data, Tc_scaled= False) data = ising_data.drop(columns= ['energy', 'magn']) # ### Constructing training and test sets # # Using `train_test_split` from [`sklearn`](http://scikit-learn.org/) it is easy to split the data into training and test sets. Since `train_test_split` is a random process and our data has 1000 samples for each of the 29 temperature values, we split the data for each temperature in order to avoid the possibility of a biased split towards some temperature value. # + train_data_ = pd.DataFrame() test_data_ = pd.DataFrame() for _, temp in enumerate(data['temp'].value_counts().index.tolist()): train_data_T, test_data_T = train_test_split(data[data['temp'] == temp], test_size= 0.2, random_state= 12) train_data_ = pd.concat([train_data_, train_data_T]) test_data_ = pd.concat([test_data_, test_data_T]) # - # ### Training the model # # Our code implementing a Restricted Boltzmann Machine is written as a Python class called `RBM`, which is imported from `rbm.py`. # # For simplification, the units have no bias and the RBM stochasticity parameter, represented below by $T$, is set to unity, as usual in most practical applications. Note that we set `use_cuda=True`, which makes use of [CUDA tensor types](https://pytorch.org/docs/stable/cuda.html), implementing GPU computation. If a GPU is not available, one should just set `use_cuda=False`.
# + training_set = torch.Tensor(list(train_data_['state'])) training_set = training_set[torch.randperm(training_set.size()[0])] test_set = torch.Tensor(list(test_data_['state'])) lr = 0.001 k_learning = 1 batch_size = 100 nb_epoch = 2000 k_sampling = 1 rbm = RBM(num_visible= training_set.shape[1], num_hidden= training_set.shape[1], bias= False, T= 1.0, use_cuda= True) rbm.learn(training_set= training_set, test_set= test_set, lr= lr, nb_epoch= nb_epoch, batch_size= batch_size, k_learning= k_learning, k_sampling = k_sampling, verbose= 1) # - # ### Saving the trained model # + nb_epoch= rbm.num_train_epochs() Nv= training_set.shape[1] Nh= training_set.shape[1] name = 'RBM_model_T_complete_nv%d_nh%d_lr%.1E_k%d_bsize%d_nepochs%d' % (Nv, Nh, lr, k_learning, batch_size, nb_epoch) PATH = '../RBM_trained_models/'+ name + '.pt' torch.save(rbm, PATH) # - # ### Weights distribution # + W, v, h = rbm.parameters() del v del h torch.cuda.empty_cache() # + W_ = W.cpu().numpy().reshape((W.shape[0]*W.shape[1])) # Plot normalized histogram plt.hist(W_, bins= 1000, density= True) # Maximum and minimum of xticks to compute the theoretical distribution x_min, x_max = min(plt.xticks()[0]), max(plt.xticks()[0]) domain = np.linspace(x_min, x_max, len(W_)) # Fitting a normal distribution muW_, sigmaW_ = stats.norm.fit(W_) plot_pdf = stats.norm.pdf(domain, muW_, sigmaW_) # Fitting the PDF in the interval plt.plot(domain, plot_pdf, linewidth= 2.5, label= '$\mu= %f$ \n$\sigma$ = %f' % (muW_, sigmaW_ )) plt.title('Fitting a Normal Distribution for the weights ${\cal W}$') plt.xlim([-1, 1]) plt.legend() plt.show() # -
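# A quick numerical check of the Onsager critical temperature quoted above, $T_c/J = 2/\log(1+\sqrt{2})$. The three extra dataset temperatures 2.259, 2.269, and 2.279 were chosen to bracket this value.
# +
import numpy as np

Tc_over_J = 2.0 / np.log(1.0 + np.sqrt(2.0))
print(f'Tc/J = {Tc_over_J:.6f}')  # approximately 2.269185
for T in (2.259, 2.269, 2.279):
    print(f'T/J = {T}: {"below" if T < Tc_over_J else "above"} Tc')
# -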
training_RBM/.ipynb_checkpoints/RBM_L100_train_dataset_V_complete-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/KathyRoma/DS-Unit-2-Kaggle-Challenge/blob/master/module2-random-forests/LS_DS_222_assignment.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="KxXJOuXf3YYD" colab_type="text" # Lambda School Data Science # # *Unit 2, Sprint 2, Module 2* # # --- # + [markdown] colab_type="text" id="7IXUfiQ2UKj6" # # Random Forests # # ## Assignment # - [ ] Read [“Adopting a Hypothesis-Driven Workflow”](http://archive.is/Nu3EI), a blog post by a Lambda DS student about the Tanzania Waterpumps challenge. # - [ ] Continue to participate in our Kaggle challenge. # - [ ] Define a function to wrangle train, validate, and test sets in the same way. Clean outliers and engineer features. # - [ ] Try Ordinal Encoding. # - [ ] Try a Random Forest Classifier. # - [ ] Submit your predictions to our Kaggle competition. (Go to our Kaggle InClass competition webpage. Use the blue **Submit Predictions** button to upload your CSV file. Or you can use the Kaggle API to submit your predictions.) # - [ ] Commit your notebook to your fork of the GitHub repo. # # ## Stretch Goals # # ### Doing # - [ ] Add your own stretch goal(s) ! # - [ ] Do more exploratory data analysis, data cleaning, feature engineering, and feature selection. # - [ ] Try other [categorical encodings](https://contrib.scikit-learn.org/category_encoders/). # - [ ] Get and plot your feature importances. # - [ ] Make visualizations and share on Slack. 
# # ### Reading # # Top recommendations in _**bold italic:**_ # # #### Decision Trees # - A Visual Introduction to Machine Learning, [Part 1: A Decision Tree](http://www.r2d3.us/visual-intro-to-machine-learning-part-1/), and _**[Part 2: Bias and Variance](http://www.r2d3.us/visual-intro-to-machine-learning-part-2/)**_ # - [Decision Trees: Advantages & Disadvantages](https://christophm.github.io/interpretable-ml-book/tree.html#advantages-2) # - [How a Russian mathematician constructed a decision tree — by hand — to solve a medical problem](http://fastml.com/how-a-russian-mathematician-constructed-a-decision-tree-by-hand-to-solve-a-medical-problem/) # - [How decision trees work](https://brohrer.github.io/how_decision_trees_work.html) # - [Let’s Write a Decision Tree Classifier from Scratch](https://www.youtube.com/watch?v=LDRbO9a6XPU) # # #### Random Forests # - [_An Introduction to Statistical Learning_](http://www-bcf.usc.edu/~gareth/ISL/), Chapter 8: Tree-Based Methods # - [Coloring with Random Forests](http://structuringtheunstructured.blogspot.com/2017/11/coloring-with-random-forests.html) # - _**[Random Forests for Complete Beginners: The definitive guide to Random Forests and Decision Trees](https://victorzhou.com/blog/intro-to-random-forests/)**_ # # #### Categorical encoding for trees # - [Are categorical variables getting lost in your random forests?](https://roamanalytics.com/2016/10/28/are-categorical-variables-getting-lost-in-your-random-forests/) # - [Beyond One-Hot: An Exploration of Categorical Variables](http://www.willmcginnis.com/2015/11/29/beyond-one-hot-an-exploration-of-categorical-variables/) # - _**[Categorical Features and Encoding in Decision Trees](https://medium.com/data-design/visiting-categorical-features-and-encoding-in-decision-trees-53400fa65931)**_ # - _**[Coursera — How to Win a Data Science Competition: Learn from Top Kagglers — Concept of mean encoding](https://www.coursera.org/lecture/competitive-data-science/concept-of-mean-encoding-b5Gxv)**_ # - [Mean (likelihood) encodings: a comprehensive study](https://www.kaggle.com/vprokopev/mean-likelihood-encodings-a-comprehensive-study) # - [The Mechanics of Machine Learning, Chapter 6: Categorically Speaking](https://mlbook.explained.ai/catvars.html) # # #### Imposter Syndrome # - [Effort Shock and Reward Shock (How The Karate Kid Ruined The Modern World)](http://www.tempobook.com/2014/07/09/effort-shock-and-reward-shock/) # - [How to manage impostor syndrome in data science](https://towardsdatascience.com/how-to-manage-impostor-syndrome-in-data-science-ad814809f068) # - ["I am not a real data scientist"](https://brohrer.github.io/imposter_syndrome.html) # - _**[Imposter Syndrome in Data Science](https://caitlinhudon.com/2018/01/19/imposter-syndrome-in-data-science/)**_ # # # ### More Categorical Encodings # # **1.** The article **[Categorical Features and Encoding in Decision Trees](https://medium.com/data-design/visiting-categorical-features-and-encoding-in-decision-trees-53400fa65931)** mentions 4 encodings: # # - **"Categorical Encoding":** This means using the raw categorical values as-is, not encoded. Scikit-learn doesn't support this, but some tree algorithm implementations do. For example, [Catboost](https://catboost.ai/), or R's [rpart](https://cran.r-project.org/web/packages/rpart/index.html) package. # - **Numeric Encoding:** Synonymous with Label Encoding, or "Ordinal" Encoding with random order. 
We can use [category_encoders.OrdinalEncoder](https://contrib.scikit-learn.org/category_encoders/ordinal.html). # - **One-Hot Encoding:** We can use [category_encoders.OneHotEncoder](https://contrib.scikit-learn.org/category_encoders/onehot.html). # - **Binary Encoding:** We can use [category_encoders.BinaryEncoder](https://contrib.scikit-learn.org/category_encoders/binary.html). # # # **2.** The short video # **[Coursera — How to Win a Data Science Competition: Learn from Top Kagglers — Concept of mean encoding](https://www.coursera.org/lecture/competitive-data-science/concept-of-mean-encoding-b5Gxv)** introduces an interesting idea: use both X _and_ y to encode categoricals. # # Category Encoders has multiple implementations of this general concept: # # - [CatBoost Encoder](https://contrib.scikit-learn.org/category_encoders/catboost.html) # - [Generalized Linear Mixed Model Encoder](https://contrib.scikit-learn.org/category_encoders/glmm.html) # - [James-Stein Encoder](https://contrib.scikit-learn.org/category_encoders/jamesstein.html) # - [Leave One Out](https://contrib.scikit-learn.org/category_encoders/leaveoneout.html) # - [M-estimate](https://contrib.scikit-learn.org/category_encoders/mestimate.html) # - [Target Encoder](https://contrib.scikit-learn.org/category_encoders/targetencoder.html) # - [Weight of Evidence](https://contrib.scikit-learn.org/category_encoders/woe.html) # # Category Encoder's mean encoding implementations work for regression problems or binary classification problems. # # For multi-class classification problems, you will need to temporarily reformulate it as binary classification. For example: # # ```python # encoder = ce.TargetEncoder(min_samples_leaf=..., smoothing=...) # Both parameters > 1 to avoid overfitting # X_train_encoded = encoder.fit_transform(X_train, y_train=='functional') # X_val_encoded = encoder.transform(X_train, y_val=='functional') # ``` # # For this reason, mean encoding won't work well within pipelines for multi-class classification problems. # # **3.** The **[dirty_cat](https://dirty-cat.github.io/stable/)** library has a Target Encoder implementation that works with multi-class classification. # # ```python # dirty_cat.TargetEncoder(clf_type='multiclass-clf') # ``` # It also implements an interesting idea called ["Similarity Encoder" for dirty categories](https://www.slideshare.net/GaelVaroquaux/machine-learning-on-non-curated-data-154905090). # # However, it seems like dirty_cat doesn't handle missing values or unknown categories as well as category_encoders does. And you may need to use it with one column at a time, instead of with your whole dataframe. # # **4. [Embeddings](https://www.kaggle.com/colinmorris/embedding-layers)** can work well with sparse / high cardinality categoricals. # # _**I hope it’s not too frustrating or confusing that there’s not one “canonical” way to encode categoricals. It’s an active area of research and experimentation — maybe you can make your own contributions!**_ # + [markdown] id="Z5sl224A3YYE" colab_type="text" # ### Setup # # You can work locally (follow the [local setup instructions](https://lambdaschool.github.io/ds/unit2/local/)) or on Colab (run the code cell below). 
# + colab_type="code" id="o9eSnDYhUGD7" colab={} # %%capture import sys # If you're on Colab: if 'google.colab' in sys.modules: DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Kaggle-Challenge/master/data/' # !pip install category_encoders==2.* # If you're working locally: else: DATA_PATH = '../data/' # + colab_type="code" id="QJBD4ruICm1m" outputId="721de50f-a8a2-492c-9a02-1267ea863a7b" colab={"base_uri": "https://localhost:8080/", "height": 35} import pandas as pd from sklearn.model_selection import train_test_split train = pd.merge(pd.read_csv(DATA_PATH+'waterpumps/train_features.csv'), pd.read_csv(DATA_PATH+'waterpumps/train_labels.csv')) test = pd.read_csv(DATA_PATH+'waterpumps/test_features.csv') sample_submission = pd.read_csv(DATA_PATH+'waterpumps/sample_submission.csv') train.shape, test.shape # + id="VNGtaZLtWFo5" colab_type="code" colab={} import numpy as np # + id="oMQnJTh_2GN2" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 765} outputId="c8395432-60fd-4292-e998-863253a03d46" train.sample(10) # + id="SEi_KMxn3YYM" colab_type="code" colab={} train, val = train_test_split(train, train_size=0.80, test_size=0.20, stratify=train['status_group'], random_state=42) def wrangle(X): """Wrangle train, validate, and test sets in the same way""" # Prevent SettingWithCopyWarning X = X.copy() # About 3% of the time, latitude has small values near zero, # outside Tanzania, so we'll treat these values like zero. X['latitude'] = X['latitude'].replace(-2e-08, 0) # When columns have zeros and shouldn't, they are like null values. # So we will replace the zeros with nulls, and impute missing values later. # Also create a "missing indicator" column, because the fact that # values are missing may be a predictive signal. 
cols_with_zeros = ['longitude', 'latitude', 'construction_year', 'gps_height', 'population'] for col in cols_with_zeros: X[col] = X[col].replace(0, np.nan) X[col+'_MISSING'] = X[col].isnull() # Drop duplicate columns duplicates = ['quantity_group', 'payment_type'] X = X.drop(columns=duplicates) # Drop recorded_by (never varies) and id (always varies, random) unusable_variance = ['recorded_by', 'id'] X = X.drop(columns=unusable_variance) # Convert date_recorded to datetime X['date_recorded'] = pd.to_datetime(X['date_recorded'], infer_datetime_format=True) # Extract components from date_recorded, then drop the original column X['year_recorded'] = X['date_recorded'].dt.year X['month_recorded'] = X['date_recorded'].dt.month X['day_recorded'] = X['date_recorded'].dt.day X = X.drop(columns='date_recorded') # Engineer feature: how many years from construction_year to date_recorded X['years'] = X['year_recorded'] - X['construction_year'] X['years_MISSING'] = X['years'].isnull() # return the wrangled dataframe return X train = wrangle(train) val = wrangle(val) test = wrangle(test) # + colab_type="code" id="m2HppBvZgLVP" colab={} # The status_group column is the target target = 'status_group' # Get a dataframe with all train columns except the target train_features = train.drop(columns=[target]) # Get a list of the numeric features numeric_features = train_features.select_dtypes(include='number').columns.tolist() # Get a series with the cardinality of the nonnumeric features cardinality = train_features.select_dtypes(exclude='number').nunique() # Get a list of all categorical features with cardinality <= 50 categorical_features = cardinality[cardinality <= 50].index.tolist() # Combine the lists features = numeric_features + categorical_features # + colab_type="code" id="aXmK2brXgLVR" colab={} # Arrange data into X features matrix and y target vector X_train = train[features] y_train = train[target] X_val = val[features] y_val = val[target] X_test = test[features] # + colab_type="code" id="57yyygsdgLVW" outputId="794ebdd6-bbf1-4840-e738-ed48414d8595" colab={"base_uri": "https://localhost:8080/", "height": 124} # %%time # WARNING: the %%time command sometimes ha quirks/bugs import category_encoders as ce from sklearn.ensemble import RandomForestClassifier from sklearn.impute import SimpleImputer from sklearn.pipeline import make_pipeline pipeline = make_pipeline( ce.OneHotEncoder(use_cat_names=True), SimpleImputer(strategy='median'), RandomForestClassifier(random_state=0) ) # Fit on train, score on val pipeline.fit(X_train, y_train) print('Validation Accuracy', pipeline.score(X_val, y_val)) # + id="IureAgJ849wR" colab_type="code" outputId="ab03edb8-32b3-4cb7-e741-ea9eab3b3963" colab={"base_uri": "https://localhost:8080/", "height": 35} encoder = pipeline.named_steps['onehotencoder'] encoded = encoder.transform(X_train) print('X_train shape after encoding', encoded.shape) # + id="0FHOyHv15co3" colab_type="code" outputId="a2705047-7451-4e27-a1e1-f71ffe944806" colab={"base_uri": "https://localhost:8080/", "height": 607} # %matplotlib inline import matplotlib.pyplot as plt # Get feature importances rf = pipeline.named_steps['randomforestclassifier'] importances = pd.Series(rf.feature_importances_, encoded.columns) # Plot top n feature importances n = 20 plt.figure(figsize=(10,n/2)) plt.title(f'Top {n} features') importances.sort_values()[-n:].plot.barh(); # + colab_type="code" id="b8d_WJtcgLVZ" colab={} # Re-arrange data into X features matrix and y target vector, so # we use *all* features, including the 
high-cardinality categoricals X_train = train.drop(columns=target) y_train = train[target] X_val = val.drop(columns=target) y_val = val[target] X_test = test # + id="bPvh-nh2-eTf" colab_type="code" outputId="768a69e0-a002-4489-ad80-daa3f7f9d85b" colab={"base_uri": "https://localhost:8080/", "height": 69} # %%time # Ordinal encoding pipeline = make_pipeline( ce.OrdinalEncoder(), SimpleImputer(strategy='median'), RandomForestClassifier(random_state=9, n_jobs=-1) ) # Fit on train, score on val pipeline.fit(X_train, y_train) print('Validation Accuracy', pipeline.score(X_val, y_val)) # + id="g7NAfUxf9ZD2" colab_type="code" colab={} # I try to improve accuracy by adjusting classifier's parameters args_values={'criterion':['gini', 'entropy'] ,'n_jobs':[-1] ,'min_samples_leaf':[1,3,5] , 'max_depth':[20,30,50]} # + id="szO5yWVy9zR9" colab_type="code" colab={} from itertools import product # + id="gEgTtHh89VdQ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 812} outputId="bee8a0e4-fe7a-4c19-bad0-ad7347e64cd2" # I create a list of possible combinations of parameters I chose args_combs=[dict(zip(args_values, v)) for v in product(*args_values.values())] np.random.seed(42) np.random.shuffle(args_combs) print(len(args_combs)) args_combs # + id="JLDKdtVl-bts" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 328} outputId="0f32c1fe-b104-4cd5-e8fb-cd824a95b57f" # I count accuracy for different combination of parameters to see what gives me higher accuracy for i,ar in enumerate (args_combs): arg_comb=args_combs[i] pipeline = make_pipeline( ce.OrdinalEncoder(), SimpleImputer(strategy='median'), RandomForestClassifier(**arg_comb)) pipeline.fit(X_train, y_train) print('Validation Accuracy', pipeline.score(X_val, y_val), i, arg_comb) # + id="462YNO0FgMXJ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="71b00421-7d50-4fdc-954a-036a8dcc512b" # This combination of parameters gives me the highest score pipeline = make_pipeline( ce.OrdinalEncoder(), SimpleImputer(strategy='median'), RandomForestClassifier(criterion = 'entropy', n_jobs=-1, min_samples_leaf=1, max_depth=30) ) # Fit on train, score on val pipeline.fit(X_train, y_train) print('Validation Accuracy', pipeline.score(X_val, y_val)) # + id="_FD8HdO9lXTE" colab_type="code" colab={} y_pred = pipeline.predict(X_test) submission = sample_submission.copy() submission['status_group'] = y_pred submission.to_csv('ER_KC_forest.csv', index=False) # + id="itwBcIb-l_Ez" colab_type="code" colab={} from google.colab import files files.download('ER_KC_forest.csv')
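# Optional follow-up (not part of the original assignment): scikit-learn's RandomizedSearchCV
# does what the manual itertools.product loop above does, with built-in cross-validation.
# This minimal sketch reuses the parameter ranges explored earlier; n_iter=10 and cv=3 are
# illustrative assumptions, not values from this notebook.

from sklearn.model_selection import RandomizedSearchCV

param_distributions = {
    'randomforestclassifier__criterion': ['gini', 'entropy'],
    'randomforestclassifier__min_samples_leaf': [1, 3, 5],
    'randomforestclassifier__max_depth': [20, 30, 50],
}

search_pipeline = make_pipeline(
    ce.OrdinalEncoder(),
    SimpleImputer(strategy='median'),
    RandomForestClassifier(n_jobs=-1, random_state=42)
)

search = RandomizedSearchCV(
    search_pipeline,
    param_distributions=param_distributions,
    n_iter=10,           # sample 10 of the 18 possible combinations
    cv=3,                # 3-fold cross-validation on the training split
    scoring='accuracy',
    random_state=42,
)
search.fit(X_train, y_train)
print('Best cross-validation accuracy', search.best_score_)
print('Best parameters', search.best_params_)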
module2-random-forests/LS_DS_222_assignment.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Predict Wada Scores and compare with LI toolbox # ### import modules # + # %matplotlib inline import os import pickle import numpy as np import pandas as pd from scipy import stats from sklearn import metrics import matplotlib.pyplot as plt import seaborn as sns sns.set_style('ticks') # - # ### get data wilkeDf = pd.read_csv('../data/processed/li_tool/allWilkeData.csv',index_col=[0,1],header=0) wilkeDf.shape wilkeDf.head() # ## predict clinical judgements using Wilke LIs # ### split data into training and testing my_nest = pickle.load( open('../models/my_nest.p','rb')) my_test = pickle.load( open('../models/my_test.p','rb')) df_nest = wilkeDf.loc[my_nest].sort_index() df_test = wilkeDf.loc[my_test].sort_index() df_nest.shape,df_test.shape # ### get features and labels clfDict = pickle.load(open('../models/li_tool_clf.p','r')) myLabeler = pickle.load( open('../models/myLabeler.p','rb') ) # ### decide if inconclusive data should be included def dropInc(X,y,myLabeler): # here, we use the labels (y) to find and exclude inconclusive cases noInc = np.where(y!=myLabeler.transform(['inconclusive'])[-1]) # both X and y need to be changed X = X[noInc[0]] y = y[noInc[0]] return X,y X = np.array(df_test[df_test.columns[-1]]) y = np.array([myLabeler.transform([df_test.index.levels[0][x]]) for x in df_test.index.labels[0]]) print X.shape,y.shape Xclean,yclean = dropInc(X,y,myLabeler) print Xclean.shape,yclean.shape # ### predict the test data def makeYPreds(X_validate,clf,scaler,myLabeler): y_pred = [] for x in X_validate: # when there is no LI, we consider the case to be inconclusive if np.isnan(x).any(): pred = myLabeler.transform(['inconclusive'])[-1] # otherwise we apply the actual classifier else: scaledX = scaler.transform( x.reshape(1,-1) ) thisPred = clf.predict_proba(scaledX)[-1] pred = np.where(thisPred==thisPred.max())[-1][-1] # if there are only three classes, we tweak the labels if thisPred.shape[-1]==3 and pred>0: pred+=1 y_pred.append(pred) return np.array(y_pred) # Example: c = 'bootstrap' thisClf = clfDict[c][False] clf = thisClf['clf'] scaler = thisClf['scaler'] X_validate = df_test[c].values y_validate = myLabeler.transform([df_test[c].index.levels[0][x] for x in df_test[c].index.labels[0]]) y_pred = makeYPreds(X_validate,clf,scaler,myLabeler) y_pred # ### evaluate the results and make confusion matrix def makePred(df_test,c,clfDict,myLabeler,myDrop=True): thisClf = clfDict[c][myDrop] clf = thisClf['clf'] scaler = thisClf['scaler'] X_validate = df_test[c].values y_pred = makeYPreds(X_validate,clf,scaler,myLabeler) y_lab = myLabeler.inverse_transform(y_pred) df = pd.DataFrame(y_lab) df.index = df_test.index df.columns = [c] return df predsDf = pd.DataFrame() for c in clfDict.keys(): thisDf = makePred(df_test,c,clfDict,myLabeler,myDrop=True) predsDf = pd.concat([predsDf,thisDf],axis=1) predsDf.tail() def makeCm(df,c,myLabeler): # predictions of left-out data y_pred = df[c] y_true = [df.index.levels[0][x] for x in df.index.labels[0]] # confusion matrix -- input y and y_pred -- result: true in rows, pred in columns cm = metrics.confusion_matrix(y_true,y_pred) # put into df cmDf = pd.DataFrame(cm) # get the naming right try: theseLabels = np.unique(y_true) cmDf.index = theseLabels cmDf.columns = theseLabels except: theseLabels = myLabeler.classes_ cmDf.index = 
theseLabels cmDf.columns = theseLabels # make better order theseLabels = ['left','bilateral','right','inconclusive'] cmDf = cmDf.reindex(index=theseLabels) cmDf = cmDf.reindex(columns=theseLabels) cmDf.index = [['True label']*cmDf.shape[0],theseLabels] cmDf.columns = [['Predicted label']*cmDf.shape[1],theseLabels] return cmDf cmDf = makeCm(predsDf,'fixed-count',myLabeler) cmDf def makeDiagCm(cmDf,idx): # get diagonal diagDf = pd.DataFrame(np.diagonal(cmDf)).T diagDf.index=idx diagDf.columns=cmDf.columns return diagDf makeDiagCm(cmDf,[0]) def getAcc(cmDf): # get absolute count of hits (diagonal) n_diag = makeDiagCm(cmDf,[['total'],['total']]).sum(axis=1).values[-1] # get absolute count of all test data n_total = cmDf.sum().sum() # get proportion prop = n_diag/float(n_total) return pd.DataFrame([prop],index=['acc']).T getAcc(cmDf) def makeBigAccDf(predsDf,myLabeler,myDrop=True): # empty df to write to bigAccDf = pd.DataFrame() # for all LIs for c in predsDf.columns: # get the classifier and scaler objects associated with this LI, cmDf = makeCm(predsDf,c,myLabeler) #print cmDf,nInc # get the number of remaining conclusive cases if myDrop: # remove the true inconclusive cases dropDf = cmDf.drop('inconclusive',level=1,axis=0) # get number of cases that were classified as inconclusive nInc = dropDf['Predicted label']['inconclusive'].sum() # we ignore the predicted inconclusive cases in the 3-class case only, # for the four-class case they are of interest dropDf = dropDf.drop('inconclusive',level=1,axis=1) # get the number of conclusive cases n = dropDf.sum().sum() accDf = getAcc(dropDf) accDf.set_value(accDf.index[0],'nInc',nInc) accDf.set_value(accDf.index[0],'n',n) else: accDf = getAcc(cmDf) n = cmDf.sum().sum() nInc = predsDf.shape[0]-n accDf.set_value(accDf.index[0],'nInc',nInc) accDf.set_value(accDf.index[0],'n',n) accDf.index = [c] # add to big df bigAccDf = pd.concat([bigAccDf,accDf]) bigAccDf.sort_values(by='acc',ascending=False,inplace=True) return bigAccDf cmDf bigAllAccDf = makeBigAccDf(predsDf,myLabeler,myDrop=True) bigAllAccDf # ### add own predictions def makeDf(dimStr, dropStr, my_split=my_test): df = pd.DataFrame() for pGroup, pName in my_split: csv = '../data/processed/csv/meanTable_%s.csv' % pName pDf = pd.read_csv(csv, index_col=[0, 1], header=0) pDf = pDf.loc[dimStr, :].loc[[dropStr], :] pDf.index = [[pGroup], [pName]] df = pd.concat([df, pDf]) df.sort_index(inplace=True) df.drop(['L+R', 'L-R', 'LI'], axis=1, inplace=True) if dropStr == 'drop': df.drop('inconclusive', axis=0, inplace=True) df.drop('inconclusive', axis=1, inplace=True) return df testDf = makeDf('2d', 'full') twoAllDf = pd.DataFrame(testDf.idxmax(axis=1)) twoAllDf.columns = ['2d'] twoAllDf.sort_index(inplace=True) twoAllDf.tail() twoAllDf.shape predsDf = pd.concat([predsDf,twoAllDf],axis=1) predsDf.drop(['bootstrap-weighted','fixed-mean','bootstrap-trimmed'],axis=1,inplace=True) predsDf.tail() predsDf.iloc[:,1:3].stack()[predsDf.iloc[:,1:3].stack()=='inconclusive'] bigAllAccDf = makeBigAccDf(predsDf,myLabeler,myDrop=False) bigAllAccDf bigConcAccDf = makeBigAccDf(predsDf,myLabeler,myDrop=True) bigConcAccDf # ### plot figure # #### get guessing rate # for the four classes pGuess = round(wilkeDf.groupby(level=0).count().max().max()/float(wilkeDf.shape[0]),2) pGuess # only for conclusive cases wilkeConcDf = wilkeDf.drop('inconclusive',level=0) pGuessConc = round(wilkeConcDf.groupby(level=0).count().max().max()/float(wilkeConcDf.shape[0]),2) pGuessConc # #### compute confidence interval for binomial distribution 
from scipy.stats import beta def binom_interval(success, total, confint=0.95): #https://gist.github.com/paulgb/6627336 quantile = (1 - confint) / 2. lower = beta.ppf(quantile, success, total - success + 1) upper = beta.ppf(1 - quantile, success + 1, total - success) return np.array([lower, upper]) binom_interval(.9*100,100,confint=0.95) def binomCI(x,n,alpha): thisCi = stats.binom.interval(alpha,n,x) return np.array(thisCi)/float(n) # Example: binomCI(0.9,100,alpha=0.95) print stats.binom_test(90,100,p=0.82) print stats.binom_test(90,100,p=0.83) # #### compute confidence intervals for all predictions def addCIs(bigDf,alpha): d = {} for i in bigDf.index: thisDf = bigDf.loc[i] thisAcc = thisDf['acc'] thisN = thisDf['n'] #print i,thisAcc,thisN thisCI = binom_interval(thisAcc*thisN,thisN,confint=alpha) d[i] = thisCI df = pd.DataFrame(d).T df.columns = ['lowCI','upCI'] ciDf = pd.concat([bigDf,df],axis=1) ciDf.sort_values(by='acc',inplace=True) return ciDf # Example: ciDf = addCIs(bigConcAccDf,alpha=0.84) ciDf # #### make the plot sns.set_style('white') sns.set_context('poster') def makePlot(df,pGuess,ax): ciDf = addCIs(df,alpha=0.95) yRange = range(ciDf.shape[0]) xVals = ciDf.acc.values xErr = abs(ciDf[['lowCI','upCI']].values.T - ciDf.acc.values) ax.errorbar(xVals,yRange,xerr=xErr,marker='o',linewidth=0,elinewidth=4,color=(0.6,0.6,1)) myDf = ciDf.loc[ [x for x in ciDf.index if '2d' in x] ] yRange = [n for n,x in enumerate(ciDf.index) if '2d' in x] xVals = myDf.acc.values xErr = abs(myDf[['lowCI','upCI']].values.T - myDf.acc.values) ax.errorbar(xVals,yRange,xerr=xErr,marker='o',linewidth=0,elinewidth=4,color=(1,0.6,0.6)) myDf = ciDf.loc[ [x for x in ciDf.index if '+' in x] ] yRange = [n for n,x in enumerate(ciDf.index) if '+' in x] xVals = myDf.acc.values xErr = abs(myDf[['lowCI','upCI']].values.T - myDf.acc.values) ax.errorbar(xVals,yRange,xerr=xErr,marker='o',linewidth=0,elinewidth=4,color=(1,0.6,0.6)) ciDf = addCIs(df,alpha=0.84) yRange = range(ciDf.shape[0]) xVals = ciDf.acc.values xErr = abs(ciDf[['lowCI','upCI']].values.T - ciDf.acc.values) ax.errorbar(xVals,yRange,xerr=xErr,marker='o',linewidth=0,elinewidth=6,color=(0.5,0.5,1)) myDf = ciDf.loc[ [x for x in ciDf.index if '2d' in x] ] yRange = [n for n,x in enumerate(ciDf.index) if '2d' in x] xVals = myDf.acc.values xErr = abs(myDf[['lowCI','upCI']].values.T - myDf.acc.values) ax.errorbar(xVals,yRange,xerr=xErr,marker='o',linewidth=0,elinewidth=6,color=(1,0.5,0.5)) myDf = ciDf.loc[ [x for x in ciDf.index if '+' in x] ] yRange = [n for n,x in enumerate(ciDf.index) if '+' in x] xVals = myDf.acc.values xErr = abs(myDf[['lowCI','upCI']].values.T - myDf.acc.values) ax.errorbar(xVals,yRange,xerr=xErr,marker='o',linewidth=0,elinewidth=6,color=(1,0.5,0.5)) ax.set_yticks( range(ciDf.shape[0]) ) ax.set_yticklabels( ciDf.index ) ax.axvline(pGuess,linewidth=1,linestyle='dashed',c='k') ax.set_xlabel('accuracy') return ax sns.set_style('ticks') # + fig,(ax1,ax2) = plt.subplots(2,1,figsize=(6,6)) ax1 = makePlot(bigConcAccDf,pGuessConc,ax1) ax1.set_title('without inconclusive cases',y=1.1) ax1.set_xlim(0.6,1.05) ax1.set_ylim(-0.5,3.5) ax2 = makePlot(bigAllAccDf,pGuess,ax2) ax2.set_title('with inconclusive cases',y=1.1) ax2.set_xlim(0.6,1.05) ax2.set_ylim(-0.5,3.5) sns.despine(left=True,offset=10,trim=True) plt.tight_layout() fig.savefig('../reports/figures/15-comparison-with-li-tool.png',dpi=300,bbox_inches='tight') plt.show() # - # ## make Wada Predictions # ### get wada wadaDataDf = 
pd.read_csv('../data/processed/csv/wada_clean.csv',index_col=[0],header=0) wadaDataDf = wadaDataDf.loc[:,['wada']] wadaDataDf.loc[:,'name'] = wadaDataDf.index wadaDataDf.tail() wadaDataDf.shape # #### make sure no wada patients were in the train set # # Here, we do not want to get any output for n in [x[1] for x in my_nest]: for j in wadaDataDf.index: if j in n: print n,j # #### make sure all wada patients are in the test set # # Here, we do not want to get any output for j in wadaDataDf.index: in_test = False for n in [x[1] for x in my_test]: if j in n: in_test = True if not in_test: print j # ### get all the information into one table predsDf['name'] = [x.split('_')[0] for x in predsDf.index.get_level_values(1)] predsDf['ixName'] = [x for x in predsDf.index.get_level_values(1)] predsDf['clin'] = [x for x in predsDf.index.get_level_values(0)] predsDf.index = pd.MultiIndex.from_arrays([predsDf.clin,predsDf.ixName]) wadaPredsDf = pd.merge(predsDf,wadaDataDf,left_on='name',right_on='name') wadaPredsDf.index = pd.MultiIndex.from_arrays([wadaPredsDf['wada'],wadaPredsDf['name']]) wadaPredsDf.sort_index(inplace=True) wadaPredsDf.tail() # ### distribution of values wadaPredsDf.groupby('clin').count()['ixName'] wadaPredsDf.groupby('clin').count()['ixName'].sum() wadaPredsDf.shape wadaUniquePats = np.unique(wadaPredsDf.index.get_level_values(1)).shape[-1] wadaUniquePats # + [markdown] variables={" wadaPredsDf.shape[0] ": {}, " wadaUniquePats ": {}} # For validation, {{ wadaPredsDf.shape[0] }} fMRI data sets of {{ wadaUniquePats }} patients who also underwent Wada testing were used. # - # #### counts of the 3 groups wadaCountDf = pd.DataFrame( wadaPredsDf.groupby(level=1).first().groupby('wada').count()['ixName'] ) wadaCountDf # #### percentages of the 3 groups (wadaCountDf/wadaCountDf.sum()*100).round(0) # ### get all inconclusive cases wadaIncPreds = wadaPredsDf[wadaPredsDf['clin']=='inconclusive'].copy() wadaIncPreds.drop(['name','ixName','clin','wada'],axis=1,inplace=True) wadaIncPreds.dropna(inplace=True) wadaIncPreds.shape wadaIncPreds.tail() # #### stats for inconclusive bigIncAccDf = makeBigAccDf(wadaIncPreds,myLabeler,myDrop=True) bigIncAccDf # guessing rate wadaIncDf = wadaPredsDf[wadaPredsDf['clin']=='inconclusive'] pWadaIncGuess = (wadaIncDf['wada']=='left').sum()/float(wadaIncDf.shape[0]) pWadaIncGuess # ### same thing for conclusive cases wadaConcPreds = wadaPredsDf[wadaPredsDf['clin']!='inconclusive'].copy() wadaConcPreds.drop(['name','ixName','clin','wada'],axis=1,inplace=True) wadaConcPreds.dropna(inplace=True) print wadaConcPreds.shape bigConcAccDf = makeBigAccDf(wadaConcPreds,myLabeler,myDrop=True) bigConcAccDf wadaConcDf = wadaPredsDf[wadaPredsDf['clin']!='inconclusive'] pWadaConcGuess = (wadaConcDf['wada']=='left').sum()/float(wadaConcDf.shape[0]) pWadaConcGuess # ### plot # + minX,maxX = .0,1.05 fig,(ax1,ax2) = plt.subplots(2,1,figsize=(6,7)) ax1 = makePlot(bigIncAccDf-(10**-10),pWadaIncGuess,ax1) ax1.set_title('Wada prediction: only inconclusive cases',y=1.1) ax1.set_ylim(-0.5,3.5) ax1.set_xlim(minX,maxX) ax2 = makePlot(bigConcAccDf-(10**-10),pWadaConcGuess,ax2) ax2.set_xlim(minX,maxX) ax2.set_ylim(-0.5,3.5) ax2.set_title('Wada prediction: only conclusive cases',y=1.1) sns.despine(left=True,offset=10,trim=True) plt.tight_layout() plt.show() # - # ### Whole sample pWadaGuess = (wadaPredsDf['wada']=='left').sum()/float(wadaPredsDf.shape[0]) pWadaGuess # #### combine both approaches in the unflitered sample (conc+inconc) wadaAllPreds = wadaPredsDf.copy() 
wadaAllPreds.drop(['name','ixName','clin','wada'],axis=1,inplace=True) wadaAllPreds.shape bigAllAccDf = makeBigAccDf(wadaAllPreds,myLabeler,myDrop=True) bigAllAccDf # + wadaSelectPreds = wadaPredsDf.copy() wadaSelectPreds = wadaSelectPreds[wadaSelectPreds['2d']!='inconclusive'] wadaSelectPreds.drop(['name','ixName','clin','wada'],axis=1,inplace=True) wadaSelectPreds.shape bigSelectAccDf = makeBigAccDf(wadaSelectPreds,myLabeler,myDrop=True) bigSelectAccDf.index = ['2d+%s'%x for x in bigSelectAccDf.index ] bigSelectAccDf.drop('2d+2d',axis=0,inplace=True) bigSelectAccDf # - bigBigAccDf = pd.concat([bigAllAccDf,bigSelectAccDf]).sort_values(by='acc') bigBigAccDf # ### plot # + minX,maxX = .0,1.05 fig, (ax1, ax2, ax3) = plt.subplots(3, 1, figsize=(6, 10)) ax1 = plt.subplot2grid((17, 1), (0, 0), rowspan=5) ax1 = makePlot(bigConcAccDf-(10**-10),pWadaConcGuess,ax1) ax1.set_xlim(minX,maxX) ax1.set_ylim(-0.5,3.5) ax1.set_title('without inconclusive cases',y=1.1) ax2 = plt.subplot2grid((17, 1), (5, 0), rowspan=7) ax2 = makePlot(bigBigAccDf-(10**-10),pWadaGuess,ax2) ax2.set_xlim(minX,maxX) ax2.set_ylim(-0.5,6.5) ax2.set_xlabel('') ax2.set_title('with inconclusive cases',y=1.1) ax3 = plt.subplot2grid((17, 1), (12, 0), rowspan=5) ax3 = makePlot(bigIncAccDf-(10**-10),pWadaIncGuess,ax3) ax3.set_title('inconclusive cases only',y=1.1) ax3.set_ylim(-0.5,3.5) ax3.set_xlabel('') ax3.set_xlim(minX,maxX) sns.despine(left=True,offset=10,trim=True) plt.tight_layout() #plt.suptitle('Wada prediction',y=1.05,x=0.64) fig.savefig('../reports/figures/15-wada.png',dpi=300,bbox_inches='tight') plt.show() # - # ## show mistakes # # Plot the instances where left and right were confused myBigMistakes = pd.DataFrame() sideList = np.array(['left','right']) for side in sideList: otherSide = sideList[sideList!=side][-1] print side,otherSide thisBigMistakes = wadaPredsDf.loc[side][wadaPredsDf.loc[side]['2d']==otherSide] thisBigMistakes.index = [[side]*thisBigMistakes.shape[0],thisBigMistakes.index] myBigMistakes = pd.concat([myBigMistakes,thisBigMistakes]) myBigMistakes from nilearn import plotting for i in range(myBigMistakes.shape[0]): print '\n***************************' thisDf = myBigMistakes.iloc[i,:] thisFile = '../data/raw/nii/tMap_%s.nii'%thisDf['ixName'] plotting.plot_stat_map(thisFile,threshold=3,title=thisDf['name'],cut_coords=(-50,15,15)) plt.show() print myBigMistakes
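# #### optional: cross-check the binomial confidence intervals
# `binom_interval` above is a Clopper-Pearson ("exact") interval built from the beta quantile
# function. If statsmodels is available (an assumption -- it is not imported anywhere in this
# notebook), the same interval can be obtained with `proportion_confint(method='beta')`,
# which makes for a quick sanity check.

from statsmodels.stats.proportion import proportion_confint

# 90 successes out of 100 trials, as in the example further up
print proportion_confint(90, 100, alpha=0.05, method='beta')
print binom_interval(0.9*100, 100, confint=0.95)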
notebooks/15-mw-validation-with-wada.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Performance Analysis with Alphalens # ## Imports & Settings import warnings warnings.filterwarnings('ignore') # + from pathlib import Path from collections import defaultdict from time import time import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt from alphalens.tears import (create_returns_tear_sheet, create_summary_tear_sheet, create_full_tear_sheet) from alphalens import plotting from alphalens import performance as perf from alphalens import utils # - sns.set_style('whitegrid') np.random.seed(42) idx = pd.IndexSlice DATA_STORE = Path('..', 'data', 'assets.h5') # ## Alphalens Analysis # ### Get trade prices def get_trade_prices(tickers): return (pd.read_hdf(DATA_STORE, 'quandl/wiki/prices') .loc[idx['2006':'2017', tickers], 'adj_open'] .unstack('ticker') .sort_index() .shift(-1) .tz_localize('UTC')) trade_prices = get_trade_prices(tickers) trade_prices.info() # ### Load factors factors = (pd.concat([pd.read_hdf('data.h5', 'factors/common'), pd.read_hdf('data.h5', 'factors/formulaic') .rename(columns=lambda x: f'alpha_{int(x):03}')], axis=1) .dropna(axis=1, thresh=100000) .sort_index()) factors.info() tickers = factors.index.get_level_values('ticker').unique() alpha = 'alpha_054' factor = (factors[alpha] .unstack('ticker') .stack() .tz_localize('UTC', level='date') .sort_index()) # ### Generate Alphalens input data factor_data = utils.get_clean_factor_and_forward_returns(factor=factor, prices=trade_prices, quantiles=5, max_loss=0.35, periods=(1, 5, 10)).sort_index() factor_data.info() # ### Compute Metrics # + mean_quant_ret_bydate, std_quant_daily = perf.mean_return_by_quantile( factor_data, by_date=True, by_group=False, demeaned=True, group_adjust=False, ) mean_quant_rateret_bydate = mean_quant_ret_bydate.apply( rate_of_return, base_period=mean_quant_ret_bydate.columns[0], ) compstd_quant_daily = std_quant_daily.apply(std_conversion, base_period=std_quant_daily.columns[0]) alpha_beta = perf.factor_alpha_beta(factor_data, demeaned=True) mean_ret_spread_quant, std_spread_quant = perf.compute_mean_returns_spread( mean_quant_rateret_bydate, factor_data["factor_quantile"].max(), factor_data["factor_quantile"].min(), std_err=compstd_quant_daily, ) # - mean_ret_spread_quant.mean().mul(10000).to_frame('Mean Period Wise Spread (bps)').join(alpha_beta.T).T # ### Plot spread and cumulative returns # + fig, axes = plt.subplots(ncols=3, figsize=(20, 5)) mean_quant_ret, std_quantile = mean_return_by_quantile(factor_data, by_group=False, demeaned=True) mean_quant_rateret = mean_quant_ret.apply(rate_of_return, axis=0, base_period=mean_quant_ret.columns[0]) plot_quantile_returns_bar(mean_quant_rateret, ax=axes[0]) factor_returns = perf.factor_returns(factor_data) title = "Factor Weighted Long/Short Portfolio Cumulative Return (1D Period)" plotting.plot_cumulative_returns(factor_returns['1D'], period='1D', freq=pd.tseries.offsets.BDay(), title=title, ax=axes[1]) plotting.plot_cumulative_returns_by_quantile(mean_quant_ret_bydate['1D'], freq=pd.tseries.offsets.BDay(), period='1D', ax=axes[2]) fig.tight_layout() fig.savefig('figures/alphalens', dpi=300) # - # ### Create Tearsheet create_summary_tear_sheet(factor_data)
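# ### Information Coefficient (manual cross-check)
# The summary tear sheet above already reports IC statistics. The snippet below is a minimal
# manual sketch of the daily rank IC for the 1-day horizon, using only pandas; it should agree
# with `perf.factor_information_coefficient(factor_data)` (assuming that helper exists in the
# installed alphalens version).
ic_1d = (factor_data
         .groupby(level='date')
         .apply(lambda df: df['factor'].corr(df['1D'], method='spearman')))
ic_1d.describe()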
24_alpha_factor_library/05_alphalens_analysis.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="cdRFJhJrBgOK" # !pip install dicom2nifti # + id="X-tBTlI8BrEy" import dicom2nifti import nibabel import matplotlib.pyplot as plt # + id="J3NpL5CyB8MX" import os from google.colab import drive drive.mount('/content/drive') # + id="y3BRXPtxCArG" os.chdir("/content/drive/My Drive/DLMIP/") os.listdir() # + id="CskIrNJnBvm-" path_to_dicom = 'SE000001/' dicom2nifti.convert_directory(path_to_dicom, '.') # + id="RUImrJdFCWDV" nifti = nibabel.load('201_t2w_tse.nii.gz') print(nifti) # + colab={"base_uri": "https://localhost:8080/"} id="qBS5K_aDFmcW" outputId="ab600c37-a1f6-43c5-8329-60ec9ed2b334" nifti.header['qoffset_x'] # + id="_EjFs5MDGYyn" image_array = nifti.get_fdata() # + colab={"base_uri": "https://localhost:8080/", "height": 595} id="UhJb9bzIGkWk" outputId="422c73af-680c-4e14-a4cf-7c528af7b222" fig , axis = plt.subplots(3,3,figsize=(10,10)) slice_counter = 0 for i in range(3): for j in range(3): axis[i][j].imshow(image_array[:,:,slice_counter],cmap='gray') slice_counter+=1 # + id="61Nz12MnIiK4" image_array_processed = image_array * (image_array>300) # + colab={"base_uri": "https://localhost:8080/", "height": 538} id="-qaq-_LPZh8-" outputId="80396af8-a91f-43d2-f42f-69d963c74900" plt.figure() plt.imshow(image_array[:,:,20], cmap='gray') plt.figure() plt.imshow(image_array_processed[:,:,20], cmap= 'gray') # + id="bkrB1SeKZiyo" processed_nifti = nibabel.Nifti1Image(image_array_processed, nifti.affine) # + id="jwrZboaMcq4F" nibabel.save(processed_nifti,'201_t2w_tse_processed.nii.gz') # + id="DAde8KkEgir4" nifti_2 = nibabel.load('201_t2w_tse_processed.nii.gz') image_array2 = nifti_2.get_fdata() # + colab={"base_uri": "https://localhost:8080/", "height": 595} id="7PPReEGugH73" outputId="2eebeec2-f42f-4284-bef5-db4bc782d0a3" fig , axis = plt.subplots(4,4,figsize=(10,10)) slice_counter = 0 for i in range(4): for j in range(4): axis[i][j].imshow(image_array2[:,:,slice_counter],cmap='gray') slice_counter+=1 # + id="ow97CIGQKTA3"
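# +
# The >300 cut-off used above was chosen by eye. A quick intensity histogram (a minimal
# sketch, no new dependencies) helps to justify or tune that threshold.
plt.figure()
plt.hist(image_array.flatten(), bins=100)
plt.axvline(300, color='red')
plt.xlabel('voxel intensity')
plt.ylabel('voxel count')
plt.show()
# -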
NIfTI.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np import libpysal as ps from mgwr.gwr import GWR, MGWR from mgwr.sel_bw import Sel_BW import geopandas as gp import matplotlib.pyplot as plt import matplotlib as mpl import pandas as pd #Load Georgia dataset and generate plot of Georgia counties (figure 1) georgia_data = pd.read_csv(ps.examples.get_path('GData_utm.csv')) georgia_shp = gp.read_file(ps.examples.get_path('G_utm.shp')) fig, ax = plt.subplots(figsize=(10,10)) georgia_shp.plot(ax=ax, **{'edgecolor':'black', 'facecolor':'white'}) georgia_shp.centroid.plot(ax=ax, c='black') # + #Prepare Georgia dataset inputs g_y = georgia_data['PctBach'].values.reshape((-1,1)) g_X = georgia_data[['PctFB', 'PctBlack', 'PctRural']].values u = georgia_data['X'] v = georgia_data['Y'] g_coords = list(zip(u,v)) g_X = (g_X - g_X.mean(axis=0)) / g_X.std(axis=0) g_y = g_y.reshape((-1,1)) g_y = (g_y - g_y.mean(axis=0)) / g_y.std(axis=0) # + #Calibrate GWR model gwr_selector = Sel_BW(g_coords, g_y, g_X) gwr_bw = gwr_selector.search(bw_min=2) print(gwr_bw) gwr_results = GWR(g_coords, g_y, g_X, gwr_bw).fit() # - gwr_results.params[0:5] gwr_results.localR2[0:10]
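# +
#Calibrate MGWR model (optional). MGWR is imported above but never run; this minimal sketch
#follows the documented mgwr API (Sel_BW with multi=True, MGWR taking the selector object)
#and can take several minutes on this dataset.
mgwr_selector = Sel_BW(g_coords, g_y, g_X, multi=True)
mgwr_bw = mgwr_selector.search(multi_bw_min=[2])
print(mgwr_bw)
mgwr_results = MGWR(g_coords, g_y, g_X, mgwr_selector).fit()
# -

mgwr_results.params[0:5]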
notebooks/GWR_Georgia_example.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Exercise Sheet 02 (Connectionist Neurons and Multi Layer Perceptrons) # # ## Group: ALT # # ### Exercise H2.1: Connectionist Neuron # + import math import numpy as np import matplotlib.pyplot as plt import pandas as pd import seaborn as sns # %matplotlib inline # + #importing and visualizing the data ds = pd.read_csv('applesOranges.csv') ds.columns = ['x1','x2','y'] ds.head() # - # y= 0 indicates that the sample is that of an “apple”. # # y= 1 assigns an observation to “orange”. # #### a) Plot the data in a scatter plot (x2vs.x1). Mark the points with different colors to indicate the type of each object. # + #Separating apples and oranges y0 = ds[ds.y == 0] #apples y1 = ds[ds.y == 1] #oranges fig = plt.figure() ax1 = fig.add_subplot(111) ax1.scatter(y0.x1,y0.x2,s=10, c='r', marker="s", label='apple') ax1.scatter(y1.x1,y1.x2,s=10, color='orange', marker="o" ,label='orange') plt.xlabel('$x_1$') plt.ylabel('$x_2$', rotation='horizontal') plt.title("Plot of $x_2$ vs $x_1$") plt.legend(loc='best'); plt.show() # - # #### b) For each of these weight w # (i) determine % correct classifications ρ of the corresponding neuron and # # (ii) plot a curve showing ρ as a function of γ # + #parameters to be used in the connectionist neuron theta = 0. gamma = np.linspace(0,180,19) gamma_rad = np.radians(gamma) w = np.vstack((np.cos(gamma_rad),np.sin(gamma_rad))).T #prepend the bias w = np.insert(w, 0, theta, axis=1) inputs = np.vstack((ds.x1,ds.x2)).T #prepend the bias x0 = 1 inputs = np.insert(inputs, 0, 1., axis=1).T tot_input = np.matmul(w,inputs) # + y_tot = np.heaviside(tot_input, 1) y_tot #each line in y_tot corresponds to the predicted value of all y from the data #for that specific pair of weights #First line: first pair of weights applied to the entire data yT = np.asarray(ds.y) #True values of y # + equals = (yT==y_tot) equals #which of the calculated outputs are equal to the real outputs # + rho_partial = np.sum(equals, axis =1) rho = (rho_partial/200.)*100 #i-th entry corresponds to the % of right results of the i-th pair of weights plt.plot(gamma,rho) plt.xlabel('$\gamma$ $(^{o})$') plt.ylabel(r'$\rho$ (%)', rotation='horizontal') plt.title(r"Plot of $\rho$ vs $\gamma$") plt.show() # - rho # #### c) Out of the 19 weight vectors from above, pick the w that yields the best performance. 
# w that yields best performance max_arg = np.argmax(rho) max_w = w[max_arg] print(max_w[1:]) # #### Now,vary the bias θ∈[−3,3] and pick the value of θ that gives the best performance # + theta_vec = -(np.linspace(-3,3,19)) w_line = np.tile(max_w[1:], (19, 1)) theta_w = np.column_stack((theta_vec,w_line)) theta_w # + #multiplying the inputs and the matrix of weights tot_input_theta = np.matmul(theta_w,inputs) #inserting into the heaviside function y_tot_theta = np.heaviside(tot_input_theta, 1) y_tot_theta # + #comparing with the true value for y equals_theta = (yT==y_tot_theta) equals_theta # + #determining the correct classifications rho_partial_theta = np.sum(equals_theta, axis =1) rho_theta = (rho_partial_theta/200.)*100 rho_theta # + #choosing the theta that maximizes the percentage of correct classifications max_arg_theta = np.argmax(rho_theta) max_w_theta = theta_w[max_arg_theta] -max_w_theta[0] # - # Which means that higher performance is achieved with: # # w = [0.93969262 , 0.34202014] and # # $\theta$ = 0.333. # #### d) Plot the data points and color them according to the predicted classification when using the w and θ that led to the highest performance. Plot the weight vector w in the same plot. How do you interpret your results? input_end = np.matmul(max_w_theta,inputs) y_pred = np.heaviside(input_end, 1) # + data_pred = np.vstack((ds.x1,ds.x2, y_pred)) apples = np.array((data_pred[0][data_pred[2]==0],data_pred[1][data_pred[2]==0])) oranges = np.array((data_pred[0][data_pred[2]==1],data_pred[1][data_pred[2]==1])) # + fig2 = plt.figure() ax2 = fig2.add_subplot(111) ax2.scatter(apples[0],apples[1],s=10, c='r', marker="s", label='apple') ax2.scatter(oranges[0],oranges[1],s=10, color='orange', marker="o" ,label='orange') plt.xlabel('$x_1$') plt.ylabel('$x_2$', rotation='horizontal') plt.title("Plot of $x_2$ vs $x_1$ with the predicted classifications") plt.arrow(0,0, max_w_theta[1],max_w_theta[2],length_includes_head=True, head_width=0.08, head_length=0.2 ) plt.legend(loc='best'); plt.show() # - # _Interpretation:_ # The weight vector $\mathbf{w}$ is the normal vector of the hyperplane. Therefore, $\mathbf{w}$ represents the orientation of the hyperplane. As per convention, $\mathbf{w}$ points in the direction of y_pred = 1. # #### e) Find the best combination of w and θ by exploring all combinations of γ and θ (within a reasonable range and precision). Compute and plot the performance of all combinations in a heatmap. 
# + size = 100 gamma = np.linspace(0,180,size) gamma_rad = np.radians(gamma) w = np.vstack((np.cos(gamma_rad),np.sin(gamma_rad))).T theta_vec = np.linspace(-3,3,size) # + from matplotlib.ticker import FormatStrFormatter combined = np.zeros((size,size)) for i in range(size): for j in range(size): weight = np.insert(w[i], 0, -theta_vec[j], axis=0) temp_input = np.matmul(weight,inputs) temp_y = np.heaviside(temp_input, 1) temp_equals = (yT==temp_y) temp_performance = np.sum(temp_equals, axis =0) performance = (temp_performance/200.)*100 combined[i][j] = performance num_ticks = 5 # the index of the position of yticks yticks = np.linspace(0, size - 1, num_ticks, dtype=np.int) xticks = np.linspace(0, size - 1, num_ticks, dtype=np.int) # the content of labels of these yticks xticklabels = [np.round(theta_vec[idx]) for idx in yticks] yticklabels = [np.round(gamma_rad[idx]) for idx in xticks] ax = sns.heatmap(combined, yticklabels=yticklabels, xticklabels=xticklabels) ax.set_yticks(yticks) ax.set_xticks(xticks) ax.set_ylabel('$\gamma$(rad)') ax.set_xlabel(r'$\theta$') ax.set_title(r'Performance of all combinations of $\gamma$ and $\theta$') plt.show() # - # + result = np.where(combined == np.amax(combined)) listOfCordinates = list(zip(result[0], result[1])) np.amax(combined) # - # The best performance is 92% correct classifications. # + best_weight = w[listOfCordinates[0][0]] best_theta = theta_vec[listOfCordinates[0][1]] best_weight # - -best_theta # The combination that yields the best performance is: # # w = [0.70147489, 0.71269417] # # $\theta$ = 0.333 # #### f) Can the grid-search optimization procedure used in (e) be applied to any classification problem? Discuss potential problems and give an application example in which the above method must fail. # # XOR problem? For a classification problem that is not linearly separable we need a multilayer perceptron, but this means we would have a matrix of weights and a bias for each layer and searching through all the possible combinations has a high computational cost. # ### Exercise H2.2: Multilayer Perceptrons (MLP) # #### a) Create 50 independent MLPs with Nhid= 10 hidden units by sampling for each MLP a set of random parameters {w211i,w10i1,bi}, i= 1,...,10. # + weight21 = np.random.standard_normal((50,10)) weight10 = np.random.normal(loc=0.0, scale=2.0, size=(50,10)) bi = np.random.uniform(low=-2., high=2.0, size=(50,10)) #each row of the above is the parameter for one MLP def mlp(w21,w10,b,x): transf = np.tanh(x*w10 - b) nodes = w21*transf y = np.sum(nodes,axis=1) return y ############################################################################## x = np.linspace(-2,2,50) y_tot = [mlp(weight21,weight10,bi,x[i]) for i in range(len(x))] x_expand = np.ones((50,len(x)))*x for i in range(len(x)): plt.plot(x_expand[:,i], y_tot[i], '.') plt.xlabel("x") plt.ylabel("y(x)") plt.title("Response of each MLP for different values of x, with $w_{i1}^{10}$ ~ N(0,2)") plt.show() # + t_2 = list(zip(*y_tot)) for i in range(50): plt.plot(x, t_2[i]) plt.xlabel("x") plt.ylabel("y(x)") plt.title("Response of the 50 MLP, y(x) with $w_{i1}^{10}$ ~ N(0,2)") plt.show() # - # #### c) Repeat this procedure using a different intialization scheme for the weights of the hidden neurons: w10i1∼N(0,0.5). What difference can you observe? 
# + weight105 = np.random.normal(loc=0.0, scale=0.5, size=(50,10)) #changing the weights of the hidden neurons y_tot_5 = [mlp(weight21,weight105,bi,x[i]) for i in range(len(x))] for i in range(50): plt.plot(x_expand[:,i], y_tot_5[i], '.') plt.xlabel("x") plt.ylabel("y(x)") plt.title("Response of each MLP for different values of x, with $w_{i1}^{10}$ ~ N(0,0.5)") plt.show() # - t = list(zip(*y_tot_5)) for i in range(50): plt.plot(x, t[i]) plt.xlabel("x") plt.ylabel("y(x)") plt.title("Response of the 50 MLP, y(x), with $w_{i1}^{10}$ ~ N(0,0.5)") plt.show() # # What differences can we observe? # # ## There are less abrupt changes in the values of y(x) when we use $w_{i1}^{10}$ ~ N(0,0.5). But y(x) is still in the same range of values. # #### d)Compute the mean squared error (MSE) between each of these 2×50 (50 from each of the above two initialization procedures) input-output functions and the functiong(x) =−x.For each of the two initialization procedures, which MLP approximates g best? Plot y(x) for these two MLPs. # + gT = -x #true values of the function t = list(zip(*y_tot_5)) t_2 = list(zip(*y_tot)) MSE = np.zeros((50,2)) for j in range(50): MSE[j][0] = (1/len(gT))*np.sum((gT - t_2[j])**2) MSE[j][1] = (1/len(gT))*np.sum((gT - t[j])**2) #Finding the minimimum MSE min_2 = np.min(MSE[:,0]) #min MLP with N(0,2) min_5 = np.min(MSE[:,1]) #min MLP with N(0,0.5) print(min_2,min_5) # + #The minimum corresponds to the MLP mlp_2 = np.argmin(MSE[:,0]) mlp_5 = np.argmin(MSE[:,1]) print(mlp_2,mlp_5) # - plt.plot(x, t_2[mlp_2], label='~N(0,2)') plt.plot(x, t[mlp_5], label='~N(0,0.5)') plt.plot(x,gT,label='g(x)') plt.xlabel("x") plt.ylabel("y(x)") plt.legend() plt.title("y(x) for the 2 MLP with min(MSE)") plt.show()
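# #### Appendix: a concrete failure case for the grid search from (e)
# As argued in (f), a single connectionist neuron only realizes linear decision boundaries, so
# the same $(\gamma, \theta)$ grid search cannot solve the XOR problem: at most 3 of the 4
# points are classified correctly, i.e. 75%. A minimal sketch on our own toy data (not part of
# the provided data set):

# +
X_xor = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y_xor = np.array([0, 1, 1, 0])

best_acc = 0.
for g in np.radians(np.linspace(0, 180, 181)):
    w_xor = np.array([np.cos(g), np.sin(g)])
    for th in np.linspace(-3, 3, 121):
        y_hat = np.heaviside(np.matmul(X_xor, w_xor) - th, 1)
        best_acc = max(best_acc, (y_hat == y_xor).mean())
best_acc  # stays at 0.75 no matter how fine the grid
# -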
ALT_Laura.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd df = pd.read_csv("../data/COVID-19_Reported_Patient_Impact_and_Hospital_Capacity_by_State_Timeseries.csv") df.describe() df.head() #geocoded state, shortage, cols = ["date", "state", "inpatient_beds_utilization", "inpatient_beds_utilization_numerator", "inpatient_beds_utilization_denominator", "percent_of_inpatients_with_covid", "inpatient_bed_covid_utilization", "adult_icu_bed_covid_utilization", "adult_icu_bed_utilization", "deaths_covid", "icu_patients_confirmed_influenza"] df = df[cols] # + #https://healthdata.gov/dataset/COVID-19-Diagnostic-Laboratory-Testing-PCR-Testing/j8mb-icvb #https://healthdata.gov/Health/COVID-19-Public-Therapeutic-Locator/rxn6-qnx8 #https://healthdata.gov/dataset/COVID-19-State-and-County-Policy-Orders/gyqz-9u7n # - grouped_df = df.groupby(by=["date", "state"]).sum() grouped_df.head(20) import matplotlib.pyplot as plt print(set(df['state'])) df_s = df[df['state'] == 'IA'] df_s.plot(x="date", y="adult_icu_bed_covid_utilization") # + # Use staff shortage as color for another graph. Example: plot cases across time as points and color the points with staff shortage as red and the others with green # - df = pd.read_csv("../data/COVID-19_Public_Therapeutic_Locator.csv") df.head() # + df['longitude'] = df.apply(lambda row: float(row['Geocoded Address'].split()[1][1:]), axis=1) df['latitude'] = df.apply(lambda row: float(row['Geocoded Address'].split()[2][:-1]), axis=1) # - import json
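# A vectorized alternative to the two .apply() calls above, assuming every value in
# "Geocoded Address" ends in a WKT-style "POINT (lon lat)" string (which is what the
# split()-based parsing above already relies on):
coords = df['Geocoded Address'].str.extract(r'POINT \((-?\d+\.?\d*) (-?\d+\.?\d*)\)')
df['longitude'] = coords[0].astype(float)
df['latitude'] = coords[1].astype(float)
df[['longitude', 'latitude']].head()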
notebooks/.ipynb_checkpoints/Medical-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + pycharm={"name": "#%%\n"} # update pip and setuptools import sys # !{sys.executable} -m pip install --upgrade pip # !{sys.executable} -m pip install --upgrade setuptools # install lib_log_utils from pypi # !{sys.executable} -m pip install --upgrade lib_log_utils # install lib_log_utils from github # !{sys.executable} -m pip install --upgrade git+https://github.com/bitranox/lib_log_utils.git # + pycharm={"name": "#%%\n"} # here we set the width to 80 characters - we need to do it on python # because jupyter does not support to set environment variables an the commandline import os os.environ['LOG_UTIL_WIDTH']="80" # + pycharm={"name": "#%%\n"} # commandline examples # !log_util "test" # + pycharm={"name": "#%%\n"} # color test # !log_util --colortest # + pycharm={"name": "#%%\n"} # banner, multi line # !log_util -l verbose -b "multi line banners${IFS}made easy. And did we mention that log_util by default wraps the text ? That can be disabled." # + pycharm={"name": "#%%\n"} # get some help # !log_util -h # + pycharm={"name": "#%%\n"} # usage in python is easy as well ! (sorry no colors here in jupyter python console) from lib_log_utils import * setup_handler() LogSettings.width=40 banner_info('a banner in python') banner_info('another banner in python', width=80) log_info('have fun !')
lib_log_utils.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np import pandas as pd import io import requests import matplotlib.pyplot as plt from scipy.optimize import curve_fit from scipy.special import erf url="https://covid19.isciii.es/resources/serie_historica_acumulados.csv" t=requests.get(url).text s=t.split("NOTA")[0] # + #df=pd.read_csv('serie_historica_acumulados.csv', parse_dates=True) # - df=pd.read_csv(io.StringIO(s), parse_dates=True) df.fillna(0, inplace=True) df['FECHA'] = pd.to_datetime(df['FECHA'],dayfirst=True) df # + #df=df[df['CCAA']=='GA'] # - df = df.groupby('FECHA')['Hospitalizados'].sum().to_frame() df=df.sort_values(by='Hospitalizados') df['Index'] = range(1, len(df) + 1) df=df.set_index('Index') df['Dif. Hospitalizados'] = df['Hospitalizados'] - df['Hospitalizados'].shift(1) df.fillna(0, inplace=True) df.drop('Hospitalizados', axis=1, inplace=True) df.plot() yData=df[df['Dif. Hospitalizados']>0].to_numpy() yData=yData.reshape(yData.size) xData=np.arange(yData.size) yData # + #Example of regression function - Polynomial #def func(x, a ,b, c, d): # return a*np.exp(b*x) # return a+ b*x + c*pow(x,2)+ d*pow(x,3) #+ e*pow(x,4) + f*pow(x,5) + g*pow(x,6) # + #Example of regression function #def func(x, amp, cen, wid): # return amp * np.exp(-(x-cen)**2 / wid) # - #Regression function - skew normal def func(x, sigmag, mu, alpha,a): c = 0 #normal distribution normpdf = (1/(sigmag*np.sqrt(2*np.pi)))*np.exp(-(np.power((x-mu),2)/(2*np.power(sigmag,2)))) normcdf = (0.5*(1+erf((alpha*((x-mu)/sigmag))/(np.sqrt(2))))) return 2*a*normpdf*normcdf + c popt, pcov = curve_fit(func, xData, yData, p0=(2,1,1,20000)) print(popt) print(pcov) xFit = np.arange(0.0, yData.size + 10, 1) plt.style.use('seaborn-white') plt.bar(xData, yData, align='center', alpha=0.7) plt.plot(xFit, func(xFit, *popt), 'r') plt.show() #Prediction for the next day: int(func(yData.size, *popt))
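#The covariance matrix pcov printed above can be turned into one-standard-error uncertainties
#for the fitted skew-normal parameters (standard curve_fit post-processing; this sketch assumes
#the fit converged and pcov is finite):
perr = np.sqrt(np.diag(pcov))
for name, value, err in zip(['sigmag', 'mu', 'alpha', 'a'], popt, perr):
    print('%s = %.3f +/- %.3f' % (name, value, err))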
COVID-19-hospitalizados.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: aaai20-jup # language: python # name: aaai20-jup # --- from functools import partial import numpy as np def criterion(m_matrix, m_filter=None, a_filter=None, aggregation=partial(np.sum, axis=1)): c_matrix = np.ones(m_matrix.shape[0])*-1 c_matrix[m_filter] = aggregation(m_matrix[m_filter,a_filter]) return c_matrix a = np.zeros((10,8)) a b = a[0:2, :] c = a[[2,3], :] d = a[[4,5], ] b, c, d def parallel_composition(m_list): return b,c,d a import numpy as np import sklearn from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor # + X = np.random.randint(4, size=(100, 4)) dt1 = DecisionTreeClassifier() dt1.fit(X[:,:-1], X[:,-1]) dt1.desc_ids = [1,2,3] dt1.targ_ids = [4] X[:, -1] = X[:, -1]+1 dt2 = DecisionTreeClassifier() dt2.fit(X[:,:-1], X[:,-1]) dt2.desc_ids = [1,2,5] dt2.targ_ids = [4] dt3 = DecisionTreeClassifier() dt3.fit(X[:,:-1], X[:,-2:]) dt3.desc_ids = [1,2, 3] dt3.targ_ids = [3, 4] # + def _uniform_predict(f): def predict(*args, **kwargs): return f(*args, **kwargs).reshape(-1,1) return predict def _uniform_predict_proba(f): def predict_proba(*args, **kwargs): return [f(*args, **kwargs)] return predict_proba # - class standard_model: attributes = { "desc_ids", "targ_ids", "predict", "n_outputs_", "predict_proba", "classes_", "predict", } def __init__(self, m): self.model = m for a in self.attributes: v = getattr(m, a, None) if v is not None: setattr(self, a, v) return # + dt1.yorrick = 'AHA' s = standard_model(dt1) del dt1 # - s.predict(X[:, :-1]) def uniform_model(m): if m.n_outputs_ == 1: if isinstance(m, sklearn.tree.DecisionTreeClassifier): m.predict = _uniform_predict(m.predict) m.predict_proba = _uniform_predict_proba(m.predict_proba) m.classes_ = [m.classes_] m.n_classes_ = [m.n_classes_] if isinstance(m, sklearn.tree.DecisionTreeRegressor): m.predict = _uniform_predict(m.predict) return m m_list = [uniform_model(m) for m in (dt1, dt2, dt3)] getattr(m, 'predict_proba') setattr(m, 'YEAH', 3) m.YEAH m_list[0].predict_proba(X[:, :-1]) for m in m_list: pass hasattr(m, 'classes_') dt1.n_outputs_ dt2.n_classes_ f = _uniform_predict(dt1.predict) dt2.predict_proba = _uniform_predict(dt1.predict_proba) dt1.predict = _uniform_predict_proba(dt1.predict) dt1.predict(X[:, :-1]) dt1.predict dt1.desc_ids class ParallelComposition(): def __init__(self, m_list): self.desc_ids = [] self.targ_ids = [] self.m_list = m_list for m in m_list: self.desc_ids += m.desc_ids self.targ_ids += m.targ_ids return def predict(self, X): return def predict_proba(self, X): return def predict_numer(self, X): return m_list = [dt1, dt2, dt3] pc = ParallelComposition(m_list) pc.desc_ids, pc.targ_ids res_numer = np.zeros((X.shape[0], len(pc.targ_ids))) res_numer.shape y_preds = [np.atleast_2d(m.predict(X[:,:-1])).T for m in m_list] y_preds[-1].shape y_preds[0].shape np.vstack(y_preds).reshape(X.shape[0], -1) []+[3,5] res_numer = np.empty((1000)) pc.predict[0](X[:, :-1]) del dt1 pc.m_list[0] dt1.classes_, dt2.classes_ # + all_classes = np.unique(np.vstack([dt1.classes_, dt2.classes_])) _pad_proba(dt1.classes_, all_classes)(dt1.predict_proba(X[:,:-1])) # - # + # Helpers def _pad_proba(classes, all_classes): idx = _map_classes(classes, all_classes) def pad(X): R = np.zeros((X.shape[0], len(all_classes))) R[:, idx] = X return R return pad def _map_classes(classes, all_classes): sorted_idx = np.argsort(all_classes) matches = 
np.searchsorted(all_classes[sorted_idx], classes) return sorted_idx[matches] def _select_numeric(idx): def select(X): if len(X.shape) > 1: return X[:, idx] elif len(X.shape) == 1: return X return select def _select_nominal(idx): def select(X): if isinstance(X, list): return X[idx] elif isinstance(X, np.ndarray): return X return select # - def _uniform(f): def predict(*args, **kwargs): return return dt1.predict(X[:,:-1]) G = nx.DiGraph() G.add_nodes_from({'a', 'b', 'c'}) G.add_edges_from([('a', 'c', dict(color='red')), ('b', 'c')]) G.nodes G.edges(data=True) g = G.subgraph(['a', 'c']) g.nodes, g.edges g.edges(data=True) np.zeros(10).reshape(-1,1) partial(np.sum, axis=1) np.sum(np.ones((2,10)), axis=1) import pympler from pympler.asizeof import asizeof asizeof(3) asizeof()/10**6 a = np.zeros((1500,3000)) a[1,1] = 1 a[2,1] = 1 np.sparse(a) from scipy import sparse b=sparse.csr_matrix(a) b b*np.zeros(3000) np.save('arraysparse', b) np.save('array', a)
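# np.save is not the intended way to persist a scipy.sparse matrix (it falls back to pickling
# an object array). A minimal sketch of the dedicated scipy helpers instead:
sparse.save_npz('arraysparse.npz', b)
b_loaded = sparse.load_npz('arraysparse.npz')
(b != b_loaded).nnz  # 0 -> the round trip preserved the matrix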
note/etc/Untitled.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.7.6 64-bit (''base'': conda)' # language: python # name: python37664bitbaseconda70fb04b0bd9543d0a4d5588de79b26c5 # --- # << [第七章:高级深度学习最佳实践](Chapter7_Advanced_deep_learning_best_pratices.ipynb)|| [目录](index.md) || [第九章:总结](Chapter9_Conclusions.ipynb) >> # # 第八章:生成模型深度学习 # # > The potential of artificial intelligence to emulate human thought processes goes beyond # passive tasks such as object recognition, or mostly reactive tasks such as driving a car. It # extends well into creative activities. When I first made the claim that in a not-so-distant # future, most of the cultural content that we consume will be created with heavy help from # AIs, I was met with utter disbelief, even from long-time machine learning practitioners. # That was in 2014. Fast forward three years, and the disbelief has receded—at an # incredible speed. In the summer of 2015, you were entertained by Google’s Deep Dream # algorithm turning an image into a psychedelic mess of dog eyes and pareidolic artifacts; # in 2016 you used the Prisma application to turn your photos into paintings of various # styles. In the summer of 2016, a first experimental short movie, Sunspring , was directed # using a script written by a LSTM—complete with dialogue lines. Maybe you even # recently listened to music tentatively generated by a neural network. # # 人工智能来模拟人类思维过程除了前面那些被动任务,比方说目标识别,或者很多响应式任务,比方说车辆驾驶之外,还能拓展创造性活动的领域。当作者首次断言在不久的将来,大多数我们消费的文化内容都会在AI的帮助下完成,遇到了很多的怀疑,这些怀疑甚至来自多年的参与机器学习的研究人员。那是在2014年,仅仅三年后,这些怀疑开始逐渐散去。在2015年夏天,谷歌推出了一个Deep Dream算法能够将图像转换成具有魔幻色彩的狗眼睛和古董的图像,吸引了很多人的注意;在2016年用户可以使用Prisma应用来将自己的照片转换成不同风格的画像;在2016年夏天,一部实验性的短电影叫Sunspring被摄制出来,其中的剧本使用了LSTM生成。很有可能最近你听到的一些音乐也是由神经网络申城的。 # # > Granted, the artistic productions we have seen from AI so far are all fairly # low-quality. AI is not anywhere close to rivaling human screenwriters, painters and # composers. But replacing humans was always besides the point: artificial intelligence is # not about replacing our own intelligence with something else, it is about bringing into our # lives and work more intelligence, intelligence of a different kind. In many fields, but # especially in creative ones, AI will be used by humans as a tool to augment their own # capabilities: more augmented intelligence than artificial intelligence. # # 诚然我们目前看到的那些AI艺术创作的质量都还很低。AI距离与人类剧作家、画家和作曲家竞争还差距着十万八千里。但实际上AI的目标永远不是取代人类:人工智能不是为了将人类的只能取代变成另一种智能,而是为了为人类的生活和工作带来更多的智能,不同形式的只能。在许多领域中,特别是创造性领域中,AI将称为人类的工具并增强人类的能力:更像增强智能而不是人工智能。 # # > A large part of artistic creation consists of simple pattern recognition and technical # skill. And that is precisely the part of the process that many find less attractive, even # skippable. That’s where AI comes in. Our perceptual modalities, our language, our # artworks all have statistical structure. Learning this structure is precisely what deep # learning algorithms excel at. Machine learning models can learn the statistical "latent # space" of images or music or even stories, and they can then "sample" from this space, # creating new artworks with similar characteristics as what the model has seen in its # training data. Naturally, such sampling is hardly an act of artistic creation in itself. 
It is a # mere mathematical operation: the algorithm has no grounding in human life, human # emotions, our experience of the world; instead it learns from an "experience" that has # little in common with ours. It is only our interpretation, as human spectators, that will # give meaning to what the model generates. But in the hands of a skilled artist, # algorithmic generation can be steered to become meaningful—and beautiful. Latent # space sampling can become a brush that empowers the artist, augments our creative # affordances, expands the space of what we can imagine. What’s more, it can make # artistic creation more accessible by eliminating the need for technical skill and # practice—setting up a new medium of pure expression, factoring art apart from craft. # # 艺术创作中的一大部分都含有简单的模式识别和技术工作。这也是很多人认为不够有趣的地方,甚至可以跳过的部分。这些就是AI能够进入的部分。我们的感知模型,我们的语言,我们的艺术品都有着统计学结构。从这些结构中学习正是深度学习算法擅长之处。机器学习模型可以从图像、音乐或者甚至是故事中学习到统计学的潜在空间,然后就能在空间中取样,从而创作一件与模型训练数据具有相似特征的新艺术作品。很显然,这样的取样行为很难认为是一种艺术创作。它仅仅就是一个数学运算:使用的算法没有任何对人类生活、情感、世界观的认知,而是从“经验”中进行学习,并不具有我们的共情能力。它创造出来的作品只有通过人类观众的解读才能赋予意义。但是对于高超的艺术家来说,如果掌握了这种技巧,算法生成的作品可被引导到有意义和优美的方向。潜在空间取样可以成为艺术家的神奇画笔,增强我们的创造性灵感,扩展我们的想象空间。更加有用的是,它能通过消除对艺术家技巧和技艺训练的要求使得艺术创作变得更加容易,构建出一种全新的纯表达的媒介,将艺术领域和工艺领域分开。 # # > <NAME>, a visionary pioneer of electronic and algorithmic music, beautifully # expressed this same idea in the 1960s, in the context of the application of automation # technology to music composition: # # > # ``` # "Freed from tedious calculations, the composer is able to devote himself to the # general problems that the new musical form poses and to explore the nooks and crannies # of this form while modifying the values of the input data. For example, he may test all # instrumental combinations from soloists to chamber orchestras, to large orchestras. With # the aid of electronic computers the composer becomes a sort of pilot: he presses the # buttons, introduces coordinates, and supervises the controls of a cosmic vessel sailing in # the space of sound, across sonic constellations and galaxies that he could formerly # glimpse only as a distant dream." # ``` # # <NAME>作为一个电子和算法音乐的先驱者,在60年代就在自动化音乐谱曲应用方面做过相关的描述: # # ``` # “将作曲家从枯燥乏味的计算当中释放出来,能够让他们更加专注于曲目的共性问题,如一种新的音乐形式,以及在这种形式下来探索各种细枝末节,通过修改输入数据来得到最理想的结果。例如,作曲家可以测试所有的演奏形式,从独奏到小乐队到交响乐团。有了计算机帮助的作曲家就像某种航天员:他按下按钮,输入坐标,然后监控着宇宙飞船在音乐空间中飞行的轨迹,从而能够穿越各种星座甚至星系,而这之前,可能这些地方只能通过望远镜匆匆一瞥。” # ``` # # > In this chapter, we will explore under various angles the potential of deep learning to # augment artistic creation. We will review sequence data generation (which can be used to # generate text or music), Deep Dreams, and image generation using both Variational # Auto-Encoders and Generative Adversarial Networks. We will get your computer to # dream up content never seen before, and maybe, we will get you to dream too, about the # fantastic possibilities that lie at the intersection of technology and art. # # 在本章中我们会从多个角度介绍深度学习在增强艺术创作上的能力。我们会涵盖序列数据生成(可以用来创作文字或音乐),Deep Dreams,以及图像生成的两种方式变分自动编码和生成对抗网络。本章会让你的计算机创作出之前从未想象过的成果,也有可能本章会让读者也开始梦想未来这种科技与艺术结合之后的奇妙世界。 # # > You will find five sections in this chapter: # # > - Text generation with LSTM, where you will use the recurrent networks you discovered in # Chapter 7 to dream up a pastiche of Nietzschean philosophy, character by character. # - Deep Dreams, where you will find out what dreams look like when all you know of the # world is the ImageNet dataset. # - Neural style transfer, where you will learn to apply the style of a famous painting to your # vacation pictures. 
# - Variational Autoencoders, where you find out about "latent spaces" of images, and how # to use them for creating new images. # - Adversarial Networks—deep networks that fight each other in a quest to produce the # most realistic pictures possible. # # > Let’s get started. # # 你可以在本章中学习到下面5方面内容: # # - 使用LSTM生成文本,你会使用我们在第七章中学习的循环网络来模仿生成尼采的哲学文章,一篇接一篇。 # - Deep Dreams,你会看到如果世界是由ImageNet数据集组成的话,它将会变成什么样子。 # - 神经风格转移,你可以学习到如何将名画作的风格应用到你自己的照片上。 # - 变分自动编码,你可以学习如何找到潜在空间,以及如何使用潜在空间创作新图像。 # - 对抗网络,深度网络能够互相对抗以产生最接近真实的照片。 # # 让我们开始这一章。 # ## 8.1 使用LSTM生成文本 # # > In this section, we present how recurrent neural networks can be used to generate # sequence data. We will use text generation as an example, but the exact same techniques # can be generalized to any kind of sequence data: you could apply it to sequences of # musical notes in order to generate new music, you could apply it to timeseries of brush # stroke data (e.g. recorded while an artist paints on an iPad) to generate paintings # stroke-by-stroke, and so on. # # 在本节中我们将介绍循环神经网络用来生成序列数据的方法。我们会使用文本生成作为一个例子,但是相同的技巧能够应用在任何序列数据生成任务上:你可以将它应用在一系列音符上以产生乐谱,你可以将它应用在一个时序的画笔描绘数据上(例如一个画家在iPad上作画的记录)来一笔一笔的产生画作,等等。 # # > Sequence data generation is no way limited to artistic content generation, either. It # has been successfully applied to speech synthesis, and dialog generation for chatbots. The # "smart reply" feature that Google released in 2016, capable of automatically generating a # selection of quick replies to your emails or text messages, is powered by similar # techniques. # # 序列数据生成不仅限于艺术内容生成,它还被成功的应用到了语音生成和对话机器人领域。谷歌在2016年发布的“smart reply”特性,能够为你的电子邮件或文字短信息自动产生快速的回复,也是使用类似的技术。 # ### 8.1.1 生成循环网络简史 # # > In late 2014, few people had ever heard the abbreviation "LSTM", even in the machine # learning community. Successful applications of sequence data generation with recurrent # networks only started appearing in the mainstream in 2016. But these techniques actually # have a fairly long history, starting with the development of the LSTM algorithm by # Hochreiter in 1997. This new algorithm was used early on to generate text character by # character. # # 在2014年底的时候,即使在机器学习社区中也很少人听说过缩写“LSTM”。使用循环网络生成序列数据的成功应用直到2016年才开始进入主流。但其实这项技术实际上有着很长的历史,可以回溯到1997年Hochreiter发明LSTM的时候。当时这个新算法用来实现字符层级的文本生成。 # # > In 2002, <NAME>, then at Schmidhuber’s lab in Switzerland, applied LSTM to # music generation for the first time, with promising results. <NAME> is now a # researcher at Google Brain, and in 2016 he started a new research group there, called # Magenta, focused on applying modern deep learning techniques to produce engaging # music. Sometimes, good ideas take fifteen years to get started. # # 瑞士Schmidhuber实验室的<NAME>在2002年第一次将LSTM应用到了音乐生成,获得了不错的结果。<NAME>现在是谷歌Brain的一名研究人员,他在2016年成立了一个新的研究小组,叫做Magenta,专注于应用现代深度学习技术来生成优秀的音乐。有的时候,一个好的想法需要15年才能开始实践。 # # > In the late 2000s and early 2010, <NAME> did important pioneering work on # using recurrent networks for sequence data generation. In particular, his 2013 work on # applying Recurrent Mixture Density Networks to generate human-like handwriting using # timeseries of pen positions, is seen by some as a turning point. This specific application # of neural networks at that specific moment in time captured for me the notion of # "machines that dream" and was a significant inspiration around the time I started # developing Keras. 
<NAME> left a similar commented-out remark hidden in a 2013
# LaTeX file uploaded to the preprint server Arxiv.org : "generating sequential data is the
# closest computers get to dreaming" . Several years later, we have come to take a lot of
# these developments for granted, but at the time, it was hard to watch Graves'
# demonstrations and not walk away awe-inspired by the possibilities.
#
# 在00年代末和10年代初的时候,<NAME>在使用循环网络生成序列数据方面做了许多重要的开创性工作。特别要指出的是,他在2013年使用循环混合密度网络(Recurrent Mixture Density Networks)在笔触位置的时序数据上生成类似人类笔迹的工作,被一些人视为一个转折点。神经网络在那个特定时刻的这个特定应用,让作者体会到了“能做梦的机器”这一观念,也成为了作者开始开发Keras时的一个重要激励。<NAME>在2013年上传到预印本平台Arxiv.org的一个LaTeX文件中,留下了一句类似的注释,表达了相同的观点:“生成序列数据是计算机最接近做梦的方式”。许多年以后,我们已经把这方面的进展视作理所当然,但在当时,很难在看过Graves的演示之后不对这种可能性心生敬畏。
#
# > Since then, recurrent neural networks have been successfully used for music
# generation, dialogue generation, image generation, speech synthesis, molecule design,
# and were even used to produce a movie script that was then cast with real live actors.
#
# 从那之后,循环神经网络已经被成功地运用到了音乐生成、对话生成、图像生成、语音合成、分子设计,甚至还被用来创作由真人演员出演的电影剧本。

# ### 8.1.2 我们该如何产生序列数据?
#
# > The universal way to generate sequence data in deep learning is to train a network
# (usually either a RNN or a convnet) to predict the next token or next few tokens in a
# sequence, using the previous tokens as input. For instance, given the input "the cat is on
# the ma" , the network would be trained to predict the target "t" , the next character. As
# usual when working with text data, "tokens" are typically words or characters, and any
# such network that can model the probability of the next token given the previous ones is
# called a language model . A language model captures the latent space of language, i.e. its
# statistical structure.
#
# 在深度学习中生成序列数据的一个通用方法,是训练一个网络(通常是一个RNN或卷积网络),使用前面的标记作为输入,来预测序列中的下一个标记或者下几个标记。例如,给定输入“the cat is on the ma”,网络将被训练来预测目标“t”,也就是下一个字符。通常在处理文本数据时,“标记”会是单词或字符,任何能够根据之前的标记对下一个标记的概率进行建模的网络,都被称为语言模型。语言模型能够捕捉到语言的潜在空间,也就是它的统计学结构。
#
# > Once we have such a trained language model, we can sample from it, i.e. generate
# new sequences: we would feed it some initial string of text (called "conditioning data"),
# ask it to generate the next character or the next word (we could even generate several
# tokens at once), then add the generated output back to the input data, and repeat the
# process many times (see Figure 8.1). This loop allows to generate sequences of arbitrary
# length that reflect the structure of the data that the model was trained on, i.e. sequences
# that look almost like human-written sentences. In our case, we will take a LSTM layer,
# feed it with strings of N characters extracted from a text corpus, and train it to predict
# character N+1 . The output of our model will be a softmax over all possible characters: a
# probability distribution for the next character. This LSTM would be called a
# "character-level neural language model".
#
# 我们有了这样训练好的语言模型之后,就可以从中取样,也就是生成新的序列:我们可以将一些初始的文本字符串输入给模型(被称为“条件数据”),然后让模型生成下一个字符或者下一个单词(甚至可以一次生成多个标记),再将生成的输出放回输入数据中,多次重复这个过程(参见图8-1)。这个循环能够产生任意长度的序列,它反映了模型训练数据的统计学结构,也就是说得到的序列看起来几乎像是人类书写的句子。在我们的场景中,我们会使用一个LSTM层,用文本语料库中提取的N个字符的字符串作为输入,训练它来预测第N+1个字符。模型的输出会是在所有可能字符上的softmax结果:也就是下一个字符的概率分布。这个LSTM层被称为“字符级神经语言模型”。
#
# ![language model](imgs/f8.1.jpg)
#
# 图8-1 使用语言模型生成字符级文本的过程

# ### 8.1.3 取样策略的重要性
#
# > When generating text, the way we pick the next character is crucially important. A naive
# approach would be "greedy sampling", consisting in always choosing the most likely
# next character.
However, such an approach would result in very repetitive and predictable # strings that don’t look like coherent language. A more interesting approach would consist # in making slightly more surprising choices, i.e. introducing randomness in the sampling # process, for instance by sampling from the probability distribution for the next character. # This would be called "stochastic sampling" (you recall that "stochasticity" is what we call # "randomness" in this field). In such a setup, if "e" has a probability 0.3 of being the next # character according to the model, we would pick it 30% of the time. Note that greedy # sampling can itself be cast as sampling from a probability distribution: one where a # certain character has probability 1 and all others have probability 0. # # 当生成文本时,我们选取下一个字符的方式是非常重要的。一个原始的解决方法是“贪婪取样”,也就是永远选择最大似然值的下一个字符。但是这样的做法会导致非常重复和可预测的字符串,使得语义看起来不连贯。一个更有趣的方法包括在取样中使用一些更加惊奇的策略,或者说在其中引入一些随机性,比方说在选取下一个字符时使用概率分布来取样。这被称为“随机取样”。在这个方案中,如果“e”根据模型计算有着0.3的概率,我们会在30%的时间中选择它。值得一提的是贪婪取样也算是随机取样的一种:只不过其中一个字符的概率为1而其他字符的概率都是0。 # # > Sampling probabilistically from the softmax output of the model is neat, as it allows # even unlikely characters to be sampled some of the time, generating more # interesting-looking sentences and even sometimes showing creativity by coming up with # new, realistic-sounding words that didn’t occur in the training data. But there is one issue # with this strategy: it doesn’t offer a way to control the amount of randomness in the # sampling process. # # 从模型softmax的输出中使用随机取样是很灵活的,因为它某些时候能够选取那些不太可能的字符,从而生成更加有趣的句子,甚至有时还能生成一些新奇的听起来很真实的单词,即使它们没有出现在训练数据中。但是这里还有一个问题:它没有提供一个方法来控制取样过程中的随机程度。 # # > Why would we want more or less randomness? Consider an extreme case: pure # random sampling, i.e. drawing the next character from a uniform probability distribution, # where every character is equally likely. This scheme would have maximum randomness; # in other words, this probability distribution would have maximum "entropy". Naturally, it # would not produce anything interesting. At the other extreme, greedy sampling, which # doesn’t produce anything interesting either, has no randomness whatsoever: the # corresponding probability distribution has minimum entropy. Sampling from the "real" # probability distribution, i.e. the distribution that is output by the model’s softmax # function, constitutes an intermediate point in between these two extremes. However, # there are many other intermediate points of higher or lower entropy that one might want # to explore. Less entropy will give the generated sequences a more predictable structure # (and thus they will potentially be more realistic-looking) while more entropy will result # in more surprising and creative sequences. When sampling from generative models, it is # always good to explore different amounts of randomness in the generation process. Since # the ultimate judge of the interestingness of the generated data is us, humans, # interestingness is highly subjective and there is no telling in advance where the point of # optimal entropy lies. 
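#
# 译者注:下面用一小段示意性的NumPy代码对比上文提到的贪婪取样与随机取样(其中的字符表和概率分布纯属假设,仅作演示用):

# +
import numpy as np

# 假设模型对下一个字符给出了这样一个softmax概率分布(数值是随意假设的)
chars_demo = ['a', 'b', 'c', 'd']
probs_demo = np.array([0.5, 0.3, 0.15, 0.05])

# 贪婪取样:永远选择概率最大的字符,结果完全确定
greedy_choice = chars_demo[np.argmax(probs_demo)]
print('贪婪取样:', greedy_choice)

# 随机取样:按照概率分布抽取下一个字符,结果带有随机性
stochastic_choices = np.random.choice(chars_demo, size=10, p=probs_demo)
print('随机取样:', ''.join(stochastic_choices))
# -

# 可以看到,贪婪取样每次都返回同一个字符,而随机取样每次运行的结果都会不同。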
# # 为什么我们需要更多或者更少的随机性?考虑一个极端的情景:完全随机取样,也就是按照平均概率分布来选取下一个字符,那么每个字符都具有相同的似然。这个情境中有着最大的随机性;或者说,这个概率分布有着最大的“熵”。很显然它不会生成任何有趣的东西,同样的另一种极端,贪婪取样,也不会生成任何有趣的东西:这时的概率分布有着最小的熵。从“真实”的概率分布中采样,也就是从模型的softmax激活函数的输出分布中进行采样,使用了这两个极端之间的一个中间点。然而这两个极端之间还存在着很多其他的更高熵或者更低熵的点可以探索。低熵的点会带来更加可预测的生成序列结构(并且它们应该看起来更加真实)而高熵的点会带来更加令人惊奇和创造性的生成序列。当从生成模型中进行采样时,探索各种可能的随机性永远是个好主意。因为最终判定生成数据的有趣程度的人是我们自己,人类,有趣性是高度具有主观性的因此没有方法提前知道哪个点的熵是最合适的。 # # > In order to control the amount of stochasticity in the sampling process, let’s introduce # a parameter called "softmax temperature" that characterizes the entropy of the probability # distribution used for sampling, or in other words, that characterizes how surprising or # predictable our choice of next character will be. Given a temperature value, a new # probability distribution is computed from the original one (the softmax output of the # model) by reweighting it in the following way: # # 为了能够控制取样过程中的随机性,我们会引入一个参数叫做“softmax温度”用来表示取样时的概率分布熵,或者也可以说,用来表示下一个字符的选择有多出乎意料或者可预测。给定一个温度值后,就可以按照原始分布(模型的softmax输出值)和温度值计算得到一个新的概率分布,如下: # + import numpy as np def reweight_distribution(original_distribution, temperature=.5): ''' 根据温度重新计算概率分布来控制熵的大小 参数: original_distribution: 一个1D概率Numpy向量,总和应该为1 temperature: 计算新的概率分布的熵因子 返回: 原始概率分布经过重新计算后得到的新的概率分布 ''' distribution = np.log(original_distribution) / temperature distribution = np.exp(distribution) # 经过运算后,概率分布的总和可能不再为1,我们需要将其正规化 return distribution / np.sum(distribution) # - # > Higher "temperatures" result in sampling distributions of higher entropy, that will # generate more surprising and unstructured generated data, while a lower temperature will # result in less randomness and much more predictable generated data. # # 更高的“温度”会获得更高熵的取样分布,也就是生成更加意料不到和非结构化数据,而更低的温度会获得更少随机性也就是更加可预测的数据。 # # ![diff entropy on same distribution](imgs/f8.2.jpg) # # 图8-2 在相同的softmax分布上进行重新分布:高温度=高确定性,低温度=高随机性 # # 8.1.4 实现字符级LSTM文本生成 # # > Let’s put these ideas in practice in a Keras implementation. The first thing we need is a # lot of text data that we can use to learn a language model. You could use any sufficiently # large text file or set of text files—Wikipedia, the Lord of the Rings, etc. In this example # we will use some of the writings of Nietzsche, the late-19th century German philosopher # (translated to English). The language model we will learn will thus be specifically a # model of Nietzsche’s writing style and topics of choice, rather than a more generic model # of the English language. # # 下面让我们在实践中使用Keras来实现上面的想法。第一步我们需要很多文本数据来学习一个语言模型。你可以使用任何足够大的文本文件或者全套的文本文件如维基百科、指环王等。在本例中,我们会使用尼采的一些著作(英文翻译版),他是19世纪晚期德国的哲学家。这样得到的语言模型将会具有尼采的写作风格和主题选择,而不是更加通用的英语模型。 # # #### 准备数据 # # > Let’s start by downloading the corpus and converting it to lowercase: # # 让我们首先下载语料库并将其转换成小写: # + from tensorflow import keras path = keras.utils.get_file('nietzsche.txt', origin='https://s3.amazonaws.com/text-datasets/nietzsche.txt') text = open(path).read().lower() len(text) # - # > Next, we will extract partially-overlapping sequences of length maxlen , one-hot # encode them and pack them in a 3D Numpy array x of shape (sequences, maxlen, # unique_characters) . Simultaneously, we prepare a array y containing the # corresponding targets: the one-hot encoded characters that come right after each # extracted sequence. 
# # 接下来,我们会提取长度为maxlen的部分重叠的序列,然后进行one-hot编码并且打包成一个形状为(序列, maxlen, 独立字符)的一个3D Numpy数组中。同时,我们还需要准备一个目标y向量:也是每个提取到的序列后出现的字符相对应的one-hot编码。 # + # 提取字符序列的长度 maxlen = 60 # 取样新序列的步长值 step = 3 # 下面这个列表保存提取出来的序列 sentences = [] # 下面这个列表保存目标的字符(下一个字符) next_chars = [] for i in range(0, len(text) - maxlen, step): sentences.append(text[i: i + maxlen]) next_chars.append(text[i + maxlen]) print('Number of sequences:', len(sentences)) # 语料库中不同字符的集合 chars = sorted(list(set(text))) print('Unique characters:', len(chars)) # 下面是一个字典值,将不同字符映射成语料库中的序号 char_indices = dict((char, chars.index(char)) for char in chars) # 下一步是将这些字符进行one-hot编码 print('Vectorization...') x = np.zeros((len(sentences), maxlen, len(chars)), dtype=np.bool) y = np.zeros((len(sentences), len(chars)), dtype=np.bool) for i, sentence in enumerate(sentences): for t, char in enumerate(sentence): x[i, t, char_indices[char]] = 1 y[i, char_indices[next_chars[i]]] = 1 # - # #### 构建网络 # # > Our network is a single LSTM layer followed by a Dense classifier and softmax over all # possible characters. But let us note that recurrent neural networks are not the only way to # do sequence data generation; 1D convnets also have proven extremely successful at it in # recent times. # # 我们使用一个LSTM层然后跟着一个全连接分类器,在所有可能的字符上进行softmax运算。不过这里需要提出的是,循环神经网络并不是生成序列数据的唯一选择,1D卷积网络最近在这个领域也被证明会非常成功。 # + from tensorflow.keras import layers from tensorflow.keras.models import Sequential model = Sequential() model.add(layers.LSTM(128, input_shape=(maxlen, len(chars)))) model.add(layers.Dense(len(chars), activation='softmax')) # - # > Since our targets are one-hot encoded, we will use categorical_crossentropy as # the loss to train the model: # # 因为这里的目标是one-hot编码的,所以我们会使用`categorical_crossentropy`作为损失函数来训练模型: # + from tensorflow.keras.optimizers import RMSprop optimizer = RMSprop(lr=0.01) model.compile(loss='categorical_crossentropy', optimizer=optimizer) # - # #### 训练语言模型并且使用它来取样 # # > Given a trained model and a seed text snippet, we generate new text by repeatedly: # # > 1. Drawing from the model a probability distribution over the next character given the # text available so far # 2. Reweighting the distribution to a certain "temperature" # 3. Sampling the next character at random according to the reweighted distribution # 4. Adding the new character at the end of the available text # # 给定一个训练好的模型和一个种子文本片段,我们可以不断的生成新的文本: # # 1. 从模型中获得目前文本序列的下一个字符的概率分布。 # 2. 使用一个给定的“温度”重新得到一个新的分布。 # 3. 使用新的分布对下一个字符进行取样。 # 4. 将新取样的字符加入到文本的末尾。 # # > This is the code we use to reweight the original probability distribution coming out of # the model, and draw a character index from it (the "sampling function"): # # 下面是我们对概率分布进行重新权重然后获取下一个字符序号的代码(也就是“取样函数”): def sample(preds, temperature=1.0): preds = np.asarray(preds).astype('float64') preds = np.log(preds) / temperature exp_preds = np.exp(preds) preds = exp_preds / np.sum(exp_preds) probas = np.random.multinomial(1, preds, 1) return np.argmax(probas) # > Finally, this is the loop where we repeatedly train and generated text. We start # generating text using a range of different temperatures after every epoch. This allows us # to see how the generated text evolves as the model starts converging, as well as the # impact of temperature in the sampling strategy. 
# # 最后是下面的循环用来重复的训练和生成文本。我们在每次epoch之后都重新生成一个温度值。这能够让我们观察到生成文本是如何随着模型收敛进行变化的,同时看到温度对取样策略的影响。 # + import random import sys for epoch in range(1, 60): print('epoch', epoch) # 使用选取的文本数据 model.fit(x, y, batch_size=128, epochs=1) # Select a text seed at random start_index = random.randint(0, len(text) - maxlen - 1) original_text = text[start_index: start_index + maxlen] print('--- Generating with seed: "' + original_text + '"') for temperature in [0.2, 0.5, 1.0, 1.2]: generated_text = original_text print('------ temperature:', temperature) print(generated_text, end='') # We generate 400 characters for i in range(400): sampled = np.zeros((1, maxlen, len(chars))) for t, char in enumerate(generated_text): sampled[0, t, char_indices[char]] = 1. preds = model.predict(sampled, verbose=0)[0] next_index = sample(preds, temperature) next_char = chars[next_index] generated_text += next_char generated_text = generated_text[1:] print(next_char, end='') print() # - # > Here is what we get at epoch 20, long before the model has fully converged. We used # the random seed text "new faculty, and the jubilation reached its climax when kant". # # 当第20次迭代时,模型还未完全收敛。我们使用的种子文本是“e variety among germans--pardon # me for stating the fact that”。 # # > With temperature=0.2: # # 当温度为0.2时,生成的文本是: # # ``` # e variety among germans--pardon # me for stating the fact that the world and the develop of the spirit and the state of the conscience of the spirit and the morality of the sense of the same time in the spirit and that the strength and the spirit and the state of the spirit and the sense of the spirit and the special proposed the suffering the sure of the conscience, and the sense of the conscience. the spirit and the conscience. the sense of the same time a # ``` # # > With temperature=0.5: # # 当温度是0.5时: # # ``` # e variety among germans--pardon # me for stating the fact that we think and the desirable conscience. # # # 14 # # =a thing all the domain of the precisely all the wors as ssquention and in the special spirit and the species of the demonstration of explom, the chate and hastor and conscience of self-place of the sureropened and class of the sportis, and the fact and the puring in the states and art of the will to be conscience of the belief of the states of the sen # ``` # # > With temperature=1.0: # # 当温度是1.0时: # # ``` # e variety among germans--pardon # me for stating the fact that art origin the sarrowered here stoom age repeatable for difference on thoughts," "he taikee in a count and sissian talegnd themselves, the tetiour, the # tradition to hompened all the regream; # enthrne," the inners of his own toings for all general gald us sind in b onwemon, but as conscienced that the order of # the same tentifoundance of the precisetementing, as unreligious by destrains and !f # ``` # # > With temperature=1.2: # # 当温度是1.2时: # # ``` # e variety among germans--pardon # me for stating the fact that when the stasterment the be; insonsist" to fragrion become dol afluwhking # like indemonedgutory," "the # -are hate on "youghle culture afforne of allowple, his 'much-countencely acjoses"y. # hom, he # visits dutues to black it is no polleatian paltitice of the spirit of a favoured it # naturaless "many # things--in harms and even-blound because obndion to sacrangablay, nual path. # # 124. 
he proby have been t # ``` # # > At epoch 60, the model has mostly converged and the text starts looking significantly # more coherent: # # 在第48次迭代之后,模型已经基本上收敛了,因此产生的文本看起来更加的相关:(译者注:此处选择了损失最小的迭代来示例,而不是原文中的60,实际上迭代次数只有59次) # # > With temperature=0.2: # # 当温度是0.2时,生成的文本是: # # ``` # necessary for the purpose is # a little vivisection of the germans to be all the same to the same to the suppose something the state of the same to the same truth of the prooth and man and the state of the most present destination of the sense of the fact the world of the state of the greatest states and the same to the disposition of the same truth and man and the supposing and the supposed and interpretation of the same to the same interestion of the same # ``` # # > With temperature=0.5: # # 当温度是0.5时: # # ``` # necessary for the purpose is # a little vivisection of the german, # of the spirit, and at present of all of life. # # 15. the most problem of their life man earl one of the freedoms of villogion of the heart to the dignous # interpretated the world the last the most interpretation and distinction, and the soul. the sense of the feelings is the contain even something and finer indianicn, and also indianicn of the early enough silence of a more growthing. the happin # ``` # # > With temperature=1.0: # # 当温度是1.0时: # # ``` # necessary for the purpose is # a little vivisection of the germans and, good"; and that interestion of attertion. # # # 110 # # =constantly valition the primordiagants. then inglinihorar, and solitudes up a reter--in the suppose of the community. the reason, allity for is a people is person to mys. the a # regarded odeaty # nationally # tomes result purpose right en of gratition. eagerated, hono # mineffing # seed--the # indiance # called # under cultive # original and moment, # indis # ``` # # > With temperature=1.2: # # 当温度是1.2时: # # ``` # necessary for the purpose is # a little vivisection of the germans--nature height. # # 126. no oight intempt-pretallents, to hidd-so purpose: "worlo of own asjrature, such although, caruses? have happent love affordness of all pariac". # # # 105 # atere tautised merules of fine indust ones; not. they gives gie 'menver ion one # by sole thingies of the through religios of different individuais intowar tro-first--the pleasion and condition # of my mints, with it; he ones # f # ``` # > As you can see, a low temperature results in extremely repetitive and predictable text, # but where local structure is highly realistic: in particular, all words (a word being a local # pattern of characters) are real English words. With higher temperatures, the generated # text becomes more interesting, surprising, even creative; it may sometimes invent # completely new words that sound somewhat plausible (such as "eterned" or # "troveration"). With a high temperature, the local structure starts breaking down and most # words look like semi-random strings of characters. Without a doubt, here 0.5 is the most # interesting temperature for text generation in this specific setup. Always experiment with # multiple sampling strategies! A clever balance between learned structure and randomness # is what makes generation interesting. 
# # 正如你看到的结果,较低的温度会导致非常重复和可预测的文本,但是生成的结果局部模式高度现实化:特别的是所有的单词(一个单词就是字符的局部模式)都是真是的英语单词。而使用较高的温度产生的文本就变得更加有趣,让人无法意料和具有创造性的,这种情况下有时候会发明一些全新的单词,看起来像是英文,又不是英文(例如“eterned”或者“troveration”)。在高温度下,文本的局部模式开始被打破,而大多数的单词看起来像是半随机字符组成的字符串。仔细观察可知,这里0.5的温度是最有意思的。在这种任务中,一定要多尝试多种取样策略。在学习到的结构和随机性之间选取一个最合适的平衡点。 # # > Note that by training a bigger model, longer, on more data, you can achieve generated # samples that will look much more coherent and realistic than ours. But of course, don’t # expect to ever generate any meaningful text, other than by random chance: all we are # doing is sampling data from a statistical model of which characters come after which # characters. Language is a communication channel, and there is a distinction between # what communications are about, and the statistical structure of the messages in which # communications are encoded. To evidence this distinction, here is a thought experiment: # what if human language did a better job at compressing communications, much like our # computers do with most of our digital communications? Then language would be no less # meaningful, yet it would lack any intrinsic statistical structure, thus making it impossible # to learn a language model like we just did. # # 这里还需要指明,如果你使用一个更大的模型,更长的片段,更多的数据,你就能够获得更加合理和真实的生成结果。但是当然不要期望这样能生成任何有意义的文本:我们现在做的所有事情只是从序列中按照字符出现的规律得到的模型中取样数据而已。语言是一个沟通渠道,在沟通渠道和信息编码成的统计学结构之间有着一道鸿沟。我们可以用下面这个思想实验来证明这点:如果人类语言在通信压缩上比现在做的好得多,就像我们使用计算机进行数字压缩通信那样,会出现什么情况?那么我们的语言中的信息量并不会变得更少,但是却会丢失了很多内在的统计学结构,因此使得这样的语言无法像我们前面那样训练一个语言模型出来。 # #### 小结一下 # # > - We can generate discrete sequence data by training a model to predict the next tokens(s) # given previous tokens. # - In the case of text, such a model is called a "language model" and could be based on # either words or characters. # - Sampling the next token requires balance between adhering to what the model judges # likely, and introducing randomness. # - One way to handle this is the notion of softmax temperature . Always experiment with # different temperatures to find the "right" one. # # - 我们能够通过训练一个模型来通过前面的标记生成下一个标记,从而生成离散的序列数据。 # - 在文本领域,这样的模型被称为“语言模型”,模型可以建立在单词或者字符上。 # - 下一个标记的取样需要在模型的分布概率和引入随机性之间进行取舍。 # - 处理这个问题的一个办法是使用softmax温度。多实验各种的温度来找到“合适”的那个值。 # ## 8.2 Deep Dream # # > "Deep Dream" is an artistic image modification technique that leverages the # representations learned by convolutional neural networks. It was first released by Google # in the summer of 2015, as an implementation written using the Caffe deep learning # library (this was several months before the first public release of TensorFlow). It quickly # became an Internet sensation thanks to the trippy pictures it could generate, full of # algorithmic pareidolia artifacts, bird feathers and dog eyes—a by-product of the fact that # the Deep Dream convnet was trained on ImageNet, where dog breeds and bird species # are vastly over-represented. # # “Deep Dream”是一个艺术图像编辑技巧,它利用了卷积神经网络学习到的表现形式。Deep Dream是谷歌在2015年夏天首次发布的,当时使用的是Caffe深度学习框架(也就是在TensorFlow首次公开发布的几个月前)实现的。因为它能生成具有迷幻色彩的图像因此很快就成为互联网上的热点,它创造的图像使用的是鸟类羽毛和狗的眼睛,这些都是Deep Dream卷积网络从ImageNet中训练得到的,然后通过一种奇幻的算法将它们组合起来。 # # ![deep dream example](imgs/f8.3.jpg) # # 图8-3 Deep Dream生成图像的例子 # > The Deep Dream algorithm is almost identical to the convnet filter visualization # technique that we introduced in Chapter 5, consisting in running a convnet "in reverse", # i.e. doing gradient ascent on the input to the convnet in order to maximize the activation # of a specific filter in an upper layer of the convnet. 
Deep Dream leverages this same idea, # with a few simple differences: # # > - With Deep Dream, we try to maximize the activation of entire layers rather than that of a # specific filter, thus mixing together visualizations of large numbers of features at once. # - We start not from a blank, slightly noisy input, but rather from an existing image—thus # the resulting feature visualizations will latch unto pre-existing visual patterns, distorting # elements of the image in a somewhat artistic fashion. # - The input images get processed at different scales (called "octaves"), which improves the # quality of the visualizations. # # Deep Dream算法基本上与我们在第五章介绍的卷积网络过滤器可视化技术相同,不过是“反向”运行卷积网络,也就是在输入上进行梯度上升从而最大化卷积网络上层特定过滤器的激活输出。Deep Dream充分利用了这个办法,不过有一些简单的区别: # # - 在Deep Dream当中,我们尝试最大化整个层次的激活输出而不是特定的过滤器,因此可以一次性混合大量的视觉元素。 # - 我们不是从一个空白带有少量噪音的输入开始,而是从一个现有的图像开始,因此生成的视觉特征会锁定在已经存在的视觉模式上,然后以某种艺术形式对这张图像元素进行扭曲。 # - 输入的图像会使用不同的缩放进行处理(被称为“音阶”),这样能改进生成的视觉效果质量。 # # > Let’s make our own Deep Dreams. # # 下面让我们来构建自己的Deep Dreams。 # ### 8.2.1 在Keras中实现Deep Dream # # > We will start from a convnet pre-trained on ImageNet. In Keras, we have many such # convnets available: VGG16, VGG19, Xception, ResNet50... albeit the same process is # doable with any of these, your convnet of choice will naturally affect your visualizations, # since different convnet architectures result in different learned features. The convnet used # in the original Deep Dream release was an Inception model, and in practice Inception is # known to produce very nice-looking Deep Dreams, so we will use the InceptionV3 model # that comes with Keras. # # 我们会从在ImageNet上预训练的卷积网络开始。在Keras中,有着很多可用的预训练网络:VGG16,VGG19,Xception,ResNet50.....尽管这些模型都可以采取同样的处理过程,但对于卷积网络模型的选择肯定会影响最终的视觉结果,因为不同的卷积网络结构导致不同的认知特征。最早发布的Deep Dream中使用的Inception模型,而且在实践中Inception能够产生非常漂亮的Deep Dreams,所有我们将会使用Keras内置的InceptionV3模型。 # + from tensorflow.keras.applications import InceptionV3 from tensorflow.keras import backend as K # 我们不会重新训练这个模型,因此我们会禁用所有训练相关动作 K.set_learning_phase(0) # 下面构建一个InceptionV3模型,不引入其顶端的分类器 model = InceptionV3(weights='imagenet', include_top=False) # - # > Next, we compute the "loss", the quantity that we will seek to maximize during the # gradient ascent process. In Chapter 5, for filter visualization, we were trying to maximize # the value of a specific filter in a specific layer. Here we will simultaneously maximize the # activation of all filters in a number of layers. Specifically, we will maximize a weighted # sum of the L2 norm of the activations of a set of high-level layers. The exact set of layers # we pick (as well as their contribution to the final loss) has a large influence on the visuals # that we will be able to produce, so we want to make these parameters easily configurable. # Lower layers result in geometric patterns, while higher layers result in visuals in which # you can recognize some classes from ImageNet (e.g. birds or dogs). 
We’ll start from a
# somewhat arbitrary configuration involving four layers—but you will definitely want to
# explore many different configurations later on:
#
# 下一步我们会计算“损失”,也就是在梯度上升过程中我们需要最大化的度量。在第五章的过滤器可视化中,我们尝试过最大化特定层次中某个特定过滤器的值。现在我们需要同时最大化多个层次中所有过滤器的激活。具体来说,我们会最大化一组高阶层次激活的L2范数的加权和。被选中的具体层次(以及它们对最终损失的贡献权重)对生成的视觉效果有着巨大的影响,因此我们希望这些参数容易配置。网络中较低的层次产生的是几何图案,而较高的层次产生的则是能从ImageNet中辨认出某些类别(如鸟或狗)的视觉特征。我们会从一个有点随意的四层配置开始,读者之后肯定会希望探索更多不同的配置:

# 下面定义一个字典,表示各个层次对于总体损失的贡献权重
# 这里使用的层次名称是内置的InceptionV3模型的层次名称
# 你可以通过`model.summary()`来查看
layer_contributions = {
    'mixed2': 0.2,
    'mixed3': 3.,
    'mixed4': 2.,
    'mixed5': 1.5,
}

# > Now let’s define a tensor that contains our loss, i.e. the weighted sum of the L2 norm
# of the activations of the layers listed above.
#
# 下面定义一个张量来包含我们的损失,也就是上面这些层次激活的L2范数的加权和。

# +
# 对于每个关键层次获得相应的名字
layer_dict = dict([(layer.name, layer) for layer in model.layers])

# 定义损失值
loss = K.variable(0.)
for layer_name in layer_contributions:
    # 将相关层次激活值的L2范数加到损失值上
    coeff = layer_contributions[layer_name]
    activation = layer_dict[layer_name].output

    # 用激活张量的元素个数做缩放;同时去除激活张量的边缘以避免边际效应
    scaling = K.prod(K.cast(K.shape(activation), 'float32'))
    loss.assign_add(coeff * K.sum(K.square(activation[:, 2: -2, 2: -2, :])) / scaling)
# -

# > Now we can set up the gradient ascent process:
#
# 现在我们就可以设置梯度上升过程了:
#
# 译者注:以下代码在使用了tensorflow v1兼容后仍然无法运行,希望大家能够提供建议修改下面代码使之能运行。

# +
import tensorflow as tf
tf.compat.v1.disable_eager_execution()

# 用来保存生成的图像
dream = model.input

# 按照损失值计算图像的梯度
grads = K.gradients(loss, dream)[0]

# 标准化梯度值
grads /= K.maximum(K.mean(K.abs(grads)), 1e-7)

# 定义函数用来计算损失值和梯度,以及梯度上升函数
outputs = [loss, grads]
fetch_loss_and_grads = K.function([dream], outputs)

def eval_loss_and_grads(x):
    outs = fetch_loss_and_grads([x])
    loss_value = outs[0]
    grad_values = outs[1]
    return loss_value, grad_values

def gradient_ascent(x, iterations, step, max_loss=None):
    for i in range(iterations):
        loss_value, grad_values = eval_loss_and_grads(x)
        if max_loss is not None and loss_value > max_loss:
            break
        print('...Loss value at', i, ':', loss_value)
        x += step * grad_values
    return x
# -

# > Finally, here is the actual Deep Dream algorithm.
#
# > First, we define a list of "scales" (also called "octaves") at which we will process the
# images. Each successive scale is larger than previous one by a factor 1.4 (i.e. 40%
# larger): we start by processing a small image and we increasingly upscale it (Figure 8.4).
#
# 最终来到真正的Deep Dream算法。
#
# 首先我们定义一系列的“缩放比例”(也叫作“音阶”),用来处理图像。每个后续的比例都是前一个的1.4倍(也就是大40%):我们从小的图像开始处理然后慢慢增大它(参见图8-4)。
#
# ![Deep Dream Process](imgs/f8.4.jpg)
#
# 图8-4 Deep Dream过程:一系列的缩放比例(音阶)以及在大尺寸图像上进行细节插入

# > Then, for each successive scale, from the smallest to the largest, we run gradient
# ascent to maximize the loss we have previously defined, at that scale. After each gradient
# ascent run, we upscale the resulting image by 40%.
#
# 然后对于每个缩放比例,从最小尺寸到最大尺寸,我们运行梯度上升来令前面定义的损失值最大化。每次梯度上升完成后,我们将结果图像放大40%。
#
# > To avoid losing a lot of image detail after each successive upscaling (resulting in
# increasingly blurry or pixelated images), we leverage a simple trick: after each upscaling,
# we reinject the lost details back into the image, which is possible since we know what the
# original image should look like at the larger scale. Given a small image S and a larger
# image size L, we can compute the difference between the original image (assumed larger
# than L) resized to size L and the original resized to size S—this difference quantifies the
# details lost when going from S to L.
#
# 为了避免在每次放大过程中丢失许多的图像细节(因为这会导致图像模糊和像素化),我们还需要应用一个简单技巧:在每次放大后,我们将丢失的细节重新注入到图像中。这是可行的,因为我们知道原始图像在更大尺寸下应该是什么样子。给定一个小尺寸S和一个更大的图像尺寸L,我们可以计算:把原始图像(假设它比L还要大)缩放到尺寸L得到的图像,与先缩放到尺寸S再放大回L得到的图像,两者之间的差值——这个差值就量化了从S放大到L时丢失的细节。

# +
import numpy as np

# 修改下面的超参数能够获得不同的艺术效果
step = 0.01  # 梯度上升的步长
num_octave = 3  # 音阶数量
octave_scale = 1.4  # 相邻音阶的尺寸系数
iterations = 20  # 每个音阶的梯度上升迭代次数

# 如果损失值超过10,我们就停止迭代,防止结果变得过于奇幻
max_loss = 10.

# 下面设定你用来进行Deep Dream的原始图像路径
base_image_path = '...'

# 将原始图像装载到Numpy数组中
img = preprocess_image(base_image_path)

# 我们设置一个元组的列表,用来存储我们需要进行梯度上升的不同尺寸
original_shape = img.shape[1:3]
successive_shapes = [original_shape]
for i in range(1, num_octave):
    shape = tuple([int(dim / (octave_scale ** i)) for dim in original_shape])
    successive_shapes.append(shape)

# 反序列表,因为需要升序排列
successive_shapes = successive_shapes[::-1]

# 将原始图像缩小到最小图像尺寸上
original_img = np.copy(img)
shrunk_original_img = resize_img(img, successive_shapes[0])

for shape in successive_shapes:
    print('Processing image shape', shape)
    img = resize_img(img, shape)
    img = gradient_ascent(img,
                          iterations=iterations,
                          step=step,
                          max_loss=max_loss)
    upscaled_shrunk_original_img = resize_img(shrunk_original_img, shape)
    same_size_original = resize_img(original_img, shape)
    lost_detail = same_size_original - upscaled_shrunk_original_img

    img += lost_detail
    shrunk_original_img = resize_img(original_img, shape)
    save_img(img, fname='dream_at_scale_' + str(shape) + '.png')

save_img(img, fname='final_dream.png')
# -

# > Note that the code above leverages the following straightforward auxiliary Numpy
# functions, which all do just as their name suggests. They require to have SciPy installed.
#
# 注意上面的代码直接使用了下面这些Numpy辅助函数,功能就如它们的名称所暗示的那样。这些函数需要安装SciPy。

# +
import scipy
from tensorflow.keras.applications import inception_v3
from tensorflow.keras.preprocessing import image

def resize_img(img, size):
    img = np.copy(img)
    factors = (1,
               float(size[0]) / img.shape[1],
               float(size[1]) / img.shape[2],
               1)
    return scipy.ndimage.zoom(img, factors, order=1)

def save_img(img, fname):
    pil_img = deprocess_image(np.copy(img))
    # 注意:scipy.misc.imsave在较新版本的SciPy中已被移除,需要较旧版本的SciPy
    scipy.misc.imsave(fname, pil_img)

def preprocess_image(image_path):
    # 打开、缩放并把图像格式化成合适的张量
    img = image.load_img(image_path)
    img = image.img_to_array(img)
    img = np.expand_dims(img, axis=0)
    img = inception_v3.preprocess_input(img)
    return img

def deprocess_image(x):
    # 将张量转换回图像
    if K.image_data_format() == 'channels_first':
        x = x.reshape((3, x.shape[2], x.shape[3]))
        x = x.transpose((1, 2, 0))
    else:
        x = x.reshape((x.shape[1], x.shape[2], 3))
    x /= 2.
    x += 0.5
    x *= 255.
    x = np.clip(x, 0, 255).astype('uint8')
    return x
# -

# > Note that because the original InceptionV3 network was trained to recognize concepts
# in images of size 299x299, and given that the process involves downscaling the images
# by a reasonable factor, our Deep Dream implementation will produce much better results
# on images that are somewhere between 300x300 and 400x400. Regardless, it is still
# possible to run the same code on images of any size and any ratio.
#
# 这里要注意,因为原始的InceptionV3网络是在尺寸为299x299的图像上训练出来识别概念的,而上面的过程又包含把图像按一定比例缩小的操作,所以我们的Deep Dream实现会在300x300到400x400大小的图像上产生更好的结果。不过,上面的实现仍然能够在任何尺寸和比例的图像上运行。
#
# > Starting from this photograph (taken in the small hills between the San Francisco bay
# and the Google campus), we obtain the following Deep Dream:
#
# 作者使用下面这张原始照片(在三藩市湾区和谷歌园区之间的小山丘上拍摄),获得了下面的Deep Dream:
#
# ![deep dream example](imgs/f8.5.jpg)
#
# 图8-5 我们的Deep Dream实现的一个例子

# > I strongly suggest that you explore what you can do by adjusting which layers you are
# using in your loss.
# Layers that are lower in the network contain more local, less abstract
# representations and will lead to more geometric-looking dream patterns. Layers
# higher-up will lead to more recognizable visual patterns based on the most common
# objects found in ImageNet, such as dog eyes, bird feathers, and so on. You can use
# random generation of the parameters in our layer_contributions dictionary in order
# to quickly explore many different layer combinations.
#
# 作者强烈建议读者探索一下,通过调整损失中使用哪些层次,你能得到什么样的结果。网络中较低的层次包含着更局部、更少抽象的表现形式,会产生看起来更具几何感的dream图案。较高的层次则会产生更容易辨认的视觉模式,对应ImageNet中最常见的目标,如狗眼睛、鸟羽毛等。你可以随机生成`layer_contributions`字典中的参数,从而快速地探索许多不同的层次组合。
#
# > Here is a range of results obtained using different layer configurations, from an image
# of a delicious homemade pastry:
#
# 下面是部分使用不同层次配置获得的结果,都是从一张可口的自制糕点照片中生成的:
#
# ![different layer configurations](imgs/f8.6.jpg)
#
# 图8-6 使用不同的层次作为损失值获得的图像

# ### 8.2.2 小结
#
# > - Deep Dream consists in running a network "in reverse" to generate inputs based on the
# representations learned by the convnet.
# - The results produced are fun, and share some similarity with the visual artifacts induced
# in humans by the disruption of the visual cortex via psychedelics.
# - Note that the process is not specific to image models, nor even to convnets. It could be
# done for speech, music, and more.
#
# - Deep Dream通过“反向”运行一个网络,基于卷积网络学习到的表现形式来生成输入图像。
# - 生成的结果很有趣,与迷幻剂扰乱人类视觉皮层时所引发的视觉幻象有几分相似。
# - 要说明的是,这个过程不仅限于图像模型,甚至不仅限于卷积网络。它也可以用于语音、音乐等。

# ## 8.3 神经风格迁移
#
# > Besides Deep Dream, another major development in deep learning-driven image
# modification that happened in the summer of 2015 is neural style transfer, introduced by
# <NAME> et al. The neural style transfer algorithm has undergone many refinements
# and spawned many variations since its original introduction, including a viral smartphone
# app, called Prisma. For simplicity, this section focuses on the formulation described in
# the original paper.
#
# 除了Deep Dream,深度学习驱动的图像修改领域在2015年夏天还出现了另一项主要进展,叫做神经风格迁移,由<NAME>等人首次提出。神经风格迁移算法在最初提出之后经历了多次改良并且孵化出很多变体,其中包括一个爆款智能手机应用Prisma。为了简单起见,本小节专注于原始论文中描述的方法。
#
# > Neural style transfer consists in applying the "style" of a reference image to a target
# image, while conserving the "content" of the target image:
#
# 神经风格迁移包含着将一个参考图像的“风格”应用到目标图像上,并且保留目标图像的“内容”:
#
# ![neural style transfer](imgs/f8.7.jpg)
#
# 图8-7 神经风格迁移的例子

# > What is meant by "style" is essentially textures, colors, and visual patterns in the
# image, at various spatial scales, while the "content" is the higher-level macrostructure of
# the image. For instance, blue-and-yellow circular brush strokes are considered to be the
# "style" in the above example using Starry Night by Van Gogh, while the buildings in the
# Tuebingen photograph are considered to be the "content".
#
# “风格”本质上就是图像在各种空间尺度上的纹理、颜色和视觉模式,而“内容”是图像中高层次的宏观结构。例如上面使用梵高《星空》的例子中,蓝黄相间的圆形笔触就被认为是“风格”,而图宾根照片中的建筑物就被认为是“内容”。
#
# > The idea of style transfer, tightly related to that of texture generation, has had a long
# history in the image processing community prior to the development of neural style
# transfer in 2015. However, as it turned out, the deep learning-based implementations of
# style transfer offered results unparalleled by what could be previously achieved with
# classical computer vision techniques, and triggered an amazing renaissance in creative
# applications of computer vision.
# # 风格转移的原理与纹理生成紧密相关,实际上在2015年出现神经风格迁移之前已经在图像处理领域存在了很久。然而由于基于深度学习技术实现的风格迁移的出现,人们发现其产生的结果与传统的计算机视觉技术得到的结果不可同日而语,因此再度引发了这个领域的一次爆发。 # # > The key notion behind implementing style transfer is same idea that is central to all # deep learning algorithms: we define a loss function to specify what we want to achieve, # and we minimize this loss. We know what we want to achieve: conserve the "content" of # the original image, while adopting the "style" of the reference image. If we were able to # mathematically define content and style, then an appropriate loss function to minimize # would be the following: # # 实现风格迁移的关键与所有的深度学习算法的核心点一致:定义损失函数来设定我们需要达到的目标,然后尽可能的最小化损失。我们这里的目标是:尽可能保留原始图像的“内容”而尽可能应用参考图像的“风格”。如果我们能够在数学上定义内容和风格,那么需要最小化的损失函数如下: # # ```python # loss = distance(style(reference_image) - style(generated_image)) + # distance(content(original_image) - content(generated_image)) # ``` # # > Where distance is a norm function such as the L2 norm, content is a function that # takes an image and computes a representation of its "content", and style is a function # that takes an image and computes a representation of its "style". # # 这里的`distance`是一个计算范数的函数,例如L2范数,`content`是一个从图像中获取并计算它内容表现形式的函数,`style`是一个从图像中获取并计算风格表现形式的函数。 # # > Minimizing this loss would cause style(generated_image) to be close to # style(reference_image) , while content(generated_image) would be close to # content(generated_image) , thus achieving style transfer as we defined it. # # 最小化这个损失会使得风格(生成图像)尽量接近(参考图像),而内容(生成图像)尽量接近(原始图像),因此达到我们定义的风格迁移目标。 # # > A fundamental observation made by Gatys et al is that deep convolutional neural # networks offer precisely a way to mathematically defined the style and content # functions. Let’s see how. # # Gatys在他的论文中提出了一个基本结论,就是深度卷积神经网络能够精确的定义我们需要的风格和内容函数。下面我们来看看如何实现。 # ### 8.3.1 内容损失 # # > As you already know, activations from earlier layers in a network contain local # information about the image, while activations from higher layers contain increasingly # global and abstract information. Formulated in a different way, the activations of the # different layers of a convnet provide a decomposition of the contents of an image over # different spatial scales. Therefore we expect the "content" of an image, which is more # global and more abstract, to be captured by the representations of a top layer of a # convnet. # # 正如你已经了解的,网络中前面层次的激活含有图像的局部信息,而上面层次的激活含有全局和抽象的信息。让我们换一种表述形式,卷积网络中不同层次的激活提供了在不同空间尺度上对图像内容分解的一种方式。因此我们我们希望获得一张图像的内容,也就是更加全局和抽象的信息,应该从卷积网络中的顶层中获得。 # # > A good candidate for a content loss would thus be to consider a pre-trained convnet, # and define as our loss the L2 norm between the activations of a top layer computed over # the target image and the activations of the same layer computed over the generated # image. This would guarantee that, as seen from the top layer of the convnet, the # generated image will "look similar" to the original target image. Assuming that what the # top layers of a convnet see is really the "content" of their input images, then this does # work as a way to preserve image content. # # 计算内容损失的一个很好的办法是使用一个预训练卷积网络,将我们的损失定义为网络最顶层计算得到的原始图像激活值与生成图像激活值的L2范数。这样能够保证对于最顶层来说,生成图像会和原始图像相似。因为我们假设卷积网络最顶层观察的是图像的“内容”,所以这样就能更好的保存图像内容。 # ### 8.3.2 风格损失 # # > While the content loss only leverages a single higher-up layer, the style loss as defined in # the Gatys et al. paper leverages multiple layers of a convnet: we aim at capturing the # appearance of the style reference image at all spatial scales extracted by the convnet, not # just any single scale. 
# # 对于内容损失来说,我们只使用了最顶层,然而Gatys等人在论文中定义的风格损失将需要使用卷积网络的多个层次:因为这里的目标是能够捕获参考图像中所有空间尺度上的风格表现,而不是单一的空间尺度。 # # > For the style loss, the Gatys et al. paper leverages the "Gram matrix" of a layer’s # activations, i.e. the inner product between the feature maps of a given layer. This inner # product can be understood as representing a map of the correlations between the features # of a layer. These feature correlations capture the statistics of the patterns of a particular # spatial scale, which empirically corresponds to the appearance of the textures found at # this scale. # # 对于风格损失,Gatys的论文使用了一个层激活的“格拉姆矩阵”,也就是给定层次的特征图的内积。这个内积的结果可以理解为层次的特征之间的相关性。这种特征的相关性捕获了特定空间尺度上的统计学模式,其实也就是在该尺度上观察到的纹理表现形式。 # # > Hence the style loss aims at preserving similar internal correlations within the # activations of different layers, across the style reference image and the generated image. # In turn, this guarantees that the textures found at different spatial scales will look similar # across the style reference image and the generated image. # # 因此风格损失的目标就是尽量保持不同层次激活的内部相关性,使得生成图像和参考图像的激活表现尽量一致。达到后,就能使得生成图像的风格看起来与参考图像相似。 # ### 8.3.3 简而言之 # # > In short, we can use a pre-trained convnet to define a loss that will: # # > - Preserve content by maintaining similar high-level layer activations between the target # content image and the generated image. The convnet should "see" both the target image # and the generated image as "containing the same things". # - Preserve style by maintaining similar correlations within activations for both low-level # layers and high-level layers. Indeed, feature correlations capture textures : the generated # and the style reference image should share the same textures at different spatial scales. # # 简而言之我们可以使用预训练的卷积网络来定义损失,以达到: # # - 在原始图像和生成图像之间保持相似的高层激活结果。卷积网络应该能够在两个图像上都“观测”相同的内容。 # - 通过在参考图像和生成图像之间保持相似的底层和高层激活结果的相关性来保持风格。实际上特征相关性代表着纹理:也就是生成图像和参考图像应该共享了不同空间尺度的相同纹理特征。 # # > Now let’s take a look at a Keras implementation of the original 2015 neural style # transfer algorithm. As you will see, it shares a lot of similarities with the Deep Dream # implementation we developed in the previous section. # # 下面我们来看一下在Keras中实现原始的2015神经风格迁移算法。你将会看到,下面的方法与上一节中的Deep Dream实现上有许多的相似之处。 # ### 8.3.4 Keras中的神经风格迁移 # # > Neural style transfer can be implemented using any pre-trained convnet. Here we will use # the VGG19 network, used by Gatys et al in their paper. VGG19 is a simple variant of the # VGG16 network we introduced in Chapter 5, with three more convolutional layers. # # 神经风格迁移可以使用任何的预训练卷积网络来实现。这里我们使用Gatys论文中用的那个VGG19网络。VGG19是我们在第五章中介绍过的VGG16网络的简单变体,只是多加了三个卷积层。 # # > This is our general process: # # > - Set up a network that will compute VGG19 layer activations for the style reference # image, the target image, and the generated image at the same time. # - Use the layer activations computed over these three images to define the loss function # described above, which we will minimize in order to achieve style transfer. # - Set up a gradient descent process to minimize this loss function. # # 主要的过程包括: # # - 构建一个网络,能够同时计算参考图像,原始目标图像和生成图像在VGG19层次上的激活。 # - 使用上面计算得到的层激活来定义前面介绍的损失函数,需要在训练中最小化这个值达到风格迁移的目标。 # - 设置梯度下降过程来最小化并进行训练。 # # > Let’s start by defining the paths to the two images we consider: the style reference # image and the target image. To make sure that all images processed share similar sizes # (widely different sizes would make style transfer more difficult), we will later resize # them all to a shared height of 400px. 
# # 首先我们定义两个图像的路径:风格参考图像和原始目标图像。为了保证所有图像都有着相似的大小(有着巨大尺寸差别的图像会使得风格迁移变得更加困难),我们会将两张图像都缩放到高度为400px。 # + from tensorflow.keras.preprocessing.image import load_img, img_to_array # 原始目标图像路径 target_image_path = 'img/portrait.jpg' # 风格参考图像路径 style_reference_image_path = 'img/transfer_style_reference.jpg' # 生成图像的尺寸 width, height = load_img(target_image_path).size img_height = 400 img_width = int(width * img_height / height) # - # > We will need some auxiliary functions for loading, pre-processing and # post-processing the images that will go in and out of the VGG19 convnet: # # 我们下面需要一些工具函数用来对输入输出VGG19卷积网络的图像进行装载、预处理、后处理: # + import numpy as np from tensorflow.keras.applications import vgg19 def preprocess_image(image_path): img = load_img(image_path, target_size=(img_height, img_width)) img = img_to_array(img) img = np.expand_dims(img, axis=0) img = vgg19.preprocess_input(img) return img def deprocess_image(x): # 使用像素均值来规范化 x[:, :, 0] += 103.939 x[:, :, 1] += 116.779 x[:, :, 2] += 123.68 # 'BGR'->'RGB' x = x[:, :, ::-1] x = np.clip(x, 0, 255).astype('uint8') return x # - # > Let’s set up the VGG19 network. It takes as input a batch of three images: the style # reference image, the target image, and a placeholder that will contain the generated # image. A placeholder is simply a symbolic tensor, the values of which are provided # externally via Numpy arrays. The style reference and target image are static, and thus # defined using K.constant , while the values contained in the placeholder of the # generated image will change over time. # # 然后构建VGG19网络。它将三张图像作为一个批次输入:风格参考图像、原始目标图像和一个作为生成图像的置位符。置位符就是一个符号化的张量,它的值通过外部Numpy数组来提供。因为风格参考图像和原始目标图像都是静态的,因此可以使用`K.constant`来定义,而置位符代表的生成图像会随着时间不断发生变化。 # + from tensorflow.keras import backend as K target_image = K.constant(preprocess_image(target_image_path)) style_reference_image = K.constant(preprocess_image(style_reference_image_path)) # 下面的置位符表示生成的图像 combination_image = K.placeholder((1, img_height, img_width, 3)) # 我们将三张图像合并成一个批次 input_tensor = K.concatenate([target_image, style_reference_image, combination_image], axis=0) # 构建VGG19网络,使用三张图像作为输入,模型会使用ImageNet数据集权重作为预训练权重值 model = vgg19.VGG19(input_tensor=input_tensor, weights='imagenet', include_top=False) print('Model loaded.') # - # > Let’s define the content loss, meant to make sure that the top layer of the VGG19 # convnet will have a similar view of the target image and the generated image: # # 定义内容损失,用来保证VGG19卷积网络的顶层对原始目标图像和生成图像有着相似的结果: def content_loss(base, combination): return K.sum(K.square(combination - base)) # > Now, here’s the style loss. It leverages an auxiliary function to compute the Gram # matrix of an input matrix, i.e. a map of the correlations found in the original feature # matrix. # # 下面就是风格损失。它使用一个工具函数来计算输入矩阵的格拉姆矩阵,也就是在原始特征矩阵中得到的相关性地图。 # + def gram_matrix(x): features = K.batch_flatten(K.permute_dimensions(x, (2, 0, 1))) gram = K.dot(features, K.transpose(features)) return gram def style_loss(style, combination): S = gram_matrix(style) C = gram_matrix(combination) channels = 3 size = img_height * img_width return K.sum(K.square(S - C)) / (4. * (channels ** 2) * (size ** 2)) # - # > To these two loss components, we add a third one, the "total variation loss". It is # meant to encourage spatial continuity in the generated image, thus avoiding overly # pixelated results. You could interpret it as a regularization loss. 
# # 在这两个损失模块基础上,我们增加了第三个,“总体差异损失”。这是用来提升生成图像的空间连续性的,从而避免产生过于像素化的结果。你可以理解为一个规范化后的损失。 def total_variation_loss(x): a = K.square(x[:, :img_height - 1, :img_width - 1, :] - x[:, 1:, :img_width - 1, :]) b = K.square(x[:, :img_height - 1, :img_width - 1, :] - x[:, :img_height - 1, 1:, :]) return K.sum(K.pow(a + b, 1.25)) # > The loss that we minimize is a weighted average of these three losses. To compute the # content loss, we only leverage one top layer, the block5_conv2 layer, while for the style # loss we use a list of layers than spans both low-level and high-level layers. We add the # total variation loss at the end. # # 最终我们需要最小化的损失是这三个损失值的加权平均。计算内容损失时我们只需要使用最顶层,也就是`block5_conv2`层,而计算风格损失时我们需要使用一个层次的列表,涵盖了底层到高层。最后我们将总体差异损失加在后面。 # # > Depending on the style reference image and content image you are using, you will # likely want to tune the content_weight coefficient, the contribution of the content loss # to the total loss. A higher content_weight means that the target content will be more # recognizable in the generated image. # # 取决于你在使用的风格参考图像和内容图像,你可能需要调整`content_weight`系数,它代表着内容损失在整体损失中占的比重。更高的`content_weight`代表着生成图像中的内容具有更高的辨识度。 # + # 定义个将层次名称映射到激活输出张量的字典 outputs_dict = dict([(layer.name, layer.output) for layer in model.layers]) # 内容损失计算的层次名称 content_layer = 'block5_conv2' # 风格损失计算的层次名称列表 style_layers = ['block1_conv1', 'block2_conv1', 'block3_conv1', 'block4_conv1', 'block5_conv1'] # 三个损失值所占的权重比例 total_variation_weight = 1e-4 style_weight = 1. content_weight = 0.025 # 下面将所有的损失值相加,合成到一个loss损失值中 loss = K.variable(0.) layer_features = outputs_dict[content_layer] target_image_features = layer_features[0, :, :, :] combination_features = layer_features[2, :, :, :] loss += content_weight * content_loss(target_image_features, combination_features) for layer_name in style_layers: layer_features = outputs_dict[layer_name] style_reference_features = layer_features[1, :, :, :] combination_features = layer_features[2, :, :, :] sl = style_loss(style_reference_features, combination_features) loss += (style_weight / len(style_layers)) * sl loss += total_variation_weight * total_variation_loss(combination_image) # - # > Finally, we set up the gradient descent process. In the original Gatys et al. paper, # optimization is performed using the L-BFGS algorithm, so that is also what we will use # here. This is a key difference from the Deep Dream example in the previous section. The # L-BFGS algorithms comes packaged with SciPy. However, there are two slight # limitations with the SciPy implementation: # # > - It requires to be passed the value of the loss function and the value of the gradients as two # separate functions. # - It can only be applied to flat vectors, whereas we have a 3D image array. # # 最后一步就是设置梯度下降过程。在Gatys的论文中,优化使用的是`L-BFGS`算法,因此我们这里也选择它。这是与之前Deep Dream例子的一个关键区别。L-BFGS算法被打包在SciPy库中。然而,SciPy实现的算法有两个局限性: # # - 它需要将损失函数和梯度值作为两个独立的参数代入。 # - 它只能应用在铺平的向量上,而这里我们有的是一个3D图像数组。 # # > It would be very inefficient for us to compute the value of the loss function and the # value of gradients independently, since it would lead to a lot of redundant computation # between the two. We would be almost twice slower than we could be by computing them # jointly. To by-pass this, we set up a Python class named Evaluator that will compute # both loss value and gradients value at once, will return the loss value when called the first # time, and will cache the gradients for the next call. 
# # 如果我们分别独立计算损失函数值和梯度值的话将会非常的低效,因为这会导致两者之间产生许多冗余的计算操作。这会使得整个计算时间比联合计算它们要多几乎一倍。为了避免这一点,我们会构造一个Python类叫做`Evaluator`,它会同时计算损失值和梯度值,然后在第一次调用时返回损失值,并将梯度值缓存起来留待第二次调用。 # + # 通过损失值计算生成图像的梯度值 grads = K.gradients(loss, combination_image)[0] # Function to fetch the values of the current loss and the current gradients fetch_loss_and_grads = K.function([combination_image], [loss, grads]) class Evaluator(object): def __init__(self): self.loss_value = None self.grads_values = None def loss(self, x): assert self.loss_value is None x = x.reshape((1, img_height, img_width, 3)) outs = fetch_loss_and_grads([x]) loss_value = outs[0] grad_values = outs[1].flatten().astype('float64') self.loss_value = loss_value self.grad_values = grad_values return self.loss_value def grads(self, x): assert self.loss_value is not None grad_values = np.copy(self.grad_values) self.loss_value = None self.grad_values = None return grad_values evaluator = Evaluator() # - # > Finally, we can run the gradient ascent process using SciPy’s L-BFGS algorithm, # saving the current generated image at each iteration of the algorithm (here, a single # iteration represents 20 steps of gradient ascent): # # 一切准备好后,我们就可以使用Scipy的L-BFGS算法来运行梯度增强过程,过程中我们会保存每次算法迭代完成后的生成图像(这里,一次迭代代表着20次梯度增强过程): # + from scipy.optimize import fmin_l_bfgs_b from scipy.misc import imsave import time result_prefix = 'my_result' iterations = 20 # 运行L-BFGS算法来最小化损失 # 初始化状态是原始目标图像 # 注意`scipy.optimize.fmin_l_bfgs_b`只能应用在铺平的向量上 x = preprocess_image(target_image_path) x = x.flatten() for i in range(iterations): print('Start of iteration', i) start_time = time.time() x, min_val, info = fmin_l_bfgs_b(evaluator.loss, x, fprime=evaluator.grads, maxfun=20) print('Current loss value:', min_val) # 保存生成的图像 img = x.copy().reshape((img_height, img_width, 3)) img = deprocess_image(img) fname = result_prefix + '_at_iteration_%d.png' % i imsave(fname, img) end_time = time.time() print('Image saved as', fname) print('Iteration %d completed in %ds' % (i, end_time - start_time)) # - # > Here’s what we get: # # 运行之后我们可以得到: # # ![sample images](imgs/f8.8.jpg) # # 图8-8 风格迁移的一些生成图像 # > Keep in mind that what this technique achieves is merely a form of image # re-texturing, or texture transfer. It will work best with style reference images that are # strongly textured and highly self-similar, and with content targets that don’t require high # levels of details in order to be recognizable. It would typically not be able to achieve # fairly abstract feats such as "transferring the style of one portrait to another". The # algorithm is closer to classical signal processing than to AI, so don’t expect it to work # like magic! # # 这里还需要说明的是,这个技术仅仅是一种将图像重新绘制纹理的过程,或者是纹理转移。因此它会在风格参考图像具有强烈纹理风格或者高度自相似纹理风格,以及内容目标图像不需要高度细节才能够识别的情况下,能够工作的最良好。它无法实现一些很常见的抽象任务比方说“将一张肖像的风格迁移到另一张肖像上”。这里的算法更接近传统信号处理而不是AI,因此别期望它像变魔术一样生成图像。 # # > Additionally, do note that running this style transfer algorithm is quite slow. # However, the transformation operated by our setup is simple enough that it can be # learned by a small, fast feedforward convnet as well—as long as you have appropriate # training data available. Fast style transfer can thus be achieved by first spending a lot of # compute cycles to generate input-output training examples for a fixed style reference # image, using the above method, and then training a simple convnet to learn this # style-specific transformation. Once that is done, stylizing a given image is instantaneous: # it’s a just a forward pass of this small convnet. 
# # 并且也需要了解运行这样的风格迁移算法很慢。然而我们这里使用的迁移操作还是比较简单的,因此可以通过一个小型的快速的前向传播卷积网络来进行学习,前提只要你有合适的训练数据。所以快速风格迁移能够通过预先训练生成特定输入输出训练样本上的固定风格参考图像的模型来完成,然后针对每个特定的风格转换都训练一个独立的简单卷积网络。完成之后,对给定图像的风格迁移就是瞬间完成:因为它仅需要对一个小型卷积网络做一次前向传播运算。 # ### 8.3.5 小结 # # > - Style transfer consists in creating a new image that preserves the "contents" of a target # image while also capturing the "style" of a reference image. # - "Content" can be captured by the high-level activations of a convnet. # - "Style" can be captured by the internal correlations of the activations of different layers # of a convnet. # - Hence deep learning allows style transfer to be formulated as an optimization process # using a loss defined with a pre-trained convnet. # - Starting from this basic idea, many variants and refinements are possible! # # - 风格迁移包含着创建一张新的图像,其中保留了目标图像的“内容”以及参考图像的“风格”。 # - “内容”可以从卷积网络的高层激活结果中获得。 # - “风格”可以从卷积网络各个层次的激活结果内在相关性中获得。 # - 因此我们可以使用深度学习方法,在一个允许你了卷积网络上使用损失优化方式来完成风格迁移。 # - 从这些基础知识出发,可以得到很多风格迁移的变体和改良。 # ## 8.4 使用变分自动编码生成图像 # # > Sampling from a latent space of images to create entirely new images, or edit existing # ones, is currently the most popular and successful application of creative AI. In this # section and the next one, we review some of the high-level concepts pertaining to image # generation, alongside implementations details relative to the two main techniques in this # domain: Variational Autoencoders (VAEs) and Generative Adversarial Networks # (GANs). The techniques we present here are not specific to images—one could develop # latent spaces of sound, music, or even text, using GANs or VAEs—but in practice the # most interesting results have been obtained with pictures, and that is what we focus on # here. # # 从图像的潜空间中取样来创建完全新的图像或编辑已经存在的图像,目前在创造性AI领域已经称为最热门和成功的应用。在本节和下一节中,我们会介绍一些高层的图像生成概念,同时会专门阐述与之相关两种技术实现你:变分自动编码(VAE)和生成对抗网络(GAN)。这两节介绍的技巧不但可以应用在图像上,也可以将它们应用到声音、音乐或者文本的潜空间中,不过在实践中最有趣的结果还是来自图像,因此我们还是聚焦于此。 # ### 8.4.1 从图像潜空间取样 # # > The key idea of image generation is to develop a low-dimensional latent space of # representations (which naturally is a vector space, i.e. a geometric space), where any # point can be mapped to a realistic-looking image. The module capable of realizing this # mapping, taking as input a latent point and outputting an image, i.e. a grid of pixels, is # called a generator (in the case of GANs) or a decoder (in the case of VAEs). Once such a # latent space has been developed, one may sample points from it, either deliberately or at # random, and by mapping them to image space, generate images never seen before. # # 图像生成的关键在于能够找到图像的低维度潜空间的表现形式(也就是向量空间或者几何空间),空间中人和店都能够被映射成真实图像中的一个点。能够实现这样的映射,也就是将输入潜空间的点转换成图像输出,或者说是一个像素网格的模块,被称为生成器(在使用GAN的情况下)或者解码器(在使用VAE的情况下)。一旦找到了这样的潜空间,就可以从中取样,以指定的方式或者以随机的方式,将它们映射到图像空间,从而生成从未有过的图像。 # # ![latent space](imgs/f8.9.jpg) # # 图8-9 从图像的潜空间中学习然后取样获得新的图像 # > GANs and VAEs are simply two different strategies for learning such latent spaces of # image representations, with each its own characteristics. VAEs are great for learning # latent spaces that are well-structured, where specific directions encode a meaningful axis # of variation in the data. GANs generate images that can potentially be highly realistic, but # the latent space they come from may not have as much structure and continuity. 
# # GAN和VAE就是两种从图像表现形式中学习获得潜空间的不同策略,当然它们具有各自的特点。VAE在学习具有良好结构的图像潜空间时特别有效,这里特定方向编码会是图像中一个有意义的数据轴的变分。GAN可以产生高度真实的图像,但是它们学习的潜空间可能并没有良好的结构和连续性。 # # ![VAE continuous latent space](imgs/f8.10.jpg) # # 图8-10 <NAME>使用VAE学习得到的连续潜空间生成的图像 # ### 8.4.2 图像编辑中的概念向量 # # > We already hinted at the idea of a "concept vector" when we covered word embeddings # in Chapter 6. The idea is still the same: given a latent space of representations, or an # embedding space, certain directions in the space may encode interesting axes of variation # in the original data. In a latent space of images of faces, for instance, there may be a # "smile vector" s , such that if latent point z is the embedded representation of a certain # face, then latent point z + s is the embedded representation of the same face, smiling. # Once one has identified such a vector, is then becomes possible to edit images by # projecting them into the latent space, moving their representation in a meaningful way, # then decoding them back to image space. There are concept vectors for essentially any # independent dimension of variation in image space—in the case of faces, one may # discover vectors for adding sunglasses to a face, removing glasses, turning a male face # into female face, etc. # # 在第六章词嵌入中我们已经接触过“概念向量”的内容。这里的含义是一样的:给定表现形式的潜空间,或者一个嵌入空间,某些原始数据的空间中的方向可以被编码成有意义的轴。例如在人脸图像的潜空间中,可能会存在“微笑向量”,我们称为向量`s`,然后在某张脸谱图像中存在一个潜在点`z`,那么潜在点`z + s`就变成了同一张脸并且带着微笑的嵌入表现形式。一旦我们找到了这样的向量,那么通过将这个向量投射到潜空间中来对图像进行编辑就变得可能了,从而将表现形式朝着期望的方向移动,最后重新将其解码到图像空间中。在图像空间充满了这样的概念向量独立维度,在人脸例子中,就存在这发现戴了太阳眼镜、去除眼镜、将男性脸部换成女性脸部等。 # # > Here is an example of a "smile vector", a concept vector discovered by <NAME> # from the Victoria University School of Design in New Zealand, using VAEs trained on a # dataset of faces of celebrities (the CelebA dataset): # # 下面是一个“微笑向量”的例子,这是由新西兰维多利亚大学设计学院的Tom White发现的,他使用了VAE在一个名人脸谱数据集上训练得到: # # ![smile vector](imgs/f8.11.jpg) # # 图8-11 微笑向量 # ### 8.4.3 变分自动编码器 # # > Variational autoencoders, simultaneously discovered by Kingma & Welling in December # 2013, and Rezende, Mohamed & Wierstra in January 2014, are a kind of generative # model that is especially appropriate for the task of image editing via concept vectors. # They are a modern take on autoencoders—a type of network that aims to "encode" an # input to a low-dimensional latent space then "decode" it back—that mixes ideas from # deep learning with Bayesian inference. # # 变分自动编码器是Kingma和Welling在2013年12月份,Rezende、Mohamed和Wierstra在2014年1月份同时发现的,是一种特别合适通过概念向量来进行图像编辑任务的生成模型。它是自动编码器的一个现代方法,自动编码器是一种网络专注于将输入“编码”到一个低维度的潜空间,然后将其“解码”回去的机器学习方法,它融合了深度学习和贝叶斯推断。 # # > A classical image autoencoder takes an image, maps it to a latent vector space via an # "encoder" module, then decode it back to an output with the same dimensions as the # original image, via a "decoder" module. It is then trained by using as target data the same # images as the input images, meaning that the autoencoder learns to reconstruct the # original inputs. By imposing various constraints on the "code", i.e. the output of the # encoder, one can get the autoencoder to learn more or less interesting latent # representations of the data. Most commonly, one would constraint the code to be very # low-dimensional and sparse (i.e. mostly zeros), in which case the encoder acts as a way # to compress the input data into fewer bits of information. 
# # 一个经典的图像自动编码器接受一张图像输入,使用“编码器”模块将它映射到潜在向量空间,然后又重新把向量空间解码映射到原始维度的图像空间,这意味着自动编码器具有学习重构元时输入的能力。通过对“编码”引入不同的约束条件,也就是约束编码器的输出,能够让其学习到数据中一些有意义的潜空间表现形式。更普遍来说,通过将数据编码到很低维度且稀疏的空间(也就是大部分是0),这样就可以提供一种将输入数据压缩到更小数据量的信息之中。 # # ![autoencoder](imgs/f8.12.jpg) # # 图8-12 自动编码器,将输入x编码到低维度潜空间,实现压缩后重新解码到原始数据空间 # > In practice, such classical autoencoders don’t lead to particularly useful or # well-structured latent spaces. They’re not particularly good at compression, either. For # these reasons, they have largely fallen out of fashion over the past years. Variational # autoencoders, however, augment autoencoders with a little bit of statistical magic that # forces them to learn continuous, highly structured latent spaces. They have turned out to # be a very powerful tool for image generation. # # 在实践中,这样的传统自动编码器不会得到特别有用或者良好结构化的潜空间。它们在压缩方面也不会表现优异。因为这些原因,传统的自动编码器在过去几年已经逐渐不再流行。然而变分自动编码器,增广自动编码器,使用了一些统计学的技巧使得它们能够学习到连续的高度结构化的潜空间。因此两者已经成为图像生成非常强大的工具。 # # > A VAE, instead of compressing its input image into a fixed "code" in the latent space, # turns the image into the parameters of a statistical distribution: a mean and a variance. # Essentially, this means that we are assuming that the input image has been generated by a # statistical process, and that the randomness of this process should be taken into # accounting during encoding and decoding. The VAE then uses the mean and variance # parameters to randomly sample one element of the distribution, and decodes that element # back to the original input. The stochasticity of this process improves robustness and # forces the latent space to encode meaningful representations everywhere, i.e. every point # sampled in the latent will be decoded to a valid output. # # 在VAE中,不再使用将输入图像压缩到潜空间的一个固定“编码”,而是将图像转换成统计学分布的参数:均值和方差。从根本上来说,这意味着我们假定输入图像是由一个统计学过程生成的,因此这个过程中的随机性必须在编码和解码的时候纳入考虑之中。VAE使用均值和方差参数来在分布中进行随机取样,然后把元素解码到原始输入空间中。将随机性加入这个过程中极大改善了潜空间编码有意义变现形式的健壮性和能力,也就是说潜空间中采样的每个点都能正确的解码到输出中。 # # ![VAE](imgs/f8.13.jpg) # # 图8-13 VAE将图像映射到两个向量上,z_mean和z_log_sigma,它们能有效表示图像的概率分布,在分布中可以取样并解码到原始空间 # > In technical terms, here is how a variational autoencoder works. First, an encoder # module turns the input samples input_img into two parameters in a latent space of # representations, which we will note z_mean and z_log_variance . Then, we randomly # sample a point z from the latent normal distribution that is assumed to generate the input # image, via z = z_mean + exp(z_log_variance) * epsilon , where epsilon is a # random tensor of small values. Finally, a decoder module will map this point in the latent # space back to the original input image. Because epsilon is random, the process ensures # that every point that is close to the latent location where we encoded input_img ( z-mean # ) can be decoded to something similar to input_img , thus forcing the latent space to be # continuously meaningful. Any two close points in the latent space will decode to highly # similar images. Continuity, combined with the low dimensionality of the latent space, # forces every direction in the latent space to encode a meaningful axis of variation of the # data, making the latent space very structured and thus highly suitable to manipulation via # concept vectors. 
# # 用技术术语来描述变分自动编码的原理。首先编码器模块将输入图像编码到潜空间的两个参数上,我们使用`z-mean`和`z_log_variance`来表示。然后我们可以在潜空间正态分布上取样z点作为输入图像生成的假设,公式是$$z=z\_mean+e^{z\_log\_variance}*\epsilon$$ # 这里的$\epsilon$是一个随机的小数值张量。最后解码器模块会将潜空间的这个点应社会原始输入图像。因为$\epsilon$是随机的,这个过程能狗保证每个从输入图像编码中得到的取样点都能近似解码到输入图像附近,因此强制让潜空间变为连续有意义。任何潜空间的两个邻近点必然会解码得到高度相似的图像。连续性再加上潜空间的低维度特性,使得潜空间中的每个方向都能代表一个数据变化上有意义的轴,因此潜空间变得非常具有结构化特征,特别适合用概念向量来编辑图像。 # # > The parameters of a VAE are trained via two loss functions: first, a reconstruction # loss that forces the decoded samples to match the initial inputs, and a regularization loss, # which helps in learning well-formed latent spaces and reducing overfitting to the training # data. # # VAE的参数需要通过两个损失函数来训练:第一个是重建损失,用来令解码后的样本接近原始输入,另一个是正则化损失,用来帮助学习到良好结构的潜空间和减少对训练数据的过拟合。 # # > Let’s quickly go over a Keras implementation of a VAE. Schematically, it looks like # this: # # 让我们快速看一下VAE在Keras中的实现。简单来说,如下: # # ```python # # 将输入编码成一个均值和方差参数 # z_mean, z_log_variance = encoder(input_img) # # # 从概率分布中取样一个点 # z = z_mean + exp(z_log_variance) * epsilon # # # 然后将z解码回到原始图像空间 # reconstructed_img = decoder(z) # # # 实例化模型 # model = Model(input_img, reconstructed_img) # # # 然后使用两个损失函数来训练模型 # # 重建损失和正则化损失 # ``` # > Here is the encoder network we will use: a very simple convnet which maps the input # image x to two vectors, z_mean and z_log_variance . # # 下面是一个编码器网络:它由一个简单的卷积网络构成,将输入的图像x转换成两个向量,`z_mean`和`z_log_variance`。 # + import tensorflow.keras as keras from tensorflow.keras import layers from tensorflow.keras import backend as K from tensorflow.keras.models import Model import numpy as np img_shape = (28, 28, 1) batch_size = 16 latent_dim = 2 # 潜空间的维度:平面 input_img = keras.Input(shape=img_shape) x = layers.Conv2D(32, 3, padding='same', activation='relu')(input_img) x = layers.Conv2D(64, 3, padding='same', activation='relu', strides=(2, 2))(x) x = layers.Conv2D(64, 3, padding='same', activation='relu')(x) x = layers.Conv2D(64, 3, padding='same', activation='relu')(x) shape_before_flattening = K.int_shape(x) x = layers.Flatten()(x) x = layers.Dense(32, activation='relu')(x) z_mean = layers.Dense(latent_dim)(x) z_log_var = layers.Dense(latent_dim)(x) # - # > Here is the code for using z_mean and z_log_var , the parameters of the statistical # distribution assumed to have produced input_img , to generate a latent space point z . # Here, we wrap some arbitrary code (built on top of Keras backend primitives) into a # Lambda layer. In Keras, everything needs to be a layer, so code that isn’t part of a built-in # layer should be wrapped in a Lambda (or else, in a custom layer). # # 下面是使用`z_mean`和`z_log_var`的代码,两个假设用来生成输入图像的统计学分布参数。下面的代码取样潜空间的点z。这里我们将取样的函数代码(在Keras backend原语上构建)封装成一个Lambda层。在Keras中,任何东西都应该是一个层,因此所有不属于内建层的代码都应该封装到Lambda(或者自定义层)之中。 # + def sampling(args): z_mean, z_log_var = args epsilon = K.random_normal(shape=(K.shape(z_mean)[0], latent_dim), mean=0., stddev=1.) return z_mean + K.exp(z_log_var) * epsilon z = layers.Lambda(sampling)([z_mean, z_log_var]) # - # > This is the decoder implementation: we reshape the vector z to the dimensions of an # image, then we use a few convolution layers to obtain a final image output that has the # same dimensions as the original input_img . 
# # 然后是解码器实现:我们将z向量重新转换成一张图像,然后我们使用几个卷积层来获得与原始图像相同维度的输出图像。 # + # 解码器的输入我们会使用z decoder_input = layers.Input(K.int_shape(z)[1:]) # 使用正确数量的单元提升采样 x = layers.Dense(np.prod(shape_before_flattening[1:]), activation='relu')(decoder_input) # 恢复成铺平之前的图像形状 x = layers.Reshape(shape_before_flattening[1:])(x) # 下面使用与编码其相反的操作:加上一个`Conv2DTranspose`层以及相应的参数 x = layers.Conv2DTranspose(32, 3, padding='same', activation='relu', strides=(2, 2))(x) x = layers.Conv2D(1, 3, padding='same', activation='sigmoid')(x) # 最后我们就获得了一个与原始输入相同尺寸的特征地图 # 然后定义解码器模型 decoder = Model(decoder_input, x) # 然后就可以将它应用到`z`上得到解码图像 z_decoded = decoder(z) # - # > The dual loss of a VAE doesn’t fit the traditional expectation of a sample-wise # function of the form loss(input, target) . Thus, we set up the loss by writing a # custom layer with internally leverages the built-in add_loss layer method to create an # arbitrary loss. # # VAE的双损失与常用的样本相关的函数形式`loss(input, target)`无法匹配。因此我们需要编写一个自定义的层来构建损失,在其内部使用内建的`add_loss`方法来获得任意的损失函数定义。 # + class CustomVariationalLayer(keras.layers.Layer): def vae_loss(self, x, z_decoded): x = K.flatten(x) z_decoded = K.flatten(z_decoded) xent_loss = keras.metrics.binary_crossentropy(x, z_decoded) kl_loss = -5e-4 * K.mean(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1) return K.mean(xent_loss + kl_loss) def call(self, inputs): x = inputs[0] z_decoded = inputs[1] loss = self.vae_loss(x, z_decoded) self.add_loss(loss, inputs=inputs) # 我们不会使用这个层来输出 return x # 使用输入和解码输出调用我们自定义的层次,来获取最终模型的输出 y = CustomVariationalLayer()([input_img, z_decoded]) # - # > Finally, we instantiate and train the model. Since the loss has been taken care of in # our custom layer, we don’t specify an external loss at compile time ( loss=None ), which # in turns means that we won’t pass target data during training (as you can see we only # pass x_train to the model in fit ). # # 最后构建和训练这个模型,因为损失已经在自定义层次中计算了,所以我们在编译模型时无需指定额外的损失函数(`loss=None`),这也意味着模型训练时不会传递目标数据参数给模型(下面的代码可以看到我们只传递了x_train到模型训练)。 # + from tensorflow.keras.datasets import mnist import tensorflow as tf tf.compat.v1.enable_eager_execution() vae = Model(input_img, y) vae.compile(optimizer='rmsprop', loss=None) vae.summary() # 在MNIST数据集上训练我们的VAE模型 (x_train, _), (x_test, y_test) = mnist.load_data() x_train = x_train.astype('float32') / 255. x_train = x_train.reshape(x_train.shape + (1,)) x_test = x_test.astype('float32') / 255. x_test = x_test.reshape(x_test.shape + (1,)) vae.fit(x=x_train, y=None, shuffle=True, epochs=10, batch_size=batch_size, validation_data=(x_test, None)) # - # > Once such a model is trained—e.g. 
on MNIST, in our case—we can use the decoder # network to turn arbitrary latent space vectors into images: # # 模型训练好了之后,比方说在MNIST数据集上,就可以使用解码器网络来在潜空间取样获得图像: # + import matplotlib.pyplot as plt from scipy.stats import norm # %matplotlib inline # 展示一个手写数字的2D流形 n = 15 # 15x15的网格 digit_size = 28 figure = np.zeros((digit_size * n, digit_size * n)) # 在单位正方形中的线性空间坐标通过正态分布的逆累积分布函数按照潜空间向量z获得 # 因为我们对潜空间的先验假设为正态分布 grid_x = norm.ppf(np.linspace(0.05, 0.95, n)) grid_y = norm.ppf(np.linspace(0.05, 0.95, n)) for i, yi in enumerate(grid_x): for j, xi in enumerate(grid_y): z_sample = np.array([[xi, yi]]) z_sample = np.tile(z_sample, batch_size).reshape(batch_size, 2) x_decoded = decoder.predict(z_sample, batch_size=batch_size) digit = x_decoded[0].reshape(digit_size, digit_size) figure[i * digit_size: (i + 1) * digit_size, j * digit_size: (j + 1) * digit_size] = digit plt.figure(figsize=(10, 10)) plt.imshow(figure, cmap='Greys_r') # - # ![decode numbers](imgs/f8.14.jpg) # # 图8-14 从潜空间中获得手写数字 # > The grid of sampled digits shows a completely continuous distribution of the different # digit classes, with one digit morphing into another as you follow a path through latent # space. Specific directions in this space have a meaning, e.g. there is a direction for # "four-ness", "one-ness", etc. # # 上面的数字网格完全展示了不同数字种类的连续分布,从一个数字变化到另外一个数字就像你在潜空间中沿着某个方向前进一样。在这个空间中特定的方向有着相应的意义,例如有一个方向表示“4”、“1”等。 # # > In the next section, we cover in detail the other major tool for generating artificial # images: generative adversarial networks (GANs). # # 在下一节中,我们会介绍另一个生成人工图像的主要工具:生成对抗网络(GAN)。 # ### 8.4.4 小结 # # > Image generation with deep learning is done by learning latent spaces that capture # statistical information about a dataset of images. By sampling points from the latent # space, and "decoding" them, one can generate never-seen-before images. There are two # major tools to do this: VAEs and GANs. # # > - VAEs result in highly structured, continuous latent representations. For this reason, they # work well for doing all sort of image edition in latent space, like face swapping, turning a # frowning face into a smiling face, and so on. They also work nicely for doing latent space # based animations, i.e. animating a walk along a cross section of the latent space, showing # a starting image slowly morphing into different images in a continuous way. # - GANs enable the generation of realistic single-frame images, but may not induce latent # spaces with solid structure and high continuity. # # 深度学习中的图像生成需要通过模型学习到捕获到图像数据集上的统计学信息的潜空间来实现。从潜空间中取样点,然后“解码”,就能生成之前不存在的图像。有两个主要的工具来完成这项任务:VAE和GAN。 # # - VAE能够获得高度结构化连续的潜空间。因此它能够完成各种各样的图像在潜空间进行编辑的工作,例如换脸、将皱眉表情变为微笑表情等等。它也能应用在实现潜空间动画上,例如在潜空间中沿着一个切面形成动画、展示一张初始图像然后连续渐变到其他图像上。 # - GAN能够生成单帧的真实图像,但是它的潜空间可能不是结构化和高度连续的。 # # > Most successful practical applications I have seen with images actually rely on VAEs, # but GANs are extremely popular in the world of academic research—at least circa # 2016-2017. You will find out how they work and how to implement one in the next # section. # # 很多成功的实际图像应用都依赖着VAE,但是GAN在学术领域却是异常流行,至少在2016-2017左右是这样。你可以在下一节看到GAN的工作原理。 # # > To play further with image generation, I suggest working with the CelebA dataset, # "Large-scale Celeb Faces Attributes". It’s a free-to-download image dataset with more # than 200,000 celebrity portraits. It’s great for experimenting with concept vectors in # particular. It beats MNIST for sure. 
# # 要进一步学习验证图像生成,作者建议使用CelebA数据集,这是一个“大规模名人脸谱数据集”。它可以免费下载,内含超过20万个名人肖像。它对于实验概念向量非常合适。肯定比MNIST数据集要好。 # ## 8.5 生成对抗网络简介 # # > Generative Adversarial Networks (GANs), introduced in 2014 by <NAME>, are an # alternative to VAEs for learning latent spaces of images. They enable the generation of # fairly realistic synthetic images by forcing the generated images to be statistically almost # indistinguishable from real ones. # # 生成对抗网络(GAN)是2014年由<NAME>提出的,它是除VAE外另一种学习图像潜空间的方法。它能生成相当真实的合成图像,通过让生成图像的统计学特征与真实图像基本一致来实现。 # # > An intuitive way to understand GANs is to imagine a forger trying to create a fake # Picasso painting. At first, the forger is pretty bad at the task. He mixes some of his fakes # with authentic Picassos, and shows them all to an art dealer. The art dealer makes an # authenticity assessment for each painting, and gives the forger feedback about what # makes a Picasso look like a Picasso. The forger goes back to his atelier to prepare some # new fakes. As times goes on, the forger becomes increasingly competent at imitating the # style of Picasso, and the art dealer becomes increasingly expert at spotting fakes. In the # end, we have on our hands some excellent fake Picassos. # # 理解GAN的一个直观方式是想象有一个伪造者尝试伪造毕加索的画作。一开始的时候伪造者很不擅长这个任务。他将自己伪造的作品混入毕加索的真迹当中展示给艺术鉴赏人士。鉴赏人对每幅画作进行真伪评价,然后反馈给伪造者评判毕加索真迹的信息。伪造者根据这些反馈信息,回到他的工作室重新绘制一些新的赝品。随着时间推进,伪造者越来越擅长仿制毕加索画作这项任务,而同时鉴赏人也在鉴别赝品领域变得越来越专业。最终,我们就能得到一些非常逼真的毕加索赝品。 # # > That’s what GANs are: a forger network network and an expert network, each being # trained to best the other. As such, a GAN is made of two parts: # # > - A generator network , which takes as input a random vector (a random point in the latent # space) and decodes it into a synthetic image. # - A discriminator network (also called adversary ), which takes as input an image (real or # synthetic), and must predict whether the image came from the training set or was created # by the generator network. # # 这就是GAN的构成:一个伪造者网络和一个专家网络,每一个都需要进行训练,以期能够打败另一个。所以GAN的组成包括: # # - 一个生成网络,接收随机向量作为输入(潜空间中的一个随机点)然后将它解码成一个合成图像。 # - 一个鉴别器网络(也叫作对抗网络),接收一张图像(真实或合成)作为输入,然后判断这张图像来自训练集还是由生成网络生成。 # # > The generator network is trained to be able to fool the discriminator network, and # thus it evolves towards generating increasingly realistic images as training goes on: # artificial images that look indistinguishable from real ones—to the extent that it is # impossible for the discriminator network to tell the two apart. Meanwhile, the # discriminator is constantly adapting to the gradually improving capabilities of the # generator, which sets a very high bar of realism for the generated images. Once training # is over, the generator is capable of turning any point in its input space into a believable # image. Unlike VAEs, this latent space has less explicit guarantees of meaningful # structure, and in particular, it isn’t continuous. # # 生成网络的训练目标是击败鉴别器网络,因此它会随着训练过程的推进而产生越发真实的图像:这些图像看起来无法与真实图像区分出来,最终目标是使得鉴别器网络无法分出真假。而同时鉴别器也在不断的从生成器中改进鉴别能力,这样就能不断提升鉴别生成图像真伪的标准。当训练完成后,生成器能够将任何潜空间的点转换成一张难以分辨真伪的图像。不同于VAE,这里的潜空间没有明确有意义的结构,或者更确切的说,它不是连续的。 # # ![GAN](imgs/f8.15.jpg) # # 图8-15 生成对抗网络原理 # > Remarkably, a GAN is a system where the optimization minimum isn’t fixed—unlike # in any other training setup you have encountered in this book before. Normally, gradient # descent consists in rolling down some hills in a static loss landscape. However, with a # GAN, every step taken down the hill changes the entire landscape by a bit. 
It’s a dynamic # system where the optimization process is seeking not a minimum, but rather an # equilibrium between two forces. For this reason, GANs are notoriously very difficult to # train—getting a GAN to work require lots of careful tuning of the model architecture and # training parameters. # # GAN不像本书之前介绍过的所有训练过程那样,它的最小优化值不是固定的。通常来说梯度下降就像是在一个静态的损失空间中下山一样。然而在GAN中,每次下山的一步都会稍微的改变整个损失空间一点。所以这是一个动态的系统,这里的优化目标不再是寻找一个最优最小值,而是在两股力量之间寻找平衡。正因为此,GAN具有非常高的训练难度,要训练出一个成功的GAN模型,需要许多精细的模型结构和训练参数的调整。 # # ![GAN example](imgs/f8.16.jpg) # # 图8-16 Mike Tyka使用多阶段GAN从人脸数据集上生成的图像。[Mike Tyka的网站](https://miketyka.com/) # ### 8.5.1 一个GAN的概要实现 # # > In what follows, we explain how to implement a GAN in Keras, in its barest form—since # GANs are quite advanced, diving deeply into the technical details would be out of scope # for us. Our specific implementation will be a deep convolutional GAN, or DCGAN: a # GAN where the generator and discriminator are deep convnets. In particular, it leverages # a Conv2DTranspose layer for image upsampling in the generator. # # 下面我们来介绍如何在Keras中实现一个GAN,当然是最原始的形式,因为GAN相当高深,深入到内部的技术细节将会超出本书的范围。我们这里的实现将会是深度卷积生成对抗网络,简称DCGAN:也就是生成器和鉴别器都是深度卷积网络的GAN。具体来说,它使用了`Conv2DTranspose`层来实现生成器的上采样。 # # > We will train our GAN on images from CIFAR10, a dataset of 50,000 32x32 RGB # images belong to 10 classes (5,000 images per class). To make things even easier, we # will only use images belonging to the class "frog". # # 我们会使用CIFAR10图像数据集来训练我们的GAN,这是一个有着5万张32x32 RGB图像的数据集,这些图像分别归属于10个不同的种类(每个类别5000张图像)。为了使得任务更加简单,我们仅仅使用那些类别是“青蛙”的图像。 # # > Schematically, our GAN looks like this: # # > - A generator network maps vectors of shape (latent_dim,) to images of shape (32, # 32, 3) . # - A discriminator network maps images of shape (32, 32, 3) to a binary score estimating # the probability that the image is real. # - A gan network chains the generator and the discriminator together: gan(x) = # discriminator(generator(x)) . Thus this gan network maps latent space vectors to # the discriminator’s assessment of the realism of these latent vectors as decoded by the # generator. # - We train the discriminator using examples of real and fake images along with # "real"/"fake" labels, as we would train any regular image classification model. # - To train the generator, we use the gradients of the generator’s weights with regard to the # loss of the gan model. This means that, at every step, we move the weights of the # generator in a direction that will make the discriminator more likely to classify as "real" # the images decoded by the generator. I.e. we train the generator to fool the discriminator. # # 总的来说我们的GAN就是如下的形式: # # - 一个生成器网络将形状为(latent_dim,)的向量解码成形状为(32, 32, 3)的图像。 # - 一个鉴别器网络将形状为(32, 32, 3)的图像输出成二分分类,估计图像为真的概率。 # - 一个GAN网络将生成器和鉴别器串联起来:`gan(x) = discriminator(generator(x))`。因此整个GAN网络将潜空间向量映射成鉴别器对其生成图像的真伪评估。 # - 我们使用真实的以及伪造的图像来训练鉴别器,同时包括这些图像的“真伪”标签,就像我们在训练一个普通的图像分类模型一样。 # - 为了训练生成器,我们使用整个GAN模型的损失来对生成器权重进行梯度运算。这意味着,每一次我们都将其权重朝着让鉴别器更容易认为图像为“真”的方向去移动一点点。这就是实际上训练生成器来欺骗鉴别器。 # ### 8.5.2 一些技巧 # # > Training GANs and tuning GAN implementations is notoriously difficult. There are a # number of known "tricks" that one should keep in mind. Like most things in deep # learning, it is more alchemy than science: these tricks are really just heuristics, not # theory-backed guidelines. They are backed by some level of intuitive understanding of # the phenomenon at hand, and they are known to work well empirically, albeit not # necessarily in every context. 
# # 训练和调参GAN实现起来是出了名的困难。这里有一些总结出来的“技巧”应该被记住。就像很多其他在深度学习中的技巧一样,它们更像炼金术而不是科学:这些技巧实际上都是启发性算法而非具有理论支持的准则。它们都是在实际实验中根据现象使用某种程度的直觉理解获得的,它们在很多场合下都工作良好,尽管并非每种环境中都需要。 # # > Here are a few of the tricks that we leverage in our own implementation of a GAN # generator and discriminator below. It is not an exhaustive list of GAN-related tricks; you # will find many more across the GAN literature. # # > - We use tanh as the last activation in the generator, instead of sigmoid , which would be # more commonly found in other types of models. # - We sample points from the latent space using a normal distribution (Gaussian # distribution), not a uniform distribution. # - Stochasticity is good to induce robustness. Since GAN training results in a dynamic # equilibrium, GANs are likely to get "stuck" in all sorts of ways. Introducing randomness # during training helps prevent this. We introduce randomness in two ways: 1) we use # dropout in the discriminator, 2) we add some random noise to the labels for the # discriminator. # - Sparse gradients can hinder GAN training. In deep learning, sparsity is often a desirable # property, but not in GANs. There are two things that can induce gradient sparsity: 1) max # pooling operations, 2) ReLU activations. Instead of max pooling, we recommend using # strided convolutions for downsampling, and we recommend using a LeakyReLU layer # instead of a ReLU activation. It is similar to ReLU but it relaxes sparsity constraints by # allowing small negative activation values. # - In generated images, it is common to see "checkerboard artifacts" caused by unequal # coverage of the pixel space in the generator. To fix this, we use a kernel size that is # divisible by the stride size, whenever we use a strided Conv2DTranpose or Conv2D in # both the generator and discriminator. # # 下面列出了我们的生成器和鉴别器GAN实现中使用到的一些技巧。这当然不是一份有关GAN技巧的完整列表,你可以在GAN相关的文献中找到更多的技巧。 # # - 我们使用`tanh`作为生成器最后的激活函数,而不是`sigmoid`,后者是其他模型中经常使用的激活函数。 # - 我们使用正态分布(高斯分布)来从潜空间中取样,而不是均匀分布。 # - 随机性能够更好地提供健壮性。因为GAN的训练结果是一个动态平台,所以GAN很容易在各种情况下卡住。在训练中引入随机性能够帮助避免这一点。我们使用两种方式引入随机性:1)在鉴别器中使用dropout,2)在鉴别器的标签中加入一些随机噪音。 # - 稀疏梯度会阻碍GAN的训练。在深度学习中稀疏性通常是希望的特点,但在GAN中不是这样。有两个做法会带来稀疏性:1)最大池化操作,2)线性整流单元激活。所以我们推荐使用步进卷积对图像进行下取样来取代最大池化,使用`LeakyReLU`层来取代`ReLU`激活。`LeakyReLU`类似于`ReLU`,但是它允许存在小数值的负数以减低稀疏性。 # - 在生成的图像中很容易观察到“棋盘效应”,这是由于生成器的在像素空间的不平衡导致的。为了修正这一点,我们使用的核大小能够被步进大小整除,在生成器和鉴别器中无论使用`Conv2DTranspose`还是`Conv2D`层时都保证这一点。 # # ![checkboard artifact](imgs/f8.17.jpg) # # 图8-17 棋盘效应,由于步进值和核大小值不匹配造成的像素空间不平衡,GAN中一个著名的坑 # ### 8.5.3 生成器 # # > First, we develop a generator model, which turns a vector (from the latent # space—during training it will sampled at random) into a candidate image. One of the # many issues that commonly arise with GANs is that the generator gets stuck with # generated images that look like noise. A possible solution is to use dropout on both the # discriminator and generator. 
# # 首先我们构建生成器模型,它能将一个向量(训练时从潜空间中随机取样获得)转换成一个候选图像。在GAN中有一个经常会碰到的问题就是生成器卡在不停生成噪音的阶段。一个可以采取的措施就是在鉴别器和生成器中都加上dropout层。 # + import tensorflow.keras from tensorflow.keras import layers import numpy as np latent_dim = 32 height = 32 width = 32 channels = 3 generator_input = keras.Input(shape=(latent_dim,)) # 首先将输入转换成一个16x16具有128个通道的特征地图 x = layers.Dense(128 * 16 * 16)(generator_input) x = layers.LeakyReLU()(x) x = layers.Reshape((16, 16, 128))(x) # 然后加入一个卷积层 x = layers.Conv2D(256, 5, padding='same')(x) x = layers.LeakyReLU()(x) # 上采样到32x32 x = layers.Conv2DTranspose(256, 4, strides=2, padding='same')(x) x = layers.LeakyReLU()(x) # 在增加一些卷积层 x = layers.Conv2D(256, 5, padding='same')(x) x = layers.LeakyReLU()(x) x = layers.Conv2D(256, 5, padding='same')(x) x = layers.LeakyReLU()(x) # 产生一个32x31 1个通道的特征地图 x = layers.Conv2D(channels, 7, activation='tanh', padding='same')(x) generator = keras.models.Model(generator_input, x) generator.summary() # - # ### 8.5.4 鉴别器 # # > Then, we develop a discriminator model, that takes as input a candidate image (real or # synthetic) and classifies it into one of two classes, either "generated image" or "real # image that comes from the training set". # # 然后我们就来构建鉴别器模型,他接收一张候选图像(真实的或合成的)作为输入,并将其分为两类,“生成的图像”或“来自训练集的真实图像”。 # + discriminator_input = layers.Input(shape=(height, width, channels)) x = layers.Conv2D(128, 3)(discriminator_input) x = layers.LeakyReLU()(x) x = layers.Conv2D(128, 4, strides=2)(x) x = layers.LeakyReLU()(x) x = layers.Conv2D(128, 4, strides=2)(x) x = layers.LeakyReLU()(x) x = layers.Conv2D(128, 4, strides=2)(x) x = layers.LeakyReLU()(x) x = layers.Flatten()(x) # 加入一个dropout层,非常重要的技巧 x = layers.Dropout(0.4)(x) # 分类器层 x = layers.Dense(1, activation='sigmoid')(x) discriminator = keras.models.Model(discriminator_input, x) discriminator.summary() # 为了令训练逐渐稳定,我们在优化器中使用学习率衰减和梯度裁剪 discriminator_optimizer = keras.optimizers.RMSprop(lr=0.0008, clipvalue=1.0, decay=1e-8) discriminator.compile(optimizer=discriminator_optimizer, loss='binary_crossentropy') # - # ### 8.5.5 对抗网络 # # > Finally, we setup the GAN, which chains the generator and the discriminator. This is the # model that, when trained, will move the generator in a direction that improves its ability # to fool the discriminator. This model turns latent space points into a classification # decision, "fake" or "real", and it is meant to be trained with labels that are always "these # are real images". So training gan will updates the weights of generator in a way that # makes discriminator more likely to predict "real" when looking at fake images. Very # importantly, we set the discriminator to be frozen during training (non-trainable): its # weights will not be updated when training gan . If the discriminator weights could be # updated during this process, then we would be training the discriminator to always # predict "real", which is not what we want! 
# # 最后我们构建GAN,它将生成器和鉴别器串联在一起。这个模型的目标是当训练时,我们会将生成器的权重朝着改进它能更好欺骗鉴别器的方向移动。这个模型将潜空间的点转换成最终的分类预测,“赝品”或“真迹”,模型设计的宗旨就是使用“这些是真实的图像”这样的标签来进行训练。因此训练GAN会更新生成器的权重,期望更新后生成的合成图像更容易使得鉴别器认为是真的。非常重要的一点是,在训练过程中我们会冻结鉴别器权重(不可训练的):鉴别器的权重在训练GAN过程中不会更新。因为如果过程中更新了鉴别器的权重,最终我们会训练出永远预测为“真实”图像的鉴别器,这显然不是我们希望的。 # + # 设置鉴别器权重不可训练(仅对整个GAN模型而言) discriminator.trainable = False gan_input = keras.Input(shape=(latent_dim,)) gan_output = discriminator(generator(gan_input)) gan = keras.models.Model(gan_input, gan_output) gan_optimizer = keras.optimizers.RMSprop(lr=0.0004, clipvalue=1.0, decay=1e-8) gan.compile(optimizer=gan_optimizer, loss='binary_crossentropy') # - # ### 8.5.6 如何训练我们的DCGAN # # > Now we can start training. To recapitulate, this is schematically what the training loop # looks like: # # 现在可以开始训练了。整个训练的循环过程如下: # # ```text # for each epoch: # * 从潜空间中取样点 (随机噪音). # * 使用这个随机噪音在生成器中生成图像 # * 将生成的图像混入真实图像中 # * 使用这些混合的图像来训练鉴别器,使用相应的目标标签,“真实”或者“合成” # * 从潜空间中取样新的随机点 # * 使用这些随机向量训练GAN,这时的目标标签使用的是“这些都是真实图像”,用来更新生成器的权重 # ``` # # > Let’s implement it: # # 让我们来实现它: # # 译者注,以下代码修改了图像输出目录以及定时保存的间隔。 # + import os from tensorflow.keras.preprocessing import image # 载入CIFAR10数据集 (x_train, y_train), (_, _) = keras.datasets.cifar10.load_data() # 选择其中的青蛙图像(类别6) x_train = x_train[y_train.flatten() == 6] # 规范化数据 x_train = x_train.reshape((x_train.shape[0],) + (height, width, channels)).astype('float32') / 255. iterations = 10000 batch_size = 20 save_dir = os.path.join(os.environ['HOME'], 'gan_output') # 开始训练的循环 start = 0 for step in range(iterations): # 从潜空间中随机取样点 random_latent_vectors = np.random.normal(size=(batch_size, latent_dim)) # 将向量解码成合成图像 generated_images = generator.predict(random_latent_vectors) # 将合成图像混入真是图像 stop = start + batch_size real_images = x_train[start: stop] combined_images = np.concatenate([generated_images, real_images]) # 组装真是图像和合成图像的目标标签 labels = np.concatenate([np.ones((batch_size, 1)), np.zeros((batch_size, 1))]) # 在标签中加入随机噪音 - 非常重要的技巧 labels += 0.05 * np.random.random(labels.shape) # 训练鉴别器 d_loss = discriminator.train_on_batch(combined_images, labels) # 从潜空间中随机取样更多的点 random_latent_vectors = np.random.normal(size=(batch_size, latent_dim)) # 组装新的标签,说明“这些都是真实图像” misleading_targets = np.zeros((batch_size, 1)) # 训练生成器 (通过GAN模型,这时鉴别器的权重不可训练) a_loss = gan.train_on_batch(random_latent_vectors, misleading_targets) start += batch_size if start > len(x_train) - batch_size: start = 0 # 定时保存或绘制图像 if step % 100 == 99: # 保存模型参数 gan.save_weights('gan.h5') # 打印指标 print('discriminator loss:', d_loss) print('adversarial loss:', a_loss) # 保存一张生成图像 img = image.array_to_img(generated_images[0] * 255., scale=False) img.save(os.path.join(save_dir, 'generated_frog' + str(step) + '.png')) # 保存一张真是图像,用于做对比 img = image.array_to_img(real_images[0] * 255., scale=False) img.save(os.path.join(save_dir, 'real_frog' + str(step) + '.png')) # - # > When training, you may see your adversarial loss start increasing considerably while # your discriminative loss will tend to zero, i.e. your discriminator may end up dominating # your generator. If that’s the case, try reducing the discriminator learning rate and increase # the dropout rate of the discriminator. # # 当训练时,你有可能会看到你的对抗损失急剧增加而鉴别损失趋向于0,也就是说你的鉴别器开始完全支配你的生成器了。如果出现了这种情况,尝试减小鉴别器的学习率和增加鉴别器的dropout比率。 # # ![frog generated images](imgs/f8.18.jpg) # # 图8-18 图中每一列都有两张合成图像和一张真实图像,你可以肉眼识别吗。答案是真是图像分别在中间、顶部、底部、中间。 # ### 8.5.7 小结 # # > - GANs consist in a generator network coupled with a discriminator network. 
The # discriminator is trained to tell apart the output of the generator and real images from a # training dataset, while the generator is trained to fool the discriminator. Remarkably, the # generator nevers sees images from the training set directly; the information it has about # the data comes from the discriminator. # - GANs are difficult to train, because training a GAN is a dynamic process rather than a # simple descent process with a fixed loss landscape. Getting a GAN to train correctly # requires leveraging a number of heuristic tricks, as well as extensive tuning. # - GANs can potentially produce highly realistic images. However, unlike VAEs, the latent # space that they learn does not have a neat continuous structure, and thus may not be # suited for certain practical applications, such as image editing via latent space concept # vectors. # # - GAN包含着一个生成网络和一个鉴别器网络。鉴别器训练来对真实数据集图像和生成图像进行分类,而生成器训练来欺骗鉴别器。这里很重要的一点是,生成器从未直接接触训练集中的图像,它的信息完全来自于鉴别器的反馈信息。 # - GAN训练难度很高,因为训练GAN是一个动态过程,而不是传统的静态空间梯度下降过程。要使得GAN正确的训练需要使用一系列启发性技巧,和繁重的调参工作。 # - GAN可以生成高度真实的图像。然而不像VAE,它获得的潜空间并没有干净的连续结构,所以它也不能胜任某些应用场景,比如使用潜空间概念向量进行图像编辑。 # ## 8.6 总结:生成深度学习 # # > This is the end of the chapter on creative applications of deep learning, where deep nets # go beyond simply annotating existing content, and start generating their own. You have # just learned: # # > - How to generate sequence data, one timestep at a time. This is applicable to text # generation, but also to note-by-note music generation, or any other type of timeseries # data. # - How Deep Dreams work: by maximizing convnet layer activations through gradient # ascent in input space. # - How to perform style transfer, where a content image and a style image get combined to # produce interesting-looking results. # - What GANs and VAEs are, how they can be used for dreaming up new images, and how # latent space "concept vectors" could be used for image edition. # # 这里要结束本章,深度学习的创造性应用了,本章让你看到深度网络已经超越标记已经存在的内容范畴,进入到生成内容的范畴了。你在本章了解了: # # - 如何生成序列数据,一次产生一个数据。这广泛应用在文本生成上,不过也可以应用在音乐生成或其他类型的时间序列数据上。 # - Deep Dream是如何工作的:通过在输入空间上最大化梯度增强的激活结果。 # - 如何进行风格迁移,用来将内容图像和风格图像组合在一起生成很有趣的结果。 # - GAN和VAE是什么,它们是如何产生全新的图像的,还有潜空间“概念向量”如何用来进行图像编辑。 # # > These few techniques only cover the very basics of this fast-expanding field. There’s # a lot more to discover out there—generative deep learning would be deserving of an # entire book of its own. # # 这些技术仅仅覆盖了这个快速扩张领域的最基础部分。这个领域还有很多本章未阐述却值得发现的内容,生成深度学习这个主题完全可以写一本书。 # << [第七章:高级深度学习最佳实践](Chapter7_Advanced_deep_learning_best_pratices.ipynb)|| [目录](index.md) || [第九章:总结](Chapter9_Conclusions.ipynb) >>
Chapter8_Generative_deep_learning.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # YOLOV3 training example # # + from yolo import YOLO, detect_video from PIL import Image import matplotlib.pyplot as plt from yolo3.model import yolo_eval, yolo_body, tiny_yolo_body from keras.layers import Input import numpy as np import keras.backend as K from keras.layers import Input, Lambda from keras.models import Model from keras.optimizers import Adam from keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau, EarlyStopping from yolo3.model import preprocess_true_boxes, yolo_body, tiny_yolo_body, yolo_loss from yolo3.utils import get_random_data import tensorflow as tf from keras.utils import multi_gpu_model from keras.models import load_model # + def get_classes(classes_path): '''loads the classes''' with open(classes_path) as f: class_names = f.readlines() class_names = [c.strip() for c in class_names] return class_names def get_anchors(anchors_path): '''loads the anchors from a file''' with open(anchors_path) as f: anchors = f.readline() anchors = [float(x) for x in anchors.split(',')] return np.array(anchors).reshape(-1, 2) def create_model(input_shape, anchors, num_classes, load_pretrained=True, freeze_body=2, weights_path='model_data/yolo.h5'): '''create the training model''' image_input = Input(shape=(None, None, 3)) h, w = input_shape num_anchors = len(anchors) y_true = [Input(shape=(h//{0:32, 1:16, 2:8}[l], w//{0:32, 1:16, 2:8}[l], \ num_anchors//3, num_classes+5)) for l in range(3)] model_body = yolo_body(image_input, num_anchors//3, num_classes) print('Create YOLOv3 model with {} anchors and {} classes.'.format(num_anchors, num_classes)) if load_pretrained: model_body.load_weights(weights_path, by_name=True, skip_mismatch=True) print('Load weights {}.'.format(weights_path)) if freeze_body in [1, 2]: # Freeze darknet53 body or freeze all but 3 output layers. 
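            # The tuple index on the next line chooses how much of the network to freeze:
            #   freeze_body == 1 -> num = 185, freezing the Darknet-53 feature extractor;
            #   freeze_body == 2 -> num = -3, so range(num) is empty and no layers are frozen.
            # If the intent is "freeze all but the 3 output layers" (as the comment above says),
            # other versions of this script use len(model_body.layers) - 3 here instead.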
num = (185, -3)[freeze_body-1] for i in range(num): model_body.layers[i].trainable = False print('Freeze the first {} layers of total {} layers.'.format(num, len(model_body.layers))) model_loss = Lambda(yolo_loss, output_shape=(1,), name='yolo_loss', arguments={'anchors': anchors, 'num_classes': num_classes, 'ignore_thresh': 0.5})( [*model_body.output, *y_true]) model = Model([model_body.input, *y_true], model_loss) return model def data_generator(annotation_lines, batch_size, input_shape, anchors, num_classes): '''data generator for fit_generator''' n = len(annotation_lines) #print("annotation_lines: ", n) i = 0 while True: image_data = [] box_data = [] for b in range(batch_size): # if i==0: # np.random.shuffle(annotation_lines) #随机裁剪、旋转、变换颜色(hue)、变换饱和度(saturation), 变换曝光度(exposure shifts) # random 为 False的时候会出错 导致loss 为 non image, box = get_random_data(annotation_lines[i], input_shape, random=True) image_data.append(image) box_data.append(box) i = (i+1) % n image_data = np.array(image_data) box_data = np.array(box_data) # 每一个grid cell内的box是否含有object 以及如果含有的情况下的具体box信息 y_true = preprocess_true_boxes(box_data, input_shape, anchors, num_classes) yield [image_data, *y_true], np.zeros(batch_size) def data_generator_wrapper(annotation_lines, batch_size, input_shape, anchors, num_classes): n = len(annotation_lines) if n==0 or batch_size<=0: return None return data_generator(annotation_lines, batch_size, input_shape, anchors, num_classes) def create_tiny_model(input_shape, anchors, num_classes, load_pretrained=True, freeze_body=2, weights_path='model_data/yolo-tiny.h5'): '''create the training model, for Tiny YOLOv3''' K.clear_session() # get a new session image_input = Input(shape=(None, None, 3)) h, w = input_shape num_anchors = len(anchors) y_true = [Input(shape=(h//{0:32, 1:16}[l], w//{0:32, 1:16}[l], \ num_anchors//2, num_classes+5)) for l in range(2)] model_body = tiny_yolo_body(image_input, num_anchors//2, num_classes) print('Create Tiny YOLOv3 model with {} anchors and {} classes.'.format(num_anchors, num_classes)) if load_pretrained: model_body.load_weights(weights_path, by_name=True, skip_mismatch=True) print('Load weights {}.'.format(weights_path)) if freeze_body in [1, 2]: # Freeze the darknet body or freeze all but 2 output layers. 
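            # Same selection pattern as in create_model above: freeze_body == 1 freezes the
            # first 20 layers (the tiny Darknet backbone), while freeze_body == 2 freezes
            # everything except the last 2 output layers, leaving only the heads trainable.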
num = (20, len(model_body.layers)-2)[freeze_body-1] for i in range(num): model_body.layers[i].trainable = False print('Freeze the first {} layers of total {} layers.'.format(num, len(model_body.layers))) model_loss = Lambda(yolo_loss, output_shape=(1,), name='yolo_loss', arguments={'anchors': anchors, 'num_classes': num_classes, 'ignore_thresh': 0.7})( [*model_body.output, *y_true]) model = Model([model_body.input, *y_true], model_loss) return model # + annotation_path = 'model_data/train_coco.txt' log_dir = 'logs/000/' classes_path = 'model_data/coco_classes.txt' anchors_path = 'model_data/yolo_anchors.txt' class_names = get_classes(classes_path) num_classes = len(class_names) anchors = get_anchors(anchors_path) input_shape = (416,416) # multiple of 32, hw is_tiny_version = len(anchors)==6 # default setting if is_tiny_version: model = create_tiny_model(input_shape, anchors, num_classes, freeze_body=2, weights_path='model_data/downloaded_coco_tiny.h5') else: model = create_model(input_shape, anchors, num_classes, freeze_body=2, weights_path='model_data/downloaded_coco.h5') # make sure you know what you freeze logging = TensorBoard(log_dir=log_dir) checkpoint = ModelCheckpoint(log_dir + 'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5', monitor='val_loss', save_weights_only=True, save_best_only=True, period=3) reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=3, verbose=1) early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=1) with open(annotation_path) as f: lines = f.readlines() #lines = lines[:10000] print ("Training data size: ", len(lines)) print(lines[0]) np.random.seed(10101) np.random.shuffle(lines) np.random.seed(None) val_split = 0.1 num_val = int(len(lines)*val_split) num_train = len(lines) - num_val # add gpu growth flags #tf_config.gpu_options.allow_growth = True #tf_config.gpu_options.per_process_gpu_memory_fraction = 0.1 #model = multi_gpu_model(model, gpus=2) if True: model.compile(optimizer=Adam(lr=1e-3), loss={ # use custom yolo_loss Lambda layer. 'yolo_loss': lambda y_true, y_pred: y_pred}) batch_size = 8 print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size)) model.fit_generator(data_generator_wrapper(lines[:num_train], batch_size, input_shape, anchors, num_classes), steps_per_epoch=max(1, num_train//batch_size), validation_data=data_generator_wrapper(lines[num_train:], batch_size, input_shape, anchors, num_classes), validation_steps=max(1, num_val//batch_size), epochs=50, initial_epoch=0, callbacks=[logging, checkpoint]) model.save_weights(log_dir + 'trained_weights_stage_1.h5') # Unfreeze and continue training, to fine-tune. # Train longer if the result is not good. 
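# Stage 2 below: every layer is unfrozen, the model is recompiled with a 10x smaller
# learning rate (1e-4) so the pretrained weights are fine-tuned gently, and training
# resumes from epoch 50 with ReduceLROnPlateau and EarlyStopping added to the callbacks.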
if True: for i in range(len(model.layers)): model.layers[i].trainable = True model.compile(optimizer=Adam(lr=1e-4), loss={'yolo_loss': lambda y_true, y_pred: y_pred}) # recompile to apply the change print('Unfreeze all of the layers.') batch_size = 8 # note that more GPU memory is required after unfreezing the body print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size)) model.fit_generator(data_generator_wrapper(lines[:num_train], batch_size, input_shape, anchors, num_classes), steps_per_epoch=max(1, num_train//batch_size), validation_data=data_generator_wrapper(lines[num_train:], batch_size, input_shape, anchors, num_classes), validation_steps=max(1, num_val//batch_size), epochs=100, initial_epoch=50, callbacks=[logging, checkpoint, reduce_lr, early_stopping]) model.save_weights(log_dir + 'trained_weights_final.h5') # -
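# As a minimal inference sketch (assuming the full, non-tiny model was trained), the bare
# YOLOv3 body can be rebuilt without the loss layer and the fine-tuned weights loaded back.
# Turning the raw feature maps into boxes would then go through `yolo_eval`, imported at the
# top of this notebook; its call is not shown here.

# +
inference_model = yolo_body(Input(shape=(None, None, 3)), len(anchors)//3, num_classes)
inference_model.load_weights(log_dir + 'trained_weights_final.h5')
inference_model.summary()
# -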
traning.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# A number of common questions come up about basic numbers reporting for the final list. This notebook explores some ways that we can take our intermediate SGCN summary, with the results of taxonomic authority consultation, and answer those questions. Pandas grouping is particularly useful in this context.

import pandas as pd

sgcn_summary = pd.read_csv('sgcn_taxonomy_check.csv', low_memory=False)

# Based on the taxonomic lookup process, we end up with final identified taxa at various levels of the taxonomic hierarchy. We record that detail in a taxonomic_rank property retrieved from the matching document in ITIS or WoRMS. In many cases, we want to report only on taxa identified at the species level, which we do in subsequent steps, but we should look at the distribution of the data across ranks first.

for rank, group in sgcn_summary.groupby("taxonomic_rank"):
    print(rank, len(group))

# We may also want to limit our exploration to just those species that are included in the latest reporting period, 2015. This code block sets up a new dataframe filtered to only species reported in 2015.

matched_species = sgcn_summary.loc[(sgcn_summary["taxonomic_rank"] == "Species") & (sgcn_summary["2015"].notnull())]
print(len(matched_species))

# Now we can look at the distribution of species that were successfully aligned with taxonomic authorities (aka the National List) by the high-level taxonomic group assigned from the mapping of logical groups to higher-level taxonomy.

for tax_group, group in matched_species.groupby("taxonomic_group"):
    print(tax_group, len(group))

# We might also want to look further at what happened in the taxonomic matching process. We generated a field in the processing metadata that captures the overall method used in matching a submitted name string to a taxon identifier.
#
# * Exact Match - means that the submitted name was found to match exactly one valid ("accepted" in the case of ITIS plants) taxon
# * Fuzzy Match - means that the original submitted name had a misspelling of some kind but that we were able to find it with a fuzzy search
# * Followed Accepted TSN or Followed Valid AphiaID - means that the original submitted name string found a match to a taxon that is no longer considered valid and our process followed the taxonomic reference to retrieve a valid taxon for use
# * Found multiple matches - means that our search on the submitted name string found multiple matches for the name (often homonyms) but that only a single valid taxon was available to give us an acceptable match

for match_method, group in matched_species.groupby("match_method"):
    print(match_method, len(group))

# If we really want to dig into the details, we can pull out just those cases where the submitted name string does not match the final valid scientific name we matched to in the taxonomic authority. This code block outputs a subset dataframe with just the pertinent details.

matched_species.loc[matched_species["lookup_name"] != matched_species["valid_scientific_name"]][["lookup_name","valid_scientific_name","match_method"]]
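# As a quick extension, the two groupby views above can be combined into a single table: a
# crosstab of taxonomic_group against match_method shows, for each high-level group, how its
# species-level names were matched.

pd.crosstab(matched_species["taxonomic_group"], matched_species["match_method"])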
Explore Summarized Data.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

from bs4 import BeautifulSoup as bs
from splinter import Browser
import pandas as pd
import requests as req
import time
from selenium import webdriver

# !which chromedriver

# Point splinter at the local chromedriver (backslashes escaped so the Windows path is read literally)
executable_path = {"executable_path": "C:\\Windows\\chromedriver"}
browser = Browser("chrome", **executable_path, headless=False)

# Scrape the latest NASA Mars news headline and teaser paragraph
nasa = "https://mars.nasa.gov/news/"
browser.visit(nasa)

# +
html = browser.html
soup = bs(html, "html.parser")

headline = soup.find('div', class_='list_text').find(class_='content_title').find('a').text
teaser = soup.find('div', class_='article_teaser_body').text

print(headline)
print(teaser)
# -

# Scrape the current JPL featured Mars image and build its full URL
nasa_featured = "https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars"
browser.visit(nasa_featured)

# +
html_featured = browser.html
soup = bs(html_featured, "html.parser")

jpl = "https://www.jpl.nasa.gov"
feat_img = soup.find_all('img')[3]["src"]
featured_image = jpl + feat_img
print(featured_image)
# -

# Read the Mars facts table from space-facts.com with pandas
mars_facts = "https://space-facts.com/mars/"
facts_table = pd.read_html(mars_facts)
facts_table[0]
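# A small follow-on sketch: the facts table is often carried forward as an HTML string for
# rendering elsewhere. The column names assigned below are an assumption; `pd.read_html`
# returns generic integer labels.

# +
facts_df = facts_table[0]
facts_df.columns = ["Description", "Value"]  # assumed, human-readable names
facts_html = facts_df.to_html(index=False)
facts_html
# -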
.ipynb_checkpoints/mission_to_mars-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:scratch_env] # language: python # name: conda-env-scratch_env-py # --- # # This notebook processes CAFE c2 atmospheric daily data for building climatologies. Only the last 100 years are used. # + code_folding=[0] # Import packages ----- import pandas as pd import xarray as xr import numpy as np from ipywidgets import FloatProgress from dateutil.relativedelta import relativedelta # - # #### Initialise # + code_folding=[0] # Standard naming ----- fields = pd.DataFrame( \ {'name_CAFE': ['ucomp', 'vcomp', 'temp', 'sphum', 'hght', 'lwflx', 'shflx', 'tau_x', 'tau_y', 't_ref', 'q_ref', 'u_ref', 'v_ref', 't_surf', 'h500', 'precip', 'lwdn_sfc', 'olr', 'swdn_sfc', 'swup_toa'], 'name_std' : ['u', 'v', 'temp', 'sphum', 'gh', 'lwf', 'shf', 'tau_x', 'tau_y', 't_ref', 'q_ref', 'u_ref', 'v_ref', 't_s', 'h500', 'precip', 'lwf_dn_s', 'olr', 'swf_dn_s', 'swf_up_toa']} ) name_dict = fields.set_index('name_CAFE').to_dict()['name_std'] fields # - # #### Only use last 100 years # + code_folding=[0] # Loop over all paths ----- base = '/OSM/CBR/OA_DCFP/data2/model_output/CAFE/controls/c2/OUTPUT/' years = range(400,500) paths = [] for year in years: path = base + 'atmos_daily_0' + str(year) + '_01_01.nc' paths.append(path) ds = xr.open_mfdataset(paths, autoclose=True) \ .drop(['average_T1','average_T2','average_DT','time_bounds']) \ .rename(name_dict) if 'latb' in ds.dims: ds = ds.rename({'latb':'lat_2','lonb':'lon_2'}) # + code_folding=[0] # Use year 2016 as time ----- path = '/OSM/CBR/OA_DCFP/data/model_output/CAFE/forecasts/v1/yr2016/mn1/OUTPUT.1/atmos_daily*.nc' dataset = xr.open_mfdataset(path, autoclose=True) time_use = xr.concat([dataset.time[:59], dataset.time[60:366]],dim='time') time_ly = dataset.time[59] # + code_folding=[0] # Make month_day array of month-day ----- m = [str(ds.time.values[i].timetuple()[1]).zfill(2) + '-' for i in range(len(ds.time))] d = [str(ds.time.values[i].timetuple()[2]).zfill(2) for i in range(len(ds.time))] md = np.core.defchararray.add(m, d) # Replace time array with month_day array and groupby ----- ds['time'] = md clim = ds.groupby('time').mean(dim='time',keep_attrs=True) clim['time'] = time_use # + code_folding=[0] # Replicate Feb 28th as Feb 29th to deal with leap years ----- clim_ly = clim.copy().sel(time='2016-02-28') clim_ly['time'] = np.array([time_ly.values]) clim = xr.auto_combine([clim,clim_ly]).sortby('time') # + code_folding=[0] # Save the climatology ----- save_fldr = '/OSM/CBR/OA_DCFP/data/intermediate_products/pylatte_climatologies/' clim.to_netcdf(save_fldr + 'cafe.c2.atmos.400_499.clim.nc', mode = 'w', encoding = {'time':{'dtype':'float','calendar':'JULIAN', 'units':'days since 0001-01-01 00:00:00'}}) # -
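# A quick sanity check on the saved file: reopening it should show a 366-day time axis,
# confirming that the replicated Feb 29th was written out alongside the 365 regular days.

# +
clim_check = xr.open_dataset(save_fldr + 'cafe.c2.atmos.400_499.clim.nc')
print(clim_check.sizes['time'])  # expected to be 366 after adding the leap day
print(clim_check.time.min().values, clim_check.time.max().values)
# -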
bin/support/build_cafe_c2_atmos_climatology.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: qiskit # language: python # name: qiskit # --- # # Task 4 # # Find the lowest eigenvalue of the following matrix: # # $$ # \begin{pmatrix} # 1 & 0 & 0 & 0 \\ # 0 & 0 & -1 & 0 \\ # 0 & -1 & 0 & 0 \\ # 0 & 0 & 0 & 1 \\ # \end{pmatrix} # $$ # # using VQE-like circuits, created by yourself from scratch. # # 1) Decomposing the desired matrix into pauli operators # # Since we want to measure a VQE-like circuit, we need to decompose the matrix into their pauli components. We have that the pauli matrices are: # # $$ # \sigma_x = \begin{pmatrix} # 0 & 1 \\ # 1 & 0 # \end{pmatrix} \hspace{1em} , \hspace{1em} \sigma_y = \begin{pmatrix} # 0 & -i \\ # i & 0 # \end{pmatrix} \hspace{1em} , \hspace{1em} \sigma_z = \begin{pmatrix} # 1 & 0 \\ # 0 & -1 # \end{pmatrix} # $$ # # Thus to get the pauli component of a $2^N$ x $2^N$ matrix $A$, as stated on [stackoverflow](https://quantumcomputing.stackexchange.com/questions/8725/can-arbitrary-matrices-be-decomposed-using-the-pauli-basis), we have: # # $$ # A = \sum_{ij} \frac{1}{4} h_{ij} \hspace{0.5em} \sigma_i \otimes \sigma_j # $$ # # And the components $h_{ij}$ are: # # $$ # h_{ij} = \frac{1}{4} \mathrm{Tr} \bigg[ (\sigma_i \otimes \sigma_j) \cdot A \bigg] # $$ # # Therefore we create a function `get_components_from_matrix` that does this decomposition for us. import numpy as np import matplotlib.pyplot as plt import qiskit # + # The matrix that we want to decompose A = np.array([[1,0,0,0],[0,0,-1,0],[0,-1,0,0],[0,0,0,1]]) # Defining Pauli Matrices pauli_x = np.array([[0,1],[1,0]]) ; pauli_y = np.array([[0,1j],[-1j,0]]) ; pauli_z = np.array([[1,0],[0,-1]]) basis = {'I': np.eye(2), 'X': pauli_x, 'Y': pauli_y, 'Z': pauli_z} # + from itertools import product def get_components_from_matrix(A, basis): """ Decompose a matrix on a given basis, in our case we decompose in the pauli basis {I,X,Y,Z}. Args: A (list, np.array): Matrix that you want to decompose. basis (dict): dictionary with name of the basis as keys and the basis matrix as values. Output: components_dict (dict): Dictionary with the basis name as keys and component as values. """ assert len(A) == len(A[0]), "your matrix is not square" assert not np.abs(int(np.log2(len(A))) - np.log2(len(A))) > 0, "the lenght of your matrix is not a power of 2" repeat_times = int(np.log2(len(A))) components_dict = {} for (name_1, pauli_1),(name_2, pauli_2) in product(basis.items(), repeat=repeat_times): components_dict[name_1 + name_2] = np.trace(1/4.*np.kron(pauli_1,pauli_2) @ A) return { key : val for key,val in components_dict.items() if val != 0} # + tags=[] components = get_components_from_matrix(A, basis) print("Decomposition:") for name, comp in components.items(): print(f" {name} : {comp}") # + tags=[] print("Result of the decomposition of A:") decomp = 0.5*np.kron(np.eye(2), np.eye(2)) - 0.5*np.kron(pauli_x, pauli_x) - 0.5*np.kron(pauli_y, pauli_y) + 0.5*np.kron(pauli_z, pauli_z) print(f"""{decomp}""") # - # Now we know that the desired matrix, has the following pauli decomposition: # # $$ # A = 0.5 I \otimes I \ - \ 0.5 \sigma_x \otimes \sigma_x \ - \ 0.5 \sigma_y \otimes \sigma_y \ + \ 0.5 \sigma_z \otimes \sigma_z # $$ # # 2) Creating VQE circuit # # Now we want to construct the VQE circuti and measure the pauli factors in order to get the lowest eigenvalue of $A$. 
This is done in three parts: # - 1) Construct the VQE Ansatz, which is a parametrized quantum circuit; # - 2) Construct XX, YY, and ZZ measurements; # - 3) Vary VQE Ansatz parameters and measure the eigenvalue. # ## 2.1) Creating Ansatz # # In order to create the Variational Ansatz, we need to create a parametrized circuit. Here I chose to use Qiskit, therefore we need to use `parameter` from the `qiskit.circuit` library. # + from qiskit import QuantumCircuit, QuantumRegister from qiskit import execute, Aer from qiskit.circuit import Parameter theta = Parameter('θ') qr = QuantumRegister(2, name='vqe') qc = QuantumCircuit(qr) qc.h(qr[0]) qc.cx(qr[0], qr[1]) qc.rx(theta, qr[0]) qc.draw('mpl') # - # ## 2.2) Constructing XX, YY, and ZZ measurements # # After creating our Ansatz, we need to measure the expected values of our pauli variables: XX, YY, and ZZ. The ZZ measurement is the easiest one, because the measurements are given on the computational basis, which is the same as saying that it is in the Z basis. For XX and YY measurements we need to make a change of basis in order to measure it, this will be explained in their respective sections. # ### 2.2.1) Constructing ZZ measurement # # Since the standard measurements are already in the ZZ basis, we only need to copy the circuit and use the `.measure_all()` method. Below there is a example of a circuit doing the ZZ measurement. # + def measure_zz_circuit(circuit: qiskit.QuantumCircuit): """Measure the ZZ Component of the variational circuit. Args: circuit (qiskit.QuantumCircuit): Circuit that you want to measure the ZZ Component. Outputs: zz_meas (qiskit.QuantumCircuit): Circuit with ZZ measurement. """ zz_meas = circuit.copy() zz_meas.measure_all() return zz_meas zz_meas = measure_zz_circuit(qc) zz_meas.draw('mpl') # - # Now that we have created the ZZ circuit, we need a way to measure it. In this case we will do a sweep over the parametric variable $\theta$ and get the expected values for each value of the parameter. # # In order to calculate the expected value, we need to see how a ZZ measurement will affect a arbitrary qubit: # # $$ # Z \otimes Z(a ∣00\rangle + b∣01\rangle + c∣10 \rangle + d∣11\rangle ) = a∣00 \rangle − b∣01\rangle − c∣10\rangle + d∣11\rangle # $$ # # Thus in order to have the expectation value of Z, we have: # $$ # \langle \psi | Z\otimes Z | \psi \rangle = \mathrm{Pr}(00) - \mathrm{Pr}(01) - \mathrm{Pr}(10) + \mathrm{Pr}(11) # $$ # # Where $\mathrm{Pr}(ij)$ is the probability of measuring the variable $ij$. In order to obtain the probabilities, we need to fill in a zero when the probability doesn't show up because qiskit will not put this probability in the dictionary. def measure_zz(given_circuit: qiskit.QuantumCircuit, theta_range: [list, np.array], num_shots: int = 10000): """ Measure the ZZ expected value for a given_circuit. Args: given_circuit (qiskit.QuantumCircuit): The parametrized circuit that you want to calculate the ZZ expected value. theta_range (Union[list, np.array]): Range of the parameter that you want to sweep. num_shots (int): Number of shots for each circuit run. (default=10000) Returns: zz (np.array): Expected values for each parameter. 
""" zz_meas = measure_zz_circuit(given_circuit) simulator = qiskit.Aer.get_backend('qasm_simulator') job = execute(zz_meas, backend= simulator, shots= num_shots, parameter_binds=[{theta: theta_val} for theta_val in theta_range]) counts = job.result().get_counts() zz = [] for count in counts: # Fill if the ij doesn't show up if '00' not in count: count['00'] = 0 if '01' not in count: count['01'] = 0 if '10' not in count: count['10'] = 0 if '11' not in count: count['11'] = 0 # Get total counts in order to obtain the probability total_counts = count['00'] + count['11'] + count['01'] + count['10'] # Get counts for expected value zz_meas = count['00'] + count['11'] - count['01'] - count['10'] # Append the probability zz.append(zz_meas / total_counts) return np.array(zz) theta_range = np.linspace(0, 2 * np.pi, 128) zz = measure_zz(qc, theta_range) plt.plot(theta_range, zz); plt.title(r"$\langle ZZ \rangle$", fontsize=16) plt.xlabel(r"$\theta$", fontsize=14) plt.ylabel(r"$\langle ZZ \rangle$", fontsize=14) plt.show() # ### 2.2.2) Constructing YY measurement # # In order to get the expectation value for the YY measurement, we need a change of basis because the measurement are only made on the Z basis. We use the following identity: $Y = (HS^\dagger)^\dagger Z H S^\dagger$, thus the YY expected value is: # # $$ # \langle \psi | Y | \psi \rangle = (\langle \psi |(H S^\dagger)^\dagger) Z (H S^\dagger| \psi \rangle) \equiv \langle \tilde \psi | Z | \tilde \psi \rangle # $$ # # Therefore, we need to add $S^\dagger H$ and do a Z measurement (just as the previous section) where we want to measure the $Y$ expected value. # + def measure_yy_circuit(circuit: qiskit.QuantumCircuit): """Measure the YY Component of the variational circuit. Args: given_circuit (qiskit.QuantumCircuit): Circuit that you want to measure the YY Component. Outputs: yy_meas (qiskit.QuantumCircuit): Circuit with YY measurement. """ yy_meas = circuit.copy() yy_meas.barrier(range(2)) yy_meas.sdg(range(2)) yy_meas.h(range(2)) yy_meas.measure_all() return yy_meas yy_meas = measure_yy_circuit(qc) yy_meas.draw('mpl') # - def measure_yy(given_circuit: qiskit.QuantumCircuit, theta_range: [list, np.array], num_shots: int = 10000): """ Measure the YY expected value for a given_circuit. Args: given_circuit (qiskit.QuantumCircuit): The parametrized circuit that you want to calculate the YY expected value. theta_range (Union[list, np.array]): Range of the parameter that you want to sweep. num_shots (int): Number of shots for each circuit run. (default=10000) Returns: yy (np.array): Expected values for each parameter. 
""" yy_meas = measure_yy_circuit(given_circuit) simulator = Aer.get_backend('qasm_simulator') job = execute(yy_meas, backend= simulator, shots=num_shots, parameter_binds=[{theta: theta_val} for theta_val in theta_range]) counts = job.result().get_counts() yy = [] for count in counts: if '00' not in count: count['00'] = 0 if '01' not in count: count['01'] = 0 if '10' not in count: count['10'] = 0 if '11' not in count: count['11'] = 0 total_counts = count['00'] + count['11'] + count['01'] + count['10'] yy_meas = count['00'] + count['11'] - count['01'] - count['10'] yy.append(yy_meas / total_counts) return np.array(yy) theta_range = np.linspace(0, 2 * np.pi, 128) yy = measure_yy(qc, theta_range) plt.plot(theta_range, yy) plt.title(r"$\langle YY \rangle$", fontsize=16) plt.xlabel(r"$\theta$", fontsize=14) plt.ylabel(r"$\langle YY \rangle$", fontsize=14) plt.show() # ### 2.2.3) Constructing XX measurement # # Just as the YY measurement, we need to use a identity in order to change from the X basis to the Z basis in order to do the measurement: $X = H Z H $, thus the YY expected value is: # # $$ # \langle \psi | Y | \psi \rangle = (\langle \psi |H) Z (H | \psi \rangle) \equiv \langle \tilde \psi | Z | \tilde \psi \rangle # $$ # # Therefore, we need to add $H$ and do a Z measurement where we want to measure the $X$ expected value. # + def measure_xx_circuit(circuit: qiskit.QuantumCircuit): """Measure the XX Component of the variational circuit. Args: circuit (qiskit.QuantumCircuit): Circuit that you want to measure the XX Component. Outputs: xx_meas (qiskit.QuantumCircuit): Circuit with XX measurement. """ xx_meas = circuit.copy() xx_meas.barrier(range(2)) xx_meas.h(range(2)) xx_meas.measure_all() return xx_meas xx_meas = measure_xx_circuit(qc) xx_meas.draw('mpl') # - def measure_xx(given_circuit: qiskit.QuantumCircuit, theta_range: [list, np.array], num_shots: int = 10000): """ Measure the XX expected value for a given_circuit. Args: given_circuit (qiskit.QuantumCircuit): The parametrized circuit that you want to calculate the XX expected value. theta_range (Union[list, np.array]): Range of the parameter that you want to sweep. num_shots (int): Number of shots for each circuit run. (default=10000) Returns: xx (np.array): Expected values for each parameter. """ xx_meas = measure_xx_circuit(given_circuit) simulator = Aer.get_backend('qasm_simulator') job = execute(xx_meas, backend= simulator, shots=num_shots, parameter_binds=[{theta: theta_val} for theta_val in theta_range]) counts = job.result().get_counts() xx = [] for count in counts: if '00' not in count: count['00'] = 0 if '01' not in count: count['01'] = 0 if '10' not in count: count['10'] = 0 if '11' not in count: count['11'] = 0 total_counts = count['00'] + count['11'] + count['01'] + count['10'] xx_meas = count['00'] + count['11'] - count['01'] - count['10'] xx.append(xx_meas / total_counts) return np.array(xx) theta_range = np.linspace(0, 2 * np.pi, 128) xx = measure_xx(qc, theta_range) plt.plot(theta_range, xx) plt.title(r"$\langle XX \rangle$", fontsize=16) plt.xlabel(r"$\theta$", fontsize=14) plt.ylabel(r"$\langle XX \rangle$", fontsize=14) plt.show() # One fun fact is that since the ansatz consists of rotations on the X axis, i.e. $H$ and $R_X$, the expected value of the XX operator is constant, if the ansatz was only rotations on the Y axis, the YY operator would be constant, and so on. 
This shows that our ansatz is not searching through all the Hilbert space and only a portion of it, thus we hope that our solution is in this portion, if it is not we should search for another ansatz. # ## 2.3) Getting the eigenvalue # # Now that we constructed XX, YY, and ZZ expected values, we can measure them given our defined ansatz and apply for the pauli decomposition of the given matrix, which is: # # $$ # A = 0.5 I \otimes I \ - \ 0.5 \sigma_x \otimes \sigma_x \ - \ 0.5 \sigma_y \otimes \sigma_y \ + \ 0.5 \sigma_z \otimes \sigma_z # $$ # # $I \otimes I$ measurements are always 1, because of the normalization of the quantum state, i.e. $\langle \psi | \psi \rangle = 1$, all other values comes from what is measured varying the parameters of the ansatz. def get_eigenvalue(circuit, theta_range, num_shots = 10000): xx = measure_xx(circuit, theta_range, num_shots = num_shots) yy = measure_yy(circuit, theta_range, num_shots = num_shots) zz = measure_zz(circuit, theta_range, num_shots = num_shots) energy = 0.5*1 \ - 0.5*xx \ - 0.5*yy \ + 0.5*zz return energy theta_range = np.linspace(0, 2 * np.pi, 128) eigenvalues = get_eigenvalue(qc, theta_range) plt.plot(theta_range, eigenvalues) plt.title(r"Eigenvalues", fontsize=16) plt.xlabel(r"$\theta$", fontsize=14) plt.ylabel(r"$Eigenvalue$", fontsize=14) plt.show() # + tags=[] print(f"Smallest eigenvalue from VQE: {np.round(np.min(eigenvalues),4)} with theta = {np.round(theta_range[np.argmin(eigenvalues)], 2)}") print(f"Smallest eigenvalue calculated classically = {np.round(np.min(np.linalg.eigh(A)[0]),4)}") print(f"Error between classical and quantum: {np.round(np.abs(np.min(eigenvalues) - np.min(np.linalg.eigh(A)[0])), 4)}") # - # -------------------------------------------- # # References # # 1) [Qiskit Documentation](https://qiskit.org/documentation/tutorials/circuits_advanced/1_advanced_circuits.html) # # 2) [Quantum Computing: An applied Approach - Hidary](https://www.springer.com/gp/book/9783030239213) # # 3)[Qiskit Summer School](https://www.youtube.com/watch?v=Rs2TzarBX5I&list=PLOFEBzvs-VvrXTMy5Y2IqmSaUjfnhvBHR) from qiskit.tools.jupyter import * # %qiskit_version_table
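# As a small postscript (not part of the original task statement): the minimum above comes from a fairly
# coarse 128-point sweep, and each point is estimated from a finite number of shots, so the reported theta
# is only approximate. One hedged way to tighten the estimate, reusing the helpers defined earlier, is to
# re-sample a finer grid around the coarse minimum with more shots. The window width and shot count below
# are arbitrary choices, not values taken from the original notebook.

# +
theta_coarse = theta_range[np.argmin(eigenvalues)]
fine_range = np.linspace(theta_coarse - 0.2, theta_coarse + 0.2, 21)
fine_eigenvalues = get_eigenvalue(qc, fine_range, num_shots=50000)

print(f"Refined smallest eigenvalue: {np.round(np.min(fine_eigenvalues), 4)} "
      f"at theta = {np.round(fine_range[np.argmin(fine_eigenvalues)], 3)}")
# -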
Task 4.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: fba4d2 # language: python # name: fba4d2 # --- # Find substrates that could alleviate the need for biotin import cobra import cobra.test import pickle import pandas as pd from cobra.flux_analysis import gapfilling df = pd.read_csv('vitamin_to_blocked_reactions.csv', index_col=0) ijo = cobra.io.load_json_model('/home/sbrg-cjlloyd/Desktop/ecoli_M_models/iJO1366.json') ijo.solver = 'gurobi' uni = cobra.Model() for m in ijo.metabolites.query('_c'): new_r = cobra.Reaction('source_' + m.id) uni.add_reaction(new_r) new_r.add_metabolites({m: 1}) result_dict = {} for i in df.index: model = ijo.copy() for met in i.split(', '): if '_c' not in met: met += '_c' met_obj = model.metabolites.get_by_id(met) biomass = model.reactions.BIOMASS_Ec_iJO1366_core_53p95M if met_obj in biomass.metabolites: biomass.add_metabolites({met_obj:0}, combine=False) for r in df.loc[i, '0'].split(', '): if r in model.reactions: model.reactions.get_by_id(r).knock_out() elif r[:-1] in model.reactions: model.reactions.get_by_id(r[:-1]).knock_out() elif r == 'BTS6': model.reactions.get_by_id('BTS5').knock_out() else: print(r) print(i) model.solver.problem.setParam('IntFeasTol', 1e-9) model.solver.problem.setParam('FeasibilityTol', 1e-9) gfill = gapfilling.GapFiller(model, uni, lower_bound=.01, exchange_reactions=True, demand_reactions=True, integer_threshold=1e-9) try: result = gfill.fill(iterations=5) except Exception: continue result_dict[i] = result for i, entries in enumerate(result): print("---- Run %d ----" % (i + 1)) for e in entries: print(e.id)
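# An optional post-processing sketch (assuming the loop above has populated result_dict): flatten the
# gap-filling solutions into a tidy DataFrame so the proposed reactions can be compared across vitamins
# and runs. The output file name below is just a placeholder, not a path used elsewhere in this repo.

# +
summary_rows = []
for vitamin, runs in result_dict.items():
    for run_idx, entries in enumerate(runs):
        for rxn in entries:
            summary_rows.append({'vitamin': vitamin, 'run': run_idx + 1,
                                 'reaction': rxn.id})
summary_df = pd.DataFrame(summary_rows)
summary_df.to_csv('gapfill_solutions_summary.csv', index=False)
summary_df.head()
# -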
me_biomass/script_archive/find_media_to_alleviate_aux_m_model.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + colab={"base_uri": "https://localhost:8080/", "height": 238} id="bQ74bhZVgP1p" executionInfo={"status": "ok", "timestamp": 1630451969068, "user_tz": -420, "elapsed": 312, "user": {"displayName": "hadi learn", "photoUrl": "", "userId": "07681876538566626634"}} outputId="1648cab4-f6af-40b9-cb62-0d9ce4ede6fb" import pandas as pd df = pd.read_csv("salaries_by_college_major.csv") df.head() # + colab={"base_uri": "https://localhost:8080/"} id="9aEZYyKvgqjn" executionInfo={"status": "ok", "timestamp": 1630452259269, "user_tz": -420, "elapsed": 359, "user": {"displayName": "hadi learn", "photoUrl": "", "userId": "07681876538566626634"}} outputId="e82d3ded-1214-410a-e8f9-4041d4dc6309" df.shape # + colab={"base_uri": "https://localhost:8080/"} id="GzTdlqVDhxXM" executionInfo={"status": "ok", "timestamp": 1630452356533, "user_tz": -420, "elapsed": 383, "user": {"displayName": "hadi learn", "photoUrl": "", "userId": "07681876538566626634"}} outputId="0d6a21c6-b2ab-4521-8133-7e672a54325b" df.columns # + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="-M3S1fRaiJI4" executionInfo={"status": "ok", "timestamp": 1630452425451, "user_tz": -420, "elapsed": 375, "user": {"displayName": "hadi learn", "photoUrl": "", "userId": "07681876538566626634"}} outputId="582df5dc-62b8-4812-f325-4a7a8377b066" df.isna() # + colab={"base_uri": "https://localhost:8080/", "height": 221} id="94aNeYNfiZ8h" executionInfo={"status": "ok", "timestamp": 1630452452722, "user_tz": -420, "elapsed": 324, "user": {"displayName": "hadi learn", "photoUrl": "", "userId": "07681876538566626634"}} outputId="cbec8667-4a6d-4497-d2ce-8979a2f01570" df.tail() # + id="cNV2S2aQigfk" executionInfo={"status": "ok", "timestamp": 1630452553526, "user_tz": -420, "elapsed": 432, "user": {"displayName": "hadi learn", "photoUrl": "", "userId": "07681876538566626634"}} clean_df = df.dropna() # + colab={"base_uri": "https://localhost:8080/", "height": 221} id="1F2VJWaui5Nf" executionInfo={"status": "ok", "timestamp": 1630452566451, "user_tz": -420, "elapsed": 327, "user": {"displayName": "hadi learn", "photoUrl": "", "userId": "07681876538566626634"}} outputId="29a47a2d-afbd-45e8-adac-88b784b51978" clean_df.tail() # + colab={"base_uri": "https://localhost:8080/"} id="qUs5wNl7i8Zp" executionInfo={"status": "ok", "timestamp": 1630453055470, "user_tz": -420, "elapsed": 338, "user": {"displayName": "hadi learn", "photoUrl": "", "userId": "07681876538566626634"}} outputId="56b28bf6-6597-4b2e-9993-018f1b8a14a6" clean_df["Starting Median Salary"] # + colab={"base_uri": "https://localhost:8080/"} id="suJh44YEkzxy" executionInfo={"status": "ok", "timestamp": 1630453129868, "user_tz": -420, "elapsed": 408, "user": {"displayName": "hadi learn", "photoUrl": "", "userId": "07681876538566626634"}} outputId="3e3e9132-df5b-43aa-adad-a4f337912009" clean_df["Starting Median Salary"].max() # + colab={"base_uri": "https://localhost:8080/"} id="R_IHU5BHlF7I" executionInfo={"status": "ok", "timestamp": 1630453201216, "user_tz": -420, "elapsed": 363, "user": {"displayName": "hadi learn", "photoUrl": "", "userId": "07681876538566626634"}} outputId="c88fca5b-edbe-4d99-ae11-2b84bf3a7c33" clean_df["Starting Median Salary"].idxmax() # + colab={"base_uri": "https://localhost:8080/", "height": 35} id="9TLHl5jWlXXX" executionInfo={"status": "ok", "timestamp": 
1630453265379, "user_tz": -420, "elapsed": 383, "user": {"displayName": "hadi learn", "photoUrl": "", "userId": "07681876538566626634"}} outputId="fb889613-7ca2-4676-afdd-12c88c0a9a51" clean_df["Undergraduate Major"].loc[43] # + colab={"base_uri": "https://localhost:8080/", "height": 35} id="N1xj-ycClnBe" executionInfo={"status": "ok", "timestamp": 1630453318673, "user_tz": -420, "elapsed": 543, "user": {"displayName": "hadi learn", "photoUrl": "", "userId": "07681876538566626634"}} outputId="59ef67af-4b46-4cae-d5ef-cb6e8ba796e7" clean_df["Undergraduate Major"][43] # + colab={"base_uri": "https://localhost:8080/"} id="ojE0COuClz9X" executionInfo={"status": "ok", "timestamp": 1630453441477, "user_tz": -420, "elapsed": 378, "user": {"displayName": "hadi learn", "photoUrl": "", "userId": "07681876538566626634"}} outputId="77eddf11-19fb-4289-bc9e-69b03a145ed7" clean_df.loc[43] # + colab={"base_uri": "https://localhost:8080/"} id="G1Wmh2bmmSA3" executionInfo={"status": "ok", "timestamp": 1630453815193, "user_tz": -420, "elapsed": 342, "user": {"displayName": "hadi learn", "photoUrl": "", "userId": "07681876538566626634"}} outputId="3efd2f21-ff61-4986-a989-dca687fc9672" clean_df["Mid-Career Median Salary"].idxmax() # + colab={"base_uri": "https://localhost:8080/"} id="c2_0oMBznnqY" executionInfo={"status": "ok", "timestamp": 1630453854394, "user_tz": -420, "elapsed": 354, "user": {"displayName": "hadi learn", "photoUrl": "", "userId": "07681876538566626634"}} outputId="ae83b837-799b-432c-cb9e-51d325f47178" clean_df.loc[8] # + colab={"base_uri": "https://localhost:8080/"} id="t-qDHdAnn0MN" executionInfo={"status": "ok", "timestamp": 1630453919596, "user_tz": -420, "elapsed": 334, "user": {"displayName": "hadi learn", "photoUrl": "", "userId": "07681876538566626634"}} outputId="61602996-a80e-4d06-c535-5d5f41e4d222" clean_df["Starting Median Salary"].idxmin() # + colab={"base_uri": "https://localhost:8080/"} id="bwBfGz8EoGwe" executionInfo={"status": "ok", "timestamp": 1630453932186, "user_tz": -420, "elapsed": 366, "user": {"displayName": "hadi learn", "photoUrl": "", "userId": "07681876538566626634"}} outputId="772f4fb2-1372-481b-a3ea-5834689b89cd" clean_df.loc[49] # + colab={"base_uri": "https://localhost:8080/"} id="YTvqb0qAoJ0c" executionInfo={"status": "ok", "timestamp": 1630454012296, "user_tz": -420, "elapsed": 363, "user": {"displayName": "hadi learn", "photoUrl": "", "userId": "07681876538566626634"}} outputId="19907971-4740-4db6-d36a-4e093b9a29ad" clean_df["Mid-Career Median Salary"].idxmin() # + colab={"base_uri": "https://localhost:8080/"} id="Ly78C2AxodYS" executionInfo={"status": "ok", "timestamp": 1630454022739, "user_tz": -420, "elapsed": 393, "user": {"displayName": "hadi learn", "photoUrl": "", "userId": "07681876538566626634"}} outputId="bbf5edcc-b872-4ada-de15-69649654367a" clean_df.loc[49] # + colab={"base_uri": "https://localhost:8080/", "height": 35} id="at5YOYXfof7O" executionInfo={"status": "ok", "timestamp": 1630454183977, "user_tz": -420, "elapsed": 443, "user": {"displayName": "hadi learn", "photoUrl": "", "userId": "07681876538566626634"}} outputId="d0322642-2ba2-4211-be4b-9ccc7c350dff" clean_df["Undergraduate Major"].loc[clean_df["Mid-Career Median Salary"].idxmin()] # + colab={"base_uri": "https://localhost:8080/"} id="WNKHQ3CnpHRX" executionInfo={"status": "ok", "timestamp": 1630454429928, "user_tz": -420, "elapsed": 330, "user": {"displayName": "hadi learn", "photoUrl": "", "userId": "07681876538566626634"}} outputId="0e566350-51a0-4258-9eb8-63a1416351bb" 
clean_df["Mid-Career 90th Percentile Salary"] - clean_df["Mid-Career 10th Percentile Salary"] # + colab={"base_uri": "https://localhost:8080/"} id="FOTfcvTvqDVw" executionInfo={"status": "ok", "timestamp": 1630454485329, "user_tz": -420, "elapsed": 332, "user": {"displayName": "hadi learn", "photoUrl": "", "userId": "07681876538566626634"}} outputId="03465768-c660-4a92-d792-c03d94014973" clean_df["Mid-Career 90th Percentile Salary"].subtract(clean_df["Mid-Career 10th Percentile Salary"]) # + colab={"base_uri": "https://localhost:8080/", "height": 238} id="sTk9tFY9qQ3W" executionInfo={"status": "ok", "timestamp": 1630454718010, "user_tz": -420, "elapsed": 368, "user": {"displayName": "hadi learn", "photoUrl": "", "userId": "07681876538566626634"}} outputId="469010c8-7135-4b45-94ba-e449c0e98743" differ_col = clean_df["Mid-Career 90th Percentile Salary"] - clean_df["Mid-Career 10th Percentile Salary"] clean_df.insert(1, "Spread", differ_col) clean_df.head() # + colab={"base_uri": "https://localhost:8080/", "height": 238} id="Qi0Xpry9qwXw" executionInfo={"status": "ok", "timestamp": 1630454784613, "user_tz": -420, "elapsed": 339, "user": {"displayName": "hadi learn", "photoUrl": "", "userId": "07681876538566626634"}} outputId="6a8ac4f7-e253-42c3-9be2-0af55be2d35b" del clean_df["Differ"] clean_df.head() # + colab={"base_uri": "https://localhost:8080/", "height": 238} id="eoVO_QilrHC-" executionInfo={"status": "ok", "timestamp": 1630454806364, "user_tz": -420, "elapsed": 370, "user": {"displayName": "hadi learn", "photoUrl": "", "userId": "07681876538566626634"}} outputId="0347cf9a-885b-40ef-b61c-3f374223831b" differ_col = clean_df["Mid-Career 90th Percentile Salary"] - clean_df["Mid-Career 10th Percentile Salary"] clean_df.insert(5, "Differ", differ_col) clean_df.head() # + colab={"base_uri": "https://localhost:8080/", "height": 204} id="FGhBNSglrTcx" executionInfo={"status": "ok", "timestamp": 1630455099536, "user_tz": -420, "elapsed": 342, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "07681876538566626634"}} outputId="96468a3d-9aa8-4006-ecec-20fd55cb3cdf" low_risk = clean_df.sort_values("Differ") low_risk[["Undergraduate Major", "Differ"]].head() # + colab={"base_uri": "https://localhost:8080/", "height": 204} id="9P1RdpousaEy" executionInfo={"status": "ok", "timestamp": 1630455344759, "user_tz": -420, "elapsed": 310, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "07681876538566626634"}} outputId="18c9ab16-822e-488d-96e5-8efa8212008b" potential = clean_df.sort_values("Mid-Career 90th Percentile Salary", ascending=False) potential[["Undergraduate Major", "Mid-Career 90th Percentile Salary"]].head() # + colab={"base_uri": "https://localhost:8080/", "height": 204} id="kORfcz8StggT" executionInfo={"status": "ok", "timestamp": 1630455464836, "user_tz": -420, "elapsed": 347, "user": {"displayName": "hadi learn", "photoUrl": "", "userId": "07681876538566626634"}} outputId="b57dcc1c-33a7-4a44-b5d3-faa630a63221" high_risk = clean_df.sort_values("Differ", ascending=False) high_risk[["Undergraduate Major", "Differ"]].head() # + colab={"base_uri": "https://localhost:8080/", "height": 204} id="n_MjNPEdt7m7" executionInfo={"status": "ok", "timestamp": 1630455901633, "user_tz": -420, "elapsed": 364, "user": {"displayName": "hadi learn", "photoUrl": "", "userId": "07681876538566626634"}} outputId="d8862296-22ae-42d7-951e-3012eee0db9d" popular = clean_df.sort_values("Mid-Career Median Salary", ascending=False) popular[["Undergraduate Major", "Mid-Career Median Salary"]].head() # + 
colab={"base_uri": "https://localhost:8080/", "height": 190} id="3iKL3VqVvqpN" executionInfo={"status": "ok", "timestamp": 1630455997413, "user_tz": -420, "elapsed": 377, "user": {"displayName": "hadi learn", "photoUrl": "", "userId": "07681876538566626634"}} outputId="dded11a0-e30d-423b-852a-9080db4a2d75" clean_df.groupby("Group").count() # + colab={"base_uri": "https://localhost:8080/", "height": 190} id="sQbbrt_YwCB3" executionInfo={"status": "ok", "timestamp": 1630456062763, "user_tz": -420, "elapsed": 368, "user": {"displayName": "hadi learn", "photoUrl": "", "userId": "07681876538566626634"}} outputId="3c99c122-bce0-49ba-95bd-54d0390e673b" clean_df.groupby("Group").mean() # + colab={"base_uri": "https://localhost:8080/", "height": 190} id="BeLaN1wGwR-0" executionInfo={"status": "ok", "timestamp": 1630456181840, "user_tz": -420, "elapsed": 372, "user": {"displayName": "hadi learn", "photoUrl": "", "userId": "07681876538566626634"}} outputId="c28a2984-aa8f-485a-adb6-f2f5e9b94dcc" pd.options.display.float_format = "{:,.2f}".format clean_df.groupby("Group").mean() # + id="osh7-9y6wvCz"
day-71-data-exploration-with-pandas-jupyter-notebook/Test-Colab.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Explainer on Hypothesis Testing and Bayesian Posterior Inference # # In this notebook we will consider two types of reasoning, hypothesis testing and Bayesian inference. As we will see, both of these types of reasoning are quite common. But, we will be concerned with situtations in which this reasoning can be carried out with precision and objectivity, specifically, circumstances in which we can use the mathematical theories of probability and statistics. Our goal here is simply to understand what the types of reasoning are and how probability and statistics can be used to help us reason well. I will begin by talking about hypothesis testing. After introducing the main idea I will focus on a particular sort of hypothesis test called a t-test and give an example of how to do a t-test. Then I will talk about Bayesian inference. I will go through a couple different techniques for doing Bayesian inference using a similar example as was used to illustrate the t-test. Lastly, we can compare the two sorts of inference to see what the main differences are. # # ## Hypothesis Testing # # A __hypothesis test__ is a method for determining how likely an observation is given some hypothesis. If we make some observation and it is highly unlikely that we would make that observation given some hypothesis, then we have justification for rejecting that hypothesis. Alternatively, if we make some observation and it is highly likely that we would make that observation given some hypothesis, then this gives some further credence to the hypothesis (the degree of confirmation depends on many different factors I won't go into here). We are going to focus on cases in which an observation is highly unlikely given some hypothesis. # # Consider the following scenario: I arrive home from work and am greeted by my dog. I assume that my dog has been a good dog today per usual. Then I observe that my trash can is tipped over and spilled onto my floor. I judge this observation to be highley unlikely given the hypothesis that my dog has been a good dog today. Consequently, I reject that hypthesis. # # This is an example of what we might call a _subjective hypothesis test_, where my method is simply to make a judgment about how likely I think an observation is given some hypothesis. People use hypothesis testing of this sort regularly. _But,_ one issue with this sort of reasoning is that it is subjective! While I judge that the observation is unlikely given the hypothesis, reasonable people could disagree. I haven't given any basis for my judgment beyond that is how it seems to me. And even if I could give some rationale for my judgement, people often just are not good at making judgments of this sort. So how can we do better? # # A _statistical hypothesis test_ is a hypothesis test that utilizes the mathematical theories of statistics and probability to determine how likely the observation is given some hypothesis. By using statistics and probability we can be precise and give an objective basis for our judgment of how likely the observation is. # # Unlike the subjective hypothesis test, we cannot always use a statistical hypothesis test. Some hypotheses aren't apt for statistical hypothesis testing and sometimes we just won't have the information necessary to use a statistical hypothesis test. 
But, if our hypothesis is is apt and we do have relevant data then we can perform a statistical hypothesis test. # # The topic of statistical hypothesis tests is large, so here we will just focus in on one sort of statistical hypothesis test, namely the t-test. There are a few different types of t-test, we are just going to talk about a two independent sample t-test, but, see the appendix for the other types. # # A __two independent sample t-test__ is a type of hypothesis test that can be used when we want to determine whether there is a difference in the means of some numeric feature for two different groups. We use it to test the hypothesis that there is no difference in means. This is typically called the null hypothesis. We will test the hypothesis by taking a sample from the groups being tested and then do some math to determine how likely it is that we would get this sample if the null hypothesis is true. If it is highly unlikely that we would get this sample given the null hypothesis then we reject it in favor of the alternative hypothesis that there is a difference in means. # # Note: We can perform a t-test if the numeric feature is normally distributed, the groups have similar variance, and we have an appropriate sample size of between 20-30. We can use the t-test for larger sample sizes but in that case there are more appropriate hypothesis tests, e.g. a z-test. # # To perform a two sample t-test we take our samples, and for each we compute the mean $\bar{x}_i$, standard deviation $\sigma_i$, and record the sample size $n_i$. We then use these values to calculate something called the t-statistic. Once we have the t-statistic we will determine how likely it is that we would get a sample with this t-statistic given the null hypothesis. If it is highly unlikely we would get that t-statistic given the null hypothesis then we will reject the null hypothesis. Otherwise we won't do anything. # # The t-statistic essentially depends on two factors, how big is the difference in the sample means and how much variance is there in the samples. The bigger the difference in sample means, the more extreme the t-statistic (farther from zero). And the bigger the variance in the samples, the less extreme the t-statistic (closer to zero). It should be clear why a big difference in sample means is relevant, we are trying to determine if there is a difference between the group means. The reason why we take the variance into account is that if there is a lot of variance in the sample we should be less confident that the sample mean is close to the group mean. So even if there is a big difference in sample means, if there is sufficiently large sample variance we should not reject the null hypothesis. # # We calculate the t-statistic using the following equation: # # $$t = \frac{\bar{x}_1 - \bar{x}_2}{\sqrt{\frac{\sigma_1^2}{n_1} + \frac{\sigma_2^2}{n_2}}}.$$ # # Now we want to assess how likely it is that we would get the calculated t-statistic if the null hypothesis is true. The probability that we would observe this t-statistic under the null hypothesis is called the __p-value__. If the p-value is very low, this just mean that it is highly unlikely we would get this t-statistic given the null hypothesis, in which case we will reject the null hypothesis. Typically, a p-value of less than .05 is seen as grounds for rejecting the null hypothesis. But really it all depends on how important it is to be correct. 
If it is not a vital matter whether the null hypothesis is true, we might raise the threshold for rejecting the null hypothesis to say .1. Alternatively, if it is really important that we only reject the null hypothesis when it really is false, then we might lower the threshold to .01 or even lower. # # To find the p-value we can use the probability density function for the t-statistic. Under the null hypothesis, if we took many samples from the two groups we are interested in, $t$ would have an approximately normal distribution with a mean of zero and a variance of $\frac{d}{d-1}$, where $d$ is the degrees of freedom. This follows from the central limit theorem, which I won't talk about here, but is worth being familiar with. Let's take a look at a t-distribution. # + # Plot t-distribution import numpy as np import scipy.stats as ss import matplotlib.pyplot as plt import matplotlib.mlab as mlab import seaborn as sns import pandas as pd import math # %matplotlib inline plt.style.use('seaborn') plt.rcParams['figure.figsize'] = (10, 6) # t-distribution for two sample test with sample sizes of 30 t_dist = ss.t(df=58) x = np.linspace(t_dist.ppf(0.0001), t_dist.ppf(0.9999), 100) s = math.sqrt(t_dist.stats(moments='v')) plt.plot(x, t_dist.pdf(x)) #plt.vlines([-3*s, -2*s, -s, 0, s, 2*s, 3*s], 0, .4, linestyle='dashed', colors='y') plt.show() # - # We can see it does indeed look like a normal distribution centered around zero. To calculate our p-value we will find the value of our t-statistic on the x-axis and then find the area under the curve beyond that point. This is actually the t-distribution we will use for the example of how to do a t-test, so let's turn to that now. # # ### Example # Suppose we are planning to move to a new city and we are trying to decide between living in two neighborhoods, Uptown and Downtown. Our budget to buy a home is 200,000 dollars. There are several homes in both neighborhoods that are priced within our budget. But, it would be preferable to find a home within our budget that is in the neighborhood with the higher average of home prices as this leaves more room for our new home's value to grow. So we want to find out if Uptown and Downtown have different averages of home prices. # # To check if there is a difference between the average of home prices for each neighborhood we can perform a t-test. Our null hypothesis is $H_0$: The averages of home prices for each neighborhood are the same. Our alternative hypothesis is $H_1$: The averages of home prices for each neighborhood are different. # # We will take a random sample of 30 home values from each neighborhood and perform a t-test on them. If we get p-value less that .05 then we will reject the null hypothesis and conclude the alternative hypothesis is true. First let's get those samples and take a look at them. 
# + import numpy as np import scipy.stats as ss import matplotlib.pyplot as plt import matplotlib.mlab as mlab import seaborn as sns import pandas as pd # %matplotlib inline plt.style.use('seaborn') plt.rcParams['figure.figsize'] = (10, 6) np.random.seed(seed=123) # Draw Samples uptown_sample = np.random.normal(loc=185000.0, scale=50000.0, size=30) downtown_sample = np.random.normal(loc=215000.0, scale=50000.0, size=30) # Create Data Frame d_1 = {'location':['uptown']*30, 'price':uptown_sample} d_2 = {'location':['downtown']*30, 'price':downtown_sample} df = pd.concat([pd.DataFrame(data=d_1), pd.DataFrame(data=d_2)], axis=0) # Plot Samples sns.swarmplot(y='price', x='location', data=df) plt.show() # - # Using the good ol' eye test it looks like the average prices for downtown homes is a bit higher than that of uptown homes. But, I think most would agree that the eye test is inconclusive here and so it really is necessary to do the t-test. To do this we need to calculate the t-statistic and then we can see where it is on the t-distribution. # + #Perform t-test uptown_sample_mean = uptown_sample.mean() uptown_sample_stdev = uptown_sample.std() uptown_n = 30 downtown_sample_mean = downtown_sample.mean() downtown_sample_stdev = downtown_sample.std() downtown_n = 30 t = (uptown_sample_mean - downtown_sample_mean)/math.sqrt((uptown_sample_stdev**2/uptown_n) + (downtown_sample_stdev**2/downtown_n)) print("t-statistic:",t) # t-distribution for two sample test with sample sizes of 30 t_dist = ss.t(df=58) x = np.linspace(t_dist.ppf(0.0001), t_dist.ppf(0.9999), 100) s = math.sqrt(t_dist.stats(moments='v')) plt.plot(x, t_dist.pdf(x)) plt.vlines([t, -t], 0, .4, linestyle='dashed', colors='g') plt.xlabel('t-statistic') plt.show() # - # To get the p-value we need to find the area under the curve beyond our lines. Since it can be a bit of hassle to do this ourselves, there are tables we could refer to that will give us an approximate p-value for our t-statistic. Alternatively, we can just use a python package to calculate the p-value for us. The scipy.stats package has function for doing a t-test on two samples. #Perform t-test using built-in function display(ss.ttest_ind(uptown_sample, downtown_sample)) # We can see from the t-test that our p-value is approximately .03 which is less than .05! This means that if the null hypothesis is true there is less than a 5% chance that would have gotten the t-statistic we did get. So, we are justified in rejecting the null hypothesis and concluding that there is a difference in the averages of home prices between Uptown and Downtown. But, it is worth noting that if our threshold for a significant p-value had been set lower at .01 then we would not reject the null hypothesis. # # Before moving on to talk about Bayesian inference it is worth considering in a bit more depth what the p-value for our t-statistic means. The p-value for a t-statistic represents the _frequency_ at which we would get that t-statistic or one more extereme if the null hypothesis is true. If we performed 100 different t-tests and got this t-statistic or one more extreme each time we would expect the null hypothesis to be true in three of those cases. If we reject the null hypothesis for all of those tests we would be wrong approximately three times. # # Let's now suppose that we do 200 hypothesis tests with a threshold of .05 for rejecting the null hypothesis. Assume that 50% of the time the null hypothesis is true and the other 50% of the time the null hypothesis is false. 
In the cases where the null hypothesis is true, we should expect to get the answer right 95 times and the answer wrong 5 times. What about the cases in which the null hypothesis is false? In this case we need to calculate the __power__ of our test, which is just the proability that we reject the null hypothesis given that it is false. I will omit the details of how to calculate it, but the power of our test will depend on the sample size and the size of the difference we want to detect. We can use a python package to calculate the power of our t-test. # + from statsmodels.stats.power import TTestIndPower mu_uptown = 185000 mu_downtown = 215000 sigma = 50000 effect_size = (mu_downtown - mu_uptown)/sigma sample_size = 30 alpha = 0.05 calculator = TTestIndPower() power = calculator.solve_power(effect_size = effect_size, power=None, nobs1=sample_size, ratio=1.0, alpha=alpha) print(power) # - # So we have a power of roughly .63. This means that if the null hypothesis is false there is a 63% chance we will reject it (where 63% chance means that around 63 out of 100 times we will get a t-statistic that will lead us to reject the null hypothesis). Let's put this information together in table: # # |Confusion Matrix|$H_0$ Rejected|$H_0$ Not Rejected| # |----------------|----|--------| # |$H_0$ is False|63|37| # |$H_0$ is True|5|95| # # We can use this table along with the concepts of precision and recall to evaluate our test. __Precision__ is defined as the number of true positives divided by the total number of predicted positives. In our case a true positive is the case where we reject the null hypothesis and the null hypothesis is indeed false. The predicted positives are all the cases in which we reject the null hypothesis. So our precision would be .92. We want our test to have high precision and ours seems to have reasonably high precision. __Recall__ is defined as the number of true positives divided by the number of positives. We have 63 true positives. The total number of positives is the number of cases in which the null hypothesis is false, which is 100. So our recall is .63 (which is the same as the power). While our precision is reasonably high our recall would ideally be higher. In our example we know there was a difference in means by design and so we did the correct thing when we rejected the null hypothesis. But, if we had picked a different random sample there is a substantial chance that we would not have rejected the null hypothesis. So our test was not as well designed as it could be. One way we could address the low recall/power would be to increase our sample size. # ## Bayesian Inference # # We saw before that statistical hypothesis testing is essentially a way of updating beliefs about some hypothesis in light of some observation made. Bayesian inference can similarly be viewed as a way of updating beliefs given some observation made. The difference is we won't be rejecting a hypothesis but rather will be adjusting how confident we are that the hypothesis is true. So consider again the example where I arrive home from work and am greeted by my dog. Before I make any observation I am fairly confident that my dog has been a good dog today, call that level of confidence $P(H)$. Also, I know in that moment that if I were to see that my trash can is tipped over and spilled onto my floor then I will be much less confident that my dog was a good dog today, call that level of confidence $P(H|O)$. Subsequently, I see that my trash can is tipped over and spilled onto my floor. 
How confident should I be that my dog was a good dog today, call this unknown level of confidence $P_O(H)$. Well I already knew ahead of time that if I made this observation I would be less confident in this hypothesis, specifically to degree $P(H|O)$. Bayesian inference is just the practice of setting my confidence in the hypothesis after I make the observation to what I had established earlier it would be if I were to make that observation. That is $P_O(H) = P(H|O)$. # # Now one worry we should have about this example, a worry we raised before when talking about hypothesis tests, is that my reasoning is too subjective. Specifically, where did $P(H|O)$ come from? Well it was just a vague judgment I made. Fortunately sometimes we are in a position to do better, namely when we can make Bayesian inference precise using the mathematical theory of probability. If we can get some more objective probabilities and use them to determine $P(H|O)$ then our inference will be better. When we are dealing with probabilites, $P_O()$ is called the __posterior probability distribution__ and $P()$ is called the __prior probability distribution__. So another way to describe Bayesian inference is that it is just a way of getting the posterior distribution from the prior distribution, namely we set $P_O() = P(\cdot |O)$ (which is why you will often see the $P(\cdot |O)$ referred to as the posterior distribution). # # __Calculating $P(H|O)$:__ To calculate $P(H|O)$ we use what's called "Bayes' Theorem" or "Bayes' Rule": # # <br> # $$P(H|O) = \frac{P(O|H)P(H)}{P(O)}.$$ # <br> # I'll skip over where this comes from, but see the appendix for the motivation for it. The important thing to note is that we can calculate $P(H|O)$ if we can calculate $P(O|H)$, $P(H)$, and $P(O)$. It is often feasible to calculate $P(O|H)$ (we essentially did this when doing a hypothesis test) and $P(H)$ is the prior for our hypothesis which we will supply. Unfortunatley, calculating $P(O)$ is often problematic. We'll see a couple ways of getting around this difficultity but it is best to just see them in action. So let's now turn to an example of Bayesian inference. # # ### Example # Suppose again that we are interested in home prices for a partiuclar area. Specifically I am interested in the average of home prices in Uptown. Suppose my credences for what the average of home prices in Uptown is are normally distributed with mean 200,000 with a standard deviation of 50,000. That is, I am most confident that the average of home prices in Uptown is 200,000 dollars. As you increase or decrease the value my confidence that this value is the average of home prices in Uptown decreases. Let's visualize the distribution. x = np.linspace(200000.0 - 4*50000.0, 200000 + 4*50000.0, 100) plt.plot(x,mlab.normpdf(x, 200000.0, 50000.0), label='Prior') plt.legend() plt.show() # So, I already have some rough idea of what I think the average of home prices in Uptown is. But, this is more or less an educated guess. I would have a better idea if I took a sample and used that to inform my beliefs. Suppose I get a sample of home prices in uptown $X = x_1, ..., x_{30}$, and let $\theta$ be a variable that ranges over the candidate means of home prices in Uptown. Using Bayesian inference and Bayes' rule the posterior distribution is # # $$P_X(\theta) = P(\theta|X) = \frac{P(X|\theta)P(\theta)}{P(X)}.$$ # # Unfortunately, calculating what we need to in order to specify the posterior distribution is difficult. 
In particular, it is often infeasible to calculate $P(X)$, which is just the prior probability of getting the data we got. But, there are a couple ways around this. One is posterior sampling which we can use to approximate the posterior and the other is to use the fact that in our case the prior is conjugate to the posterior relative to the likelihood, which basically means we can look up an equation to get the parameters for the posterior which only uses values we already know. I'll go through both now. # # We can sample the posterior distribution in order to get an approximate sense of what it is like. You may wonder how we can sample from a distribution for which we are trying to figure out what it is! Yet, there are techniques we can use to get our sample. I'll largely gloss over the technical details, but the basic idea is that the sampling process will propose values for $\theta$ in such a way that the values for $\theta$ that fit better with our observation $X$ and prior will be chosen more often, the values for $\theta$ that do not fit well with our observation $X$ and prior will be chosen less often, and values that are inconsistent with our observation $X$ and prior will be rejected. # # What values of $\theta$ fit better with our observation and prior? Well, the difficulty in calculating $P(\theta|X)$ using Bayes' rule was that we could not calculate $P(X)$, but it is just a fixed number. So, while we cannot calculate the posterior, we can note that for two specific values of $\theta$, $\theta_1$ and $\theta_2$, $P(\theta_1|X) > P(\theta_2|X)$ if and only if $P(X|\theta_1)P(\theta_1) > P(X|\theta_2)P(\theta_2)$. So we can use $P(X|\theta)P(\theta)$ as a way of evaluating how well a particular value of theta fits with our obsevration and prior. # # Now that we have covered the basic idea of posterior sampling, the question remains how do we generate the samples. In this case we will use something called Markov Chain Monte Carlo sampling, specifically what is called the Metropolis–Hastings algorithm. See the appendix for the details on the algorithm. # + import scipy.stats as ss theta_sample = [] sampling_width = 50000.0 #Step One theta_start = np.random.normal(loc=200000.0, scale=50000.0, size=1) theta_current = theta_start for i in range(10000): #Step Two theta_new = np.random.normal(loc=theta_current, scale=25000.0, size=1) theta_new = theta_new[0] #Step Three likelihood_current = ss.norm(theta_current, 50000.0).pdf(uptown_sample).prod() likelihood_new = ss.norm(theta_new, 50000.0).pdf(uptown_sample).prod() prior_current = ss.norm(200000.0, 50000.0).pdf(theta_current) prior_new = ss.norm(200000.0, 50000.0).pdf(theta_new) r = (likelihood_new * prior_new) / (likelihood_current * prior_current) #Step Four t = np.random.rand() #Step Five if t <= r: theta_current = theta_new theta_sample.append(theta_current) #Drop first half of samples theta_sample = theta_sample[-5000:] theta_sample = np.array(theta_sample) # - # Now that we have our sample from the posterior we can visulaize it to get a sense of what it looks like. First let's take a look at the distribution of values for $\theta$ we got, which is the theoretical mean for home prices in Uptown. plt.hist(theta_sample, bins=25) # The sample appears normally distributed centered just below 190,000. Next let's look at the probability density function for values of $\theta$, i.e. our approximation of the posterior. 
We know the posterior is normal so we take the mean of the sample and the standard deviation of our sample and use that to approximate the posterior. Here we will visualize the approximated posterior along with the prior and we will indicate what the mean of observations of home prices in Uptown is. # + x = np.linspace(200000.0 - 4*50000.0, 200000 + 4*50000.0, 100) mean = theta_sample.mean() std = theta_sample.std() observation = uptown_sample.mean() plt.plot(x, mlab.normpdf(x, mean, std), label='Posterior') plt.plot(x,mlab.normpdf(x, 200000.0, 50000.0), label='Prior') plt.vlines([observation], 0, .000043, linestyle='dashed', colors='k', label='Mean of Observed Values') plt.legend() # - # We can see that the posterior is normal and centered right around the mean of our sample of Uptown home prices. So the sample of Uptown home prices is having a large impact on what the posterior distribution is. Given that the true mean of home prices in Uptown in 185,000 dollars and our prior for the mean of home prices in Uptown was centered around 200,000 dollars this seems like a significant improvement. And it is especially good because $P(X|\theta)$ is an objective probability. Even though our prior was arguably subjective, our posterior is much less subjective. # # Is this a good approxiamtion of the posterior? There are various ways of evaluating whether we got a good approximation or not I won't go into here. Since we can calculate our posterior from the conjugate prior we can compare the two to evaluate how good our approxiamtion is. Let's turn to calculating the posterior from the conjugate prior. # # The second way around calculating the posterior using Bayes' rule is to rely on the fact that in this particular problem we have a prior that is normal and a likelihood, i.e. $P(X|\theta)$, that is normal. Given this situation it can be proved that the posterior distribution will be a normal distribution with the following mean and standard deviation: $$\mu_{\text{posterior}} = \frac{\sigma_{\text{sample}}^2\cdot\mu_{\text{prior}} + n\cdot\sigma_{\text{prior}}^2\cdot\mu_{\text{sample}}}{\sigma_{\text{sample}}^2 + n\cdot\sigma_{\text{prior}}^2},$$ # # $$\sigma_{\text{posterior}} = \sqrt{\frac{\sigma_{\text{sample}}^2\cdot\sigma_{\text{prior}}^2}{\sigma_{\text{sample}}^2 + n\cdot\sigma_{\text{prior}}^2}}.$$ # # <br> # The proof is not something worth going through here. Just note that there are other conjugate distributions so it is worth being familiar with what they are if you are doing Bayesian inference often. Let's calculate the posterior and compare it to our approximation. # + x = np.linspace(185000.0 - 50000.0, 185000.0 + 50000.0, 100) #Equation Inputs n=30 mu_prior = 200000.0 stdev_prior = 50000.0 sample_mean = uptown_sample.mean() sample_std = 50000.0 #This is a known quantity and is not calculated from the sample #Calculate Posterior and Plot it mu_posterior_ = ((sample_std**2)*mu_prior + (stdev_prior**2)*n*sample_mean)/((stdev_prior**2)*n + (sample_std**2)) std_posterior = math.sqrt(((sample_std**2)*(stdev_prior**2))/((stdev_prior**2)*n + (sample_std**2))) plt.plot(x, mlab.normpdf(x, mu_posterior_, std_posterior), label='True Posterior') #Plot Approximation mean = theta_sample.mean() std = theta_sample.std() plt.plot(x, mlab.normpdf(x, mean, std), 'g--', label='Approximation Based On Sampling') plt.legend() # - # Here we can see that the approximation of the posterior we got appears to be a good one. 
There is a bit more variance in the approximation we got, but that is to be expected given that it is based on a sample. # # ## Comparing Bayesian Inference to Hypothesis Testing # # One main difference between the two methods is what we get out. In the hypothesis testing example we ended up with a decision about a hypothesis, namely that the means are different. In the Bayesian case we got a probability distribution for the mean. Another major difference between these two types of inference is what went into them. In the case of hypothesis testing we need a hypothesis and some data, but that is all it is based on. In the case of Bayesian inference we have some data but we also have a prior distribution. We can think of the prior distribution as the analog of the hypothesis, but it is a much more complex sort of thing. A third important difference is when they can be used. Bayesian inference can be used anytime we can compute the needed probabilities or can use one of the other methods of specifying the posterior, e.g. posterior sampling. The applicability of the t-test is much narrower, though there are other statistical hypothesis tests that can be used in some other circumstances. # # While there are important differences between these two methods, there is a key similarity between the two. Both methods converge to the truth at the same rate as sample size increases. If we look at the equation for the standard deviation of the posterior distribution, # # <br> # $$\sigma_{\text{posterior}} = \sqrt{\frac{\sigma_{\text{sample}}^2\cdot\sigma_{\text{prior}}^2}{\sigma_{\text{sample}}^2 + n\cdot\sigma_{\text{prior}}^2}},$$ # <br> # we can see that as the sample size $n$ increases the standard deviation decreases at a rate of $\sqrt{\frac{1}{n}}$. Similarly, if we look at the equation for the t-statistic (written here for the one-sample case), # # <br> # $$t = \frac{\bar{x} - \mu}{\sqrt{\frac{\sigma^2}{n}}},$$ # <br> # we can see that as the sample size $n$ increases we become more confident that $\bar{x}$ is the true population mean at a rate of $\sqrt{\frac{1}{n}}$. So, sample size does not give either method an advantage over the other. # # # ## Appendix # # #### The three types of t-test: # # - A one sample t-test is used to determine if the mean for a group we have a sample from is different from some fixed number. For example, suppose we know the average height of a person; if we have a sample of heights of basketball players, we could perform this sort of t-test to determine if there is a difference in the average height of basketball players as compared to the known average height of people in general. # - A two independent samples t-test is a t-test applied to two independent groups that both have the same numeric feature, where we want to determine if the mean of that feature differs between the groups. For example, we could perform a t-test of this sort to determine if there is a difference in the average height of men and the average height of women. # - A paired samples t-test is a test for a difference in means between groups where the individuals in the samples are related in some way. For example, we might measure the average blood pressure of a group of individuals before taking some medication and then compare this to the average blood pressure of the same group of individuals after taking the medication. (A short `scipy.stats` sketch of all three variants follows this list.) 
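# A compact illustration of the three variants using scipy.stats. The arrays below are made-up toy
# data (they are not drawn from the housing example above); the point is only to show which function
# matches which test.

# +
import numpy as np
import scipy.stats as ss

rng = np.random.default_rng(0)
heights = rng.normal(200, 8, 25)             # e.g. sampled basketball players
men = rng.normal(178, 7, 25)
women = rng.normal(165, 7, 25)
bp_before = rng.normal(140, 10, 25)
bp_after = bp_before - rng.normal(5, 3, 25)  # paired measurements on the same people

print(ss.ttest_1samp(heights, popmean=170))  # one sample vs. a known average height
print(ss.ttest_ind(men, women))              # two independent samples
print(ss.ttest_rel(bp_before, bp_after))     # paired samples
# -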
# # #### Motivation for definition of conditional probability and Bayes' Rule: # # In the theory of probability, we have the following definition of conditional probability: # # <br> # $$P(H|O) =_{df} \frac{P(H\& O)}{P(O)}.$$ # <br> # To motivate the definition of conditional probability we can use one of Bayes' examples. Suppose we have a billiard table and a billiard ball. We will roll the ball and it will bounce around the table until it stops. We are interested in where it will stop and let's assume that for any two spots on the table there is an equal chance it will stop there. The probability that it will land in specific region of the table is the area of that region divided by the total area of the pool table. So looking at the picture below, the probability that the ball will stop in region $B$ is the area of $B$ divided by the total area of the table. This gives us a general method for calculating the probability the ball will stop in a region of interest. The probability is equal to the area of the region of interest divided by the total area of the region where the ball could stop. # # ![Conditional Probability](https://github.com/jpkell05/images/blob/master/Pool_Table_Conditional_Probability.png?raw=true) # # Suppose we are interested in the conditional probability that the ball stops in region $A$ given that it stops somewhere in region $B$. This is analogous to the simple case above, we know the ball will stop somewhere in region $B$ and we want to know the probability that it also stops in region $A$. So, we look at the subregion of $B$ that is also part of region $A$, i.e. region $A\& B$. We take the area of $A\& B$ and divide by the total area of $B$. We end up with $P(A|B) = \frac{P(A\& B)}{P(B)}$, which is consistent with the definition of conditional probability given above. Given that the definition gives the intuitively correct result in this case gives some reason to think it is correct. # # By doing a little algebra on the definition of conditional probability we can see that $P(H\& O) = P(O|H)P(H)$ and so by substitution we get Bayes' Theorem: # # <br> # $$P(H|O) = \frac{P(O|H)P(H)}{P(O)}.$$ # # #### The Metropolis–Hastings algorithm: # # The Metropolis–Hastings algorithm algorithm is as follows. # # 1. Pick a starting point $\theta_0$ (I will do this by sampling from the prior) and set $\theta_{\text{current}} = \theta_0$. # # 2. Propose a new value $\theta_{\text{new}}$ by sampling $\theta_{\text{new}}$ from a normal distribution centered around $\theta_{\text{current}}$ (the standard deviation of the normal is a parameter you pick and can tune). # # 3. Calculate $r$ where $$r = \frac{P(X|\theta_{\text{new}})P(\theta_{\text{new}})}{P(X|\theta_{\text{current}})P(\theta_{\text{current}})}.$$ # <br> # 4. Sample a threshold value $t$ from a uniform distibution over the interval $[0, 1]$. # # 5. If $r < t$ then $\theta_{\text{new}}$ is rejected and we repeat steps two through five, otherwise set $\theta_{\text{current}} = \theta_{\text{new}}$, record $\theta_{\text{current}}$ and repeat steps two through five. #
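# To close the appendix, here is a tiny numerical illustration of Bayes' rule applied to the
# dog-and-trash-can example from the introduction. All of the probabilities below are invented
# purely to make the arithmetic concrete.

# +
p_h = 0.9              # prior: the dog has been a good dog today
p_o_given_h = 0.05     # P(trash tipped over | good dog)
p_o_given_not_h = 0.8  # P(trash tipped over | not a good dog)

# law of total probability for P(O), then Bayes' rule for P(H|O)
p_o = p_o_given_h * p_h + p_o_given_not_h * (1 - p_h)
p_h_given_o = p_o_given_h * p_h / p_o
print(round(p_h_given_o, 3))  # ~0.36, so confidence in the hypothesis drops sharply
# -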
Explainer on Hypothesis Testing and Bayesian Posterior Inference.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # #! pip install -U dtale #https://pypi.org/project/dtale/ # - # Let's try it out first so you get a feel for how it is used :D import pandas as pd data = pd.read_csv('penguins_size.csv') data.shape data.isnull().sum() data.head(3) data.info() data.describe(include="all") import dtale import dtale.app as dtale_app dtale_app.NGROK = True dtale_app.show(data) # click the black triangle => open in new tab, and you can play around in there... have fun!!!
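# A quick note on the cell above: the ngrok flag is only needed when running inside Colab. On a local
# machine a plain dtale.show() call is enough; this is a small sketch, assuming a desktop session where
# a browser tab can be opened.

# +
d = dtale.show(data)
d.open_browser()
# -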
dtale2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Make a chart # Explore various ways to chart. # # * Line chart or bar chart # * Cumulative cases, cumulative deaths # * New cases, new deaths (change from the prior day) # * Raw values or rolling averages (5-day, 7-day, etc to smooth it out) # * Raw values or population-adjusted values # # Keep in mind: # * What is important to convey to the viewer # * Convey what is accurate, but also, how serious is the situation (did the local or state or federal government define what they consider "severe" vs "not severe"? # * For a viewer within a county, if they see this chart everyday, can they pick up on whether the situation is getting worse or getting better? # * For a viewer looking across counties, do they have a reasonable way to compare across counties (who may have different definitions of what "severe" is)? # * How do you compare very large cities like NYC and LA to much smaller cities like Indianapolis and Alburquerque? # ## Bring in cleaned data # The data I cleaned I did in a previous notebook. How can I bring them in? # # Use a `utils.py` file to easily use functions across notebooks. # This is one way functions keep your data cleaning and processing clean. # You can put all your commonly used functions in one file, then use them in each notebook. # # Note: `utils.py` must be in the same directory (folder) as your notebooks, so that you can import it. # + import pandas as pd import utils # - df = utils.clean_jhu() df.head() # ## Use the `altair` package to make charts # + import altair as alt #alt.themes.enable('urbaninstitute') alt.themes.enable('vox') # Other themes: https://vega.github.io/vega-themes/ # alt.themes.enable("fivethirtyeight") # - # Make a line chart def make_chart(df, county_name, start_date): # Subset by county and start date df = (df[(df.date2 >= start_date) & (df.county == county_name)] # date2, which is datetime can be used in altair # but date will throw up a JSON-serializable error .drop(columns = "date") ) # Make cases charts cases_line = ( alt.Chart(df) .mark_bar() .encode( x=alt.X("date2", title="date"), y=alt.Y("cases_avg7:Q", title="7-day avg"), ).properties( title="Daily New Cases", width=300, height=200 ) ) display(cases_line) make_chart(df, "Los Angeles", "6-1-20") # + # Make bar chart alt.themes.enable('fivethirtyeight') def make_chart(df, county_name, start_date): # Subset by county and start date df = (df[(df.date2 >= start_date) & (df.county == county_name)] # date2, which is datetime can be used in altair # but date will throw up a JSON-serializable error .drop(columns = "date") ) # Make cases charts cases_bar = ( alt.Chart(df) .mark_bar() .encode( x=alt.X("date2", title="date"), y=alt.Y("cases_avg7:Q", title="7-day avg"), ).properties( title="Daily New Cases", width=300, height=200 ) ) display(cases_bar) # - make_chart(df, "Los Angeles", "6-1-20") # ## Multiple layers to a chart # Experiment with the different layers you'd like to add. 
# # Ideas to experiment with: # * Multiple lines # * Bar chart with daily numbers with a line chart of rolling average that smooths out the daily fluctuations # * Shade the last 2 weeks or the last week # * Add lines to show "severity" by using CA's 4 tiers definition # + from datetime import date, timedelta # 15 days ago because the case data only goes up to yesterday's date # We won't get today's full case numbers until tomorrow two_weeks_ago = ( (date.today() - timedelta(days=15) ) ) two_weeks_ago # - # Actually, this will work with our column `date`, but not `date2` type(two_weeks_ago) # This will work with our column `date2`, but not `date` type(pd.to_datetime(two_weeks_ago)) # + alt.themes.enable('latimes') def make_chart(df, county_name, start_date): # Subset by county and start date df = (df[(df.date2 >= start_date) & (df.county == county_name)] # date2, which is datetime can be used in altair # but date will throw up a JSON-serializable error .drop(columns = "date") ) df_two_weeks = (df[df.date2 >= pd.to_datetime(two_weeks_ago)]) # Set up base charts ''' The base charts are a keep certain chart characteristics over multiple charts. Similar to functions, it allows us to "inherit" certain things and then add-on more customization. This quickly becomes handy if we're adding many, many layers. ''' base = (alt.Chart(df) .mark_line() .encode( x=alt.X("date2", title="date") ) ) base_2weeks = ( alt.Chart(df_two_weeks) .mark_line() .encode( x=alt.X("date2", title="date", axis=alt.Axis(format="%-m/%-d")) ) ) # Make cases charts cases_line = ( base .encode( y=alt.Y("cases_avg7:Q", title="7-day avg"), ) ) # Area chart gets us the shading cases_shaded = ( base_2weeks .mark_area() .encode( y=alt.Y("cases_avg7:Q", title="7-day avg"), color=alt.value("#EAEBEB") ) ) # F-strings might come in handy for chart titles # You can display a string and insert code within the {} # Here, county_name is one of the args # If county_name == "los Angeles", then chart_title == "Daily New Cases: Los Angeles" chart_title = f"Daily New Cases: {county_name}" # Combine all the layers # We'll put the shaded area first, then the line # otherwise, the shaded area chart will cover up part of the line cases_chart = ( (cases_shaded + cases_line) .properties( title=chart_title, width=300, height=200 ) .configure_title(fontSize = 14, font = "Roboto", color = "black", anchor = "middle") ) display(cases_chart) # - make_chart(df, "Los Angeles", "8/1/20") # If you're making lots of charts, you can also move all your charting functions into a `charts_utils.py` file, so that you can reuse those chart functions across notebooks! # # Experiment with how the chart function is defined, especially with what args you want to use. It doesn't necessarily have to be `county_name` and `start_date`, it can include other things, or even different things. It simply has to suit your need for making the chart. When you're thinking of making the same chart for different counties, you'll have to think of what args you need to make it simple to repeat over and over. # # Altair docs: https://altair-viz.github.io/getting_started/overview.html
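# One of the layering ideas above is adding lines that mark severity thresholds. A minimal sketch using `mark_rule` — the threshold value below is a made-up placeholder, not CA's actual tier definition, and the layer would be combined inside `make_chart` the same way as the shaded area (e.g. `cases_shaded + cases_line + severity_rule`):

# +
def make_severity_rule(threshold):
    # Horizontal dashed line at a chosen 7-day average case level
    rule_df = pd.DataFrame({"threshold": [threshold]})
    return (
        alt.Chart(rule_df)
        .mark_rule(strokeDash=[4, 2])
        .encode(y=alt.Y("threshold:Q"), color=alt.value("#d62728"))
    )

severity_rule = make_severity_rule(threshold=100)
# -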
notebooks/2-demo-chart.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Driver Drowsiness Detection # ## Importing Libraries import cv2 as cv from keras.models import model_from_json import tensorflow as tf import numpy as np from keras.preprocessing import image from IPython.display import Audio # ## Loading CNN Model # load json and create model json_file = open('model.json', 'r') loaded_model_json = json_file.read() json_file.close() model = model_from_json(loaded_model_json) # load weights into new model model.load_weights("model.h5") print("Loaded model from disk") # ### Face and Eye Detection using Haar Cascade Detection # + face_cascade_name = "haarcascade_frontalface_default.xml" eyes_cascade_name = "haarcascade_eye_tree_eyeglasses.xml" l_eye_cascade_name = "haarcascade_lefteye_2splits.xml" r_eye_cascade_name = "haarcascade_righteye_2splits.xml" face_cascade = cv.CascadeClassifier(face_cascade_name) eye_cascade = cv.CascadeClassifier(eyes_cascade_name) l_eye_cascade = cv.CascadeClassifier(l_eye_cascade_name) r_eye_cascade = cv.CascadeClassifier(r_eye_cascade_name) #Works with gray scale images. So converting into GrayScale images font = cv.FONT_HERSHEY_COMPLEX_SMALL cap = cv.VideoCapture(0) score = 0 while cap.isOpened(): _, frame = cap.read() height,width = frame.shape[:2] gray = cv.cvtColor(frame,cv.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray,1.1,4) for (x,y,w,h) in faces: roi_color = frame[y:y+h, x:x+w] roi_gray = gray[y:y+h, x:x+w] l_eye = l_eye_cascade.detectMultiScale(roi_gray) r_eye = r_eye_cascade.detectMultiScale(roi_gray) cv.rectangle(frame, (x,y), (x+w, y+h), (255,0,0), 3) rprediction = 0 lprediction = 0 for (ex,ey,ew,eh) in l_eye: #Prediction l_eye_frame = frame[ey:ey+eh,ex:ex+ew] l_eye_frame = cv.resize(l_eye_frame,(64,64)) l_eye_frame = l_eye_frame.reshape(64,64,-1) l_eye_frame = image.img_to_array(l_eye_frame) l_eye_frame = np.expand_dims(l_eye_frame, axis = 0) result = model.predict(l_eye_frame) cv.rectangle(roi_color, (ex,ey), (ex+ew, ey+eh), (0,255,0), 2) if result[0][0] == 0: lprediction = 0 else: lprediction = 1 for (ex,ey,ew,eh) in r_eye: #Prediction r_eye_frame = frame[ey:ey+eh,ex:ex+ew] r_eye_frame = cv.resize(r_eye_frame,(64,64)) r_eye_frame = r_eye_frame.reshape(64,64,-1) r_eye_frame = image.img_to_array(r_eye_frame) r_eye_frame = np.expand_dims(r_eye_frame, axis = 0) result = model.predict(r_eye_frame) cv.rectangle(roi_color, (ex,ey), (ex+ew, ey+eh), (0,255,0), 2) if result[0][0] == 0: rprediction = 0 else: rprediction = 1 if lprediction == 0 and rprediction == 0: cv.putText(frame,"Closed",(10,height-20), font, 1,(255,255,255),1,cv.LINE_AA) score += 1 else: cv.putText(frame,"Open",(10,height-20), font, 1,(255,255,255),1,cv.LINE_AA) score -= 1 if score<0: score = 0 cv.putText(frame,'Score:'+str(score),(100,height-20), font, 1,(255,255,255),1,cv.LINE_AA) if score >= 15: print("ALERT!!!!") alert() cv.imshow('img',frame) if cv.waitKey(1) & 0xFF == ord('q'): cv.destroyAllWindows() cap.release() break
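# Note that `alert()` is called in the loop above but is not defined in this notebook. A minimal sketch of what it could look like, using the `Audio` class imported at the top — the `alarm.wav` filename is a placeholder, and the function should be defined before running the detection loop:

# +
from IPython.display import display

def alert():
    # Play an alarm sound inline when the drowsiness score crosses the threshold
    display(Audio('alarm.wav', autoplay=True))
# -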
Driver_Drowsiness_Detection/Driver_Drowsiness_Detection.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # import required libs import glob import os import tensorflow as tf import tensorflow_model_analysis as tfma print('TF version: {}'.format(tf.version.VERSION)) print('TFMA version: {}'.format(tfma.version.VERSION_STRING)) # + # Read artifact information from metadata store. import beam_dag_runner from tfx.orchestration import metadata from tfx.types import standard_artifacts metadata_connection_config = metadata.sqlite_metadata_connection_config( beam_dag_runner.METADATA_PATH) with metadata.Metadata(metadata_connection_config) as store: model_eval_artifacts = store.get_artifacts_by_type(standard_artifacts.ModelEvaluation.TYPE_NAME) # - # configure output paths # Exact paths to output artifacts can be found in the execution logs # or KFP Web UI if you are using kubeflow. model_eval_path = model_eval_artifacts[-1].uri print("Generated model evaluation result:{}".format(model_eval_path)) # ## Install Jupyter Extensions # Note: If running in a local Jupyter notebook, then these Jupyter extensions must be installed in the environment before running Jupyter. # # ```bash # jupyter nbextension enable --py widgetsnbextension # jupyter nbextension install --py --symlink tensorflow_model_analysis # jupyter nbextension enable --py tensorflow_model_analysis # ``` eval_result = tfma.load_eval_result(model_eval_path) tfma.view.render_slicing_metrics(eval_result, slicing_spec = tfma.slicer.SingleSliceSpec(columns=['trip_start_hour']))
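# The widget above is interactive; the loaded result can also be inspected programmatically. A small sketch, assuming this TFMA version exposes `EvalResult.slicing_metrics` as a list of `(slice_key, metrics)` tuples — the exact metric keys depend on the eval config used in the pipeline:

# +
# List which slices were evaluated
for slice_key, _ in eval_result.slicing_metrics:
    print(slice_key)

# Render the overall (unsliced) metrics view
tfma.view.render_slicing_metrics(eval_result)
# -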
notebooks/tfx_pipelines/guided_projects/guided_project_2_solution/model_analysis.ipynb
# --- # jupyter: # jupytext: # formats: ipynb,md # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import geopandas import numpy import matplotlib.pyplot as plt import geoplanar from shapely.geometry import box, Point # # Omitted interiors # For a planar enforced polygon layer there should be no individual polygons that are contained inside other polygons. # # Violation of this condition can lead to a number of errors in subsequent spatial analysis. # # ## Violation: Points within more than a single feature # If this were not the case, then it would be possible for a point to be contained inside more than a single polygon which would be a violation of planar enforcement. # An example can be seen as follows: # + p1 = box(0,0,10,10) p2 = box(1,1, 3,3) p3 = box(7,7, 9,9) gdf = geopandas.GeoDataFrame(geometry=[p1,p2,p3]) base = gdf.plot(edgecolor='k') pnt1 = geopandas.GeoDataFrame(geometry=[Point(2,2)]) pnt1.plot(ax=base,color='red') # - pnt1.within(gdf.geometry[0]) pnt1.within(gdf.geometry[1]) # The violation here is that `pnt1` is `within` *both* polygon `p1` *and* `p2`. # ## Error in area calculations # # A related error that arises in this case is that the area of the "containing" polygon will be too large, since it includes the area of the smaller polygons: gdf.geometry[0] gdf.area gdf.area.sum() # ## Missing interior rings (aka holes) # # The crux of the issue is that the two smaller polygons are entities in their own right, yet the large polygon was defined to have only a single external ring. It is missing two **interior rings** # which would allow for the correct topological relationship between the larger polygon and the two smaller polygons. # # `geoplanar` can detect missing interiors: mi = geoplanar.missing_interiors(gdf) mi # ## Adding interior rings # Once we know that the problem is missing interior rings, we can correct this with `add_interiors`: gdf1 = geoplanar.add_interiors(gdf) gdf1.geometry[0] # And we see that the resulting area of the GeoSeries is now correct: gdf1.area # Additionally, a check for `missing_interiors` reveals the violation has been corrected geoplanar.missing_interiors(gdf1) # The addition of the interior rings also corrects the violation of the containment rule that a point should belong to at most a single polygon in a planar enforced polygon GeoSeries: # pnt1.within(gdf1.geometry[0]) pnt1.within(gdf1.geometry[1]) # ## Failure to detect contiguity # # A final implication of missing interiors in a non-planar enforced polygon GeoSeries is that algorithms that rely on planar enforcement to detect contiguous polygons will fail. # # More specifically, in [pysal](https://pysal.org), fast polygon detectors can be used to generate so called Queen neighbors, which are pairs of polygons that share at least one vertex on their exterior/interior rings. import libpysal w = libpysal.weights.Queen.from_dataframe(gdf) w.neighbors # The original GeoDataFrame results in fully disconnected polygons, or islands. `pysal` at least throws a warning when islands are detected, and for this particular type of planar enforcement violation, missing interiors, the contained polygons will always be reported as islands. # # Using the corrected GeoDataFrame with the inserted interior rings results in the correct neighbor determinations: w = libpysal.weights.Queen.from_dataframe(gdf1) w.neighbors
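# For intuition, the corrected polygon produced by `add_interiors` is equivalent to building the large shapely polygon with explicit interior rings up front — a small sketch reusing the same three boxes:

# +
from shapely.geometry import Polygon

p1_with_holes = Polygon(
    box(0, 0, 10, 10).exterior.coords,
    holes=[box(1, 1, 3, 3).exterior.coords,
           box(7, 7, 9, 9).exterior.coords],
)
p1_with_holes.area  # 100 - 4 - 4 = 92, which should match gdf1.area[0] above
# -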
notebooks/holes.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # ---

# OpenACC Directives Basics
# =========================
#
# This lab is meant to accompany Module 3 of the OpenACC.org teaching
# materials. The purpose of this lab is to introduce OpenACC directives. Lab
# instructions and source code are available for C/C++ and Fortran.
#
# Please see the following files to begin the lab:
#
# * [C/C++](English/C/README.ipynb)
# * [Fortran](English/Fortran/README.ipynb)
#
labs/module3/README.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/shivangi-jodbhavi/face_authentication/blob/master/face_authentication.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="vfKdYdxE0Y-i" import glob import os import time from datetime import datetime from uuid import uuid4 import cv2 import insightface import matplotlib.pyplot as plt import numpy as np # Import necessary libraries from flask import Flask, Response, render_template, request from insightface.app import FaceAnalysis from insightface.data import get_image as ins_get_image from numpy.lib.npyio import save from sklearn import metrics from sklearn.neighbors import KNeighborsClassifier cwd = os.path.dirname(os.path.abspath(__file__)) print('Setting current working directory to:', cwd) os.chdir(cwd) app = Flask(__name__, template_folder='./templates') TRAIN_EMBEDDINGS_PATH = r'project\embeddings\train' NEW_USER_EMBEDDINGS_PATH = r'project\embeddings\new_users' TRAIN_IMAGES_PATH = r'project\images\train' TEST_IMAGES_PATH = r'project\images\test' NEW_USER_IMAGES_PATH = r'project\images\new_users' NUM_FRAMES_FOR_AUTHENTICATION = 20 MAX_NEW_USER_IMAGES = 5 FPS_MAX = 10 # not less than 5 H = 1280 W = 720 SCALE = 1 # not more than 3 def visualize_results(image, boxes, identities, scores): image = np.uint8(image) boxes = np.array(boxes, dtype=np.int32) for box, identity, score in zip(boxes, identities, scores): text = '{} | {:.2f}'.format(identity, score) text_orig = (box[0] + 5, box[1] - 6) image = cv2.putText( image, text, text_orig, cv2.FONT_HERSHEY_COMPLEX_SMALL, .45, [0, 0, 0], 4, lineType=cv2.LINE_AA) image = cv2.putText( image, text, text_orig, cv2.FONT_HERSHEY_COMPLEX_SMALL, .45, [255, 255, 255], 1, lineType=cv2.LINE_AA) image = cv2.rectangle( image, (box[0], box[1]), (box[2], box[3]), [255, 0, 0], 1) return image def imshow(image, figsize=(16, 9), mode=None): if mode == 'bgr': image = image[:, :, ::-1] plt.figure(figsize=figsize) plt.axis('off') plt.imshow(np.uint8(image)) plt.show() def get_randon_identifier(): return str(uuid4()) def read_image(path): return cv2.imread(path) def crop_image(image, box): x1, y1, x2, y2 = np.int32(box) cropped_image = image[y1:y2, x1:x2, :] return cropped_image def get_embeddings(image): boxes = [] embeddings = [] faces = detection.get(image) for face in faces: embeddings.append(recognition.get(image, face)) boxes.append(face['bbox']) embeddings = np.array(embeddings) boxes = np.array(boxes) return embeddings, boxes def run(knn, image, index_to_identity): embeddings, boxes = get_embeddings(image) probs = knn.predict_proba(embeddings) labels = np.argmax(probs, axis=-1) scores = np.max(probs, axis=-1) identities = [index_to_identity[label] for label in labels] for i, score in enumerate(scores): if score > 0.9: continue identities[i] = 'UNKNOWN' viz_image = visualize_results(image.copy(), boxes, identities, scores) return viz_image, identities, boxes def get_models(): detection = FaceAnalysis(allowed_modules=['detection']) recognition = insightface.model_zoo.get_model(r'project\webface_r50.onnx') detection.prepare(ctx_id=0, det_size=(640, 640)) recognition.prepare(ctx_id=0) return detection, recognition detection, recognition = get_models() def dump_train_embeddings(): 
train_images = glob.glob(os.path.join(TRAIN_IMAGES_PATH, '*')) for folder in train_images: for img_path in glob.glob(os.path.join(folder, '*')): img = cv2.imread(img_path) faces = detection.get(img) feature = recognition.get(img, faces[0]) person_name = os.path.basename(folder) image_name = os.path.basename(img_path).split('.')[0] output_path = os.path.join( TRAIN_EMBEDDINGS_PATH, person_name, image_name + '.npy') os.makedirs( os.path.join(TRAIN_EMBEDDINGS_PATH, person_name), exist_ok=True) np.save(output_path, feature) def dump_new_user_embedding(img, person_name, idx): faces = detection.get(img) feature = recognition.get(img, faces[0]) output_path = os.path.join( NEW_USER_EMBEDDINGS_PATH, person_name, str(idx) + '.npy') os.makedirs( os.path.join(NEW_USER_EMBEDDINGS_PATH, person_name), exist_ok=True) np.save(output_path, feature) def train(): train_embeddings = glob.glob(os.path.join(TRAIN_EMBEDDINGS_PATH, '*')) new_user_embeddings = glob.glob(os.path.join(NEW_USER_EMBEDDINGS_PATH, '*')) print('Found {} train identities'.format(len(train_embeddings))) print('Found {} new identities'.format(len(new_user_embeddings))) identities = [] features = [] for folder in train_embeddings + new_user_embeddings: person = os.path.basename(folder) folder = os.path.join(folder, '*') for path in glob.glob(folder): assert path.endswith('.npy') features.append(np.load(path)) identities.append(person) features = np.array(features) identities = np.array(identities) # labels = sorted(list(set(identities))) index_to_identity = {i: person for i, person in enumerate(labels)} # knn = KNeighborsClassifier(n_neighbors=3) print(features.shape, identities.shape) knn.fit(features, identities) return knn, labels, index_to_identity def video_loop(): global knn, labels, index_to_identity new_user = None num_frames_seen = {} authenticated = [] done = False while (authenticated == [] or not done): ret, image = video.read() image = np.float32(image) ts = time.time() timestamp = datetime.fromtimestamp(ts).strftime('%H:%M:%S %Y-%m-%d') viz_image, identities, boxes = run(knn, image, index_to_identity) max_area = 0 identity_idx = 0 for idx, box in enumerate(boxes): x1, y1, x2, y2 = box area = (x2 - x1) * (y2 - y1) if area > max_area: max_area = area identity_idx = idx if len(identities) > 1: print('Multiple faces detected, only one user can be authenticated') identity = identities[identity_idx] if identity in labels: if identity in num_frames_seen: num_frames_seen[identity] += 1 if num_frames_seen[identity] >= NUM_FRAMES_FOR_AUTHENTICATION: if identity not in authenticated: authenticated.append(identity) print('{} authenticated at {}'.format( identity, timestamp)) done = True else: num_frames_seen[identity] = 1 else: if new_user is None: new_user = input( 'Unknown user detected, please enter user name for new user: ') new_user += '_{}'.format(get_randon_identifier()) save_path = os.path.join(NEW_USER_EMBEDDINGS_PATH, new_user) print('Saving embeddings for {} in {}'.format(new_user, save_path)) if not os.path.exists(save_path): os.makedirs(save_path, exist_ok=True) num_user_images = len(os.listdir(save_path)) if num_user_images >= MAX_NEW_USER_IMAGES: knn, labels, index_to_identity = train() print('Successfull registered:', new_user) dump_new_user_embedding(image, new_user, num_user_images + 1) ret, buffer = cv2.imencode('.jpg', viz_image) viz_image = buffer.tobytes() yield (b'--frame\r\n' b'Content-Type: image/jpeg\r\n\r\n' + viz_image + b'\r\n') # concat frame one by one and show result video.release() cv2.destroyAllWindows() 
@app.route('/') def index(): return render_template('index.html') @app.route('/video_feed') def video_feed(): return Response(video_loop(), mimetype='multipart/x-mixed-replace; boundary=frame') if __name__ == "__main__": if not os.path.exists(TRAIN_EMBEDDINGS_PATH): os.makedirs(TRAIN_EMBEDDINGS_PATH, exist_ok=True) if not os.path.exists(NEW_USER_EMBEDDINGS_PATH): os.makedirs(NEW_USER_EMBEDDINGS_PATH, exist_ok=True) train_identities = glob.glob(os.path.join(TRAIN_EMBEDDINGS_PATH, '*')) if not train_identities: print('Generating training embeddings') dump_train_embeddings() knn, labels, index_to_identity = train() video = cv2.VideoCapture(0 + cv2.CAP_DSHOW) video.set(cv2.CAP_PROP_FRAME_WIDTH, H // SCALE) video.set(cv2.CAP_PROP_FRAME_HEIGHT, W // SCALE) video.set(cv2.CAP_PROP_FPS, FPS_MAX) video.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*'MJPG')) app.run(debug=False)
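# A hypothetical offline sanity check (not part of the Flask streaming loop): run the recognizer on one saved image and visualize the result. It assumes `train()` has already produced `knn` and `index_to_identity`, and that the glob below matches how images are laid out under TEST_IMAGES_PATH — adjust as needed.

# +
sample_paths = glob.glob(os.path.join(TEST_IMAGES_PATH, '*.jpg'))
if sample_paths:
    test_image = read_image(sample_paths[0])
    viz_image, identities, boxes = run(knn, test_image, index_to_identity)
    print('Identities found:', identities)
    imshow(viz_image, mode='bgr')
# -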
face_authentication.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernel_info: # name: u4-s3-dnn # kernelspec: # display_name: U4-S3-DNN (Python 3.7) # language: python # name: u4-s3-dnn # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/pragmatizt/DS-Unit-4-Sprint-3-Deep-Learning/blob/master/ira_Unit_4_Sprint_3_Challenge.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="ne5SUADtA9K1" colab_type="text" # <img align="left" src="https://lever-client-logos.s3.amazonaws.com/864372b1-534c-480e-acd5-9711f850815c-1524247202159.png" width=200> # <br></br> # <br></br> # # # Major Neural Network Architectures Challenge # ## *Data Science Unit 4 Sprint 3 Challenge* # # In this sprint challenge, you'll explore some of the cutting edge of Data Science. This week we studied several famous neural network architectures: # recurrent neural networks (RNNs), long short-term memory (LSTMs), convolutional neural networks (CNNs), and Autoencoders. In this sprint challenge, you will revisit these models. Remember, we are testing your knowledge of these architectures not your ability to fit a model with high accuracy. # # __*Caution:*__ these approaches can be pretty heavy computationally. All problems were designed so that you should be able to achieve results within at most 5-10 minutes of runtime on SageMaker, Colab or a comparable environment. If something is running longer, doublecheck your approach! # # ## Challenge Objectives # *You should be able to:* # * <a href="#p1">Part 1</a>: Train a LSTM classification model # * <a href="#p2">Part 2</a>: Utilize a pre-trained CNN for objective detection # * <a href="#p3">Part 3</a>: Describe the components of an autoencoder # * <a href="#p4">Part 4</a>: Describe yourself as a Data Science and elucidate your vision of AI # + [markdown] colab_type="text" id="-5UwGRnJOmD4" # <a id="p1"></a> # ## Part 1 - RNNs # # Use an RNN/LSTM to fit a multi-class classification model on reuters news articles to distinguish topics of articles. The data is already encoded properly for use in an RNN model. # # Your Tasks: # - Use Keras to fit a predictive model, classifying news articles into topics. # - Report your overall score and accuracy # # For reference, the [Keras IMDB sentiment classification example](https://github.com/keras-team/keras/blob/master/examples/imdb_lstm.py) will be useful, as well the RNN code we used in class. # # __*Note:*__ Focus on getting a running model, not on maxing accuracy with extreme data size or epoch numbers. Only revisit and push accuracy if you get everything else done! 
# + colab_type="code" id="DS-9ksWjoJit" outputId="95ead12f-9d2a-459d-dda1-7e2ab89a5fff" colab={"base_uri": "https://localhost:8080/", "height": 63} from tensorflow.keras.datasets import reuters (X_train, y_train), (X_test, y_test) = reuters.load_data(num_words=None, skip_top=0, maxlen=None, test_split=0.2, seed=723812, start_char=1, oov_char=2, index_from=3) # + id="Bi2yBFp2B2aD" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="c3e6957f-8e6e-4109-dd49-5e884014e8f0" X_train.shape, y_train.shape, X_test.shape, y_test.shape # + colab_type="code" id="fLKqFh8DovaN" outputId="6462a4e5-6373-47aa-fa19-9d16e1898be2" colab={"base_uri": "https://localhost:8080/", "height": 70} # Demo of encoding word_index = reuters.get_word_index(path="reuters_word_index.json") print(f"Iran is encoded as {word_index['iran']} in the data") print(f"London is encoded as {word_index['london']} in the data") print("Words are encoded as numbers in our dataset.") # + colab_type="code" id="_QVSlFEAqWJM" colab={"base_uri": "https://localhost:8080/", "height": 248} outputId="43d5e202-3b31-45f2-89fe-ae82e39f1572" from tensorflow.keras.preprocessing import sequence from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense, Embedding, LSTM batch_size = 46 max_features = len(word_index.values()) maxlen = 200 print(len(X_train), 'train sequences') print(len(X_test), 'test sequences') print('Pad sequences (samples x time)') X_train = sequence.pad_sequences(X_train, maxlen=maxlen) X_test = sequence.pad_sequences(X_test, maxlen=maxlen) print('X_train shape:', X_train.shape) print('X_test shape:', X_test.shape) print('Build model...') model = Sequential() model.add(Embedding(max_features, 128)) model.add(LSTM(128, dropout=0.1, recurrent_dropout=0.1)) model.add(Dense(1, activation='sigmoid')) # + id="lR68S6v1A9Lq" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 195} outputId="6fc9bfe0-f75b-4216-cca7-b326d078d633" # You should only run this cell once your model has been properly configured model.compile(loss='sparse_categorical_crossentropy', optimizer='nadam', metrics=['accuracy']) print('Train...') model.fit(X_train, y_train, batch_size=batch_size, epochs=1, validation_data=(X_test, y_test)) score, acc = model.evaluate(X_test, y_test, batch_size=batch_size) print('Test score:', score) print('Test accuracy:', acc) # + id="8GjTAvJmDj48" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="16178261-58f9-4122-cde2-2faa427ac155" # Reference for the Sequence Data Question below: X_train.shape, X_test.shape # + [markdown] id="BxApIf33A9L0" colab_type="text" # ## Sequence Data Question # #### *Describe the `pad_sequences` method used on the training dataset. What does it do? Why do you need it?* # # **Answer**: Pad sequences transforms a list into a 2D numpy array. As we can see in the first time we split the data, it was 1 dimensional. # # To transform it into a 2D array, we used the the pad_sequences() method. Also, the maxlen indicates the maximum length of each sequence. # # So as we transform these into 2D arrays, we ensure that they are the same shape by using the maxlen parameter. 
# # **References** (for my future self reviewing this sprint challenge): # - [Keras Documentation](https://keras.io/preprocessing/sequence/) # - [Stack Overflow](https://stackoverflow.com/questions/42943291/what-does-keras-io-preprocessing-sequence-pad-sequences-do) # # ## RNNs versus LSTMs # #### *What are the primary motivations behind using Long-ShortTerm Memory Cell unit over traditional Recurrent Neural Networks?* # # **Answer**: Simply put, LSTM's can remember information for long periods of time. # # In non-technical terms it can bring up "context" from the past to present & future information. # # LSTM's have the ability to add or remove information to the cell state by structures called **gates**. # # *"Gates are a way to optionally let information through. They are composed out of a sigmoid neural net layer and a pointwise multiplication operation."* - Blog post referenced below. # # ![alt text](https://i.stack.imgur.com/Iv3nU.png) # # **Reference Links**: # - [StackOverflow](https://i.stack.imgur.com/Iv3nU.png) # - [Blog Post on LSTMs](https://colah.github.io/posts/2015-08-Understanding-LSTMs/) # - [Link](https://arxiv.org/ftp/arxiv/papers/1604/1604.04573.pdf): Saw on my google searches; looked like an interesting article on multi-label image classification # # ## RNN / LSTM Use Cases # #### *Name and Describe 3 Use Cases of LSTMs or RNNs and why they are suited to that use case* # # **Answer**: # - Unsegmented, connected handwriting recognition # - Speech recognition # - Anomaly detection in network traffic # # They're best suited for the cases mentioned above because LSTM excels in classifying, processing, and making predictions on *time series* data. # # In each of the cases above, there could be an unspecified period of time between events. # # The fact that LSTM "remembers" makes it an excellent tool for these sort of problems. # # **Reference Link**: # - [Wikipedia](https://en.wikipedia.org/wiki/Long_short-term_memory) # # + [markdown] colab_type="text" id="yz0LCZd_O4IG" # <a id="p2"></a> # ## Part 2- CNNs # # ### Find the Frog # # Time to play "find the frog!" Use Keras and ResNet50 (pre-trained) to detect which of the following images contain frogs: # # <img align="left" src="https://d3i6fh83elv35t.cloudfront.net/newshour/app/uploads/2017/03/GettyImages-654745934-1024x687.jpg" width=400> # # + colab_type="code" id="whIqEWR236Af" outputId="4c22cc22-84f7-44fd-9f52-688fe5637bd5" colab={"base_uri": "https://localhost:8080/", "height": 70} # !pip install google_images_download # + colab_type="code" id="EKnnnM8k38sN" outputId="c4eee7c6-5000-4da3-c40f-3c4ef9178eaa" colab={"base_uri": "https://localhost:8080/", "height": 351} from google_images_download import google_images_download response = google_images_download.googleimagesdownload() arguments = {"keywords": "lilly frog pond", "limit": 5, "print_urls": True} absolute_image_paths = response.download(arguments) # One error below. Looks like the fifth image is returning a 404 error. # + [markdown] colab_type="text" id="si5YfNqS50QU" # At time of writing at least a few do, but since the Internet changes - it is possible your 5 won't. You can easily verify yourself, and (once you have working code) increase the number of images you pull to be more sure of getting a frog. Your goal is to validly run ResNet50 on the input images - don't worry about tuning or improving the model. # # *Hint* - ResNet 50 doesn't just return "frog". 
The three labels it has for frogs are: `bullfrog, tree frog, tailed frog` # # *Stretch goals* # - Check for fish or other labels # - Create a matplotlib visualizations of the images and your prediction as the visualization label # + id="aoDod5tkaLgk" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 388} outputId="4f765abf-4155-469a-b0e8-3f6e26513188" import numpy as np from tensorflow.keras.applications.resnet50 import ResNet50 from tensorflow.keras.preprocessing import image from tensorflow.keras.applications.resnet50 import preprocess_input, decode_predictions def process_img_path(img_path): return image.load_img(img_path, target_size=(224, 224)) def img_contains_frog(img): x = image.img_to_array(img) x = np.expand_dims(x, axis=0) x = preprocess_input(x) model = ResNet50(weights='imagenet') features = model.predict(x) frog_results = decode_predictions(features, top=3)[0] print(frog_results) frog_results.append for entry in frog_results: if 'frog'in entry[1]: return True # Else: return False def img_contains_fish(img): x = image.img_to_array(img) x = np.expand_dims(x, axis=0) x = preprocess_input(x) model = ResNet50(weights='imagenet') features = model.predict(x) fish_results = decode_predictions(features, top=3)[0] fish_results.append(decode_predictions(features, top=10)[0]) print(fish_results) fish_results.append for entry in fish_results: if 'fish'in entry[1]: return True #Else: return False # Frogs for x in absolute_image_paths[0]['lilly frog pond']: x = process_img_path(x) print(img_contains_frog(x)) # Fish for x in absolute_image_paths[0]['lilly frog pond']: x = process_img_path(x) print(img_contains_fish(x)) # + [markdown] id="oErE_CirA9Mk" colab_type="text" # #### Stretch Goal: Displaying Predictions # + id="XVDO-mn1ffBh" colab_type="code" colab={} ## Couldn't get code to work. Good "code challenge" for myself once Winter Break starts. # + [markdown] colab_type="text" id="XEuhvSu7O5Rf" # <a id="p3"></a> # ## Part 3 - Autoencoders # # Describe a use case for an autoencoder given that an autoencoder tries to predict its own input. # # *Answer:* Given that it tries to predict its own input, one novel way of using autoencoders is image denoising. Oftentimes images contain noise in the data -- autoencoders can get rid of that noise! # # - [Medium](https://medium.com/datadriveninvestor/deep-learning-autoencoders-db265359943e), a decent blog post overview on autoencoders. # - [Kaggle](https://www.kaggle.com/shivamb/how-autoencoders-work-intro-and-usecases), I love this Kaggle post on AutoEncoders. Putting this on my list for winter reading. # + [markdown] colab_type="text" id="626zYgjkO7Vq" # <a id="p4"></a> # ## Part 4 - More... # + [markdown] colab_type="text" id="__lDWfcUO8oo" # Answer the following questions, with a target audience of a fellow Data Scientist: # # - **What do you consider your strongest area, as a Data Scientist?** # # **Answer**: I would actually consider my non-technical experience as something that will help me in the long term. # # I have a background in sales, operations, and entrepreneurship. So I'm comfortable with *storytelling* (selling a product or service), I understand business (which will help me share the data with different stakeholders: whether C-suite, finance, marketing, or customers, etc.), and I have an undergrad background in Economics (a general understanding of data and visualizations which can help me tell the story). 
# # - **What area of Data Science would you most like to learn more about, and why?** # # **Answer**: You know, I was more inspired by the data anlytics and visualizations part of Data Science, but since starting Unit 4, and learning about all the cool things that we can do with images, text, ... anything(!), I kind of want to spend some time looking into this deeper. # # But I would be happy if my starting job in this field is as a Data Analyst or as a Business Intelligence analyst (plays on the strengths I mentioned above) # # - **Where do you think Data Science will be in 5 years?** # # **Answer**: Able to process more data (5G, stronger hardware), maybe one or two groundbreaking algorithms, more ubiquitous, more unintimidating to the general population. # # Fully integrated with industries like energy, agriculture, finance, tech (of course), practically every industry will see the value in Data Science. # # - **What are the threats posed by AI to our society?** # # **Answer**: The social and economic changes are the biggest and most obvious ones. There will be a massive job displacement for people all over the world. Like every technological revolution in the past: agricultural, industrial, digital. # # - **How do you think we can counteract those threats? ** # # **Answer**: We need to have our brightest minds look into how to best "catch" these massive amounts of people that will see their jobs become obsolete due to A.I. # # To have support structures that will allow them to reskill. Whether that's financial support during the time that they're reskilling, as well as the educational support that allows them to get the best education while reskilling. # # (Lambda is doing a great job as a solution to this, which is already happening) # # - **Do you think achieving General Artifical Intelligence is ever possible?** # # **Answer**: Yes, I do. As hardware, algorithms, and internet speeds (meaning improved pipelines) improves, I think it's only a matter of time. # # A few sentences per answer is fine - only elaborate if time allows. # + [markdown] colab_type="text" id="_Hoqe3mM_Mtc" # ## Congratulations! # # Thank you for your hard work, and congratulations! You've learned a lot, and you should proudly call yourself a Data Scientist. # # + id="qoN2ZF2eA9NB" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 319} outputId="2c6b4ea5-459f-44ec-a881-56350659c389" from IPython.display import HTML HTML("""<iframe src="https://giphy.com/embed/26xivLqkv86uJzqWk" width="480" height="270" frameBorder="0" class="giphy-embed" allowFullScreen></iframe><p><a href="https://giphy.com/gifs/mumm-champagne-saber-26xivLqkv86uJzqWk">via GIPHY</a></p>""") # + id="NUsou3rtadp5" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="e0e0c0c3-71b9-4ade-da23-71cc78276b81" print("Woohoo! We did it. Survived four units of Lambda School. Onto labs!")
ira_Unit_4_Sprint_3_Challenge.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Full-Waveform Inversion (FWI) # # This notebook is the third in a series of tutorial highlighting various aspects of seismic inversion based on Devito operators. In this second example we aim to highlight the core ideas behind seismic inversion, where we create an image of the subsurface from field recorded data. This tutorial follows on the modelling tutorial and will reuse the modelling and velocity model. # # ## Inversion requirement # # Seismic inversion relies on two known parameters: # # - **Field data** - or also called **recorded data**. This is a shot record corresponding to the true velocity model. In practice this data is acquired as described in the first tutorial. In order to simplify this tutorial we will fake field data by modelling it with the true velocity model. # # - **Initial velocity model**. This is a velocity model that has been obtained by processing the field data. This model is a rough and very smooth estimate of the velocity as an initial estimate for the inversion. This is a necessary requirement for any optimization (method). # # ## Inversion computational setup # # In this tutorial, we will introduce the gradient operator. This operator corresponds to the imaging condition introduced in the previous tutorial with some minor modifications that are defined by the objective function (also referred to in the tutorial series as the *functional*, *f*) and its gradient, *g*. We will define this two terms in the tutorial too. # # ## Notes on the operators # # As we already describe the creation of a forward modelling operator, we will only call an wrapped function here. This wrappers already contains all the necessary operator for seismic modeling, imaging and inversion, however any new operator will be fully described and only used from the wrapper in the next tutorials. # + import numpy as np # %matplotlib inline from devito import configuration configuration['log_level'] = 'WARNING' # - # ## Computational considerations # # As we will see in this tutorial, FWI is again very computationally demanding, even more so than RTM. To keep this tutorial as light-wight as possible we therefore again use a very small demonstration model. We also define here a few parameters for the final example runs that can be changed to modify the overall runtime of the tutorial. nshots = 9 # Number of shots to create gradient from nreceivers = 101 # Number of receiver locations per shot fwi_iterations = 8 # Number of outer FWI iterations # # True and smooth velocity models # # As before, we will again use a very simple model domain, consisting of a circle within a 2D domain. We will again use the "true" model to generate our synthetic shot data and use a "smooth" model as our initial guess. In this case the smooth model is very smooth indeed - it is simply a constant background velocity without any features. # + #NBVAL_IGNORE_OUTPUT from examples.seismic import demo_model, plot_velocity, plot_perturbation # Define true and initial model shape = (101, 101) # Number of grid point (nx, nz) spacing = (10., 10.) # Grid spacing in m. The domain size is now 1km by 1km origin = (0., 0.) 
# Need origin to define relative source and receiver locations model = demo_model('circle-isotropic', vp=3.0, vp_background=2.5, origin=origin, shape=shape, spacing=spacing, nbpml=40) model0 = demo_model('circle-isotropic', vp=2.5, vp_background=2.5, origin=origin, shape=shape, spacing=spacing, nbpml=40) plot_velocity(model) plot_velocity(model0) plot_perturbation(model0, model) # - # ## Acquisition geometry # # In this tutorial, we will use the easiest case for inversion, namely a transmission experiment. The sources are located on one side of the model and the receivers on the other side. This allow to record most of the information necessary for inversion, as reflections usually lead to poor inversion results. # + #NBVAL_IGNORE_OUTPUT # Define acquisition geometry: source from examples.seismic import RickerSource, Receiver # Define time discretization according to grid spacing t0 = 0. tn = 1000. # Simulation lasts 1 second (1000 ms) dt = model.critical_dt # Time step from model grid spacing nt = int(1 + (tn-t0) / dt) # Discrete time axis length time = np.linspace(t0, tn, nt) # Discrete modelling time f0 = 0.010 # Source peak frequency is 10Hz (0.010 kHz) src = RickerSource(name='src', grid=model.grid, f0=f0, time=np.linspace(t0, tn, nt)) src.coordinates.data[0, :] = np.array(model.domain_size) * .5 src.coordinates.data[0, 0] = 20. # 20m from the left end # We can plot the time signature to see the wavelet src.show() # + #NBVAL_IGNORE_OUTPUT # Define acquisition geometry: receivers # Initialize receivers for synthetic data rec = Receiver(name='rec', grid=model.grid, npoint=nreceivers, ntime=nt) rec.coordinates.data[:, 1] = np.linspace(0, model.domain_size[0], num=nreceivers) rec.coordinates.data[:, 0] = 980. # 20m from the right end # Plot acquisition geometry plot_velocity(model, source=src.coordinates.data, receiver=rec.coordinates.data[::4, :]) # - # ## True and smooth data # # We can generate shot records for the true and smoothed initial velocity models, since the difference between them will again form the basis of our imaging procedure. # + # Compute synthetic data with forward operator from examples.seismic.acoustic import AcousticWaveSolver solver = AcousticWaveSolver(model, src, rec, space_order=4) true_d, _, _ = solver.forward(src=src, m=model.m) # - # Compute initial data with forward operator smooth_d, _, _ = solver.forward(src=src, m=model0.m) # + #NBVAL_IGNORE_OUTPUT from examples.seismic import plot_shotrecord # Plot shot record for true and smooth velocity model and the difference plot_shotrecord(true_d.data, model, t0, tn) plot_shotrecord(smooth_d.data, model, t0, tn) plot_shotrecord(smooth_d.data - true_d.data, model, t0, tn) # - # # Full-Waveform Inversion # # # ## Formulation # # Full-waveform inversion (FWI) aims to invert an accurate model of the discrete wave velocity, $\mathbf{c}$, or equivalently the square slowness of the wave, $\mathbf{m} = \frac{1}{\mathbf{c}^2}$, from a given set of measurements of the pressure wavefield $\mathbf{u}$. 
This can be expressed as the following optimization problem [1, 2]: # # \begin{aligned} # \mathop{\hbox{minimize}}_{\mathbf{m}} \Phi_s(\mathbf{m})&=\frac{1}{2}\left\lVert\mathbf{P}_r # \mathbf{u} - \mathbf{d}\right\rVert_2^2 \\ # \mathbf{u} &= \mathbf{A}(\mathbf{m})^{-1} \mathbf{P}_s^T \mathbf{q}_s, # \end{aligned} # # where $\mathbf{P}_r$ is the sampling operator at the receiver locations, $\mathbf{P}_s^T$ is the injection operator at the source locations, $\mathbf{A}(\mathbf{m})$ is the operator representing the discretized wave equation matrix, $\mathbf{u}$ is the discrete synthetic pressure wavefield, $\mathbf{q}_s$ is the corresponding pressure source and $\mathbf{d}$ is the measured data. It is worth noting that $\mathbf{m}$ is the unknown in this formulation and that multiple implementations of the wave equation operator $\mathbf{A}(\mathbf{m})$ are possible. # # We have already defined a concrete solver scheme for $\mathbf{A}(\mathbf{m})$ in the first tutorial, including appropriate implementations of the sampling operator $\mathbf{P}_r$ and source term $\mathbf{q}_s$. # # To solve this optimization problem using a gradient-based method, we use the # adjoint-state method to evaluate the gradient $\nabla\Phi_s(\mathbf{m})$: # # \begin{align} # \nabla\Phi_s(\mathbf{m})=\sum_{\mathbf{t} =1}^{n_t}\mathbf{u}[\mathbf{t}] \mathbf{v}_{tt}[\mathbf{t}] =\mathbf{J}^T\delta\mathbf{d}_s, # \end{align} # # where $n_t$ is the number of computational time steps, $\delta\mathbf{d}_s = \left(\mathbf{P}_r \mathbf{u} - \mathbf{d} \right)$ is the data residual (difference between the measured data and the modelled data), $\mathbf{J}$ is the Jacobian operator and $\mathbf{v}_{tt}$ is the second-order time derivative of the adjoint wavefield solving: # # \begin{align} # \mathbf{A}^T(\mathbf{m}) \mathbf{v} = \mathbf{P}_r^T \delta\mathbf{d}. # \end{align} # # We see that the gradient of the FWI function is the previously defined imaging condition with an extra second-order time derivative. We will therefore reuse the operators defined previously inside a Devito wrapper. # ## FWI gradient operator # # To compute a single gradient $\nabla\Phi_s(\mathbf{m})$ in our optimization workflow we again use `solver.forward` to compute the entire forward wavefield $\mathbf{u}$ and a similar pre-defined gradient operator to compute the adjoint wavefield `v`. The gradient operator provided by our `solver` utility also computes the correlation between the wavefields, allowing us to encode a similar procedure to the previous imaging tutorial as our gradient calculation: # # - Simulate the forward wavefield with the background velocity model to get the synthetic data and save the full wavefield $\mathbf{u}$ # - Compute the data residual # - Back-propagate the data residual and compute on the fly the gradient contribution at each time step. # # This procedure is applied to multiple source positions and summed to obtain a gradient image of the subsurface. We again prepare the source locations for each shot and visualize them, before defining a single gradient computation over a number of shots as a single function. # + #NBVAL_IGNORE_OUTPUT # Prepare the varying source locations sources source_locations = np.empty((nshots, 2), dtype=np.float32) source_locations[:, 0] = 30. 
source_locations[:, 1] = np.linspace(0., 1000, num=nshots) plot_velocity(model, source=source_locations) # + # Create FWI gradient kernel from devito import Function, clear_cache def fwi_gradient(m_in): # Important: We force previous wavefields to be destroyed, # so that we may reuse the memory. clear_cache() # Create symbols to hold the gradient and residual grad = Function(name="grad", grid=model.grid) residual = Receiver(name='rec', grid=model.grid, ntime=nt, coordinates=rec.coordinates.data) objective = 0. for i in range(nshots): # Update source location src.coordinates.data[0, :] = source_locations[i, :] # Generate synthetic data from true model true_d, _, _ = solver.forward(src=src, m=model.m) # Compute smooth data and full forward wavefield u0 smooth_d, u0, _ = solver.forward(src=src, m=m_in, save=True) # Compute gradient from data residual and update objective function residual.data[:] = smooth_d.data[:] - true_d.data[:] objective += .5*np.linalg.norm(residual.data.reshape(-1))**2 solver.gradient(rec=residual, u=u0, m=m_in, grad=grad) return objective, grad.data # - # Having defined our FWI gradient procedure we can compute the initial iteration from our starting model. This allows us to visualize the gradient alongside the model perturbation and the effect of the gradient update on the model. # + #NBVAL_IGNORE_OUTPUT # Compute gradient of initial model ff, update = fwi_gradient(model0.m) print('Objective value is %f ' % ff) # + #NBVAL_IGNORE_OUTPUT from examples.seismic import plot_image # Plot the FWI gradient plot_image(update, vmin=-1e4, vmax=1e4, cmap="jet") # Plot the difference between the true and initial model. # This is not known in practice as only the initial model is provided. plot_image(model0.m.data - model.m.data, vmin=-1e-1, vmax=1e-1, cmap="jet") # Show what the update does to the model alpha = .05 / np.max(update) plot_image(model0.m.data - alpha*update, vmin=.1, vmax=.2, cmap="jet") # - # We see that the gradient and the true perturbation have the same sign, therefore, with an appropriate scaling factor, we will update the model in the correct direction. # Define bounding box constraints on the solution. def apply_box_constraint(m): # Maximum possible 'realistic' velocity is 3.5 km/sec # Minimum possible 'realistic' velocity is 2 km/sec return np.clip(m, 1/3.5**2, 1/2**2) # + #NBVAL_SKIP # Run FWI with gradient descent history = np.zeros((fwi_iterations, 1)) for i in range(0, fwi_iterations): # Compute the functional value and gradient for the current # model estimate phi, direction = fwi_gradient(model0.m) # Store the history of the functional values history[i] = phi # Artificial Step length for gradient descent # In practice this would be replaced by a Linesearch (Wolfe, ...) # that would guarantee functional decrease Phi(m-alpha g) <= epsilon Phi(m) # where epsilon is a minimum decrease constant alpha = .005 / np.max(direction) # Update the model estimate and inforce minimum/maximum values model0.m.data[:] = apply_box_constraint(model0.m.data - alpha * direction) # Log the progress made print('Objective value is %f at iteration %d' % (phi, i+1)) # + #NBVAL_IGNORE_OUTPUT # First, update velocity from computed square slowness nbpml = model.nbpml model0.vp = np.sqrt(1. 
/ model0.m.data[nbpml:-nbpml, nbpml:-nbpml]) # Plot inverted velocity model plot_velocity(model0) # + #NBVAL_SKIP import matplotlib.pyplot as plt # Plot objective function decrease plt.figure() plt.loglog(history) plt.xlabel('Iteration number') plt.ylabel('Misift value Phi') plt.title('Convergence') plt.show() # - # ## References # # [1] _<NAME>. and <NAME>.: An overview of full-waveform inversion in exploration geophysics, GEOPHYSICS, 74, WCC1–WCC26, doi:10.1190/1.3238367, http://library.seg.org/doi/abs/10.1190/1.3238367, 2009._ # # [2] _<NAME>., <NAME>., and <NAME>.: An effective method for parameter estimation with PDE constraints with multiple right hand sides, SIAM Journal on Optimization, 22, http://dx.doi.org/10.1137/11081126X, 2012._ # <sup>This notebook is part of the tutorial "Optimised Symbolic Finite Difference Computation with Devito" presented at the Intel® HPC Developer Conference 2017.</sup>
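# Returning to the gradient-descent loop above: the artificial step length could be replaced by the line search mentioned in the comments. A minimal backtracking sketch built on the `fwi_gradient` and `apply_box_constraint` helpers defined earlier (illustrative only — every trial step re-runs all shots, so this is expensive, and a production workflow would use a proper Wolfe line search from an optimization library):

# +
def backtracking_step(phi0, direction, alpha0=.005, shrink=.5, max_tries=5):
    # Try progressively smaller steps until the objective decreases
    alpha = alpha0 / np.max(direction)
    m_previous = model0.m.data.copy()
    for _ in range(max_tries):
        model0.m.data[:] = apply_box_constraint(m_previous - alpha * direction)
        phi_trial, _ = fwi_gradient(model0.m)
        if phi_trial < phi0:
            return alpha, phi_trial
        alpha *= shrink
    # No decrease found: restore the previous model estimate
    model0.m.data[:] = m_previous
    return 0., phi0
# -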
02a_fwi.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="rEyGlm50XE07" # # || The Sparks Foundation || (GRIPJUNE21) # + [markdown] id="vGUjlLo6XEtO" # # Author: <NAME> # ## (Data Science and Business Analytics Intern) # + [markdown] id="PbDLeVHCXEiK" # ## Task-3: # ## Exploratory Data Analysis - Retail # ### As a business manager, try to find out the weak areas where you can work to make more profit. # + [markdown] id="FPgXutEQXL68" # - Importing Dependencies. # + id="TVzhyBG3OBIc" import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns # + [markdown] id="cGEkNptyXjNe" # # Read csv File. # + colab={"base_uri": "https://localhost:8080/", "height": 306} id="Cob7pStQOW9z" outputId="f8a859e4-405a-4ecc-fc44-32e2a34ac4f4" file = "/content/drive/MyDrive/Sparks/SampleSuperstore - SampleSuperstore.csv" df = pd.read_csv(file) df.head() # + colab={"base_uri": "https://localhost:8080/", "height": 354} id="ZjYj92lM8b09" outputId="30cdf6c1-30b0-49a6-9e57-aa1fa6be5463" df.tail(6) # + colab={"base_uri": "https://localhost:8080/"} id="s8XtDIvnQDUZ" outputId="2d5f00aa-af25-43c2-8337-03f61f0201bc" df.info() # + colab={"base_uri": "https://localhost:8080/"} id="-kskSmxbQLCE" outputId="b5c69e9b-895d-43c6-f26f-5cf4e88873ca" df.shape # + colab={"base_uri": "https://localhost:8080/"} id="J4BZ8s99QOmp" outputId="881e6dd9-fdd5-48aa-a7b9-b7d1f83c97b9" df.columns # + colab={"base_uri": "https://localhost:8080/", "height": 297} id="Wv6taqZQQVkP" outputId="5a5f4522-7a9e-4c24-9aca-e89bd740d65b" df.describe() # + colab={"base_uri": "https://localhost:8080/"} id="gIExpOJM-egr" outputId="9e6ad4b7-a177-40b3-bdb6-dceb65f1ab23" df.nunique() # + colab={"base_uri": "https://localhost:8080/"} id="Pj3U2XGc--TC" outputId="1f3509d5-addc-46a7-9e2b-c5379ed8fe80" df['Ship Mode'].unique() # + colab={"base_uri": "https://localhost:8080/"} id="vZyvfd0H_Glz" outputId="d2ec155e-3b70-487f-fe28-989796b0287c" df['Region'].unique() # + [markdown] id="vdvuc6ihXzPf" # # Data Preprocessing/ Cleaning # # + colab={"base_uri": "https://localhost:8080/"} id="sus05vw9AUOP" outputId="a812cb73-16ca-42f9-f100-42b482b3d584" # If any null values then we can put mean of that column at that place df.isnull().sum() # + [markdown] id="z3SvZyMpbi8J" # ## Make new dataframe by dropping the column which aren't useful # + id="NEZ0FSV5Am28" business = df.drop(['Country', 'Postal Code'], axis=1) # + colab={"base_uri": "https://localhost:8080/", "height": 204} id="DdhH1ThxBJEr" outputId="bed051a5-ce5f-4c48-f267-367ae70c41a2" business.head() # + [markdown] id="OnmA_68TbnHl" # # Relationship Analysis # + id="Egsn0lK5Hdn7" # Corelation Matrix corelation = business.corr() # + [markdown] id="HuRdtQwSbt61" # ## Heatmap # + colab={"base_uri": "https://localhost:8080/", "height": 368} id="uf-GrsUdH-jT" outputId="4c3d4918-7789-4693-a860-03041aac41b4" sns.heatmap(corelation, xticklabels=corelation.columns, yticklabels=corelation.columns, annot=True) # + [markdown] id="1T0oZo5EbxVe" # ## Pairplot # + colab={"base_uri": "https://localhost:8080/", "height": 744} id="Gi4nhYLDIkmm" outputId="b1c98cb4-1f75-4a5e-8e59-b65c8c67d92b" sns.pairplot(business) # pairwise relationship in whole dataset # + [markdown] id="mugFBsfyb0bd" # ## Relational Scatter Plot # + colab={"base_uri": "https://localhost:8080/", "height": 401} id="2PvMhUGYJYSz" outputId="518f5fa2-2f07-4487-e52a-6775b3b6f16a" 
sns.relplot(x='Sales', y='Profit', hue='Ship Mode', data=business) # + colab={"base_uri": "https://localhost:8080/", "height": 401} id="JD6Locv8KBG1" outputId="5253b729-806b-4cdf-ce47-c6d7a9d1477d" sns.relplot(x='Sales', y='Profit', hue='Category', data=business) # + colab={"base_uri": "https://localhost:8080/", "height": 401} id="skYK8_RsGhVS" outputId="f88da6b9-1d3e-4191-ec40-4db26c0368dd" sns.relplot(x='Sales', y='Discount', data=business) # + [markdown] id="bMbhKy3gb_do" # ## Distribution Plot # + colab={"base_uri": "https://localhost:8080/", "height": 434} id="EGpNaMWP-mdz" outputId="ab0bee7d-673c-40fb-8c83-73f01ea3ff60" sns.distplot(business['Profit']) # + [markdown] id="zIQ8hKKxcFGN" # ## Categorical Plot # + colab={"base_uri": "https://localhost:8080/", "height": 401} id="LYyBJunX7GtA" outputId="6a0b40cc-320a-42ad-faee-5b290420a68e" sns.catplot(x="Profit", kind='box', data=business) # + [markdown] id="8ZQtDyV0cKLU" # ## Bar plot # + colab={"base_uri": "https://localhost:8080/", "height": 491} id="NPLzHumv7ZU5" outputId="6a5e21c1-bf7d-48d6-fcd1-19a963831675" plt.figure(figsize= (10,16)) business.groupby('Category')['Profit','Sales'].agg(['sum']).plot.bar() plt.ylabel('Profit') plt.show() # + colab={"base_uri": "https://localhost:8080/", "height": 456} id="x5bN7g2FKKrI" outputId="ec755f73-a384-4466-9a27-ccf577ff5124" plt.figure(figsize= (10,16)) business.groupby('Region')['Profit','Region'].agg(['sum']).plot.bar() plt.ylabel('Profit') plt.show() # + colab={"base_uri": "https://localhost:8080/", "height": 775} id="89mbEVx2HtPz" outputId="ecfbc8eb-826c-42c3-c32a-8b9990446cac" # For Sub-categories plt.figure(figsize=(14,12)) statewise = business.groupby(['Sub-Category'])['Profit'].sum().nlargest(50) statewise.plot.bar() # + colab={"base_uri": "https://localhost:8080/", "height": 419} id="t5DMfFItGCEk" outputId="9c519b0a-0263-4c21-c77b-89197408a690" # computing top states in terms of sales from first 10 observations top_states_s = business.groupby("State").Sales.sum().nlargest(n=10) # computing top states in terms of profit from first 10 observations top_states_p = business.groupby("State").Profit.sum().nlargest(n = 10) plt.style.use('seaborn') top_states_s.plot(kind = 'bar',figsize = (10,5),fontsize = 14) top_states_p.plot(kind = 'bar',figsize = (10,5),fontsize = 14, color = 'red') plt.xlabel('States',fontsize = 15) plt.ylabel('Total sales',fontsize = 15) plt.title("Top 10 states Sales vs Profit",fontsize = 15) plt.show() # + [markdown] id="oqXMK_ESGvaj" # # Conclusions:- # ### - Even after giving discounts we aren't getting more sales(actually it is taking down our sales then usual), so instead of focusing on discount just focus on expanding in some of the states like New york and Califonia. # ### - Try selling more Technology even if we need to give up on furniture(less profit) as technology gives more profit. # ### - Sales of tables, bookcases are needed to be stoped as it has became the source of loss. # ### - Increase the sales of technology whose price is more than 5000. # ### - Increase sales in East, then take over the market of first west then south and then central.
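# The point about Tables and Bookcases above can be checked directly by listing the least profitable sub-categories — a small follow-up on the same `business` frame:

# +
business.groupby('Sub-Category')['Profit'].sum().nsmallest(5)
# -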
Task3__Sparks_by_Shubh_Dholakiya.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Fundamentals of Python Variables And Numpy # ## <NAME> # ## Setting up the environment import numpy as np # This is the main numerical library we will use import matplotlib.pyplot as plt # This is the main plotting library we will use import scipy.io.wavfile as wavfile # We will use this library to load in audio import IPython.display as ipd # This is a library that allows us to play audio samples in Jupyter # ## Basic Arithmetic / Variable Naming # Go through +, *, /, **, % 1+1 9*8 10/4 2**2 + 2 20 % 6 # Remainder a = 5 # This sets the variable a to be 5 b = 2 b**2 a = a + 1 # This is weird syntax, but it means set a to be the previous value of a plus 1 a**a matt = 0 1 / matt 1matt = 0 # Cannot start a variable name with a number christralie = 30 print(ChrisTralie) # Case sensitive!! # ## Numpy Arrays / Plotting x = [1, 9, -10, 2] x[4] x[0] # Lists are zero-indexed x[2] x[-1] x[-2] x = [i**2 for i in range(1000)] x[-1] len(x) # This gets the length of the list # Slicing mylist = [12, 9, -2, 8, 17, 13, 24] firstfour = mylist[0:4] print(firstfour) print(mylist[2:5]) print(mylist[0:5:2]) mylist[0:5:2] = 0 # This is a limitation of lists mylist[0:5:2] = [0, 0, 0] print(mylist) mylist[0] = 64 print(mylist) list1 = [0, 1, 2, 3, 4, 5, 6, 7] list2 = [2, 4, 6, 8, 10, 12, 14, 16] list1[0:7:2] = list2[1:8:2] print(list1) print(list1[0:7:2]) # We can leave out the last element print(list1[0: :3]) # Take every third element starting at element 0 print(list1[2: : ]) # Take every element from 2 to the end print(list1[::-1]) x = [1, 2, 3, 4, 5] print(x[5::]) y = [3, 8, 9] x[5::] = y print(x) # The + by default with lists puts one list at the end of the other a = [1, 2, 3] b = [2, 3, 4] print(a + b) # + # arange, zeros, ones, plot/stem # + # np.arange? # - x = np.arange(10) + 1 # Element-wise add # This is *much* faster than adding to individual elements #x[0] = x[0] + 1 #x[1] = x[1] + 1 print(x) x = x*x # Element-wise multiplication print(x) x = x + x print(x) plt.plot(x) plt.stem(x) x = np.arange(100) plt.plot(np.mod(x, 7)) # "Modulus operator": Returns remainder after division # Class exercise: Make the array [0, 1, 0, 3, 0, 5, 0, 7, 0] using arange and slicing x = np.arange(8) print(x) x = x*2 print(x) x = x + 1 print(x) x = np.mod(x, 8) print(x) # + ## Andrew/Kat x = np.arange(17) print(x) y = np.mod(x, 8) print(y) y = y[1::2] print(y) # Class exercise: Create an array with the elements [1, 3, 5, 7, 1, 3, 5, 7] # using np.arange and np.mod # - ## You can really cheat with regular lists x = [1, 3, 5, 7] x = x + x print(x)
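# One possible solution to the earlier class exercise (make [0, 1, 0, 3, 0, 5, 0, 7, 0] using arange and slicing), combining zeros with slice assignment:

# +
x = np.zeros(9, dtype=int)
x[1::2] = np.arange(1, 8, 2)
print(x)  # [0 1 0 3 0 5 0 7 0]
# -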
Week1_Fundamentals.ipynb
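# Editor's addition: possible solutions to the two class exercises in the notebook
# above (a sketch, not the instructor's official answers). Both use only np.arange,
# slicing and np.mod, as the exercises ask.
import numpy as np

# Exercise 1: build [0, 1, 0, 3, 0, 5, 0, 7, 0]
a = np.arange(9)   # [0, 1, 2, ..., 8]
a[::2] = 0         # zero out every other element starting at index 0
print(a)           # [0 1 0 3 0 5 0 7 0]

# Exercise 2: build [1, 3, 5, 7, 1, 3, 5, 7]
b = np.mod(2 * np.arange(8), 8) + 1   # [0, 2, 4, 6, 0, 2, 4, 6] + 1
print(b)           # [1 3 5 7 1 3 5 7]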
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import sys sys.path.append('../') from picket.prepare.dataPrepare import dataPrepareTrain from picket.filter.filtersTrain import evaluateTrainTime, printResTwoStream # - # dataset includes 'wine', 'adult', 'restaurant', 'marketing', 'titanic', 'HTRU2' dataset = 'marketing' # 'random', 'system' for all datasets, 'poison' for purely numerical datasets noise_type = 'random' # Can be 'lr', 'svm', 'nn' if noise_type is 'poison' downstream_model = None # ## Train-test Split and Noise Injection dataPrepareTrain(dataset) # ## Outlier Detection at Training Time evaluateTrainTime(dataset, ntype=noise_type, modelname=downstream_model, ghmodeTest=True) printResTwoStream(dataset, ntype=noise_type, modelname=downstream_model)
notebooks/Experiments-MicroBenchmark-TwoStream.ipynb
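# Editor's addition: a hedged sketch (not in the original notebook) of how the same
# training-time benchmark could be looped over several datasets and noise types,
# reusing the functions imported above. It assumes every (dataset, noise) combination
# listed in the notebook's comments is supported; 'poison' noise is skipped because
# it additionally needs a downstream model ('lr', 'svm' or 'nn').
import sys
sys.path.append('../')
from picket.prepare.dataPrepare import dataPrepareTrain
from picket.filter.filtersTrain import evaluateTrainTime, printResTwoStream

datasets = ['wine', 'adult', 'restaurant', 'marketing', 'titanic', 'HTRU2']
noise_types = ['random', 'system']

for ds in datasets:
    dataPrepareTrain(ds)                      # train-test split and noise injection
    for ntype in noise_types:
        evaluateTrainTime(ds, ntype=ntype, modelname=None, ghmodeTest=True)
        printResTwoStream(ds, ntype=ntype, modelname=None)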
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # # %load_ext autoreload # # %autoreload 2 import sys sys.path.append("../..") from heritageconnector import datastore from heritageconnector.config import field_mapping from heritageconnector.best_spacy_pipeline import load_model from heritageconnector.datastore import es, index from smg_jobs.smg_loader import preprocess_text_for_ner import numpy as np from elasticsearch import helpers, Elasticsearch from itertools import islice from typing import Optional, Generator, List, Tuple import pandas as pd pd.options.display.max_rows = None pd.options.display.max_colwidth = None # - nlp = load_model("en_core_web_trf") # + category_mapping = { "Textiles": [ "Textile Industry", "Textiles Machinery" ], 'Energy': [ 'Coal Mining', 'Electricity Supply', 'Environmental Science & Technology', 'Gas Industry', "Heat Engines (non steam)", "Heating, Cooling and Ventilation", "<NAME> Garret Workshop", "Lighting", "Mining & Ore Dressing", "Motive Power", "Nuclear Physics", "Stationary Engines" ], "Communications": [ "Cinematograph", "Electronic Component", "Pictorial Collection (Railway", "Printing & Writing", "Radio Communication", "Sound Reproduction", "Television", ], "General": [ "Archive Collections", "Art", "Local History", "Photographs" ] } category_mapping_reversed = {i: k for k,v in category_mapping.items() for i in v } categories = [] for k, v in category_mapping.items(): categories += v len(categories) # + # the below class inherits from datastore.NERLoader but changes _get_doc_generator so that it only gets objects from within # the categories listed above class NERLoader(datastore.NERLoader): def __init__(self, **kwargs): super().__init__(**kwargs) def _get_doc_generator( self, index: str, limit: Optional[int] = None, random_sample: bool = True, random_seed: int = 42, ) -> Generator[List[Tuple[str, str]], None, None]: """ Returns a generator of document IDs and descriptions from the Elasticsearch index, batched according to `self.batch_size` and limited according to `limit`. Only documents with an XSD.description value are returned. Args: limit (Optional[int], optional): limit the number of documents to get and therefore load. Defaults to None. random_sample (bool, optional): whether to take documents at random. Defaults to True. random_seed (int, optional): random seed to use if random sampling is enabled using the `random_sample` parameter. Defaults to 42. Returns: Generator[List[Tuple[str, str]]]: generator of lists with length `self.batch_size`, where each list contains `(uri, description)` tuples. 
""" es_query = { "query": { "function_score": { "query": { "bool": { "must": [ { "exists": { "field": "data.http://www.w3.org/2001/XMLSchema#description" } }, {"terms": {"graph.@sdo:[email protected]": categories}} ] } }, "random_score": {"seed": random_seed, "field": "_seq_no"}, } } } doc_generator = helpers.scan( client=es, index=index, query=es_query, preserve_order=True, ) if limit: doc_generator = islice(doc_generator, limit) doc_generator = ( ( doc["_id"], self.text_preprocess_func( doc["_source"]["data"][ "http://www.w3.org/2001/XMLSchema#description" ] ), ) for doc in doc_generator ) return doc_generator # + source_description_field = target_description_field = "data.http://www.w3.org/2001/XMLSchema#description" target_title_field = "graph.@rdfs:label.@value" target_alias_field = "graph.@skos:altLabel.@value" target_type_field = "graph.@skos:hasTopConcept.@value" record_loader = datastore.RecordLoader("SMG", field_mapping) ner_loader = NERLoader( record_loader = record_loader, source_es_index = 'heritageconnector', target_es_index = 'heritageconnector', source_description_field = source_description_field, target_title_field = target_title_field, target_description_field = target_description_field, target_type_field = target_type_field, target_alias_field = target_alias_field, entity_types_to_link={"PERSON", "OBJECT", "ORG"}, text_preprocess_func=preprocess_text_for_ner ) # - ner_loader.get_list_of_entities_from_es("en_core_web_trf", 5000) _ = ner_loader.get_link_candidates(10) # + def train_entity_linker(): df = pd.read_excel("../../GITIGNORE_DATA/NEL/review_data_1103.xlsx", index_col=0) df.loc[~df['link_correct'].isnull(), 'link_correct'] = df.loc[~df['link_correct'].isnull(), 'link_correct'].apply(int) df_annotated = df[(~df['link_correct'].isnull()) & (df['candidate_rank'] != -1)] return ner_loader.train_entity_linker(df_annotated) clf = train_entity_linker() # - # get entities and split by those with candidates and those without ent_df = ner_loader.entity_list_as_dataframe ents_with_candidates, ents_without_candidates = ent_df[~ent_df['candidate_rank'].isna()], ent_df[ent_df['candidate_rank'].isna()] # predict whether each link candidate is an actual link for the entity y_pred = clf.predict_proba(ents_with_candidates)[:,1] ents_with_candidates["y_pred"] = y_pred # concatenate dataframes ents_df_with_candidates_and_preds = pd.concat([ents_with_candidates, ents_without_candidates]) # + # add spaCy docs to the dataframe so these can just be retrieved instead of created in the demo desc_unique, desc_unique_indices = np.unique(ents_df_with_candidates_and_preds.item_description.values, return_inverse=True) docs_unique = np.array(list(nlp.pipe(desc_unique)), dtype=object) # spacy docs in dataframe approach # ents_df_with_candidates_and_preds['item_description_spacy_doc'] = docs_unique[desc_unique_indices] # DocBin approach # from spacy.tokens import DocBin # doc_bin = DocBin(attrs=["ENT_IOB", "ENT_TYPE"], store_user_data=True) # for doc in docs_unique[desc_unique_indices]: # doc_bin.add(doc) # doc_bin.to_disk('docs.spacy') # - from spacy import displacy html_list = [displacy.render(doc, style='ent', jupyter=False) for doc in docs_unique] len(html_list) == len(docs_unique), html_list[0] ents_df_with_candidates_and_preds["ent_html"] = np.array(html_list)[desc_unique_indices] # test of rendering a doc (if they are stored in the dataframe) doc = ents_df_with_candidates_and_preds['ent_html'].iloc[0] print(doc) ents_df_with_candidates_and_preds.head(1) # + # TODO: add item labels to 
dataframe here # get categories catalogue_df = pd.read_csv("../../GITIGNORE_DATA/smg-datasets-private/mimsy-catalogue-export.csv", usecols=["MKEY", "CATEGORY1", "TITLE"]) catalogue_df["MKEY"] = "https://collection.sciencemuseumgroup.org.uk/objects/co" + catalogue_df["MKEY"].astype(str) catalogue_df["CATEGORY1"] = catalogue_df["CATEGORY1"].apply(lambda x: x.split(" - ")[1].strip()) # merge ents_df_with_candidates_and_preds = ents_df_with_candidates_and_preds.merge(catalogue_df, how='left', left_on='item_uri', right_on='MKEY',) ents_df_with_candidates_and_preds["demo_category"] = ents_df_with_candidates_and_preds["CATEGORY1"].map(category_mapping_reversed) ents_df_with_candidates_and_preds.groupby(["demo_category", "CATEGORY1"]).count()["item_uri"].unstack().fillna("-").T # - ents_df_with_candidates_and_preds.head(1) # TODO: export final dataframe to pickle ents_df_with_candidates_and_preds.to_pickle("demo_data.pkl") # also to parquet ents_df_with_candidates_and_preds.to_parquet('demo_data.parquet.gzip', compression='gzip')
demos/event_03_21/prepare data.ipynb
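# Editor's addition: a small follow-up sketch (not part of the notebook above) that
# reads the exported demo data back and keeps only entity mentions whose link
# candidate scored above a threshold. The column names ('y_pred', 'demo_category',
# 'item_uri') follow the dataframe built above; the 0.5 cut-off is an arbitrary
# assumption.
import pandas as pd

demo = pd.read_pickle("demo_data.pkl")

linked = demo[demo["y_pred"] > 0.5]           # rows without candidates have NaN y_pred and are dropped
print(f"{len(linked)} of {len(demo)} candidate rows pass the threshold")
print(linked.groupby("demo_category")["item_uri"].nunique())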
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:v2_0.7] * # language: python # name: conda-env-v2_0.7-py # --- # # Feature Representation Methods in ChemML # To build a machine learning model, raw chemical data is first converted into a numerical representation. The representation contains spatial or topological information that defines a molecule. The resulting features may either be in continuous (molecular descriptors) or discrete (molecular fingerprints) form. from chemml.chem import Molecule from chemml.datasets import load_organic_density import numpy as np import warnings warnings.filterwarnings('ignore') # ### Creating `chemml.chem.Molecule` object from molecule SMILES # # All feature representation methods available in ChemML require `chemml.chem.Molecule` as inputs # Importing an existing dataset from ChemML molecules, target, dragon_subset = load_organic_density() mol_objs_list = [] for smi in molecules['smiles']: mol = Molecule(smi, 'smiles') mol.hydrogens('add') mol.to_xyz('MMFF', maxIters=10000, mmffVariant='MMFF94s') mol_objs_list.append(mol) # ## [Coulomb Matrix](https://doi.org/10.1103/PhysRevLett.108.058301) # # Simple molecular descriptor which mimics the electro-static interaction between nuclei. # + from chemml.chem import CoulombMatrix #The coulomb matrix type can be sorted (SC), unsorted(UM), unsorted triangular(UT), eigen spectrum(E), or random (RC) CM = CoulombMatrix(cm_type='SC',n_jobs=-1) features = CM.represent(mol_objs_list) print(features[:5]) # - # ## [Fingerprints from RDKit](https://www.rdkit.org/) # # Molecular fingerprints are a way of encoding the structure of a molecule. The most common type of fingerprint is a series of binary digits (bits) that represent the presence or absence of particular substructures in the molecule. Comparing fingerprints allows you to determine the similarity between two molecules, to find matches to a query substructure, etc. # + from chemml.chem import RDKitFingerprint # RDKit fingerprint types: 'morgan', 'hashed_topological_torsion' or 'htt' , 'MACCS' or 'maccs', 'hashed_atom_pair' or 'hap' morgan_fp = RDKitFingerprint(fingerprint_type='morgan', vector='bit', n_bits=1024, radius=3) features = morgan_fp.represent(mol_objs_list) print(features[:5]) # - # ## Molecule tensors from `chemml.chem.Molecule` objects # # Molecule tensors can be used to create neural graph fingerprints using `chemml.models` from chemml.chem import tensorise_molecules atoms,bonds,edges = tensorise_molecules(molecules=mol_objs_list, max_degree=5, max_atoms=None, n_jobs=-1, batch_size=100, verbose=True) print("Matrix for atom features (num_molecules, max_atoms, num_atom_features):\n", atoms.shape) print("Matrix for connectivity between atoms (num_molecules, max_atoms, max_degree):\n", edges.shape) print("Matrix for bond features (num_molecules, max_atoms, max_degree, num_bond_features):\n", bonds.shape)
docs/ipython_notebooks/feature_representation.ipynb
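# Editor's addition: a hedged sketch (not from the ChemML docs) showing one way the
# fingerprint `features` produced above could feed a quick baseline regression against
# the density `target` returned by load_organic_density. It assumes `features` behaves
# like a 2D array/DataFrame aligned row-wise with `target`; the model choice and
# hyperparameters are arbitrary.
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error

X = np.asarray(features, dtype=float)
y = np.asarray(target).ravel()

X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.2, random_state=0)
model = RandomForestRegressor(n_estimators=200, random_state=0).fit(X_tr, y_tr)
print("MAE on held-out molecules:", mean_absolute_error(y_te, model.predict(X_te)))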
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd import numpy as np import matplotlib.pyplot as plt import scipy as sp import scipy.signal # ## Data import # We import the csv file. # We are interested in the Ping_index , Ping_date , Ping_time , Latitude, Longitude , and the sv* columns. # Each sv* column corresponds to a depth. # The value for each cell is the logarithm of the intensity of the echo.(ratio of intensity) data_path = '/home/benjamin/Bureau/data jam days/Hackathlon data/' def load_data(filename): df = pd.read_csv(filename) del df['Distance_gps'] del df['Distance_vl'] del df['Ping_milliseconds'] del df['Depth_start'] del df['Depth_stop'] del df['Range_start'] del df['Range_stop'] del df['Sample_count'] data= np.array(df.iloc[:,5:]).transpose() return data,df # ### Filtering # Code from Roland to filter some Sonar artefacts def binary_impulse(Sv, threshold=10): ''' :param Sv: gridded Sv values (dB re 1m^-1) :type Sv: numpy.array :param threshold: threshold-value (dB re 1m^-1) :type threshold: float return: :param mask: binary mask (0 - noise; 1 - signal) :type mask: 2D numpy.array desc: generate threshold mask defined by RB status: test ''' mask = np.ones(Sv.shape).astype(int) samples,pings = Sv.shape for sample in range(1, samples-1): for ping in range(0, pings): a = Sv[sample-1, ping] b = Sv[sample, ping] c = Sv[sample+1, ping] if (b - a > threshold) & (b - c > threshold): mask[sample, ping] = 0 return mask def filter_data(data_matrix): # The relevant data values for the krill are between -70 and -65 data2 =data_matrix.copy() data2[data_matrix<-70] = -70 data2[data_matrix>-65] = -65 data2 = data2 + 70 # We apply a median filtering to get rid of the isolated peaks or lines (which are noise) # Two steps # A variant of the median filter implemented by Roland for lines datafilt = binary_impulse(data2.transpose(), threshold=2) datafilt = datafilt.transpose()*data2 # A standard median filter used in image processing datafilt2 = sp.signal.medfilt(datafilt,kernel_size=3) # try to get rid of the mean by line data3 =datafilt2.copy() data3 = data3 - np.mean(data3,1,keepdims=True) # Gaussian filtering from skimage.filters import gaussian gauss_denoised = gaussian(data3,10) # Compute a function to find the krill signaldata = gauss_denoised[0:150,:] sumsignal = np.sum(signaldata,0)-np.mean(np.sum(signaldata,0)) binary_signal = sumsignal.copy() threshold = 11 binary_signal[sumsignal<threshold] = 0 binary_signal[sumsignal>threshold] = 100 return binary_signal def extract_info(binary_signal,df): krill_list = [] krill_dic = {} data_len = len(binary_signal) for idx in range(data_len): if binary_signal[idx] >0: if idx==0 or binary_signal[idx-1] == 0: # beginning of a krill detection krill_layer_start = idx # record latitude and longitude krill_dic['latitude_start'] = df.iloc[idx,3] krill_dic['longitude_start'] = df.iloc[idx,4] krill_dic['date_start'] = df.iloc[idx,1] krill_dic['time_start'] = df.iloc[idx,2] if idx == data_len-1 or binary_signal[idx+1] == 0: # end of krill detection krill_layer_stop = idx # record latitude and longitude krill_dic['latitude_stop'] = df.iloc[idx,3] krill_dic['longitude_stop'] = df.iloc[idx,4] krill_dic['date_stop'] = df.iloc[idx,1] krill_dic['time_stop'] = df.iloc[idx,2] # store krill layer in list krill_list.append(krill_dic) krill_dic = {} # Compute Krill depth #if 
krill_layer_stop<data_len-1: # krill_layer = datafilt2[krill_layer_start:krill_layer_stop+1] #else: # krill_layer = datafilt2[krill_layer_start:] #min_depth,max_depth,mean_depth = krill_depth(krill_layer) return krill_list import glob global_krill_list = [] for filename in glob.iglob(data_path+'*.csv'): print('Loading data ...') print('%s' % filename) data,df = load_data(filename) print('Filtering data...') binary_signal = filter_data(data) print('Extraction information...') krill_list = extract_info(binary_signal,df) print('Number of Krill events:',len(krill_list)) global_krill_list += krill_list len(global_krill_list) import json with open('krill_data.json', 'w', encoding='utf-8') as f: f.write(json.dumps(global_krill_list, ensure_ascii=False)) def krill_depth(array): # Compute the depth of the krill swarm depth_function = np.sum(array,1)
ACE_fish script clean several files.ipynb
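# Editor's addition: the krill_depth helper in the notebook above is left unfinished
# (it stops after computing depth_function). The version below is only a guess at the
# intended behaviour, based on the commented-out call site that expects
# (min_depth, max_depth, mean_depth); the axis convention and the 0 threshold are
# assumptions, and the function name is changed to make clear it is a sketch.
import numpy as np

def krill_depth_sketch(krill_layer, threshold=0):
    # sum echo intensity along the second axis to get one value per depth bin,
    # then report the depth-bin indices where the layer is present
    depth_function = np.sum(krill_layer, axis=1)
    present = np.where(depth_function > threshold)[0]
    if present.size == 0:
        return None, None, None
    min_depth = int(present.min())
    max_depth = int(present.max())
    mean_depth = float(np.average(present, weights=depth_function[present]))
    return min_depth, max_depth, mean_depth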
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <img src="http://hilpisch.com/tpq_logo.png" alt="The Python Quants" width="35%" align="right" border="0"><br> # # Python for Finance # **Analyze Big Financial Data** # # O'Reilly (2014) # # <NAME> # <img style="border:0px solid grey;" src="http://hilpisch.com/python_for_finance.png" alt="Python for Finance" width="30%" align="left" border="0"> # **Buy the book ** | # <a href='http://shop.oreilly.com/product/0636920032441.do' target='_blank'>O'Reilly</a> | # <a href='http://www.amazon.com/Yves-Hilpisch/e/B00JCYHHJM' target='_blank'>Amazon</a> # # **All book codes & IPYNBs** | # <a href="http://oreilly.quant-platform.com">http://oreilly.quant-platform.com</a> # # **The Python Quants GmbH** | <a href='http://tpq.io' target='_blank'>http://tpq.io</a> # # **Contact us** | <a href='mailto:<EMAIL>'><EMAIL></a> # <span style="color: red;">**THE FOLLOWING PRESENTS CODES THAT ARE MOSTLY DIFFERENT FROM THE ORIGINAL CODE IN THE CHAPTER.**</span> # # Web Integration from pylab import plt plt.style.use('ggplot') import matplotlib as mpl mpl.rcParams['font.family'] = 'serif' # ## Web Basics # ### ftplib # + uuid="ed06650f-c346-4211-a504-918431567c00" active="" # import ftplib # import numpy as np # + uuid="f9533953-66c8-428e-8476-fb9b5320cbc3" active="" # ftp = ftplib.FTP('YOUR_SERVER_DOMAIN.com') # + uuid="72343084-5e71-42bc-9649-acedaf9bf9b9" active="" # ftp.login(user='REPLACE', passwd='<PASSWORD>') # + uuid="bb8b9a5f-2482-4c21-bb52-c5f0c659121b" active="" # np.save('./data/array', np.random.standard_normal((100, 100))) # + uuid="1e23d513-1cbc-4dff-87c8-35bbefd552ea" active="" # f = open('./data/array.npy', 'r') # + uuid="d7723e2d-8a38-4e76-b18d-47e804bbb9e5" active="" # ftp.storbinary('STOR array.npy', f) # + uuid="2827d953-cfad-44d3-a2bf-67cf5863a1f7" active="" # ftp.retrlines('LIST') # + uuid="90b28799-36f2-4591-8767-d01b85322596" active="" # f = open('./data/array_ftp.npy', 'wb').write # + uuid="bda461e0-6d17-4011-8d12-cae2658b0938" active="" # ftp.retrbinary('RETR array.npy', f) # + uuid="fd643a77-53a7-4b88-80a1-992d37141e1b" active="" # ftp.delete('array.npy') # + uuid="b248ab34-3f84-434b-a4cf-75d3bef1f8fe" active="" # ftp.retrlines('LIST') # + uuid="372f7d6f-928a-436c-bf64-13b0aa994620" active="" # ftp.close() # + uuid="a1d9022c-bd30-43e1-b3c7-95bfefe6f60e" active="" # !ls -n ./data # + uuid="ed6eb7c9-0004-42a7-b0b3-0e1aa5f27ae1" active="" # !rm -f ./data/arr* # # clean-up directory # + uuid="f51de772-ad07-4983-8914-d60383fc0393" active="" # ftps = ftplib.FTP_TLS('YOUR_SERVER_DOMAIN.com') # + uuid="442158b0-2c8a-47f5-a60d-3019c687b196" active="" # ftps.login(user='REPLACE', passwd='<PASSWORD>') # + uuid="a8ab8f48-5524-423c-936f-b<PASSWORD>dc3b0" active="" # ftps.prot_p() # + uuid="80d163ef-151d-4536-8e16-7fc0774db409" active="" # ftps.retrlines('LIST') # + uuid="6cb73378-f642-4627-89a9-f6f69f076d9a" active="" # ftps.close() # - # ### httplib # + uuid="0d79f4d1-5d93-4c9d-9d62-40dc4981a457" import http import http.client # + uuid="d665bdf3-fc62-46c0-8f7f-ff125f12ca51" http = http.client.HTTPConnection('hilpisch.com') # + uuid="9285e3ee-2035-4816-897f-4c9e2c7c8ef0" http.request('GET', '/index.htm') # + uuid="18a7deaf-ba30-440f-88e7-fd6c90245661" resp = http.getresponse() # + uuid="ca638e56-6667-4eea-af63-c2be54c86b9c" resp.status, resp.reason # + 
uuid="ea7a144c-81bc-4cab-9132-c70af19773d3" content = resp.read() content[:100] # first 100 characters of the file # + uuid="f8ca3902-3e86-4398-8aa7-e301a0706871" index = content.find(b' E ') index # + uuid="4cbca133-e69f-4f1f-8378-de0a212a6fdd" content[index:index + 29] # + uuid="e1d9bb7e-2429-4a3b-8ad2-ce8e20f398d1" http.close() # - # ### urllib # + uuid="7a44c652-b62f-4974-b7be-c1c708c432d2" import urllib.request # + uuid="eeeecf85-e182-44d1-96ca-dfa7c7b770e0" url = 'http://hilpisch.com/tr_eikon_eod_data.csv' # + uuid="d0222eb9-e696-477e-9647-adf7296153ac" connect = urllib.request.urlopen(url) # + uuid="afb60cb7-6822-438a-81fd-43cdaca9290c" data = connect.read() # + uuid="b97fa159-af2c-4d4c-bd07-3b26e4942f86" print(data[:1000]) # - path = '/Users/yves/Documents/Temp/data/' # + uuid="60b60a3a-2cce-4f97-aaa7-c042f2e60279" urllib.request.urlretrieve(url, path + 'eod_data.csv') # + uuid="acf4d984-2ff8-4258-9a6c-87a86ee2f116" csv = open(path + 'eod_data.csv', 'r') csv.readlines()[:5] # + uuid="62d5e85b-1590-450c-9018-746e6872f1fa" # !rm -f ./data/* # - # ## Web Plotting # ### Static Plots # + uuid="bb617800-1dc9-4cb9-a7b7-2b8cd16cf0b2" import numpy as np import pandas as pd # %matplotlib inline # + uuid="8e1e8943-d88d-4b61-bf19-15e55bebfb3a" url = 'http://hilpisch.com/tr_eikon_eod_data.csv' data = pd.read_csv(url, parse_dates=['Date']) # + uuid="ca6562c0-9932-4d5c-9811-4512e717fac3" data.plot(x='Date', y='MSFT.O', figsize=(10, 6)); # tag: microsoft # title: Historical stock prices for Microsoft since January 2010 # - # ### Interactive Plots # **REMARK**: The original version used Bokeh for Web plotting. Plotly seems to be the more easy and intuitive way for generating interactive D3.js Web plots. # + uuid="e6364d22-c102-4a31-8bf1-35edbc5bbaae" import pandas as pd import cufflinks as cf # - from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot init_notebook_mode(connected=True) # direct plotting with Cufflinks within the notebook iplot(data.set_index('Date')['MSFT.O'].iplot(asFigure=True)) # ### Real-Time Plots # #### Real-Time FX Data # #### Real-Time Stock Price Quotes # ## Rapid Web Applications # ### Traders' Chat Room # ### Data Modeling # ### The Python Code # #### Imports and Database Preliminaries # #### Core Functionality # ### Templating # + uuid="1463132b-34c8-4238-8d3e-6ba5c7562194" '%d, %d, %d' % (1, 2, 3) # + uuid="c5c11d6d-ad65-4ee7-9ebe-91dc1c3d2497" '{}, {}, {}'.format(1, 2, 3) # + uuid="7ff70268-2c8f-45a5-a6b0-17bb9d77cc14" '{}, {}, {}'.format(*'123') # + uuid="da70e7f6-9068-4039-bdfc-8bfe510a7ee6" templ = '''<!doctype html> Just print out <b>numbers</b> provided to the template. 
<br><br> {% for number in numbers %} {{ number }} {% endfor %} ''' # + uuid="6b66b22b-fb7f-4dc7-99f4-ed6550e9eb2f" from jinja2 import Template # + uuid="641a5ee8-9ef7-4e60-80cb-c73b1d81f182" t = Template(templ) # + uuid="ad91b975-9917-4d22-99db-635d745308a8" html = t.render(numbers=range(5)) # + uuid="3ddf0465-00f2-4c70-8547-7460eba6f11d" html # + uuid="d8d44498-a17f-41b3-9e91-6d09a288c5b8" from IPython.display import HTML HTML(html) # - # ### Styling # + uuid="41649870-1061-444f-924c-733ecdef2ebb" import os for path, dirs, files in os.walk('../python36/tradechat'): print(path) for f in files: print(f) # - # ## Web Services # ### The Financial Model # ### The Implementation # + uuid="79f9048b-3427-4a79-9cbc-4bf414c0730c" import sys import requests sys.path.append('../python36/volservice') # adjust if necessary to your path # + uuid="baba8c94-1d56-42aa-a491-da6b7a9f649f" from werkzeug.wrappers import Request, Response # + uuid="5cbccdda-a5a3-4c7e-97ac-3df9b05c5efb" from vol_pricing_service import get_option_value # + uuid="b6bad926-3093-4f80-aedd-aef6e7950749" def application(environ, start_response): request = Request(environ) # wrap environ in new object text = get_option_value(request.args) # provide all paramters of the call to function # get back either error message or option value response = Response(text, mimetype='text/html') # generate response object based on the returned text return response(environ, start_response) # + uuid="a5708e66-1b39-4f82-b07f-44cb07c68280" import requests import numpy as np url = 'http://localhost:4000/' # - # Start the service on the shell in the appropriate folder: # # $ python vol_pricing.py print(requests.get(url).text) urlpara = url + 'application?V0=%s&kappa=%s&theta=%s&sigma=%s&zeta=%s' urlpara += '&T=%s&r=%s&K=%s' urlval = urlpara % (25, 2.0, 20, 1.0, 0.0, 1.5, 0.02, 22.5) urlval print(requests.get(urlval).text) # %%time urlpara = 'http://localhost:4000/application?V0=25&kappa=2.0' urlpara += '&theta=25&sigma=1.0&zeta=0.0&T=1&r=0.02&K=%s' strikes = np.linspace(20, 30, 50) results = [] for K in strikes: results.append(float(requests.get(urlpara % K).text)) results = np.array(results) results from pylab import plt plt.style.use('seaborn') # %matplotlib inline plt.figure(figsize=(10, 6)) plt.plot(strikes, results, 'b') plt.plot(strikes, results, 'ro') plt.grid(True) plt.xlabel('strike') plt.ylabel('European call option value'); # ## Conclusions # ## Further Reading # <img src="http://hilpisch.com/tpq_logo.png" alt="The Python Quants" width="35%" align="right" border="0"><br> # # <a href="http://tpq.io" target="_blank">http://tpq.io</a> | <a href="http://twitter.com/dyjh" target="_blank">@dyjh</a> | <a href="mailto:<EMAIL>"><EMAIL></a> # # **Quant Platform** | # <a href="http://quant-platform.com">http://quant-platform.com</a> # # **Python for Finance** | # <a href="http://python-for-finance.com" target="_blank">Python for Finance @ O'Reilly</a> # # **Derivatives Analytics with Python** | # <a href="http://derivatives-analytics-with-python.com" target="_blank">Derivatives Analytics @ Wiley Finance</a> # # **Listed Volatility and Variance Derivatives** | # <a href="http://lvvd.tpq.io" target="_blank">Listed VV Derivatives @ Wiley Finance</a> # # **Python Training** | # <a href="http://training.tpq.io" target="_blank">Python for Finance University Certificate</a>
jupyter36/14_Web_Integration.ipynb
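# Editor's addition: a short alternative sketch (not in the book's code) for the
# urllib section above, pulling the same CSV straight into pandas without writing a
# temporary file. It assumes the URL used above is still reachable and that the file
# has a date index in its first column, as in the chapter.
import io
import requests
import pandas as pd

url = 'http://hilpisch.com/tr_eikon_eod_data.csv'
resp = requests.get(url, timeout=30)
resp.raise_for_status()
data = pd.read_csv(io.StringIO(resp.text), index_col=0, parse_dates=True)
print(data.head())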
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + from utils.load import * # /!\ pythreejs is a dependency, install it: https://pythreejs.readthedocs.io/en/stable/installing.html from utils.pythree_display import * # + # SKETCH FROM STUDY user_id = 2 system_id = 2 model_id = 2 file_path = get_file_path(SKETCH_HISTORY_FOLDER, user_id, system_id, model_id) # SKETCH WITH FILE NAME # file_path = os.path.join(SKETCH_HISTORY_FOLDER, "hat.json") # - # Load sketch history data sketch_history = try_load_data(file_path) if sketch_history is not None: print("Success") # Display input strokes (only non deleted strokes) draw_strokes_samples([stroke["input_samples"] for stroke in sketch_history if stroke["deletion_time"] is None]) # Display structured strokes (only non deleted strokes) draw_curves([stroke["ctrl_pts"] for stroke in sketch_history if stroke["deletion_time"] is None])
scripts/Example - sketch history with pythreejs.ipynb
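# Editor's addition: a tiny helper sketch (not part of the original example) that
# summarises a loaded sketch history before plotting it. It relies only on the keys
# already used above ("input_samples", "deletion_time") and assumes "input_samples"
# is a sequence per stroke.
def summarise_sketch(sketch_history):
    if sketch_history is None:
        print("Nothing loaded")
        return
    kept = [s for s in sketch_history if s["deletion_time"] is None]
    deleted = len(sketch_history) - len(kept)
    samples = sum(len(s["input_samples"]) for s in kept)
    print(f"{len(kept)} strokes kept, {deleted} deleted, "
          f"{samples} input samples in the kept strokes")

summarise_sketch(sketch_history)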
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + from datetime import date, timedelta from selenium import webdriver from selenium.webdriver.common.by import By from selenium.webdriver.support.ui import WebDriverWait from selenium.webdriver.support import expected_conditions as EC import pandas as pd import datetime import logging def get_historic_data(date: datetime.datetime): date_str = date.strftime('%Y-%m-%d') lookup_URL = "https://www.wunderground.com/history/daily/us/wy/jackson/KJAC/date/{0}".format(date_str) options = webdriver.ChromeOptions(); options.add_argument('headless'); # to run chrome in the backbroung driver = webdriver.Chrome(executable_path='/usr/lib/chromium-browser/chromedriver', options=options) # Optional argument, if not specified will search path. xpath = "/html/body/app-root/app-history/one-column-layout/wu-header/sidenav/mat-sidenav-container/mat-sidenav-content/div/section/div[2]/div[1]/div[5]/div[1]/div/lib-city-history-observation/div/div[2]/table/tbody/tr[*]" driver.get(lookup_URL) rows = WebDriverWait(driver, 1).until(EC.visibility_of_all_elements_located((By.XPATH, xpath))) raw_data = [] for row in rows: cols=row.find_elements_by_tag_name("td") data_dict = {"Time":cols[0].text, "Temperature":cols[1].text, "Dew Point":cols[2].text, "Humidity":cols[3].text, "Wind":cols[4].text, "Wind Speed":cols[5].text, "Wind Gust":cols[6].text, "Pressure":cols[7].text, "Precip.":cols[8].text, "Condition":cols[9].text } raw_data.append(data_dict) df = pd.DataFrame.from_records(raw_data) return df # + import datetime dt = datetime.datetime(2021, 1, 1) end = datetime.datetime.now() step = datetime.timedelta(days = 1) dates = [] while dt < end: dates.append(dt) dt += step # - df = pd.DataFrame() for date in dates[:-1]: print("checking date"+date.strftime("%m/%d/%Y, %H:%M:%S")) df_temp = get_historic_data(date) df_temp["Time"] = pd.to_datetime(df_temp['Time']).apply(lambda x: x.replace(year=date.year, month=date.month, day=date.day)) df = pd.concat([df, df_temp], ignore_index=True) df pickle.dump(df, open( "test_weather_df.p", "wb" ) ) # + import pickle df = pickle.load(open("jackson_hole_weather_df.p","rb")) # - # # Prepare data for uploading to mongodb def convert_to_numeric(df,col): df[col]=df[col].str.extract(r'(\d+)') df[col] = pd.to_numeric(df[col]) def prep_for_mongo(df): convert_to_numeric(df,"Wind Speed") convert_to_numeric(df,"Dew Point") convert_to_numeric(df,"Humidity") convert_to_numeric(df,"Wind Gust") convert_to_numeric(df,"Pressure") convert_to_numeric(df,"Precip.") convert_to_numeric(df,"Temperature") df = df.rename(columns={"Precip.": "Precipitation"}) return df df = prep_for_mongo(df) df.dtypes df.head() # # Now upload to mongodb # + jupyter={"outputs_hidden": true} from pymongo import MongoClient import urllib.parse # init mongo connection username = urllib.parse.quote_plus('dbUser') password = urllib.parse.quote_plus("<PASSWORD>") url = "mongodb+srv://{}:{}@cluster0.<EMAIL>.net/test?retryWrites=true&w=majority".format(username, password) client = MongoClient(url) db = client["weather"] mycol = db["jackson_hole"] data_dict = df.to_dict("records") # Insert collection mycol.insert_many(data_dict) # - # # Now retrieve data from mongodb pipeline = [ { "$addFields": { "datetime": { "$toDate": { "$multiply": [ "$counted_at", 1000 ] } } } }, { "$addFields": { "counted_at": { "$convert": { 
"input": "$counted_at", "to": "double", "onError": None } }, "computer_id": { "$convert": { "input": "$computer_id", "to": "string", "onError": None } } } }, { "$match": { "arg_file": { "$in": [ "jackson_hole.env" ] }, "type": { "$nin": [] }, "computer_id": { "$in": [ None, "", "00000000-0000-0000-0000-3cecef225486", "00000000-0000-0000-0000-f894c218ae25" ] }, # "datetime": { # "$gte": { # "$date": "2021-01-04T03:32:13Z" # } # } } }, { "$addFields": { "datetime": { "$cond": { "if": { "$eq": [ { "$type": "$datetime" }, "date" ] }, "then": "$datetime", "else": None } } } }, { "$addFields": { "__alias_0": { "year": { "$year": "$datetime" }, "month": { "$subtract": [ { "$month": "$datetime" }, 1 ] }, "date": { "$dayOfMonth": "$datetime" }, "hours": { "$hour": "$datetime" }, "minutes": { "$minute": "$datetime" } } } }, { "$group": { "_id": { "__alias_0": "$__alias_0" }, "__alias_1": { "$sum": { "$cond": [ { "$ne": [ { "$type": "$datetime" }, "missing" ] }, 1, 0 ] } } } }, { "$project": { "_id": 0, "__alias_0": "$_id.__alias_0", "__alias_1": 1 } }, { "$project": { "x": "$__alias_0", "y": "$__alias_1", "_id": 0 } }, { "$sort": { "x.year": 1, "x.month": 1, "x.date": 1, "x.hours": 1, "x.minutes": 1 } }, { "$limit": 5000 } ] # + # init mongo connection username = urllib.parse.quote_plus('dbUser') password = urllib.parse.quote_plus("<PASSWORD>") url = "mongodb+srv://{}:{}@cluster0.edygp.mongodb.net/test?retryWrites=true&w=majority".format(username, password) client = MongoClient(url) # - traffic_results = client["traffic"]["traffic_data"].aggregate(pipeline) traffic_results = list(traffic_results) len(traffic_results) for i in traffic_results: i["x"] = datetime.datetime(year = i["x"]["year"], month = i["x"]["month"]+1, day = i["x"]["date"],hour = i["x"]["hours"]) traffic_df = pd.DataFrame.from_records(traffic_results) traffic_df # # Now get back the weather data from the db pipeline_weather = [ { "$group": { "_id": { "__alias_0": "$Time", "__alias_1": "$Dew Point", "__alias_2": "$Wind Gust", "__alias_3": "$Wind Speed", "__alias_4": "$Temperature", "__alias_5": "$Pressure" } } }, { "$project": { "_id": 0, "__alias_0": "$_id.__alias_0", "__alias_1": "$_id.__alias_1", "__alias_2": "$_id.__alias_2", "__alias_3": "$_id.__alias_3", "__alias_4": "$_id.__alias_4", "__alias_5": "$_id.__alias_5" } }, { "$project": { "x": "$__alias_0", "y": "$__alias_1", "y_series_0": "$__alias_2", "y_series_1": "$__alias_3", "y_series_2": "$__alias_4", "y_series_3": "$__alias_5", "_id": 0 } }, { "$sort": { "x": 1, "y": 1, "y_series_0": 1, "y_series_1": 1, "y_series_2": 1, "y_series_3": 1 } }, { "$addFields": { "__multi_series": { "$objectToArray": { "Wind Gust": "$y_series_0", "Wind Speed": "$y_series_1", "Temperature": "$y_series_2", "Pressure": "$y_series_3", "Dew Point": "$y" } } } }, { "$unwind": "$__multi_series" }, { "$addFields": { "color": "$__multi_series.k", "y": "$__multi_series.v" } }, { "$project": { "__multi_series": 0, "y_series_0": 0, "y_series_1": 0, "y_series_2": 0, "y_series_3": 0 } }, { "$limit": 50000 } ] # + jupyter={"outputs_hidden": true} weather_results = client["weather"]["jackson_hole"].aggregate(pipeline_weather) list(weather_results) # + import plotly.express as px import plotly.io as pio pio.renderers.default = 'browser' fig = px.line(df, x="Time", y=df[["Temperature","Dew Point","Wind Speed","Wind Gust","Pressure","Precipitation"]].columns) fig fig.show() traffic_df # + fig = px.line(x = traffic_df["x"], y = traffic_df["y"]) fig fig.show() # + import plotly.graph_objects as go from 
plotly.subplots import make_subplots # Create figure with secondary y-axis fig = make_subplots(specs=[[{"secondary_y": True}]]) # Add traces fig.add_trace( go.Scatter(x=df["Time"], y=df["Temperature"], mode='lines', name='Temperature'), secondary_y=False, ) fig.add_trace( go.Scatter(x=df["Time"], y=df["Wind Gust"], mode='lines', name='Wind Gust'), secondary_y=False, ) fig.add_trace( go.Scatter(x=df["Time"], y=df["Wind Gust"], mode='lines', name='Wind Gust'), secondary_y=False, ) fig.add_trace( go.Scatter(x = traffic_df["x"], y = traffic_df["y"], mode='lines', name='Traffic volume'), secondary_y=True, ) # Add figure title fig.update_layout( title_text="Double Y Axis Example" ) # Set x-axis title fig.update_xaxes(title_text="xaxis title") # Set y-axes titles fig.update_yaxes(title_text="<b>primary</b> yaxis title", secondary_y=False) fig.update_yaxes(title_text="<b>secondary</b> yaxis title", secondary_y=True) fig.show() # + import plotly.graph_objects as go from plotly.subplots import make_subplots import plotly.graph_objects as go fig = go.Figure() # Add traces fig.add_trace( go.Scatter(x=df["Time"], y=df["Temperature"], mode='lines', name='Temperature', yaxis="y2" ) ) fig.add_trace( go.Scatter(x=df["Time"], y=df["Condition"], mode='lines', name='Condition', yaxis="y3") ) fig.add_trace( go.Bar(x = traffic_df["x"], y = traffic_df["y"], name='Traffic volume'), ) # Create axis objects fig.update_layout( yaxis=dict( title="yaxis title", titlefont=dict( color="#1f77b4" ), tickfont=dict( color="#1f77b4" ) ), yaxis2=dict( title="yaxis2 title", titlefont=dict( color="#ff7f0e" ), tickfont=dict( color="#ff7f0e" ), anchor="free", overlaying="y", side="left", position=0.15 ), yaxis3=dict( title="yaxis3 title", titlefont=dict( color="#d62728" ), tickfont=dict( color="#d62728" ), anchor="x", overlaying="y", side="right" ), yaxis4=dict( title="yaxis4 title", titlefont=dict( color="#9467bd" ), tickfont=dict( color="#9467bd" ), anchor="free", overlaying="y", side="right", position=0.85 ) ) # Update layout properties fig.update_layout( title_text="multiple y-axes example", ) fig.show() # + import plotly.graph_objects as go fig = go.Figure() fig.add_trace(go.Scatter( x=[1, 2, 3], y=[4, 5, 6], name="yaxis1 data" )) fig.add_trace(go.Scatter( x=[2, 3, 4], y=[40, 50, 60], name="yaxis2 data", yaxis="y2" )) fig.add_trace(go.Scatter( x=[4, 5, 6], y=[40000, 50000, 60000], name="yaxis3 data", yaxis="y3" )) fig.add_trace(go.Scatter( x=[5, 6, 7], y=[400000, 500000, 600000], name="yaxis4 data", yaxis="y4" )) # Create axis objects fig.update_layout( xaxis=dict( domain=[0.3, 0.7] ), yaxis=dict( title="yaxis title", titlefont=dict( color="#1f77b4" ), tickfont=dict( color="#1f77b4" ) ), yaxis2=dict( title="yaxis2 title", titlefont=dict( color="#ff7f0e" ), tickfont=dict( color="#ff7f0e" ), anchor="free", overlaying="y", side="left", position=0.15 ), yaxis3=dict( title="yaxis3 title", titlefont=dict( color="#d62728" ), tickfont=dict( color="#d62728" ), anchor="x", overlaying="y", side="right" ), yaxis4=dict( title="yaxis4 title", titlefont=dict( color="#9467bd" ), tickfont=dict( color="#9467bd" ), anchor="free", overlaying="y", side="right", position=0.85 ) ) # Update layout properties fig.update_layout( title_text="multiple y-axes example", width=800, ) fig.show() # + # /html/body/app-root/app-today/one-column-layout/wu-header/sidenav/mat-sidenav-container/mat-sidenav-content/div/section/div[3]/div[1]/div/div[1]/div[1]/lib-city-current-conditions/div
data_analysis/.ipynb_checkpoints/custom scraper for weather-checkpoint.ipynb
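# Editor's note on the scraper above (not a change the author made): get_historic_data
# never calls driver.quit(), so each scraped date leaves a Chrome process running, and
# a single slow page load raises and stops the whole date loop. The quit() fix belongs
# inside get_historic_data (e.g. in a try/finally); the sketch below only adds a simple
# retry wrapper around the existing function. Retry count and wait time are assumptions.
import time

def get_historic_data_with_retry(date, retries=3, wait_s=5):
    last_error = None
    for attempt in range(1, retries + 1):
        try:
            return get_historic_data(date)   # function defined in the notebook above
        except Exception as exc:
            last_error = exc
            print(f"attempt {attempt} failed for {date:%Y-%m-%d}: {exc}")
            time.sleep(wait_s)
    raise last_error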
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: math_venv
#     language: python
#     name: math_venv
# ---

# # Lab No. 5
# # Solving systems of nonlinear equations
#

from scipy.optimize import fsolve
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import sympy as sp
import numpy as np
import math

# ### 1
# Isolate the roots of the given equation graphically
#
# The given equation

f = lambda x: np.cos(x) - np.sqrt(x + 2) + 1
df = lambda x: -1 * math.sin(x) - 0.5 * math.pow(x + 2, -0.5)

# Isolating the roots
# %matplotlib inline
roots = fsolve(f, 2)
x = np.array(range(-1, 30))
y = f(x)
plt.plot(x, y)
plt.axvline(x=roots[0], color='r', linestyle='-')
red_patch = mpatches.Patch(color='red', label=f'x={roots[0]:.3f}')
plt.legend(handles=[red_patch])
plt.show()

# ### 2
# Solve the equation with the chord (secant) method

x0 = 0
x1 = 1
if f(x0) * f(x1) < 0:
    while abs(x1 - x0) > 1e-5:
        x = x0 - f(x0) * (x1 - x0) / (f(x1) - f(x0))
        x0, x1 = x1, x
print(f"\nRoot of the equation: {x:.3}")

# ### 3
# Solve the equation with the tangent (Newton) method

x0 = 100
x1 = x0 - f(x0) / df(x0)
while abs(x1 - x0) > 1e-5:
    x = x1 - f(x1) / df(x1)
    x0, x1 = x1, x
print(f"\nRoot of the equation: {x:.3}")

# ### 4
# Isolate the roots of the given system of nonlinear equations graphically

sys_f1 = lambda x, y: sp.tan(x * y) - x**2
sys_f2 = lambda x, y: 0.7 * x**2 + 2 * y**2 - 1

# Isolating the roots
x, y = sp.symbols('x y')
p0 = sp.plot_implicit(sys_f2(x, y), (x, -1.5, 1.5), (y, -1.5, 1.5), show=False, line_color='b')
p1 = sp.plot_implicit(sys_f1(x, y), (x, -1.5, 1.5), (y, -1.5, 1.5), show=False, line_color='r')
p0.extend(p1)
p0.show()

# ### 5
# Solve the system with the simple (fixed-point) iteration method

F = lambda xy: [np.tan(xy[0] * xy[1]) - xy[0]**2, 0.7 * xy[0]**2 + 2 * xy[1]**2 - 1]
fi = lambda xy: [math.sqrt(math.tan(xy[0] * xy[1])), math.sqrt((1 - 0.7 * xy[0]**2) / 2)]
minus_fi = lambda xy: [-math.sqrt(math.tan(xy[0] * xy[1])), -math.sqrt((1 - 0.7 * xy[0]**2) / 2)]

def J(xy):
    res = [[0.0, 0.0], [0.0, 0.0]]
    res[0][0] = -2 * xy[0] + xy[1] * (np.tan(xy[0]*xy[1])**2 + 1)
    res[0][1] = xy[0] * (np.tan(xy[0]*xy[1])**2 + 1)
    res[1][0] = 1.4 * xy[0]
    res[1][1] = 4 * xy[1]
    return np.array(res)

def iterr(x0, itrr_func):
    x1 = itrr_func(x0)
    while abs(x1[0] - x0[0]) > 1e-5 or abs(x1[1] - x0[1]) > 1e-5:
        x = itrr_func(x1)
        x0, x1 = x1, x
    print("Root of the system:", ", ".join(map(str, x)))

iterr([0.83, 1], fi)
iterr([0, 0.8], fi)
iterr([-0.83, -0.7], minus_fi)
iterr([0, -0.8], minus_fi)

# ### 6
# Solve the system with Newton's method

def newton(x0):
    i = 0
    x1 = [k - m for (k, m) in zip(x0, np.linalg.inv(J(x0)) @ F(x0))]
    while abs(x1[0] - x0[0]) > 1e-5 or abs(x1[1] - x0[1]) > 1e-5:
        i += 1
        x = [k - m for k, m in zip(x1, np.linalg.inv(J(x1)) @ F(x1))]
        x0, x1 = x1, x
    print("Root of the system:", ", ".join(map(str, x)))
    print(f"Number of iterations: {i}\n")

newton([0.8, 0.9])
newton([0, 0.5])
newton([-0.6, -0.6])
newton([-0, -0.9])

# ### 7
# Solve the system with the modified Newton method. Compare the convergence rates of the two methods

def modified_newton(x0):
    i = 0
    J0 = np.linalg.inv(J(x0))
    x1 = [k - m for (k, m) in zip(x0, J0 @ F(x0))]
    while abs(x1[0] - x0[0]) > 1e-5 or abs(x1[1] - x0[1]) > 1e-5:
        i += 1
        x = [k - m for k, m in zip(x1, J0 @ F(x1))]
        x0, x1 = x1, x
    print("Root of the system:", ", ".join(map(str, x)))
    print(f"Number of iterations: {i}\n")

modified_newton([0.8, 0.9])
modified_newton([0, 0.5])
modified_newton([-0.6, -0.6])
modified_newton([0, -0.9])
term4/MNA/lab5.ipynb
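# Editor's addition: a quick cross-check sketch (not required by the lab) that
# verifies the system roots found by the iteration/Newton methods above with
# scipy.optimize.fsolve, reusing the same F from section 5. The starting guesses
# mirror the ones used in the lab.
import numpy as np
from scipy.optimize import fsolve

F = lambda xy: [np.tan(xy[0] * xy[1]) - xy[0]**2,
                0.7 * xy[0]**2 + 2 * xy[1]**2 - 1]

for guess in ([0.8, 0.9], [0.0, 0.5], [-0.6, -0.6], [0.0, -0.9]):
    root = fsolve(F, guess)
    print(f"start {guess} -> root {root}, max residual {np.abs(F(root)).max():.2e}")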
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Purpose # The purpose of this notebook is to separate the potential operators by taking advantage of the linearity of the SMS chiral semi-regulated momentum space potential in the coupling constants for all partial waves. # # And then to load the individual matrix elements in an HDF5 file for external use. # # Notebook Setup # # ## Library import # We import all the required Python libraries # + # %load_ext autoreload # %autoreload 2 # %matplotlib inline import h5py import numpy as np import pandas as pd # - # ## Local library import # We import all the required local libraries # + from constants import * from mesh import BuildMesh from sms_chiral_potential import SMSChiralPot # - # # Parameters # + ### MESH INPUT PARAMETERS ### ki = 0 cut_reg1 = 4 # Mesh cutoff of first region cut_reg2 = 8 # Mesh cutoff of second region kf = 100 pts_reg1 = 40 # Total points in first region pts_reg2 = 20 # Total points in second region pts_reg3 = 20 # Total points in third region Ntot = pts_reg1 + pts_reg2 + pts_reg3 ### POTENTIAL INPUT PARAMETERS ### force = 'np' # Choice of interaction ostat = 5 # Order of EFT cutnum = 2 # Choice of cutoff jmax = 20 # Momentum # - # # Creating h5 file # Nodes/weights mesh_nodes = [ki, cut_reg1, cut_reg2, kf] pts_per_region = [pts_reg1, pts_reg2, pts_reg3] mesh = BuildMesh(mesh_nodes, pts_per_region) nodes, weights = mesh.nodes, mesh.weights # + # open a new h5 file for writing # initialize the h5 file file_name = './potential_SMS_n4lo_plus_' + force + \ '_Lambda450MeV_jmax-'+ str(jmax) + '_' + str(ki) \ + str(cut_reg1) + str(cut_reg2) + '_' + str(pts_reg1) \ + str(pts_reg2) + str(pts_reg3) + '.h5' hf_sms = h5py.File(file_name, 'w') # + # hard-wired numbers temporarily --> do better! 
num_lecs = 30 # includes through F-wave contacts num_sing_pws = 42 num_coup_pws = 20 num_k = len(nodes) # V0 is the part of the potential independent of the (non-\pi N) LECs V0_sing = np.zeros((num_sing_pws, num_k, num_k)) hf_sms.create_dataset('V0_sing', data=V0_sing) print('shape of V0_sing (uncoupled potentials): ',V0_sing.shape) V0_coup = np.zeros((num_coup_pws, 2*num_k, 2*num_k)) hf_sms.create_dataset('V0_coup', data=V0_coup) print('shape of V0_coup (coupled potentials): ', V0_coup.shape) # Each Vi corresponds to one of the (non-\pi N) LECs Vi_sing = np.zeros((num_sing_pws, num_k, num_k, num_lecs)) hf_sms.create_dataset('Vi_sing', data=Vi_sing) print('shape of Vi_sing (uncoupled potentials): ',Vi_sing.shape) Vi_coup = np.zeros((num_coup_pws, 2*num_k, 2*num_k, num_lecs)) hf_sms.create_dataset('Vi_coup', data=Vi_coup) print('shape of Vi_coup (coupled potentials): ', Vi_coup.shape) hf_sms.create_dataset('k', data=nodes) hf_sms.create_dataset('dk', data=weights) quadratic = np.zeros(num_lecs, dtype=bool) hf_sms.create_dataset('quadratic', data=quadratic) # + dt = h5py.special_dtype(vlen=str) # data type for the LEC name strings LEC_names = np.array(['LO 1S0 pp', 'LO 1S0 np', 'LO 1S0 nn', 'LO 3S1 np', 'NLO 1S0', 'NLO 3P0', 'NLO 1P1', 'NLO 3P1', 'NLO 3S1', 'NLO 3S1-3D1', 'NLO 3P2', 'N3LO t1S0', 'N3LO 1S0', 'N3LO 3P0', 'N3LO 1P1', 'N3LO 3P1', 'N3LO t3S1', 'N3LO 3S1', 'N3LO 3D1', 'N3LO t3S1-3D1', 'N3LO 3S1-3D1', 'N3LO 1D2', 'N3LO 3D2', 'N3LO 3P2', 'N3LO 3P2-3F2', 'N3LO 3D3', 'N4LO+ 3F2', 'N4LO+ 1F3', 'N4LO+ 3F3', 'N4LO+ 3F4'], dtype=dt) hf_sms.create_dataset('lec names', data=LEC_names) # add to the .h5 file # + single = np.array([[0, 0, 0, 1], [1, 1, 0, 1], [1, 0, 1, 0], [1, 1, 1, 1], [2, 0, 2, 1], [2, 1, 2, 0], [3, 0, 3, 0], [3, 1, 3, 1], [4, 0, 4, 1], [4, 1, 4, 0], [5, 0, 5, 0], [5, 1, 5, 1], [6, 0, 6, 1], [6, 1, 6, 0], [7, 0, 7, 0], [7, 1, 7, 1], [8, 0, 8, 1], [8, 1, 8, 0], [9, 0, 9, 0], [9, 1, 9, 1], [10, 0, 10, 1], [10, 1, 10, 0], [11, 0, 11, 0], [11, 1, 11, 1], [12, 0, 12, 1], [12, 1, 12, 0], [13, 0, 13, 0], [13, 1, 13, 1], [14, 0, 14, 1], [14, 1, 14, 0], [15, 0, 15, 0], [15, 1, 15, 1], [16, 0, 16, 1], [16, 1, 16, 0], [17, 0, 17, 0], [17, 1, 17, 1], [18, 0, 18, 1], [18, 1, 18, 0], [19, 0, 19, 0], [19, 1, 19, 1], [20, 0, 20, 1], [20, 1, 20, 0]]) coupled = np.array([[[0, 1, 1, 0], [2, 1, 1, 0]], [[1, 1, 2, 1], [3, 1, 2, 1]], [[2, 1, 3, 0], [4, 1, 3, 0]], [[3, 1, 4, 1], [5, 1, 4, 1]], [[4, 1, 5, 0], [6, 1, 5, 0]], [[5, 1, 6, 1], [7, 1, 6, 1]], [[6, 1, 7, 0], [8, 1, 7, 0]], [[7, 1, 8, 1], [9, 1, 8, 1]], [[8, 1, 9, 0], [10, 1, 9, 0]], [[9, 1, 10, 1], [11, 1, 10, 1]], [[10, 1, 11, 0], [12, 1, 11, 0]], [[11, 1, 12, 1], [13, 1, 12, 1]], [[12, 1, 13, 0], [14, 1, 13, 0]], [[13, 1, 14, 1], [15, 1, 14, 1]], [[14, 1, 15, 0], [16, 1, 15, 0]], [[15, 1, 16, 1], [17, 1, 16, 1]], [[16, 1, 17, 0], [18, 1, 17, 0]], [[17, 1, 18, 1], [19, 1, 18, 1]], [[18, 1, 19, 0], [20, 1, 19, 0]], [[19, 1, 20, 1], [21, 1, 20, 1]]]) hf_sms.create_dataset('waves_coup', data=coupled) # add to the .h5 file hf_sms.create_dataset('waves_sing', data=single) # add to the .h5 file # - list(hf_sms.keys()) # check the keys # %%time my_sms = SMSChiralPot(ostat, force, cutnum) cc_pred = my_sms.get_LECs() nodes_GeV, weights_GeV = hbar_c_GeV*nodes, hbar_c_GeV*weights spectral, contacts = my_sms.get_smschiral(nodes_GeV, weights_GeV, jmax) # + # Fill up the V0 entries for j_index in range(0, jmax + 1): pot_spec = (V_factor_RME / (0.5*np.pi)) * spectral[j_index,:,:,:] hf_sms['V0_sing'][2*j_index,:,:] = pot_spec[0,:,:] if j_index == 0: 
hf_sms['V0_sing'][2*j_index+1,:,:] = pot_spec[5,:,:] elif j_index >= 1: hf_sms['V0_sing'][2*j_index+1,:,:] = pot_spec[1,:,:] hf_sms['V0_coup'][j_index-1,:num_k,:num_k] = pot_spec[2,:,:] hf_sms['V0_coup'][j_index-1,:num_k,num_k:2*num_k] = pot_spec[3,:,:] hf_sms['V0_coup'][j_index-1,num_k:2*num_k,:num_k] = pot_spec[4,:,:] hf_sms['V0_coup'][j_index-1,num_k:2*num_k,num_k:2*num_k] = pot_spec[5,:,:] # + # Fill up the Vi entries for j_index in range(jmax + 1): pot_op = (V_factor_RME / (0.5*np.pi)) * contacts[j_index] if (j_index == 0): hf_sms['Vi_sing'][2*j_index,:num_k,:num_k,1] = pot_op[0] # 'CT_1S0' hf_sms['Vi_sing'][2*j_index,:num_k,:num_k,4] = pot_op[1] # 'C_1S0' hf_sms['Vi_sing'][2*j_index,:num_k,:num_k,12] = pot_op[2] # 'D_1S0' hf_sms['Vi_sing'][2*j_index+1,:num_k,:num_k,5] = pot_op[3] # 'C_3P0' hf_sms['Vi_sing'][2*j_index+1,:num_k,:num_k,13] = pot_op[4] # 'D_3P0' elif (j_index == 1): hf_sms['Vi_sing'][2*j_index,:,:,6] = pot_op[0] # 'C_1P1' hf_sms['Vi_sing'][2*j_index,:,:,14] = pot_op[1] # 'D_1P1' hf_sms['Vi_sing'][2*j_index+1,:,:,7] = pot_op[2] # 'C_3P1' hf_sms['Vi_sing'][2*j_index+1,:,:,15] = pot_op[3] # 'D_3P1' hf_sms['Vi_coup'][j_index-1,:num_k,:num_k,3] = pot_op[4] # 'CT_3S1' hf_sms['Vi_coup'][j_index-1,:num_k,:num_k,8] = pot_op[5] # 'C_3S1' hf_sms['Vi_coup'][j_index-1,:num_k,:num_k,17] = pot_op[6] # 'D_3S1' hf_sms['Vi_coup'][j_index-1,:num_k,num_k:2*num_k,9] = pot_op[7] # 'C_e1' hf_sms['Vi_coup'][j_index-1,:num_k,num_k:2*num_k,20] = pot_op[8] # 'D_e1' hf_sms['Vi_coup'][j_index-1,num_k:2*num_k,:num_k,9] = pot_op[9] # 'C_e1' hf_sms['Vi_coup'][j_index-1,num_k:2*num_k,:num_k,20] = pot_op[10] # 'D_e1' hf_sms['Vi_coup'][j_index-1,num_k:2*num_k,num_k:2*num_k,18] = pot_op[11] # 'D_3D1' elif (j_index == 2): hf_sms['Vi_sing'][2*j_index,:,:,21] = pot_op[0] # 'D_1D2' hf_sms['Vi_sing'][2*j_index+1,:,:,22] = pot_op[1] # 'D_3D2' hf_sms['Vi_coup'][j_index-1,:num_k,:num_k,10] = pot_op[2] # 'C_3P2' hf_sms['Vi_coup'][j_index-1,:num_k,:num_k,23] = pot_op[3] # 'D_3P2' hf_sms['Vi_coup'][j_index-1,:num_k,num_k:2*num_k,24] = pot_op[4] # 'D_e2' hf_sms['Vi_coup'][j_index-1,num_k:2*num_k,:num_k,24] = pot_op[5] # 'D_e2' hf_sms['Vi_coup'][j_index-1,num_k:2*num_k,num_k:2*num_k,26] = pot_op[6] # 'E_3F2' elif (j_index == 3): hf_sms['Vi_sing'][2*j_index,:,:,27] = pot_op[0] # 'E_1F3' hf_sms['Vi_sing'][2*j_index+1,:,:,28] = pot_op[1] # 'E_3F3' hf_sms['Vi_coup'][j_index-1,:num_k,:num_k,25] = pot_op[2] # 'D_3D3' elif (j_index == 4): hf_sms['Vi_coup'][j_index-1,:num_k,:num_k,29] = pot_op[0] # 'E_3F4' # - hf_sms.close()
nn_scattering/eft_gp_jupyter/Create_h5_file_for_sms_chiral_potential.ipynb
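# Editor's addition: a usage sketch, not part of the generation notebook above.
# Because the potential is linear in the contact LECs, the full matrix in any
# uncoupled partial wave can be rebuilt from the stored pieces as V = V0 + sum_i c_i Vi.
# Dataset names follow the file written above; the truncated file name and the
# all-zero LEC vector in the usage comment are placeholders.
import h5py
import numpy as np

def rebuild_uncoupled(h5_path, pw_index, lecs):
    with h5py.File(h5_path, "r") as hf:
        V0 = hf["V0_sing"][pw_index]        # shape (num_k, num_k), LEC-independent part
        Vi = hf["Vi_sing"][pw_index]        # shape (num_k, num_k, num_lecs)
        names = [n.decode() if isinstance(n, bytes) else n for n in hf["lec names"][:]]
    lecs = np.asarray(lecs)
    assert lecs.shape[0] == Vi.shape[-1], "need one coefficient per stored LEC"
    return V0 + np.einsum("kli,i->kl", Vi, lecs), names

# Usage (placeholder file name, zero LECs return just the LEC-independent part V0):
# V, lec_names = rebuild_uncoupled("potential_SMS_n4lo_plus_np_... .h5", 0, np.zeros(30))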
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # <center><img src="http://i.imgur.com/sSaOozN.png" width="500"></center> # ## Course: Computational Thinking for Governance Analytics # # ### Prof. <NAME>, PhD # * Visiting Professor of Computational Policy at Evans School of Public Policy and Governance, and eScience Institute Senior Data Science Fellow, University of Washington. # * Professor of Government and Political Methodology, Pontificia Universidad Católica del Perú. # # # # Session 0: Introduction to Python Data Structures # # _____ # <a id='home'></a> # # 1. Data Structures # Python has basic native structures, like lists, tuples and dictionaries. # ## A. **LISTS** # # Lists are the most flexible structure to save or contain data elements. names=["Qing", "Françoise", "Raúl", "Bjork","Marie"] ages=[32,33,28,30,29] country=["China", "Senegal", "España", "Norway","Korea"] education=["Bach", "Bach", "Master", "PhD","PhD"] # Above we have created some lists. Lists can contain any values. Lists support different operations: # * **Accessing**: # # Keep in mind the positions in Python start in **0**. # one element ages[0] # several, using slices: ages[1:-1] #second to before last # several, using slices: ages[:-2] #all but two last ones # non consecutive from operator import itemgetter list(itemgetter(0,2,3)(ages)) # difficul to understand? ages[0:4:2] + [ages[3]] # * **Modifying**: # + # by position country[2]="Spain" # list changed: country # + # by value country=["PR China" if x == "China" else x for x in country] # list changed: country # - # * **Deleting** # + # by position del country[-1] #last value # list changed: country # + # by position names.pop() #last value by default # list changed: names # + # only 'del' works for several positions lista=[1,2,3,4,5,6] del lista[1:3] #now: lista # + # by value ages.remove(29) # list changed: ages # just first ocurrence of value!! # + # by value education.remove('PhD') # list changed: education # just first ocurrence!! # + # deleting every value: lista=[1,'a',45,'b','a'] lista=[x for x in lista if x!='a'] # you get: lista # - # * **Inserting values** # at the end lista.append("abc") lista # PART ONE: # first delete a position education.pop(2) education # PART TWO: # now insert in that position education.insert(2,"Master") education # ## B. **TUPLES** # # Tuples are inmutable structures in Python, they look like lists but do not share much of their functionality: # new list: weekend=("Friday", "Saturday", "Sunday") # You can access: weekend[0] # But no other operation is allowed. # Python itself uses tuples as output of some important functions: zip(names,ages) # The **zip** functions creates tuples, by combining in parallel. You can see it if you turn the result into a list: list(zip(names,ages)) # a list of tuples # ## C. **DICTIONARIES** # # *Dicts* work in a more sophisticated way, as they have a **'key'**:**'value'** structure: # + classroom={'student':names,'age':ages,'edu':education} # see it: classroom # - # Dicts do not use indexes to access values: # + #classroom[0] # - # Dicts use keys: classroom['student'] # Notice I created a dictionary where the value is not ONE but a LIST of values. # Once you access a value, you can modify it. You can also use _pop_ or _del_ using the **keys**. 
But you can not use _append_ to add an element, you need **update**: classroom.update({'country':country}) # now: classroom # ## D. DATA FRAMES # **Data frames** are more complex containers of values. The most common analogy is a spreadsheet. To create a data frame, we need to call **pandas**: import pandas # We can prepare a data frame from a dictionary immediately, but ONLY if you have the same amount of elements in each list representing a column. # our data frame: students=pandas.DataFrame(classroom) ## see it: students # But, let me update the dictionary with: names=["Qing", "Françoise", "Raúl", "Bjork","Marie"] # classroom.update({'student':names}) # classroom # We have five students, but only data for four of them. Then this does not work: # + #pandas.DataFrame(classroom) # - # In that case, you need this: # + #then students=pandas.DataFrame({key:pandas.Series(value) for key, value in classroom.items()}) # seeing it: students # - # Sometimes, Python users code like this: # + import pandas as pd # renaming the library students=pd.DataFrame({key:pd.Series(value) for key, value in classroom.items()}) students # - # ### Data frame basic operations # data of structure: list? tuple? dataframe? type(students) # type of data in data frame column students.dtypes # details of data frame students.info() # number of rows and columns students.shape # number of rows: len(students) # first rows students.head(2) # compare with: students.tail(2) # name of columns students.columns # If you needed the column names as a list: students.columns.tolist()# or simply: list(students) # If you needed a column values as a list: students.age.tolist()# list(students.ages) # ### Accesing elements in DF: # # The data frames in pandas behave much like in R: #one particular column students.student # or students['student'] # it is not the same as: students[['student']] # a data frame, not a column (or series) # this is also a DF students[['country','student']] # and this, using loc: columnNames=['country','student'] students.loc[:,columnNames] ## Using positions is very common: columnPositions=[1,3,0] students.iloc[:,columnPositions] # ### Changing values # # If you have a position, you can update values: students.iloc[4,1]=23 # change is immediate! (no warning) students # ### Deleting columns # You can modify any values in a data frame, but let me create a **deep** copy of this data frame to play with: studentsCopy=students.copy() studentsCopy # + # This is what you want get rid of: byeColumns=['edu'] # you can delete more than one #this is the result studentsCopy.drop(columns=byeColumns) # - # Notice you do not have saved the previous result: studentsCopy #NOW we do studentsCopy.drop(columns=byeColumns,inplace=True) #then: studentsCopy # ### Deleting a row # # Let me delete a row: # axis 0 is delete by row studentsCopy.drop(index=2,inplace=True) studentsCopy # As you see, the index dissapeared. Then, you should reset the indexes: studentsCopy.reset_index(drop=True,inplace=True) studentsCopy
introPython.ipynb
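# Editor's addition: one extra worked example in the same spirit as the tutorial
# above, using the same `students` data frame. It shows row selection by condition,
# which complements the column selection and deletion covered there; the particular
# thresholds are arbitrary.

# filter rows with a boolean condition
students[students.age > 30]

# combine conditions with & / | (note the parentheses around each condition)
students[(students.age > 28) & (students.edu == 'PhD')]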
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="CCJNiN5klVtA" # ! pip install -q kaggle from google.colab import files files.upload() # ! mkdir ~/.kaggle # ! cp kaggle.json ~/.kaggle/ # ! chmod 600 ~/.kaggle/kaggle.json # ! kaggle datasets list # + id="LRERWW14stEY" # !kaggle datasets download -d puneet6060/intel-image-classification # + id="1GcCYXdCstHO" # !unzip /content/intel-image-classification.zip # + id="ytO7bPHTstJn" import cv2 import os import matplotlib.pyplot as plt from keras import Sequential from tensorflow.keras import optimizers from keras.preprocessing.image import ImageDataGenerator from keras.layers import Dense, Conv2D, Dropout, MaxPool2D, Flatten # + colab={"base_uri": "https://localhost:8080/", "height": 183} id="iF3GduUVstMT" outputId="d3ef6621-a7a9-400e-ef94-7c1d083f5ac4" w = 10 h = 10 fig = plt.figure(figsize=(15,10)) columns = 6 rows = 1 fielName = "/content/seg_train/seg_train" for i in range(0, columns*rows ): folderName = os.path.join((fielName), os.listdir(fielName)[i]) img = cv2.imread(folderName+'/'+(os.listdir(os.path.join((fielName), os.listdir(fielName)[i]))[i])) fig.add_subplot(rows, columns, i+1) plt.imshow(img) plt.title(os.path.basename(folderName)) plt.show() # + colab={"base_uri": "https://localhost:8080/"} id="O6WT0U6DtqDo" outputId="9660f9da-389d-40db-d9f5-c66b015fd720" batch_size = 32 resize = (224, 224) train_datagen = ImageDataGenerator( rescale=1./255, featurewise_std_normalization=True) validation_datagen = ImageDataGenerator( rescale=1./255, featurewise_std_normalization=True) train_generator = train_datagen.flow_from_directory( '/content/seg_train/seg_train', target_size=resize, batch_size=batch_size, class_mode='categorical') validation_generator = validation_datagen.flow_from_directory( '/content/seg_test/seg_test', target_size=resize, batch_size=batch_size, class_mode='categorical') # + id="7jpHinwFbV6I" # %pip install wandb -q import wandb from wandb.keras import WandbCallback from keras.callbacks import LambdaCallback # + id="5oIYqADklF9_" # !wandb login wandb.init(project="vgg", entity="manar") # + id="ZQYn3ny5nwL0" model = Sequential() model.add(Conv2D(64, (3,3), input_shape=(224,224,3), activation="relu", padding="same")) model.add(Conv2D(64, (3,3), activation="relu", padding="same")) model.add(MaxPool2D((2,2), (2,2))) model.add(Conv2D(128, (3,3), activation="relu", padding="same")) model.add(Conv2D(128, (3,3), activation="relu", padding="same")) model.add(MaxPool2D((2,2), (2,2))) model.add(Conv2D(256, (3,3), activation="relu", padding="same")) model.add(Conv2D(256, (3,3), activation="relu", padding="same")) model.add(Conv2D(256, (1,1), activation="relu", padding="same")) model.add(MaxPool2D((2,2), (2,2))) model.add(Conv2D(512, (3,3), activation="relu", padding="same")) model.add(Conv2D(512, (3,3), activation="relu", padding="same")) model.add(Conv2D(512, (1,1), activation="relu", padding="same")) model.add(MaxPool2D((2,2), (2,2))) model.add(Conv2D(512, (3,3), activation="relu", padding="same")) model.add(Conv2D(512, (3,3), activation="relu", padding="same")) model.add(Conv2D(512, (1,1), activation="relu", padding="same")) model.add(MaxPool2D((2,2), (2,2))) model.add(Flatten()) model.add(Dense(4096, activation="relu")) model.add(Dropout(0.5)) model.add(Dense(4096, activation="relu")) model.add(Dense(6, activation="softmax")) # + colab={"base_uri": "https://localhost:8080/"} 
id="jYxH_4c4nzc7" outputId="d01a1393-0d00-4798-b36d-a7cc26d9a559" model.summary() # + id="9np06UO2nzaj" opt = optimizers.SGD(learning_rate=0.01, momentum=0.9) model.compile(optimizer=opt, loss="categorical_crossentropy", metrics=["accuracy"]) # + colab={"background_save": true, "base_uri": "https://localhost:8080/"} id="i1_gfW7I1WCC" outputId="bc81c9de-51e2-41e5-8203-051627b17431" model.fit(train_generator, validation_data=validation_generator, epochs=10, callbacks=[WandbCallback()]) # + colab={"background_save": true} id="1Zhjj0su1V8T"
2. VggNet/VGG_16_v1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import datetime, time import simpy import pandas as pd import openclsim.core as core import openclsim.model as model import openclsim.plot as plot # setup environment simulation_start = 0 my_env = simpy.Environment(initial_time=simulation_start) registry = {} # + reporting_activity = model.BasicActivity( env=my_env, name="Reporting activity", registry=registry, duration=0, ) sub_processes = [ model.BasicActivity( env=my_env, name="Basic activity1", registry=registry, duration=14, additional_logs=[reporting_activity], ), model.BasicActivity( env=my_env, name="Basic activity2", registry=registry, duration=5, additional_logs=[reporting_activity], ), model.BasicActivity( env=my_env, name="Basic activity3", registry=registry, duration=220, additional_logs=[reporting_activity], ), ] # - activity = model.ParallelActivity( env= my_env, name= "Parallel process", registry= registry, sub_processes= sub_processes, ) model.register_processes([activity, reporting_activity]) my_env.run() plot.vessel_planning([*sub_processes, activity]) display(plot.get_log_dataframe(reporting_activity, [*sub_processes, activity, reporting_activity])) for act in [*sub_processes, activity]: display(plot.get_log_dataframe(act, [*sub_processes, activity, reporting_activity]))
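# A parallel activity only finishes when its slowest sub-process finishes, so with durations
# of 14, 5 and 220 the simulation clock should advance by 220 time units from the configured
# start. A small sanity-check sketch using the simpy environment created above:

# +
expected_duration = max(14, 5, 220)   # the longest sub-process dominates the parallel block
print(f"simulation clock: {my_env.now}, expected: {simulation_start + expected_duration}")
# -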
notebooks/08 Parallel activity.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # linear model to classify the MNIST data set # # In this second tutorial, we will continue to work on image classification and try a linear classification model. This kind of model have the same number of parameters as the input images (64 here) plus one bias. They work by trying to with the parameters so that we minimize some Loss function at training time. At test time, a prediction is fast as it is basically just a dot product. # ## Prepare and get a sense of the data # # We start by loading our image data set: MNIST. Using the function `load_digits` of the `datasets` module of `sklearn` provide the dataset in a reduced form suitable for this practival session. # import numpy and initialize the random seed to yield consistent results import numpy as np np.random.seed(42) from sklearn.datasets import load_digits mnist = ... mnist.keys() # The data set need to be partitioned into train and test data. Here use the handy function `train_test_split` of `sklearn` to reserve 20% of the data to test your model. # # **/!\ The test data is to be left untouched.** # + from sklearn.model_selection import train_test_split (X_train, X_test, y_train, y_test) = train_test_split(..., test_size=...) print('shape of train data is {}, type is {}'.format(X_train.shape, X_train.dtype)) print('shape of test data is {}, type is {}'.format(X_test.shape, X_test.dtype)) # - # observe the data points: they are in 64 bits floats but only integers values from 0 to 16. The data can therefore be safely casted to uint8 to reduce the memory footprint by a factor of 8. print(...) # min print(...) # max print(...) # unique X_train = X_train.astype(...) # plot an image using matplotlib. The function `imshow` can be used reshaping the data as $(8\times8)$ array. # + import matplotlib # %matplotlib inline from matplotlib import pyplot as plt, cm index = 0 plt.imshow(...), cmap=cm.gray_r) plt.axis('off') plt.title('image %d in the train set' % index) # - # With this particular dataset, the list of the categories is identical to their indices (from 0 to 9). # # Print the class of image `index`. print('image {} is a {}'.format(..., ...)) # ## Model definition # # Here we define our simple machine learning algorithm which takes the features $x$, multiply them be some weights $W$ and add a bias term $b$. # # $$f(x, W, b) = W.x + b = s$$ # # For a given image in vector form with $d$ features, W has size (10, d) so that the product $W.X$ produces 10 numbers which are called the scores for each class. # # Initialize `numpy` arrays of size (10, 64) for $W$ and (10) for $b$. Concatenate $b$ and $W$ using the function `np.c_` to use the bias trick. # + # initialization with random weights W = 0.1 * np.random.randn(...) b = 0.1 * np.random.randn(...) # apply the bias trick W = ... print('shape of W is now {}'.format(W.shape)) # - # The data points are already in vector form, let's add 1 to each for the bias trick. # + X_train = np.c_[..., X_train] print('shape of train data is now {}'.format(X_train.shape)) X_test = np.c_[..., X_test] print('shape of test data is now {}'.format(X_test.shape)) # - # now compute the 10 scores for the `index` training image with a dot product using `np.dot` and use the max score to determine the prediction scores = np.dot(...) 
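# One possible completion of the cell above (a sketch, assuming the earlier blanks were filled
# so that `W` has shape (10, 65) and `X_train` already carries the bias column). As noted
# earlier in the notebook the categories are simply the digits 0-9, so the `labels` list used
# by the next cell (which is not defined anywhere else) is assumed to be exactly that.

# +
labels = list(range(10))            # assumed: category "names" are the digits themselves
scores = W.dot(X_train[index])      # shape (10,), one score per class
prediction = np.argmax(scores)      # the highest score gives the predicted class
print('predicted class for image {}: {}'.format(index, prediction))
# -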
# look at the individual score for each class for (label, score) in zip(labels, scores): print('{}: {:5.2f}'.format(..., ...)) # Print the result, note that as we have 10 scores, we need to find the index of the maximum score to determine the class. print('prediction: {}'.format(...) print('ground thruth: {}'.format(...) # ## Loss function # # ### Hinge loss # # We now need to define a way to tell the machine how happy we are with this prediction. The machine will then use this information to learn and come up with better predictions. The measure of our "happiness" is called a *loss function* and the process of learning the parameters (both $W$ and $b$) is called optimisation. # # One possibility to measure how good is the prediction is the so called Hinge Loss: # # $$L_i=\sum_{j\neq y^i}\max(0, s_j - s_{y^i} + 1)$$ # # Since it is inspired by linear support vector machines, this loss is also called Multi-class SVM Loss. # Now we can average arithmetically the losses $L_i$ for each instance $x^i$ to compute the general loss $L$ of the model. # # $$L=\frac{1}{n}\sum_i L_i(f(x^i, W), y^i)$$ # step by step calculation of the loss Li = 0 yi = ... # ground truth target for j in range(...): if j == yi: print('skipping %d' % j) continue margin = ... print('{:2d} {:6.2f} {:6.2f}'.format(j, scores[j], margin)) Li += ... print(18 * '-') print('hinge loss is {:.1f}'.format(Li)) # Now we understand how the hinge loss works, we can use a more efficient implementation and include it in a reusable function. # # Create a function (using `def`) called `loss_i` that compute the loss for given parameters `W` and `index`. # + # inline calculation of the loss yi = np.squeeze(y_train)[index] Li = np.sum([max(0, scores[j] - scores[yi] + 1) for j in range(10) if j != yi]) print(Li) # create a function to evaluate the loss for the given W for image index in the training set def loss_i(...): yi = ... # ground truth target scores = ... Li = np.sum([max(0, scores[j] - scores[yi] + 1) for j in range(10) if j != yi]) return Li print(loss_i(W, index)) # - # Finally create a function to compute the average loss on a batch of images def loss_batch(W, batch_size=100): L = 0. # average loss for index in range(batch_size): L += ... L /= batch_size return L loss_batch(W, batch_size=50) # ### Softmax loss # # Another very popular loss function to use with multiclassification problems is the multinomial logistic or softmax loss (popular in deep learning). Here the score for each class is passed to the softmax function: exponentiated (and become positive) and normalized. This gives the probability distribution of this class: # # $$P(Y=k|X=x_i)=\frac{e^{s_k}}{\sum_j e^{s_j}}$$ # # Now we have a probability we can try to maximize the likelihood which is equivalent to minimize the negative of the log likelihood: # # $$L_i=-\log P(Y=k|X=x_i)=-\log\left(\frac{e^{s_k}}{\sum_j e^{s_j}}\right)$$ # start by exponentiating our scores to obtain unnormalized probabilities escores = np.exp(scores) norm_escores = escores / np.sum(escores) for j in range(10): print('{:6d} | {:8.1f} | {:6.4f}'.format(j, escores[j], norm_escores[j])) print(26 * '-') # verify that the sum of the probability is 1 print('sum of probabilities check: {:.3f}'.format(np.sum(norm_escores))) # compute the softmax loss Li = -np.log(norm_escores[yi]) print('Softmax loss is {:.2f}'.format(Li)) # ## Learning the model # # Here we use the calculated loss to optimize the parameters $W$ and $b$. 
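# Before turning to the gradient, here is a compact vectorized version of the two losses
# discussed above (a sketch; it only assumes the score vector `scores` of length 10 and the
# ground-truth index `yi` from the step-by-step cells):

# +
def hinge_loss(s, yi):
    # multi-class SVM loss: sum of the margins of the incorrect classes
    margins = np.maximum(0, s - s[yi] + 1)
    margins[yi] = 0                  # the correct class does not contribute
    return np.sum(margins)

def softmax_loss(s, yi):
    # negative log-likelihood of the correct class under the softmax distribution
    e = np.exp(s - np.max(s))        # shift the scores for numerical stability
    return -np.log(e[yi] / np.sum(e))

print('hinge loss  : {:.2f}'.format(hinge_loss(scores, yi)))
print('softmax loss: {:.2f}'.format(softmax_loss(scores, yi)))
# -

# Back to learning the model: we want to adjust $W$ and $b$ so that this loss decreases.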
For this we need to evaluate the gradient $\dfrac{\partial L}{\partial W}$ of $L$ with respect to $W$. # # The gradient is obtained by differentiating the loss expression with respect to $W$: # # $$\nabla_{w_j}L_i=1\left(w_j^T x_i - w_{y_i}^T x_i + 1 > 0\right) x_i\quad\text{for }j\neq y_i$$ # # $$\nabla_{w_{y_i}}L_i=-\left(\sum_{j\neq y_i}1\left(w_j^T x_i - w_{y_i}^T x_i + 1 > 0\right)\right) x_i$$ # # with $1(condition)$ equals to 1 if $condition$ is true, 0 otherwise. Here we see that the data vector $x$ is scaled by the number of classes that did not meet the margins. # verify one more time the size of our matrices print('shape of train data is {}'.format(X_train.shape)) print('shape of W is {}'.format(W.shape)) # ### Implementation # # Simple SVM loss gradient implementation: # - iterate over each data point $i$ in the batch # - compute the score using $W.x^i$ (bias trick) # - compute the margin for each class # - compute the loss and the gradient components associated with this data point # - finally average the gradient and the loss with respect to the number of data points in the batch def svm_loss_gradient(W, X, y): """ SVM loss gradient. Inputs: - W: array of shape (K, 1 + D) containing the weights. - X: array of shape (N, 1 + D) containing the data. - y: array of shape (N, 1) containing training labels 0 <= k < K. Returns a tuple of: - average loss - gradient of the loss with respect to weights W """ dW = np.zeros_like(W) # initialize the gradient as zero K = ... # number of classes n = ... # number of data points loss = 0.0 for i in range(n): #print('evaluating gradient / image %d' % i) yi = np.squeeze(y)[i] # ground truth target scores = ... # compute SVM loss and gradient for this data point for j in range(K): if j == yi: continue # only compute loss if incorrectly classified margin = ... if margin > 0: loss += margin dW[yi, :] -= ... # correct class gradient dW[j, :] += ... # incorrect class gradient # average the loss and gradient loss /= n dW /= n return loss, dW # Now try our SVM gradient loss by computing the gradient with respect to the first `nb` images in the training set. nb = 100 loss, dW = svm_loss_gradient(...) print('loss is {:.2f}'.format(loss)) print('gradient dW with respect to the first pixel =', dW[:, 2]) # ### Gradient check # # now, to verify our SVM gradietn implementation, we are going to perform a **gradient check**. # # The gradient is computed numerically using a finite difference scheme: # # $$\nabla L\approx\dfrac{L(W+h) - L(W-h)}{2h}$$ def gradient_check(f, W, h=0.0001): dL = np.zeros_like(W) # evaluate the loss modifiying each value of W for c in range(W.shape[0]): for p in range(W.shape[1]): W[c, p] += h fxph = f(W) W[c, p] -= 2*h fxmh = f(W) dL[c, p] = ... # centered finite differences W[c, p] += h # put back initial value return dL # apply our gradient check, print the gradient with respect to the first pixel. Compare with the analytical value. Realize that to evaluate the gradient numerically, the loss function was called $2\times64$ times. This is why it is so slow. And we tested it only with 100 training images over 1437! print('loss is {:.2f}'.format(loss_batch(W, batch_size=100))) dL = gradient_check(loss_batch, W) print(dL.shape) print(dL[:, 2]) # ### Gradient Descent # # now we have successfully created our linear model, loss function, and that we can compute the gradient of the loss with respect to $W$, let's actually use this to perform gradient descent and learn our model. 
# # The backbone of the gradient descent is this simple equation: # $$W\leftarrow W - \eta \nabla_W L$$ # # $\eta$ is the learning rate (the most important hyperparameter). The weights $W$ are being updated at each iteration until a stop criterion is met or a maximum number of iteration reached. # + # examine one single gradient descent step W = 0.1 * np.random.randn(10, 65) print('average loss is %.1f' % loss_batch(W, batch_size=X_train.shape[0])) loss, dL_dw = svm_loss_gradient(W, X_train, y_train) # perform one gradient descent eta = 0.005 W = W - eta * dL_dw print('after one step the average loss is %.1f' % loss_batch(W, batch_size=X_train.shape[0])) # - # ### Mini-batch gradient descent # # because $n$ is large (1437 here, but can also be much much larger), it does not actually make sense of computing the gradient on the complete set of training images at each iteration (remeber that the gradient is averaged). Instead, it is very common to compute the gradient on a subset (called a mini-batch) of 32 to 256 images. This is much faster and performs well. W = np.random.randn(10, 65) # initialization of the coefficients eta = 0.005 # learning rate (< 1) batch_size = 128 loss_history = [] it = 0 while it < 2000: # prepare batch idxs = np.random.choice(range(X_train.shape[0]), size=batch_size, replace=True) X_batch = X_train[idxs, :] y_batch = y_train[idxs] # evaluate loss and gradient loss, dL_dw = ... print('it {:d} - loss {:.1f}'.format(it, loss)) # gradient descent W = ... loss_history.append(loss) it += 1 plt.plot(loss_history) # Now make some prediction! Try the first 20 entries in the test set. for i in range(20): y_pred = ... print('{} - {}'.format(y_pred, y_test[i])) # Construct the confusion matrix which is usefull to measure the performances of our multinomial classifier. # + from sklearn.metrics import confusion_matrix y_train_pred = ... conf = confusion_matrix(...) # - plt.imshow(conf) plt.xlabel('predicted class') plt.ylabel('actual class') plt.title('confusion matrix') # To better visualize the errors, it is useful to normalize each row by the total number of samples in each category. row_sums = conf.sum(axis=1, keepdims=True) norm_conf = conf / row_sums np.fill_diagonal(norm_conf, 0) plt.imshow(norm_conf) plt.xlabel('predicted class') plt.ylabel('actual class') plt.title('matrix of error rates') # The columns for classes 8 and 9 look worse than the other. Analyzing the type of errors of the model can help improving it. # # Finally we can compare our results with the `SGDClassifier` from `sklearn`. # ## Compare our gradient descent results with sklearn (X_train, X_test, y_train, y_test) = train_test_split(mnist['data'], mnist['target'], test_size=0.2) from sklearn import linear_model clf = linear_model.SGDClassifier(random_state=42) clf.fit(X_train, y_train) y_pred = clf.predict(...) for i in range(20): print('{} - {}'.format(y_pred[i], y_test[i])) # Compute the **accuracy** by dividing the number of correct prediction in the train set by the number os training samples. y_train_pred = clf.predict(...) print(np.sum(...) / ...) # It is better to perform K-fold cross validation to measure the performances of the model. For this we can use the `cross_val_score` method with `cv=3`. 
from sklearn.model_selection import cross_val_score cross_val_score(clf, X_train, y_train, cv=3, scoring="accuracy") # + from sklearn.model_selection import cross_val_predict y_train_pred = cross_val_predict(clf, X_train, y_train, cv=3) from sklearn.metrics import confusion_matrix conf = confusion_matrix(y_train, y_train_pred) # - plt.imshow(conf) plt.xlabel('predicted class') plt.ylabel('actual class') plt.title('confusion matrix') plt.figure(figsize=(12, 5)) plt.subplot(251); plt.imshow(clf.coef_[0].reshape((8, 8)), cmap=cm.gray); plt.axis('off') plt.subplot(252); plt.imshow(clf.coef_[1].reshape((8, 8)), cmap=cm.gray); plt.axis('off') plt.subplot(253); plt.imshow(clf.coef_[2].reshape((8, 8)), cmap=cm.gray); plt.axis('off') plt.subplot(254); plt.imshow(clf.coef_[3].reshape((8, 8)), cmap=cm.gray); plt.axis('off') plt.subplot(255); plt.imshow(clf.coef_[4].reshape((8, 8)), cmap=cm.gray); plt.axis('off') plt.subplot(256); plt.imshow(clf.coef_[5].reshape((8, 8)), cmap=cm.gray); plt.axis('off') plt.subplot(257); plt.imshow(clf.coef_[6].reshape((8, 8)), cmap=cm.gray); plt.axis('off') plt.subplot(258); plt.imshow(clf.coef_[7].reshape((8, 8)), cmap=cm.gray); plt.axis('off') plt.subplot(259); plt.imshow(clf.coef_[8].reshape((8, 8)), cmap=cm.gray); plt.axis('off') plt.subplot(2, 5, 10); plt.imshow(clf.coef_[9].reshape((8, 8)), cmap=cm.gray); plt.axis('off') plt.show()
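# As a last check, the held-out test set (left untouched so far) gives an estimate of the
# generalisation accuracy of the sklearn model. A short sketch reusing the variables from the
# comparison section above:

# +
y_test_pred = clf.predict(X_test)
test_accuracy = np.mean(y_test_pred == y_test)
print('test accuracy: {:.3f}'.format(test_accuracy))
# -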
tutorials/mnist_linear_classifier.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + """ You can run either this notebook locally (if you have all the dependencies and a GPU) or on Google Colab. Instructions for setting up Colab are as follows: 1. Open a new Python 3 notebook. 2. Import this notebook from GitHub (File -> Upload Notebook -> "GITHUB" tab -> copy/paste GitHub URL) 3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select "GPU" for hardware accelerator) 4. Run this cell to set up dependencies. 5. Restart the runtime (Runtime -> Restart Runtime) for any upgraded packages to take effect """ # If you're using Google Colab and not running locally, run this cell. ## Install dependencies # !pip install wget ## Install NeMo BRANCH = 'main' # !python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all] """ Remember to restart the runtime for the kernel to pick up any upgraded packages (e.g. matplotlib)! Alternatively, you can uncomment the exit() below to crash and restart the kernel, in the case that you want to use the "Run All Cells" (or similar) option. """ # exit() # - import json import torch import os from nemo.collections.asr.metrics.wer import word_error_rate from nemo.collections.asr.parts.utils.vad_utils import stitch_segmented_asr_output, construct_manifest_eval # # Offline ASR+VAD # In this tutorial, we will demonstrate how to use offline VAD to extract speech segments and transcribe the speech segments with CTC models. This will help to exclude some non_speech utterances and could save computation resources by removing unnecessary input to the ASR system. # # The pipeline includes the following steps. # # 0. [Prepare data and script for demonstration](#Prepare-data-and-script-for-demonstration) # 1. [Use offline VAD to extract speech segments](#Use-offline-VAD-to-extract-speech-segments) # 2. [Transcribe speech segments with CTC models](#Transcribe-speech-segments-with-CTC-models) # 3. [Stitch the prediction text of speech segments](#Stitch-the-prediction-text-of-speech-segments) # 4. [Evaluate the performance of offline ASR with VAD ](#Evaluate-the-performance-of-offline-VAD-with-ASR) # ## Prepare data and script for demonstration # # !mkdir -p data # !wget -P data/ https://nemo-public.s3.us-east-2.amazonaws.com/chris-sample01_02.wav # !wget -P data/ https://nemo-public.s3.us-east-2.amazonaws.com/chris-sample03.wav # !wget https://nemo-public.s3.us-east-2.amazonaws.com/chris_demo.json input_manifest="chris_demo.json" vad_out_manifest_filepath="vad_out.json" vad_model="vad_marblenet" # here we use vad_marblenet for example, you can choose other VAD models. # !head -n 10 $input_manifest # + # This cell is mainly for colab. # You can ignore it if run locally but do make sure change the filepaths of scripts and config file in cells below. 
# !mkdir -p scripts if not os.path.exists("scripts/vad_infer.py"): # !wget -P scripts/ https://raw.githubusercontent.com/NVIDIA/NeMo/main/examples/asr/speech_classification/vad_infer.py if not os.path.exists("scripts/transcribe_speech.py"): # !wget -P scripts/ https://raw.githubusercontent.com/NVIDIA/NeMo/main/examples/asr/transcribe_speech.py # !mkdir -p conf/vad if not os.path.exists("conf/vad/vad_inference_postprocessing.yaml"): # !wget -P conf/vad/ https://raw.githubusercontent.com/NVIDIA/NeMo/main/examples/asr/conf/vad/vad_inference_postprocessing.yaml # - # ## Use offline VAD to extract speech segments # Here we are using very simple parameters to demonstrate the process. # # Please choose or tune your own postprocessing parameters. # # You can find more details in # ```python # <NeMo_git_root>/tutorials/asr/Online_Offline_Microphone_VAD_Demo.ipynb and # <NeMo_git_root>/scripts/voice_activity_detection/vad_tune_threshold.py # ``` # The <code>vad_infer.py</code> script will help you generate speech segments. See more details in the script below. # if run locally, vad_infer.py is located in <NeMo_git_root>/examples/asr/speech_classification/vad_infer.py # %run -i scripts/vad_infer.py --config-path="../conf/vad" --config-name="vad_inference_postprocessing.yaml" \ # dataset=$input_manifest \ # vad.model_path=$vad_model \ # frame_out_dir="chris_demo" \ # vad.parameters.window_length_in_sec=0.63 \ # vad.parameters.postprocessing.onset=0.5 \ # vad.parameters.postprocessing.offset=0.5 \ # vad.parameters.postprocessing.min_duration_on=0.5 \ # vad.parameters.postprocessing.min_duration_off=0.5 \ # out_manifest_filepath=$vad_out_manifest_filepath # Let's have a look at VAD output. If there are no speech segments in the sample. The sample will not appear in VAD output. # !head -n 10 $vad_out_manifest_filepath # ## Transcribe speech segments with CTC models segmented_output_manifest="asr_segmented_output_manifest.json" asr_model="stt_en_citrinet_1024_gamma_0_25" # here we use citrinet for example, you can choose other CTC models. # The <code>transcribe_speech.py</code> script will help you transcribe each speech segment. See more details in the script below. # if run locally, transcribe_speech.py is located in <NeMo_git_root>/examples/asr/transcribe_speech.py # %run -i scripts/transcribe_speech.py \ # pretrained_name=$asr_model \ # dataset_manifest=$vad_out_manifest_filepath \ # batch_size=32 \ # amp=True \ # output_filename=$segmented_output_manifest # Let's have a look at the segmented ASR transcript. # !head -n 5 $segmented_output_manifest # ## Stitch the prediction text of speech segments # You can also evaluate the whole ASR output by stitching the segmented outputs together. # # Note, there would be a better method to stitch them together. Here, we just demonstrate the simplest method, concatenating. stitched_output_manifest="stitched_asr_output_manifest.json" stitched_output_manifest = stitch_segmented_asr_output(segmented_output_manifest) # Let's have a look at the stitched output and the stored speech segments of the first sample. # + stitched_output = [] for line in open(stitched_output_manifest, 'r', encoding='utf-8'): file = json.loads(line) stitched_output.append(file) print(stitched_output[0]) print(f"\n The speech segments of above file are \n {torch.load(stitched_output[0]['speech_segments_filepath'])}") # - # # Evaluate the performance of offline VAD with ASR # If we have ground-truth <code>'text'</code> in input_manifest, we can evaluate our performance of stitched output. 
Let's align the <code>'text'</code> in input manifest and <code>'pred_text'</code> in stitched segmented asr output first, since some samples from input_manifest might be pure noise and have been removed in VAD output and excluded for ASR inference. aligned_vad_asr_output_manifest = construct_manifest_eval(input_manifest, stitched_output_manifest) # !head -n 10 $aligned_vad_asr_output_manifest predicted_text, ground_truth_text = [], [] for line in open(aligned_vad_asr_output_manifest, 'r', encoding='utf-8'): sample = json.loads(line) predicted_text.append(sample['pred_text']) ground_truth_text.append(sample['text']) metric_value = word_error_rate(hypotheses=predicted_text, references=ground_truth_text, use_cer=False) print(f"WER is {metric_value}")
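# The same helper can also report a character error rate, which can be more informative for
# short segments. A small sketch reusing the aligned lists built above:

cer_value = word_error_rate(hypotheses=predicted_text, references=ground_truth_text, use_cer=True)
print(f"CER is {cer_value}")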
tutorials/asr/Offline_ASR_with_VAD_for_CTC_models.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.9.0 64-bit # language: python # name: python3 # --- # + # modules we'll use import pandas as pd import numpy as np # read in all our data sf_permits = pd.read_csv("data/Building_Permits.csv") # set seed for reproducibility np.random.seed(0) # - sf_permits.head() # + missing_values_count = sf_permits.isnull().sum() total_cells = np.product(sf_permits.shape) total_missing = missing_values_count.sum() # percent of data that is missing percent_missing = (total_missing/total_cells) * 100 print(percent_missing) # - sf_permits.dropna(axis=0) # + # remove all columns with at least one missing value sf_permits_with_na_dropped = sf_permits.dropna(axis=1) # calculate number of dropped columns cols_in_original_dataset = sf_permits.shape[1] cols_in_na_dropped = sf_permits_with_na_dropped.shape[1] dropped_columns = cols_in_original_dataset - cols_in_na_dropped dropped_columns # - sf_permits.shape # Try replacing all the NaN's in the sf_permits data with the one that comes directly after it and then replacing # any remaining NaN's with 0. Set the result to a new DataFrame sf_permits_with_na_imputed sf_permits_with_na_imputed = sf_permits.fillna(method='bfill', axis=0).fillna(0)
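# A quick sanity check (a small sketch) that the back-fill plus zero-fill imputation above
# really removed every missing value:

remaining_missing = sf_permits_with_na_imputed.isnull().sum().sum()
print(f"remaining missing values: {remaining_missing}")  # expected to be 0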
Course_DataCleaning/course1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Educational Attainment Distribution # # An exploration of levels of education among the general population in various countries. # # To download the data, visit the [Wittgenstein Centre Human Capital Data Explorer](http://dataexplorer.wittgensteincentre.org/wcde-v2/). In the Indicator dropdown menu, select Educational Attainment Distribution by Broad Age. Select all countries, all years, age 15+, and the Medium (SSP2) scenario. # # - Date: 2019-01-23 # - Source: [Wittgenstein Centre for Demography and Global Human Capital](http://dataexplorer.wittgensteincentre.org/wcde-v2/) # ## Setup # + import io import pandas as pd import matplotlib.pyplot as plt import matplotlib.ticker as ticker from src.plot import ds_plot # %config InlineBackend.figure_format = 'retina' # Custom chart style plt.style.use('../assets/datastory.mpltstyle') # - df = pd.read_csv('../data/raw/educational_attainment.csv', skiprows=8) # ## Overview df.sample(3) # Unique levels of education df.Education.value_counts() # Time extent of the data df.Year.min(), df.Year.max() # ## Plotting def reshape_country(df, country, year_max=2050): cols = ['No Education', 'Incomplete Primary', 'Primary', 'Lower Secondary', 'Upper Secondary', 'Post Secondary'] colors = list(reversed(['#2bbaff', '#ffaa8c', '#ff713f', '#5d1800'])) # Transform the data data = (df .query(f'Area == "{country}" & Year <= {year_max}') .drop(['Area', 'Age'], axis=1) .pivot_table(columns='Education', values='Distribution', index='Year') .filter(items=cols) .assign(Primary=lambda x: x['Primary'] + x['Lower Secondary']) .drop(['Lower Secondary', 'Incomplete Primary'], axis=1)) # Convert absolute numbers to percentages data = data.apply(lambda x: x / data.sum(axis=1)) * 100 return data def plot_country(data): """Plot stacked area chart for `country`.""" fig, ax = ds_plot(figsize=(14.4, 5.8)) colors = list(reversed(['#2bbaff', '#ffaa8c', '#ff713f', '#5d1800'])) ax = data.plot(kind='area', stacked=True, color=colors, ax=ax, legend=False) ax.yaxis.set_major_formatter(ticker.PercentFormatter(decimals=0)) ax.set_xlim(1970, 2050) ax.set_ylim(0, 100) ax.set_xlabel('') ax.set_yticks([0, 25, 50, 75, 100], minor=False) ax.grid(axis='y', which='major') ax.axvline(2019, ls='--', lw=3, color='#404041') fd = {'size': 14, 'weight': 500, 'color': '#404041', 'backgroundcolor': '#ff713f'} ax.text(2018, 50, '<NAME>', ha='right', va='center', fontdict=fd) plt.gcf().set_facecolor('white') return ax data = df.pipe(reshape_country, 'China'); data.head() # + ax = plot_country(data) fd = {'size': 18, 'weight': 500, 'color': '#f5f5f5'} # Manually add annotations ax.text(2030, 90, 'Eftergymnasial', fontdict=fd) ax.text(1983, 90, 'Gymnasial', fontdict=fd) ax.text(1975, 60, 'Grundskola', fontdict=fd) ax.text(1972, 15, 'Ingen utbildning', fontdict=fd) fig = plt.gcf() fig.set_facecolor('#ffffff') fig.savefig('../charts/educational-levels.png') # -
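# The helpers above are generic, so the same chart can be produced for any other area in the
# dataset by changing the country name. A sketch ('India' is only an example and is assumed
# to be spelled this way in the `Area` column):

# +
data_other = df.pipe(reshape_country, 'India')
ax = plot_country(data_other)
plt.show()
# -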
notebooks/2019-01-23-educational-levels.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Extending your Metadata using DocumentClassifiers at Index Time # # [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/deepset-ai/haystack/blob/master/tutorials/Tutorial16_Document_Classifier_at_Index_Time.ipynb) # # With DocumentClassifier it's possible to automatically enrich your documents with categories, sentiments, topics or whatever metadata you like. This metadata could be used for efficient filtering or further processing. Say you have some categories your users typically filter on. If the documents are tagged manually with these categories, you could automate this process by training a model. Or you can leverage the full power and flexibility of zero shot classification. All you need to do is pass your categories to the classifier, no labels required. This tutorial shows how to integrate it in your indexing pipeline. # DocumentClassifier adds the classification result (label and score) to Document's meta property. # Hence, we can use it to classify documents at index time. \ # The result can be accessed at query time: for example by applying a filter for "classification.label". # + [markdown] pycharm={"name": "#%% md\n"} # This tutorial will show you how to integrate a classification model into your preprocessing steps and how you can filter for this additional metadata at query time. In the last section we show how to put it all together and create an indexing pipeline. # + pycharm={"name": "#%%\n"} # Let's start by installing Haystack # Install the latest release of Haystack in your own environment # #! pip install farm-haystack # Install the latest master of Haystack # !pip install grpcio-tools==1.34.1 # !pip install git+https://github.com/deepset-ai/haystack.git # !wget --no-check-certificate https://dl.xpdfreader.com/xpdf-tools-linux-4.03.tar.gz # !tar -xvf xpdf-tools-linux-4.03.tar.gz && sudo cp xpdf-tools-linux-4.03/bin64/pdftotext /usr/local/bin # Install pygraphviz # !apt install libgraphviz-dev # !pip install pygraphviz # If you run this notebook on Google Colab, you might need to # restart the runtime after installing haystack. # + pycharm={"name": "#%%\n"} # Here are the imports we need from haystack.document_stores.elasticsearch import ElasticsearchDocumentStore from haystack.nodes import PreProcessor, TransformersDocumentClassifier, FARMReader, ElasticsearchRetriever from haystack.schema import Document from haystack.utils import convert_files_to_dicts, fetch_archive_from_http, print_answers # + pycharm={"name": "#%%\n"} # This fetches some sample files to work with doc_dir = "data/preprocessing_tutorial" s3_url = "https://s3.eu-central-1.amazonaws.com/deepset.ai-farm-qa/datasets/documents/preprocessing_tutorial.zip" fetch_archive_from_http(url=s3_url, output_dir=doc_dir) # + [markdown] pycharm={"name": "#%% md\n"} # ## Read and preprocess documents # # + pycharm={"name": "#%%\n"} # note that you can also use the document classifier before applying the PreProcessor, e.g. 
before splitting your documents all_docs = convert_files_to_dicts(dir_path=doc_dir) preprocessor_sliding_window = PreProcessor( split_overlap=3, split_length=10, split_respect_sentence_boundary=False ) docs_sliding_window = preprocessor_sliding_window.process(all_docs) # - # ## Apply DocumentClassifier # We can enrich the document metadata at index time using any transformers document classifier model. While traditional classification models are trained to predict one of a few "hard-coded" classes and required a dedicated training dataset, zero-shot classification is super flexible and you can easily switch the classes the model should predict on the fly. Just supply them via the labels param. # Here we use a zero shot model that is supposed to classify our documents in 'music', 'natural language processing' and 'history'. Feel free to change them for whatever you like to classify. \ # These classes can later on be accessed at query time. doc_classifier = TransformersDocumentClassifier(model_name_or_path="cross-encoder/nli-distilroberta-base", task="zero-shot-classification", labels=["music", "natural language processing", "history"], batch_size=16 ) # + # we can also use any other transformers model besides zero shot classification # doc_classifier_model = 'bhadresh-savani/distilbert-base-uncased-emotion' # doc_classifier = TransformersDocumentClassifier(model_name_or_path=doc_classifier_model, batch_size=16, use_gpu=-1) # + # we could also specifiy a different field we want to run the classification on # doc_classifier = TransformersDocumentClassifier(model_name_or_path="cross-encoder/nli-distilroberta-base", # task="zero-shot-classification", # labels=["music", "natural language processing", "history"], # batch_size=16, use_gpu=-1, # classification_field="description") # - # convert to Document using a fieldmap for custom content fields the classification should run on docs_to_classify = [Document.from_dict(d) for d in docs_sliding_window] # classify using gpu, batch_size makes sure we do not run out of memory classified_docs = doc_classifier.predict(docs_to_classify) # let's see how it looks: there should be a classification result in the meta entry containing labels and scores. print(classified_docs[0].to_dict()) # ## Indexing # + # In Colab / No Docker environments: Start Elasticsearch from source # ! wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-7.9.2-linux-x86_64.tar.gz -q # ! tar -xzf elasticsearch-7.9.2-linux-x86_64.tar.gz # ! chown -R daemon:daemon elasticsearch-7.9.2 import os from subprocess import Popen, PIPE, STDOUT es_server = Popen(['elasticsearch-7.9.2/bin/elasticsearch'], stdout=PIPE, stderr=STDOUT, preexec_fn=lambda: os.setuid(1) # as daemon ) # wait until ES has started # ! sleep 30 # - # Connect to Elasticsearch document_store = ElasticsearchDocumentStore(host="localhost", username="", password="", index="document") # Now, let's write the docs to our DB. document_store.delete_all_documents() document_store.write_documents(classified_docs) # check if indexed docs contain classification results test_doc = document_store.get_all_documents()[0] print(f'document {test_doc.id} with content \n\n{test_doc.content}\n\nhas label {test_doc.meta["classification"]["label"]}') # ## Querying the data # All we have to do to filter for one of our classes is to set a filter on "classification.label". 
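# Before wiring up the full QA pipeline, the classification metadata can be inspected directly
# on the document store, for example by counting how many documents ended up with each label.
# A small sketch using the `filters` argument of `get_all_documents`:

# +
for label in ["music", "natural language processing", "history"]:
    docs_with_label = document_store.get_all_documents(filters={"classification.label": [label]})
    print(f"{label}: {len(docs_with_label)} documents")
# -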
# Initialize QA-Pipeline from haystack.pipelines import ExtractiveQAPipeline retriever = ElasticsearchRetriever(document_store=document_store) reader = FARMReader(model_name_or_path="deepset/roberta-base-squad2", use_gpu=True) pipe = ExtractiveQAPipeline(reader, retriever) ## Voilà! Ask a question while filtering for "music"-only documents prediction = pipe.run( query="What is heavy metal?", params={"Retriever": {"top_k": 10, "filters": {"classification.label": ["music"]}}, "Reader": {"top_k": 5}} ) print_answers(prediction, details="high") # ## Wrapping it up in an indexing pipeline from pathlib import Path from haystack.pipelines import Pipeline from haystack.nodes import TextConverter, PreProcessor, FileTypeClassifier, PDFToTextConverter, DocxToTextConverter # + file_type_classifier = FileTypeClassifier() text_converter = TextConverter() pdf_converter = PDFToTextConverter() docx_converter = DocxToTextConverter() indexing_pipeline_with_classification = Pipeline() indexing_pipeline_with_classification.add_node(component=file_type_classifier, name="FileTypeClassifier", inputs=["File"]) indexing_pipeline_with_classification.add_node(component=text_converter, name="TextConverter", inputs=["FileTypeClassifier.output_1"]) indexing_pipeline_with_classification.add_node(component=pdf_converter, name="PdfConverter", inputs=["FileTypeClassifier.output_2"]) indexing_pipeline_with_classification.add_node(component=docx_converter, name="DocxConverter", inputs=["FileTypeClassifier.output_4"]) indexing_pipeline_with_classification.add_node(component=preprocessor_sliding_window, name="Preprocessor", inputs=["TextConverter", "PdfConverter", "DocxConverter"]) indexing_pipeline_with_classification.add_node(component=doc_classifier, name="DocumentClassifier", inputs=["Preprocessor"]) indexing_pipeline_with_classification.add_node(component=document_store, name="DocumentStore", inputs=["DocumentClassifier"]) indexing_pipeline_with_classification.draw("index_time_document_classifier.png") document_store.delete_documents() txt_files = [f for f in Path(doc_dir).iterdir() if f.suffix == '.txt'] pdf_files = [f for f in Path(doc_dir).iterdir() if f.suffix == '.pdf'] docx_files = [f for f in Path(doc_dir).iterdir() if f.suffix == '.docx'] indexing_pipeline_with_classification.run(file_paths=txt_files) indexing_pipeline_with_classification.run(file_paths=pdf_files) indexing_pipeline_with_classification.run(file_paths=docx_files) document_store.get_all_documents()[0] # - # we can store this pipeline and use it from the REST-API indexing_pipeline_with_classification.save_to_yaml("indexing_pipeline_with_classification.yaml") # + [markdown] pycharm={"name": "#%% md\n"} # ## About us # # This [Haystack](https://github.com/deepset-ai/haystack/) notebook was made with love by [deepset](https://deepset.ai/) in Berlin, Germany # # We bring NLP to the industry via open source! # Our focus: Industry specific language models & large scale QA systems. # # Some of our other work: # - [German BERT](https://deepset.ai/german-bert) # - [GermanQuAD and GermanDPR](https://deepset.ai/germanquad) # - [FARM](https://github.com/deepset-ai/FARM) # # Get in touch: # [Twitter](https://twitter.com/deepset_ai) | [LinkedIn](https://www.linkedin.com/company/deepset-ai/) | [Slack](https://haystack.deepset.ai/community/join) | [GitHub Discussions](https://github.com/deepset-ai/haystack/discussions) | [Website](https://deepset.ai) # # By the way: [we're hiring!](https://www.deepset.ai/jobs) #
tutorials/Tutorial16_Document_Classifier_at_Index_Time.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # K-Nearest Neighbors Algorithm # # + How to choose the value of k? Usually with cross-validation (Cross Validation, CV) # + Refinement: give different neighbors different voting weights # Load the data import pandas as pd df = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data', header=None) df.tail()
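# The two points above (choosing k by cross-validation and weighting neighbors by distance)
# can be illustrated with scikit-learn on the iris data just loaded. A sketch; it assumes the
# usual column layout of iris.data, i.e. four measurements followed by the species label:

# +
from sklearn.model_selection import cross_val_score
from sklearn.neighbors import KNeighborsClassifier

X = df.iloc[:, :4].values   # the four flower measurements
y = df.iloc[:, 4].values    # the species label

for k in (1, 3, 5, 7, 9):
    for weights in ('uniform', 'distance'):
        knn = KNeighborsClassifier(n_neighbors=k, weights=weights)
        score = cross_val_score(knn, X, y, cv=5).mean()
        print(f"k={k:<2} weights={weights:<8} CV accuracy={score:.3f}")
# -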
beauty_of_deep_learning/2-k-nearest-neighbor.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # PyESAPI: Data Mining # *By <EMAIL>* import pyesapi import atexit app = pyesapi.CustomScriptExecutable.CreateApplication('python_demo') atexit.register(app.Dispose); # ## Extracting Structure Data with Pandas # * Open a patient # * Use pandas dataframe to display basic structure info # * Extract data with loop over many patients # * Save to CSV file # # [reference](https://pandas.pydata.org/pandas-docs/stable/tutorials.html) app.ClosePatient() # good practice a_patient = app.OpenPatientById('RapidPlan-01') a_plan = a_patient.CoursesLot(0).PlanSetupsLot(0) # + import pandas as pd df = pd.DataFrame( [(s.Id, s.DicomType, s.Volume, s.IsHighResolution) for s in a_plan.StructureSet.Structures], columns = ('StructureId', 'DicomType', 'Volume[cc]', 'IsHighRes') ) df # pretty print HTML table # print(df) # string print # + def d_at_v(plan,structure,volume): _dose=plan.GetDoseAtVolume(structure,volume, pyesapi.VolumePresentation.Relative, pyesapi.DoseValuePresentation.Absolute) return _dose.Dose columns = ( 'PatientId', 'PlanId', 'StructureId', 'DicomType', 'Volume(cc)', 'IsHighRes', 'D95%(Gy)', 'D25%(Gy)', 'D50%(Gy)' ) def get_structure_info(plan): return [( plan.StructureSet.Patient.Id, plan.Id, s.Id, s.DicomType, s.Volume, s.IsHighResolution, d_at_v(plan,s,95), d_at_v(plan,s,25), d_at_v(plan,s,50) ) for s in plan.StructureSet.Structures] df = pd.DataFrame(get_structure_info(a_plan),columns=columns) df # pretty print HTML table # print(df) # string print # - # takes about a minute! patient_id_list= [ 'RapidPlan-01', 'RapidPlan-02', 'RapidPlan-03', 'RapidPlan-04', 'RapidPlan-06', 'Eclipse-01', 'Eclipse-03', 'Eclipse-04', 'Eclipse 06', 'Eclipse 07', 'Eclipse 11' ] dataframe_list = [] for patient_id in patient_id_list: print(f'Loading structure data from {patient_id} plans...\t\t',end='\r') app.ClosePatient() patient = app.OpenPatientById(patient_id) for course in patient.Courses: for plan in course.PlanSetups: if plan.Dose is not None: dataframe_list.append(pd.DataFrame(get_structure_info(plan),columns=columns)) print('Done!'+' '*80) structure_dataframe = pd.concat(dataframe_list,ignore_index=True) structure_dataframe # pretty print # print(structure_dataframe) # string print # if you insist with open('./StructureData.csv','w') as f: f.write(structure_dataframe.to_csv(sep='/')) # ## Pandas + SQLite # * Save data into SQLite database # * Run some quieries # * Plot a histogram of selected data # # [reference](https://www.dataquest.io/blog/python-pandas-databases/) # + import sqlite3 sql_connection = sqlite3.connect("big_data.db") table_name = 'structure_data' structure_dataframe.to_sql(table_name,sql_connection, if_exists="replace") # overwrite table if it exists # - df = pd.read_sql_query(f'select * from {table_name} limit 20;', sql_connection) df # print(df) ptv_df = pd.read_sql_query(f'select * from {table_name} where DicomType=="PTV" limit 10;', sql_connection) ptv_df # print(ptv_df) ptv_df.mean() ptv_df.hist(column='Volume(cc)'); pd.read_sql_query(f'select * from {table_name} where DicomType=="ORGAN";', sql_connection).hist(column='D25%(Gy)'); # ## Saving DVH to HDF5 Using [H5Py](http://www.h5py.org) # *HDF5 lets you store huge amounts of numerical data, and easily manipulate that data from NumPy.* # # Groups = Folders, Datasets = Files # # * Create HDF5 file # * Create group 
for patient data in H5 file # * Get DVH data for a structure and save as dataset # * Loop over patient's DVH and save data to HDF5 file # * Plot data from HDF5 file import h5py # you can reuse this code block to reset the file try: h5file.close() except NameError: pass finally: h5file = h5py.File("DVH.h5",'w') # truncate if exists app.ClosePatient() patient = app.OpenPatientById('RapidPlan-01') patient_group = h5file.create_group(f'patients/{patient.Id}') for label in h5file: print(label) # works like dictionary for label in h5file['patients']: print(label) def ls(location,depth=0): try: for key,value in location.items(): print(' ' * depth + ('\u21b3 ' if depth else '') + key) # unicode for '↳' ls(value,depth+1) except: pass ls(h5file) courses_group = patient_group.create_group('courses/') ls(h5file) # + for course in patient.Courses: for plan in course.PlanSetups: courses_group.create_group(f'{course.Id}/plans/{plan.Id}') ls(h5file) # - def get_dvh(structure): dvh = plan.GetDVHCumulativeData( structure, pyesapi.DoseValuePresentation.Absolute, pyesapi.VolumePresentation.AbsoluteCm3, .01 ) if dvh is not None: dose_x = [p.DoseValue.Dose for p in dvh.CurveData] volume_y = [p.Volume for p in dvh.CurveData] return dose_x, volume_y else: return None, None for course in patient.Courses: for plan in course.PlanSetups: for structure in plan.StructureSet.Structures: dose,vol = get_dvh(structure) if dose is not None: structure_dvh_group = courses_group.create_group( f'{course.Id}/plans/{plan.Id}/structures/{structure.Id}/dvh' ) structure_dvh_group.create_dataset('dose_gy',data=dose) structure_dvh_group.create_dataset('volume_cm3',data=vol) ls(h5file) # + from matplotlib import pyplot as plt plt.figure(figsize=(10,6)) for structure_name, structure in h5file['/patients/RapidPlan-01/courses/C1/plans/RA Calc/structures'].items(): if structure_name != 'External': plt.plot(structure['dvh/dose_gy'],structure['dvh/volume_cm3'],label=structure_name) plt.xlabel('Dose [Gy]') plt.ylabel('Volume [cc]') plt.legend(loc=0) plt.show() # + plt.figure(figsize=(10,6)) for structure_name, structure in h5file['/patients/RapidPlan-01/courses/C1/plans/RA Calc/structures'].items(): plt.plot(structure['dvh/dose_gy'],structure['dvh/volume_cm3']/structure['dvh/volume_cm3'][0],label=structure_name) plt.xlabel('Dose [Gy]') plt.ylabel('Volume [%]') plt.legend(loc=0) plt.show()
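# Once written, the HDF5 file can be read back without Eclipse/ESAPI, which is the main point
# of exporting the curves. A small read-only sketch that closes the file opened above and then
# reports the maximum stored dose for each structure of the plan used in the plots:

# +
h5file.close()   # flush everything to disk before reopening read-only
with h5py.File("DVH.h5", "r") as f:
    structures = f['/patients/RapidPlan-01/courses/C1/plans/RA Calc/structures']
    for name, grp in structures.items():
        dose_gy = grp['dvh/dose_gy'][...]
        print(f"{name:20s} max dose: {dose_gy.max():6.2f} Gy")
# -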
examples/DeveloperWorkshop2018/DataMining.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/gagan3012/pickuplines/blob/master/Create_model.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + colab={"base_uri": "https://localhost:8080/"} id="C4-nUX1nrvdK" outputId="166cdfe0-3634-4b25-be72-004c1210f4aa" # !pip install praw import praw # + id="a1sHp-BvnO2k" reddit = praw.Reddit( client_id="", client_secret="", user_agent="testscript by u/fakebot3", check_for_async=False ) # + colab={"base_uri": "https://localhost:8080/"} id="e928DaBQvbOp" outputId="9505afa3-a9f1-4d58-9de4-4925cf123a46" print(reddit.read_only) # + colab={"base_uri": "https://localhost:8080/", "height": 419} id="EBsU_e4UvjD8" outputId="d346921c-d666-4340-af06-1c40e52fbd30" import pandas as pd def get_subreddit_data(sub, lim): posts = [] # list of subreddits SUBREDDIT = reddit.subreddit(sub) # name of subreddit' LIMIT = lim # limit of posts for post in SUBREDDIT.hot(limit=LIMIT): posts.append([post.id, post.subreddit, post.title, post.url, post.selftext]) posts = pd.DataFrame(posts, columns=['id', 'subreddit', 'title', 'url', 'body']) return posts df = get_subreddit_data('pickuplines', 100000) df # + colab={"base_uri": "https://localhost:8080/", "height": 419} id="n4sC_G4EvmpL" outputId="e01bb29e-2417-471c-b1a7-3b04d90b0cf6" df['possible pick-up line'] = df[['title', 'body']].apply(lambda x: ' '.join(x), axis=1) df # + id="PyMRq9rHd3gq" from sklearn.model_selection import train_test_split train, test = train_test_split(df['possible pick-up line'], test_size=0.33, random_state=42) # + colab={"base_uri": "https://localhost:8080/"} id="llh08B3-eKjL" outputId="5d6d86ab-d67f-4744-907a-6fa2e9d0bdbf" train # + id="b_UnW3EeeLmU" train.to_csv(r'train.txt', header=None, index=None, sep=' ', mode='a') test.to_csv(r'test.txt', header=None, index=None, sep=' ', mode='a') # + colab={"base_uri": "https://localhost:8080/"} id="ZUoEFMEuedsY" outputId="78f59da9-3f78-49df-8aaf-0becbd42c347" # !git clone https://github.com/huggingface/transformers.git # + id="eolZE7u1ekoD" import os os.chdir("/content/transformers/examples/pytorch/language-modeling") # + colab={"base_uri": "https://localhost:8080/"} id="f8CGdm6zet-w" outputId="91d8f4e8-07ed-4025-e49a-aa8ddfa9fee3" # !pip install -r requirements.txt # !pip install git+https://github.com/huggingface/transformers # + colab={"base_uri": "https://localhost:8080/"} id="0vRpwDnCiFC2" outputId="4a8616eb-0b8d-41ef-ee5d-66d89df743f7" # !huggingface-cli login # + colab={"base_uri": "https://localhost:8080/"} id="CjdNu36CiR4x" outputId="550ee6c0-a81c-436f-decc-fbbdddc2ee74" # !sudo apt-get install git-lfs # + colab={"base_uri": "https://localhost:8080/"} id="ac4ie0RFex4L" outputId="7f80d7aa-425a-49fd-f1d3-fd2da71de66b" # !python run_clm.py \ # --model_name_or_path distilgpt2 \ # --train_file /content/train.txt \ # --validation_file /content/test.txt \ # --do_train \ # --do_eval \ # --output_dir /content/model \ # --push_to_hub # + id="YJtAORK5e-Fh"
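# After fine-tuning, the checkpoint written to /content/model can be loaded back for
# generation. A minimal sketch with the transformers text-generation pipeline (it assumes the
# training cell above completed; the prompt is arbitrary):

# +
from transformers import pipeline

generator = pipeline('text-generation', model='/content/model')
print(generator("Are you a", max_length=40, num_return_sequences=3))
# -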
Create_model.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/yukinaga/minnano_dl/blob/main/section_5/02_loss_function.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="t1HGlYzi4u7a" # # Defining the "Error" # We define an "error" between the network's outputs and the correct answers. # There are several ways to define this error; here we cover the "sum of squared errors". # + [markdown] id="fOqhfIac2eK3" # ## Sum of Squared Errors # # A neural network has several outputs, each with a corresponding correct answer (target). # Using these, the sum of squared errors is defined by the following formula. # # $$ E = \frac{1}{2} \sum_{k=1}^n(y_k-t_k)^2 $$ # # Here $y_k$ is an output, $t_k$ the corresponding target, and $n$ the number of neurons in the output layer. # The factor $\frac{1}{2}$ makes the derivative easier to handle. # # Before taking the sum, let us plot the individual squared errors. # # $$E_k = \frac{1}{2}(y_k-t_k)^2$$ # # The code below shows how the squared error changes with `y` when `t` is 0.25, 0.5 and 0.75. # + id="aEQhZssn94cy" import numpy as np import matplotlib.pyplot as plt def square_error(y, t): return (y - t)**2/2 # squared error y = np.linspace(0, 1) ts = [0.25, 0.5, 0.75] for t in ts: plt.plot(y, square_error(y, t), label="t="+str(t)) plt.legend() plt.xlabel("y") plt.ylabel("Error") plt.show() # + [markdown] id="ZSujlDCbOUJ8" # The error takes its minimum value of 0 when the output equals the target, and grows steadily as the two move apart. # Summing this quantity over every output-target pair determines the size of the error for a given input.
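# To make the final point concrete, a short sketch that evaluates the full sum-of-squares
# error for one example output vector and its one-hot target (the numbers are arbitrary):

# +
def sum_squared_error(y, t):
    return 0.5 * np.sum((np.array(y) - np.array(t))**2)

y_example = [0.1, 0.7, 0.2]   # network outputs
t_example = [0.0, 1.0, 0.0]   # one-hot target
print(sum_squared_error(y_example, t_example))   # 0.5 * (0.01 + 0.09 + 0.04) = 0.07
# -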
section_5/02_loss_function.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import sys sys.path.append('/Users/laurenthericourt/projets/trading/trading') import warnings warnings.filterwarnings('ignore') import random from datetime import datetime, timedelta from dateutil.rrule import rrule, WEEKLY, FR, SU import pandas as pd import numpy as np pd.set_option('display.max_rows', 500) from jupyter_dash import JupyterDash import dash_core_components as dcc import dash_html_components as html from dash.dependencies import Input, Output from plotly.subplots import make_subplots import plotly.graph_objects as go from config.load import load_conf from db.utils import get_uri_db from utils.utils import get_candles from indicator.trend import ExponentialMovingAverage from notebooks.compute_indicators import moyenne_mobile_features from utils.utils import compute_sign_changement from notebooks.display_indicators import show_candle, add_indicator from indicator.trend import BollingerBands, Slope from indicator.oscillator import Atr, Obv from indicator.trend import ExponentialMovingAverage, MovingAverage # - # # Get data load_conf('../config/configuration.yaml') dsn = get_uri_db() schema = 'trading' # + start_date = '2019-01-01' end_date = '2021-01-01' candles = get_candles(dsn, schema, start_date, end_date) # + table = 'candle' symbol = 'EUR/USD' candles_to_show = candles[(candles['table'] == table) & (candles['symbol'] == symbol)].reset_index(drop=True) # - # # Find maxima candles_to_show['atr'], _ = Atr(candles_to_show).compute(span=5, avg_type = 'ma') candles_to_show.dropna(inplace=True) # + from scipy.signal import argrelextrema class Extrema(object): def __init__(self, candles, max_, atr_weight, atr_weight2, min_range, min_variance, ratio_pic_size): self.candles = candles.reset_index(drop=True) self.max = max_ self.atr_weight = atr_weight self.atr_weight = atr_weight2 self.min_range = min_range self.min_variance = min_variance self.ratio_pic_size = ratio_pic_size def _filter_by_atr(self, local_max_idx, local_max_values): local_max_idx_to_remove = list() for prev_idx, prev_val, idx, val in zip(local_max_idx[:-1], local_max_values[:-1], local_max_idx[1:], local_max_values[1:]): atr = self.candles.loc[idx, 'atr'] #print(idx) if abs(val - prev_val) < self.atr_weight * atr and val > prev_val: #print(atr) local_max_idx_to_remove.append(prev_idx) elif abs(val - prev_val) < self.atr_weight * atr and val <= prev_val: #print(atr) local_max_idx_to_remove.append(idx) local_max_idx_filtered = [x for x in local_max_idx if x not in local_max_idx_to_remove] return local_max_idx_filtered def _filter_by_variance(self, local_max_idx, local_max_values): local_max_idx_to_remove = list() for prev_idx, prev_val, idx, val in zip(local_max_idx[:-1], local_max_values[:-1], local_max_idx[1:], local_max_values[1:]): candles_tmp = self.candles.loc[prev_idx : idx].copy() max_high = candles_tmp['high'].max() min_low = candles_tmp['low'].min() candles_tmp['range'] = candles_tmp['high'] - candles_tmp['low'] max_range = max_high - min_low mean_range = candles_tmp['range'].mean() first_pic_range = candles_tmp.loc[prev_idx, 'range'] last_pic_range = candles_tmp.loc[idx, 'range'] if self.max and max_range < mean_range * self.min_variance and val > prev_val: local_max_idx_to_remove.append(prev_idx) elif self.max and max_range < mean_range * self.min_variance and val <= prev_val: 
local_max_idx_to_remove.append(idx) elif not self.max and max_range < mean_range * self.min_variance and val > prev_val: local_max_idx_to_remove.append(idx) elif not self.max and max_range < mean_range * self.min_variance and val <= prev_val: local_max_idx_to_remove.append(prev_idx) #elif (first_pic_range > max_range * self.ratio_pic_size or last_pic_range > max_range * self.ratio_pic_size) and val > prev_val: # local_max_idx_to_remove.append(prev_idx) #elif (first_pic_range > max_range * self.ratio_pic_size or last_pic_range > max_range * self.ratio_pic_size) and val <= prev_val: # local_max_idx_to_remove.append(idx) local_max_idx_filtered = [x for x in local_max_idx if x not in local_max_idx_to_remove] return local_max_idx_filtered def _filter_close_values(self, local_max_idx, local_max_values): local_max_idx_to_remove = list() for prev_idx, prev_val, idx, val in zip(local_max_idx[:-1], local_max_values[:-1], local_max_idx[1:], local_max_values[1:]): if self.max and idx - prev_idx <= self.min_range and val > prev_val: local_max_idx_to_remove.append(prev_idx) elif self.max and idx - prev_idx <= self.min_range and val <= prev_val: local_max_idx_to_remove.append(idx) elif not self.max and idx - prev_idx <= self.min_range and val > prev_val: local_max_idx_to_remove.append(idx) elif not self.max and idx - prev_idx <= self.min_range and val <= prev_val: local_max_idx_to_remove.append(prev_idx) local_max_idx_filtered = [x for x in local_max_idx if x not in local_max_idx_to_remove] return local_max_idx_filtered def _filter_by_pattern(self, local_max_idx, local_max_values): local_max_idx_to_remove = list() for prev_idx, prev_val, idx, val, next_idx, next_val in zip(local_max_idx[:-2], local_max_values[:-2], local_max_idx[1:-1], local_max_values[1:-1], local_max_idx[2:], local_max_values[2:]): atr = self.candles.loc[idx, 'atr'] if self.max and val < prev_val and val < next_val and prev_val - val > self.atr_weight * atr and next_val - val > self.atr_weight * atr: local_max_idx_to_remove.append(idx) elif not self.max and val > prev_val and val > next_val and val - prev_val > self.atr_weight * atr and val - prev_val > self.atr_weight * atr: local_max_idx_to_remove.append(idx) local_max_idx_filtered = [x for x in local_max_idx if x not in local_max_idx_to_remove] return local_max_idx_filtered def _get_all_extrema(self): idx_maxima = list() prev_rows = list() historic_len = 2 for row in self.candles.itertuples(): if int(row.Index) < historic_len: prev_rows.append(row) continue if self.max and prev_rows[-1].high >= row.high and prev_rows[-1].high >= prev_rows[-2].high: idx_maxima.append(int(prev_rows[-1].Index)) elif not self.max and prev_rows[-1].low <= row.low and prev_rows[-1].low <= prev_rows[-2].low: idx_maxima.append(int(prev_rows[-1].Index)) del prev_rows[0] prev_rows.append(row) return idx_maxima def get_max_extrema(self): #local_max_idx = argrelextrema(self.candles['high'].values, np.greater)[0] local_max_idx = self._get_all_extrema() local_max_values = self.candles.loc[local_max_idx, 'high'].values if self.max else self.candles.loc[local_max_idx, 'low'].values local_max_idx_filtered = self._filter_by_variance(local_max_idx, local_max_values) local_max_values_filtered = self.candles.loc[local_max_idx_filtered, 'high'].values local_max_idx_filtered = self._filter_close_values(local_max_idx_filtered, local_max_values_filtered) local_max_values_filtered = self.candles.loc[local_max_idx_filtered, 'high'].values local_max_idx_filtered = self._filter_by_variance(local_max_idx_filtered, 
local_max_values_filtered) local_max_values_filtered = self.candles.loc[local_max_idx_filtered, 'high'].values local_max_idx_filtered = self._filter_by_pattern(local_max_idx_filtered, local_max_values_filtered) local_max_values_filtered = self.candles.loc[local_max_idx_filtered, 'high'].values res = local_max_idx_filtered return res # - def alternate_max_and_min(candles, idx_maxs, idx_mins): def _choose_between_two_indexes(candles, idx1, idx2, col): idx_to_remove = None if candles.loc[idx1, col] > candles.loc[idx2, col] and col == 'high': idx_to_remove = idx2 elif candles.loc[idx1, col] < candles.loc[idx2, col] and col == 'low': idx_to_remove = idx2 else: idx_to_remove = idx1 return idx_to_remove def _detect_indexes_to_remove_one_way(candles, idxs1, idxs2, begin): col = 'high' if begin == 'max' else 'low' idxs1_to_remove = list() idxs2_for_loop = idxs2.copy() for prev_idx, idx in zip(idxs1[:-1], idxs1[1:]): if idxs2_for_loop and idx < idxs2_for_loop[0]: idxs1_to_remove.append(_choose_between_two_indexes(candles, prev_idx, idx, col)) elif idxs2_for_loop: idxs2_for_loop = [x for x in idxs2_for_loop if x > idx] else: idxs1_to_remove.append(_choose_between_two_indexes(candles, prev_idx, idx, col)) return idxs1_to_remove def _detect_indexes_to_remove_two_ways(candles, idxs1, idxs2, begin): idxs1_to_remove = _detect_indexes_to_remove_one_way(candles, idxs1, idxs2, begin) new_idxs1 = [x for x in idxs1 if x not in idxs1_to_remove] begin = 'max' if begin == 'min' else 'min' idxs2_to_remove = _detect_indexes_to_remove_one_way(candles, idxs2, new_idxs1[1:], begin) new_idxs2 = [x for x in idxs2 if x not in idxs2_to_remove] return new_idxs1, new_idxs2 def _clean_commun_min_and_max_idxs(idxs_max, idxs_min): commun_idxs = set(idxs_max).intersection(set(idxs_min)) idx_mins_to_remove = list() idx_maxs_to_remove = list() for idx in commun_idxs: idx_max_commun = idxs_max.index(idx) idx_min_commun = idxs_min.index(idx) if idx_max_commun > 0 and idx_min_commun > 0 and idxs_max[idx_max_commun - 1] > idxs_min[idx_min_commun - 1]: idx_maxs_to_remove.append(idx) elif idx_max_commun > 0 and idx_min_commun > 0 and idxs_max[idx_max_commun - 1] < idxs_min[idx_min_commun - 1]: idx_mins_to_remove.append(idx) new_idxs_max = [x for x in idxs_max if x not in idx_maxs_to_remove] new_idxs_min = [x for x in idxs_min if x not in idx_mins_to_remove] return new_idxs_max, new_idxs_min if idx_maxs[0] < idx_mins[0]: idx_maxs_new, idx_mins_new = _detect_indexes_to_remove_two_ways(candles, idx_maxs, idx_mins, begin='max') else: idx_mins_new, idx_maxs_new = _detect_indexes_to_remove_two_ways(candles, idx_mins, idx_maxs, begin='min') idx_maxs_new, idx_mins_new = _clean_commun_min_and_max_idxs(idx_maxs_new, idx_mins_new) return idx_maxs_new, idx_mins_new # # Show result # + def detect_trends(candles, high_idxs, low_idxs): bullish_idx = list() bearish_idx = list() if len(high_idxs) < 2 or len(high_idxs) < 2: return bullish_idx, bearish_idx if high_idxs[0] < low_idxs[0]: new_high_idxs = high_idxs if len(high_idxs) <= len(low_idxs) else high_idxs[:-1] new_low_idxs = low_idxs elif high_idxs[0] > low_idxs[0]: new_low_idxs = low_idxs if len(low_idxs) <= len(high_idxs) else low_idxs[:-1] new_high_idxs = high_idxs for prev_high_idx, prev_low_idx, high_idx, low_idx in zip(new_high_idxs[:-1], new_low_idxs[:-1], new_high_idxs[1:], new_low_idxs[1:]): range1 = candles.loc[prev_high_idx, 'high'] - candles.loc[prev_low_idx, 'low'] range2 = candles.loc[high_idx, 'high'] - candles.loc[low_idx, 'low'] if 0.5 <= range1 / range2 <= 2 and 
candles.loc[prev_high_idx, 'high'] < candles.loc[high_idx, 'high'] and candles.loc[prev_low_idx, 'low'] < candles.loc[low_idx, 'low']: #if candles.loc[prev_high_idx, 'high'] < candles.loc[high_idx, 'high'] and candles.loc[prev_low_idx, 'low'] < candles.loc[low_idx, 'low']: bullish_idx.append(max([prev_high_idx, prev_low_idx, high_idx, low_idx])) elif 0.5 <= range1 / range2 <= 2 and candles.loc[prev_high_idx, 'high'] > candles.loc[high_idx, 'high'] and candles.loc[prev_low_idx, 'low'] > candles.loc[low_idx, 'low']: #elif candles.loc[prev_high_idx, 'high'] > candles.loc[high_idx, 'high'] and candles.loc[prev_low_idx, 'low'] > candles.loc[low_idx, 'low']: bearish_idx.append(max([prev_high_idx, prev_low_idx, high_idx, low_idx])) return bullish_idx, bearish_idx # - def show_random_cases(candles, nb_examples_to_show=30, nb_candles_to_show=50): cd = candles.copy().reset_index(drop=True) idx = cd.index.tolist() #random.seed(21) random.shuffle(idx) idx_to_show = idx[:nb_examples_to_show] for i, idx_candle in enumerate(idx_to_show): candles_to_show = cd.loc[idx_candle - nb_candles_to_show // 2 : idx_candle + nb_candles_to_show // 2].reset_index(drop=True) #idx_maxs = get_max_extrema(candles_to_show, atr_weight=0.2, atr_weight2=1, min_range=10) max_extrema = Extrema(candles_to_show, max_=True, atr_weight=0.2, atr_weight2=0.5, min_range=6, min_variance=2, ratio_pic_size=0.6) idx_maxs = max_extrema.get_max_extrema() min_extrema = Extrema(candles_to_show, max_=False, atr_weight=0.2, atr_weight2=0.5, min_range=6, min_variance=2, ratio_pic_size=0.6) idx_mins = min_extrema.get_max_extrema() idx_maxs, idx_mins = alternate_max_and_min(candles_to_show, idx_maxs, idx_mins) bullish_idx, bearish_idx = detect_trends(candles_to_show, idx_maxs, idx_mins) if i % 2 == 0: fig = make_subplots(rows=1, cols=2) fig.update_layout(xaxis1_rangeslider_visible=False, xaxis2_rangeslider_visible=False) width = 2 color = 'rgba(46, 134, 193, 0.5)' fig.add_trace(go.Candlestick(x=candles_to_show['date'], open=candles_to_show['open'], high=candles_to_show['high'], low=candles_to_show['low'], close=candles_to_show['close'], showlegend=False), row=1, col= i % 2 + 1) fig.add_trace( go.Scatter( mode='markers', x=candles_to_show.loc[idx_maxs, 'date'], y=candles_to_show.loc[idx_maxs, 'high'], marker=dict( color='mediumpurple', size=8, ), showlegend=False, ), row=1, col= i % 2 + 1 ) fig.add_trace( go.Scatter( mode='markers', x=candles_to_show.loc[idx_mins, 'date'], y=candles_to_show.loc[idx_mins, 'low'], marker=dict( color='mediumblue', size=8, ), showlegend=False, ), row=1, col= i % 2 + 1 ) fig.add_trace( go.Scatter( mode='markers', x=candles_to_show.loc[bullish_idx, 'date'], y=candles_to_show.loc[bullish_idx, 'close'], marker=dict( color='green', size=10, symbol='triangle-up', line=dict(width=2, color='black') ), showlegend=False, ), row=1, col= i % 2 + 1 ) fig.add_trace( go.Scatter( mode='markers', x=candles_to_show.loc[bearish_idx, 'date'], y=candles_to_show.loc[bearish_idx, 'close'], marker=dict( color='red', size=10, symbol='triangle-down', line=dict(width=2, color='black') ), showlegend=False, ), row=1, col= i % 2 + 1 ) if i % 2 == 1: fig.show() show_random_cases(candles_to_show, nb_examples_to_show=10, nb_candles_to_show=120)
notebooks/find_local_maxima.ipynb
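The Extrema class in notebooks/find_local_maxima.ipynb layers several hand-written filters on top of raw peak detection. As a point of comparison, the sketch below shows the core idea in a self-contained form: take raw local maxima from scipy.signal.argrelextrema (already imported in the notebook) and thin out peaks that sit closer together than a minimum bar distance, keeping the taller one. The column name 'high' and the min_range default mirror the notebook's conventions; the synthetic series at the end is only there to make the snippet runnable.

import numpy as np
import pandas as pd
from scipy.signal import argrelextrema

def local_maxima(highs: pd.Series, min_range: int = 6) -> list:
    """Return indices of local maxima, keeping only the taller of any
    two peaks that are closer than `min_range` bars apart."""
    values = highs.to_numpy()
    raw_idx = argrelextrema(values, np.greater)[0]   # raw local maxima
    filtered = []
    for i in raw_idx:
        if filtered and i - filtered[-1] <= min_range:
            # two peaks too close together: keep the taller one
            if values[i] > values[filtered[-1]]:
                filtered[-1] = i
        else:
            filtered.append(int(i))
    return filtered

# toy usage with a synthetic 'high' column
rng = np.random.default_rng(0)
demo = pd.DataFrame({'high': np.cumsum(rng.normal(size=200)) + 100})
print(local_maxima(demo['high'], min_range=6))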
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + data = '''household,dorm,appliance,energy_kWh A,tuscany,phone_energy,10 B,sauv,phone_energy,30 C,tuscany,phone_energy,12 D,sauv,phone_energy,20 A,tuscany,laptop_energy,50 B,sauv,laptop_energy,60 C,tuscany,laptop_energy,45 D,sauv,laptop_energy,50 ''' # %matplotlib inline import pandas as pd import seaborn as sns import matplotlib.pyplot as plt from io import StringIO df = pd.read_csv(StringIO(data)) df # - # In this format, it is much easier to use plotting programs to understand the variations in the data. # # # # The seaborn statistical plotting library also is set up to make very useful visualizations from data in tidy format. sns.stripplot(data=df, x='dorm', y='energy_kWh', hue='appliance') sns.stripplot(data=df, x='appliance', y='energy_kWh', hue='dorm')
data-analysis/plotting by facets.ipynb
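The tidy-format notebook above stops at two stripplot calls. Since its filename says it is about plotting by facets, a natural follow-on (a sketch, not part of the original notebook) is to let seaborn build one panel per appliance with catplot; the small CSV reuses the same column names (household, dorm, appliance, energy_kWh) and values as the notebook.

import pandas as pd
import seaborn as sns
from io import StringIO

data = """household,dorm,appliance,energy_kWh
A,tuscany,phone_energy,10
B,sauv,phone_energy,30
C,tuscany,phone_energy,12
D,sauv,phone_energy,20
A,tuscany,laptop_energy,50
B,sauv,laptop_energy,60
C,tuscany,laptop_energy,45
D,sauv,laptop_energy,50
"""
df = pd.read_csv(StringIO(data))

# one strip-plot panel per appliance, dorms on the x axis,
# households distinguished by colour
sns.catplot(data=df, x='dorm', y='energy_kWh', hue='household',
            col='appliance', kind='strip')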
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="mafJxHKx20Np" colab_type="code" colab={"resources": {"http://localhost:8080/nbextensions/google.colab/files.js": {"data": "<KEY> "ok": true, "headers": [["content-type", "application/javascript"]], "status": 200, "status_text": ""}}, "base_uri": "https://localhost:8080/", "height": 40} outputId="4e835c33-ace1-4723-cb6b-76b98b88f9f7" # Import files from google drive from google.colab import files uploaded = files.upload() # + id="V22W1WBt22hg" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="6e0de461-3b61-407c-e363-182417300d87" # Unzip dataset # !unzip Dataset # + id="nF8KlS8C23ou" colab_type="code" colab={} # Import relevant libraries import numpy as np import pandas as pd from tqdm import tqdm import _pickle as cPickle from sklearn.preprocessing import LabelEncoder from sklearn.model_selection import train_test_split from sklearn import preprocessing import pickle import sys import os files=os.listdir("Dataset/") folder="Dataset/" # + id="-gSMnoow243-" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 85} outputId="7b1c80ab-8a12-4e74-87ae-77d6a5325b02" features=[] labels=[] #loop over all files to get features and labels with tqdm(total=len(files)) as pbar: for f1 in files: name=folder+f1 f = open(name, 'r') x=f.readline() while(x): x=x.split() labels.append(int(x[-1])) del x[-1] sublist=[] for val in x: sublist.append(val) features.append(np.array(sublist)) x=f.readline() pbar.update(1) # Convert to an array features=np.array(features) labels=np.array(labels) # Print shape of features and labels print(features.shape) print(labels.shape) # Print unique labels print(np.unique(labels)) #saving for future use with open("features", 'wb') as fp: pickle.dump(features, fp) with open("labels", 'wb') as fp: pickle.dump(labels, fp) # + id="Zb75hs_23AjY" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="1e3a9b6c-44fc-4d8d-e76a-6de9a5f129d7" # Import relevant libraries import numpy as np import matplotlib.pyplot as plt from tqdm import tqdm import csv from matplotlib import style import string from collections import Counter import sys import pickle import glob from keras.layers import Dense from keras.layers import LSTM from keras.models import Model from keras.layers import Input, Dense, Flatten, Dropout from keras.optimizers import Adam from keras.preprocessing.sequence import pad_sequences from sklearn.model_selection import train_test_split from sklearn.preprocessing import OneHotEncoder # + id="toJctyVW3C4v" colab_type="code" colab={} # getting data with open('features', 'rb') as fp: X=pickle.load(fp) with open('labels', 'rb') as fp: y=pickle.load(fp) # + id="QSmGL1wq3Egv" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 105} outputId="f5529d52-2717-4434-b591-921afc454ce2" # One Hot Encode oh=OneHotEncoder() y=oh.fit_transform(np.reshape(y,(-1,1))) # + id="6H928Wod3HHH" colab_type="code" colab={} # Train / Test X_train, X_test, y_train, y_test=train_test_split(X,y,test_size=0.2, random_state=42, shuffle=True) X_train=np.expand_dims(X_train,axis=2) X_test=np.expand_dims(X_test,axis=2) # + id="Z7n_zAgV26Uu" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 465} outputId="852892e7-16c9-4204-e9e5-8a12a703a33f" # LSTM network input = 
Input(shape=(23,1)) cnn=LSTM(8,return_sequences=True)(input) cnn=LSTM(16)(cnn) d=Dense(128, activation='relu')(cnn) d=Dropout(0.4)(d) d=Dense(256, activation='relu')(d) d=Dropout(0.4)(d) d=Dense(512, activation='relu')(d) # Dense layer that has a softmax activation function output = Dense(13, activation='softmax')(d) model = Model(inputs=input, outputs=output) # defining learning rate and optimizer adam=Adam(lr=0.0001) # Compile the model model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy']) history=model.fit(X_train,y_train,batch_size=32, epochs=4,validation_data=(X_test,y_test)) model.save('lstm.h5') with open("OH", 'wb') as fp: pickle.dump(oh, fp) # plotting stats of model print(history.history.keys()) # "Loss" plt.plot(history.history['loss']) plt.plot(history.history['val_loss']) plt.title('LSTM loss') plt.ylabel('loss') plt.xlabel('epoch') plt.legend(['train', 'validation'], loc='upper left') plt.savefig("LSTM-loss.png") with open("X_test-LSTM", 'wb') as fp: pickle.dump(X_test, fp) with open("y_test_LSTM", 'wb') as fp: pickle.dump(y_test, fp) # + id="AcoK_PXMUagB" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 54} outputId="305c20e4-19c2-433d-df44-b1ce18d9a9a5" # print models layers print(model.layers) # + id="IPZ9t7foUbLX" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="2ec0cc09-9f58-40c0-d48d-704cb0ad5697" # print models input layer print(model.input) # + id="5aMkygrWUcfH" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="eda54068-fb29-401e-b974-d952b0ab4ee9" # print models output layer print(model.outputs) # + id="XtuzME7TUd0X" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 459} outputId="0c1bd21f-2dca-4313-b7b4-00d98d75b850" # summary of the model print(model.summary()) # + id="_v2MsFDYfZzl" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 295} outputId="ff62ef96-1832-4cf3-e69e-ba141c4366e4" # "Accuracy" plt.plot(history.history['acc']) plt.plot(history.history['val_acc']) plt.title('LSTM accuracy') plt.ylabel('accuracy') plt.xlabel('epoch') plt.legend(['train', 'validation'], loc='upper left') plt.savefig("LSTM-acc.png") # + id="KuDlrjRI36K7" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 458} outputId="a6a6fbe1-a570-428a-857a-53dfe16e456f" import pickle import numpy as np import seaborn as sn import pandas as pd import matplotlib.pyplot as plt from keras.models import load_model from sklearn.metrics import confusion_matrix # getting data with open('X_test-LSTM', 'rb') as fp: X_test=pickle.load(fp) with open('y_test_LSTM', 'rb') as fp: y_test=pickle.load(fp) # load model whose conf matrix you want model=load_model('lstm.h5') # make predictions y_pred=model.predict(X_test) # 12 total classes labs=set() labs.add(0) labs.add(1) labs.add(2) labs.add(3) labs.add(4) labs.add(5) labs.add(6) labs.add(7) labs.add(8) labs.add(9) labs.add(10) labs.add(11) labs.add(12) preds=[] new_test=[] # converting one hot prediction and real label to single integer value for i,p in enumerate(y_pred): preds.append(np.argmax(p)) new_test.append(np.argmax(y_test[i])) y_pred=preds y_test=new_test # making conf matrix array=confusion_matrix(y_test, y_pred) df_cm = pd.DataFrame(array, index = [i for i in list(labs)], columns = [i for i in list(labs)]) plt.figure(figsize = (10,7)) sn.heatmap(df_cm, annot=True,fmt='g') # saving matrix plt.title("Confusion Matrix- LSTM") plt.xlabel("Actual") 
plt.ylabel("Predicted") plt.savefig("CM-lstm.png") # + id="Lx789CXTg9kv" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 357} outputId="95627cad-4e76-4d1b-9c42-fc203d11878c" from sklearn.metrics import classification_report # Classification report classes = ["Null class", "Standing still", "Sitting and relaxing","Lying down", "Walking","Climbing stairs", "Waist bends forward","Frontal elevation of arms", "Knees bending (crouching)","Cycling", "Jogging","Running", "Jump front & back"] print(classification_report(y_test, y_pred, target_names=classes)) # + id="2hoiRD_K8p1a" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 578} outputId="77361e07-6510-4277-d95d-e8e7d2977180" # Results: Precision, Recall, f1 score, Accuracy from numpy import argmax from sklearn import metrics from sklearn.metrics import accuracy_score print("LSTM - Accuracy:{}%".format(100*accuracy_score(y_test, y_pred))) print("") print("LSTM - Precision: {}%".format(100*metrics.precision_score(y_test, y_pred, average="weighted"))) print("LSTM - Recall: {}%".format(100*metrics.recall_score(y_test, y_pred, average="weighted"))) print("LSTM - f1_score: {}%".format(100*metrics.f1_score(y_test, y_pred, average="weighted"))) print("") print("Confusion Matrix:") confusion_matrix = metrics.confusion_matrix(y_test, y_pred) print(confusion_matrix) # + id="wgeppapQGmIe" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 836} outputId="2e1d3058-15dd-4848-e0af-7b316d4d8ae6" # The following feature importance code is taken from this link. # Referenced in thesis # https://www.oipapio.com/question-3935333 # Import libraries from mlxtend.plotting import plot_confusion_matrix import matplotlib.pyplot as plt import numpy as np confusion_matrix = metrics.confusion_matrix(y_test, y_pred) # Convert cm to array for multiclass cm normalised_confusion_matrix = np.array(confusion_matrix, dtype=np.float32)/np.sum(confusion_matrix)*100 multiclass = normalised_confusion_matrix # labels (classes) classes = ["Null class", "Standing still", "Sitting and relaxing","Lying down", "Walking","Climbing stairs", "Waist bends forward","Frontal elevation of arms", "Knees bending (crouching)","Cycling", "Jogging","Running", "Jump front & back"] # plot confusion matrix fig, ax = plot_confusion_matrix(conf_mat=multiclass, colorbar=True, show_absolute=False, show_normed=True, figsize = (12,12)) # plot title, xaxis and yaxis, identify tickmarks, ensure tight layout plt.title('Confusion matrix: LSTM \n(normalised to % of total test data') plt.xlabel("Actual") plt.ylabel("Predicted") tick_marks = np.arange(13) plt.xticks(tick_marks, classes, rotation=90) plt.yticks(tick_marks, classes) plt.tight_layout() plt.show() # + id="ZRdO3VznGm-a" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 639} outputId="03946478-4ee2-46bd-d308-f64b890b480c" # Fix accuracy # Bar chart feature importance from sklearn.metrics import confusion_matrix as cm import pandas as pd # confusion matrix confusion_matrix=cm(y_test, y_pred) # Create list containing labels for dataframe below list1 = ["Null class","Standing still", "Sitting and relaxing","Lying down", "Walking","Climbing stairs", "Waist bends forward","Frontal elevation of arms", "Knees bending (crouching)","Cycling", "Jogging","Running", "Jump front & back"] list2 = ["Null class","Standing still", "Sitting and relaxing","Lying down", "Walking","Climbing stairs", "Waist bends forward","Frontal elevation of arms", "Knees bending 
(crouching)","Cycling", "Jogging","Running", "Jump front & back"] # convert to pandas dataframe pd.DataFrame(confusion_matrix, list1,list2) df = pd.DataFrame(confusion_matrix, list1,list2) # Hard code in accuracy df['Accuracy'] = ['89%','63%','97%','100%','0%','0%','53%','70%','38%','42%','47%','68%','0.09%'] df # + id="O9t-UNdIWdJc" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="719af87c-66a8-4fc7-fa78-20ac3f173100" # The following feature importance code is taken from this link. # https://www.oipapio.com/question-3935333 # import libraries from xgboost import plot_importance,XGBClassifier from sklearn.model_selection import train_test_split from sklearn import model_selection # import libraries import numpy as np from xgboost import XGBClassifier # Set number of estimators xgb = XGBClassifier(n_estimators=100) # feature importance train test split X_train,X_test,y_train,y_test=model_selection.train_test_split(features,labels,test_size=0.2) # fit the model, set evaluation set and early stopping rounds xgb = xgb.fit(X_train,y_train,early_stopping_rounds=5,eval_set=[(X_test,y_test)]) # + id="OQY3-mOnWfFV" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 720} outputId="a615200e-2ccc-43cc-a2d3-e43d07c6babb" # import library import pandas as pd # fit model xgb_model=xgb.fit(X_train,y_train) # convert to pandas dataframe. assign 'feature' and 'importance columns' xgb_fea_imp=pd.DataFrame(list(xgb_model.get_booster().get_fscore().items()), columns=['feature','importance']).sort_values('importance', ascending=False) # print feature importance for each attribute print('',xgb_fea_imp) # plot importance from xgboost import plot_importance plot_importance(xgb_model, )
Long_Short-Term_Memory.ipynb
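A side note on the feature-loading loop near the top of Long_Short-Term_Memory.ipynb: it appends each whitespace-separated token as a string, so the resulting array holds strings rather than numbers. A simpler equivalent is to let numpy parse the files, assuming each line in the Dataset/ files is whitespace-separated numeric values with the class label in the last column — the same layout the original loop assumes.

import glob
import numpy as np

feature_rows, label_rows = [], []
for path in glob.glob("Dataset/*"):            # assumed dataset layout
    data = np.loadtxt(path)                    # one sample per line, label last
    if data.ndim == 1:                         # file containing a single line
        data = data.reshape(1, -1)
    feature_rows.append(data[:, :-1].astype(np.float32))
    label_rows.append(data[:, -1].astype(np.int64))

features = np.vstack(feature_rows)
labels = np.concatenate(label_rows)
print(features.shape, labels.shape, np.unique(labels))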
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.9.6 64-bit (''base'': conda)' # language: python # name: python396jvsc74a57bd0c0fd1e98e332dfb716c28a5d3d3d6414e8453d2a88c952b74f113bdd5a18a941 # --- # # Single snapshot spatial-association of range-doppler observations from a linear sensor array # __Single snapshot imaging__ provides timely situational awareness, but can utilize neither platform motion, as in synthetic aperture radar, nor track targets across time, as in Kalman filtering and its variants. Associating measurements with targets becomes a fundamental bottleneck in this setting. # # This notebook illustrates a computationally efficient method for spatial association of single set of unordered range-doppler measurements from a network of radar sensors. # # The 2D position and velocity of multiple targets are estimated using a linear array of FMCW radar sensors by identifying and exploiting inherent geometric features to drastically reduce the # complexity of spatial association. The algorithm is robust to detection anomalies at sensors and achieves order of magnitude lower complexity compared to conventional methods. # # More details can be found in our paper: # # --- # <NAME>, <NAME> and <NAME>, "[Multi-Sensor Spatial Association Using Joint Range-Doppler Features](https://ieeexplore.ieee.org/abstract/document/9568701)," in IEEE Transactions on Signal Processing, vol. 69, pp. 5756-5769, 2021, doi: 10.1109/TSP.2021.3119418. # # --- # + import numpy as np import matplotlib.pyplot as plt from time import perf_counter # Custom libs import GAutils.objects as ob import GAutils.config as cfg # Sim parameters import GAutils.proc_est as pr import GAutils.simulate_snapshot2 as sim2 import GAutils.perf_eval as prfe import GAutils.PCRLB as pcrlb import GAutils.ml_est as mle import GAutils.gradient_methods as gm import GAutils.graph_primitives as grpr import GAutils.est_algo as ea # - # ### Simulation params # + assoc_algo = 'SAGA' #Proposed 'SAGA', Brute force 'SAESL' rob = 1 # Robustness level, rho estalgo = 2 #0:DFT (conventional), 1:oversampled DFT, 2:NOMP (proposed super-resolution algo) Nob = 20 # Number of point targets in the scene Nsens = 4 # Number of sensors swidth = 2 # Width of sensor array in m snr = -10 # SNR of FMCW radar sensor beat signal pmiss = 0.1 # Miss probability at each sensor osps = [3,3] # Oversampling factor for DFT est algo, >2 is sufficient # Generate linear array of sensors along x axis sx = np.linspace(-swidth/2, swidth/2,Nsens) sensors = [ob.Sensor(x,0) for x in sx] seed = np.random.randint(1000, size=1) scene = pr.init_random_scene(Nob, sensors, cfg.sep_th, seed) cfgp = {'Nsel': [],# Genie info on # targets 'rd_wt':cfg.rd_wt, 'static_snapshot': cfg.static_snapshot, 'sep_th':cfg.sep_th, 'pmiss':pmiss, 'estalgo':estalgo, 'osps':cfg.osps, 'n_Rc':cfg.n_Rc, 'n_pfa':cfg.n_pfa, # Association 'rob':rob, 'mode': cfg.mode, 'hscale':cfg.hscale, 'incr':cfg.incr, 'hN': cfg.hN, 'ag_pfa':cfg.ag_pfa, 'al_pfa':cfg.al_pfa, 'Tlen':cfg.Tlen, # Gauss Newton 'gn_steps':cfg.gn_steps, 'fu_alg':cfg.fu_alg } # - # ### Generate Range, doppler data for linear arrays of sensors # + tf_list = np.array([sensor.mcs.tf for sensor in sensors]) # FMCW radar pulse time equal for all sensors tfa_list = np.array([sensor.mcs.get_tfa() for sensor in sensors]) # Adjust so that samples vary to keep frame time const. 
beat = np.zeros(tfa_list.shape, dtype='complex128') #Intialize FMCW sensor beat signal dt = 0 # time between frames = 0:Static, tf_list[0]:moving signal_mag =1 # Normalized signal amplitude for sensor in sensors: sensor.meas_std = 10 **(-snr/20)*signal_mag gardas = [ob.gardEst() for sensor in enumerate(sensors)] targets_list = [] #Genie range, doppler, signal amplitude obs for tno, target in enumerate(scene): [next_target, AbsPos] = pr.ProcDyms(target, dt, tfa_list)# abs position over obs time for sensorID, sensor in enumerate(sensors): random_number = np.random.rand() if random_number>pmiss: #Miss target otherwise pure_beat = pr.get_beat(sensor, target, AbsPos[sensorID]) beat[sensorID, :, :] += pure_beat # Add beat signal for each target garda = pr.get_gard_true(sensor, target) gardas[sensorID].r=np.append(gardas[sensorID].r,garda.r) gardas[sensorID].d=np.append(gardas[sensorID].d,garda.d) gardas[sensorID].g=np.append(gardas[sensorID].g,garda.g) # Add noise to sensor beat signal np.random.seed(seed) # To randomize over parallel runs for sensorID, sensor in enumerate(sensors): beat[sensorID, :, :] = pr.add_cnoise(beat[sensorID, :, :], sensor.meas_std) # print('Target{}: x={},y={},vx={},vy={}'.format(tno, target_current.x, target_current.y,target_current.vx, # - # ## Radar processing: # Estimate Range-doppler from FMCW beat signal using estalgo={0,1,2} # * 0. DFT # * 1. Oversampled DFT # * 2. NOMP, Super-resolution algorithm # + runtime = np.zeros(3) t=perf_counter() if estalgo == 0: garda_sel = ea.meth2(np.copy(beat), sensors, cfgp['Nsel'], [1,1]) elif estalgo == 1: garda_sel = ea.meth2(np.copy(beat), sensors, cfgp['Nsel'], cfgp['osps'], cfgp['n_pfa']) elif estalgo == 2: garda_sel = ea.nomp(np.copy(beat), sensors, cfgp['Nsel'], cfgp['osps'], cfgp['n_Rc'], cfgp['n_pfa']) runtime[0] = perf_counter() - t rd_error = prfe.compute_rd_error(garda_sel, gardas) rde_pack = prfe.compute_rde_targetwise(garda_sel, gardas, sensors) # - # ### Perform spatial association across sensors and estimate position, velocity #%% Graph Algo t=perf_counter() G1, Total_edges = grpr.make_graph(garda_sel, sensors, 0) if assoc_algo=='MCF': min_gsigs, glen, assoc_time = mcft.get_mcfsigs(garda_sel, sensors, cfgp) elif assoc_algo=='mcf_all': min_gsigs, glen, assoc_time = mcft.get_mcfsigs_all(garda_sel, sensors, cfgp) elif assoc_algo=='SAESL': min_gsigs, glen, assoc_time = mle.iterative_prune_pht(garda_sel, sensors, cfgp, sum(len(g.r) for g in garda_sel)//2) elif assoc_algo=='SAGA': min_gsigs, glen, assoc_time = grpr.get_minpaths(G1, sensors, cfgp['mode'], cfgp) else: print('Invalid association algorithm') runtime[1] = perf_counter() - t # Total time (Make graph + graph association) Pruned_edges = sum([len(nd.lkf) for g in G1 for nd in g]) # No of edges, get V from glen # Refine position, velocity estimate t = perf_counter() for sig in min_gsigs: [dob, nlls_var] = gm.gauss_newton(sig, sensors, sig.state_end.mean , cfgp['gn_steps'], cfgp['rd_wt'])#lm_refine, gauss_newton, huber sig.state_end.mean = dob runtime[2] = perf_counter() - t # Time to Refine gr_centers = [] for gtr in min_gsigs: dob = gtr.state_end.mean gr_centers.append(ob.PointTarget(dob[0], dob[1], dob[2], dob[3])) # + jupyter={"source_hidden": true} tags=[] #Plot likelihood maps # [xgrid, ygrid, llr_map] = mle.create_llrmap([-9,9,180], [1,13,110], [-5,5,2], [-5,5,2], sensors, garda_sel) # Position # cmap = plt.get_cmap('PiYG') # plt.figure(3) # im1 = plt.pcolormesh(xgrid, ygrid, (llr_map), cmap=cmap) # plt.colorbar(im1) # pr.plot_scene(plt, scene, sensors, 3, 
'Likelihood Map (Brute Force, Only using r)') #%% Compute error measures # ospa_error1, pv_error = prfe.compute_ospa(scene, gr_centers, sensors, gardas) # [cr,cd, rList, dList]=pcrlb.get_FIMrv(sensors, scene) # crb_conv = pcrlb.CRBconverter() # [_,_,_,_,crbp, crbv] = crb_conv.get_CRBposvel_from_rd(cr, cd, sensors, scene) # print('Range-Doppler error'),print(rd_error) # print('Position-Velocity error'),print(pv_error) # - # ## Plot results from single snapshot simulation print ('{} detected {} of {} targets in {}s with sensor observations:{}.'.format(cfg.mode, len(min_gsigs),Nob, round(sum(runtime),2),[len(garda.r) for garda in garda_sel])) print('=== Association complexity (graph edges evaluated) ===') print('Brute Force search (All edges) \t=\t{}'.format(Total_edges)) print('Geometric constraint pruning \t=\t{}'.format(assoc_time[0])) print('{} association algorithm \t=\t{}'.format(assoc_algo, assoc_time[1])) plt.figure(13) for gtr in min_gsigs: dob = gtr.state_end.mean plt.quiver(dob[0], dob[1], dob[2], dob[3],color='r', headwidth = 4) pr.plot_scene(plt, scene, sensors, 13, '{} detected {} of {} targets SNR = {} dB'.format(assoc_algo,len(min_gsigs), Nob, round(snr)))
demo/association.ipynb
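The association demo above works with range-doppler detections and later refines position and velocity with Gauss-Newton. The measurement model underneath is plain geometry: for a target at position p moving with velocity v, seen by a static sensor at s, the range is the Euclidean distance and the doppler (range-rate) is the velocity projected onto the line of sight. A standalone sketch of that forward model, independent of the GAutils helpers, is:

import numpy as np

def range_doppler(p, v, s):
    """Range and range-rate of a target at position p with velocity v,
    as seen from a static sensor at s (all 2-D numpy arrays)."""
    los = p - s                      # line-of-sight vector
    r = np.linalg.norm(los)          # range
    rdot = np.dot(los, v) / r        # doppler / range-rate
    return r, rdot

# target at (2, 10) m moving at (1, -0.5) m/s, sensor at (-1, 0)
print(range_doppler(np.array([2.0, 10.0]),
                    np.array([1.0, -0.5]),
                    np.array([-1.0, 0.0])))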
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/PoomGamerE/Random-Group-With-Python/blob/main/RandomGroup_With_Python.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + colab={"base_uri": "https://localhost:8080/"} id="pxF7aoabD35I" outputId="ab99fa38-81d6-470b-9220-bc18960565a9" import random ''' สำคัญ! นี่คือโค้ดที่ค่อนข้างสมบูรณ์ของการสุ่มกลุ่ม โปรดอย่าเผลอลบเพื่อกันความผิดพลาดที่จะเกิดขึ้นด้วยครับ Update: Verion นี้มีการยัดคนเกินเข้ากลุ่มแล้ว และ ระบบเช็คคนเกินด้วยหารเอาเศษ แทนระบบ เช็คค่าไม่เท่ากับ 0''' #ส่วนนำเข้า numOfstudent = int(input("จำนวนนักเรียนทั้งห้องมีจำนวน: ")) NumOfGroup = int(input("จำนวนกลุ่มมีจำนวน: ")) numOfstudentPerGroup = numOfstudent / NumOfGroup print("จำนวนนักเรียนต่อกลุ่มมีจำนวน:", int(numOfstudentPerGroup)) #ตัวแปรเก็บเลขที่ studentnumber = [] for x in range(numOfstudent): studentnumber.append(x + 1) #ตัวแปรของระบบสุ่ม AllGroup = [] #ใส่ทั้งหมด แต่มีแถวตอน Group = [] #ตะกร้าใส่เลขที่ NumGroupOfRandom = 0 #ระบบสุ่ม for p in range(int(NumOfGroup)): for s in range(int(numOfstudentPerGroup)): RandomN = int(random.choice(studentnumber)) #สุ่มค่าจากตัวแปร Group.append(RandomN) #เก็บข้อมูลไปยังกลุ่ม studentnumber.remove(RandomN) #ลบตัวเลขออกจากตัวแปร NumGroupOfRandom = NumGroupOfRandom + 1 AllGroup.append(Group.copy()) Group.clear() #Check จำนวนคนเกิน Limit = True CheckMax = numOfstudent % NumOfGroup if CheckMax == 0: Limit = False #ระบบสุ่ม (กรณีที่มีคนเกิน) if Limit == True: OverStudent = numOfstudent % NumOfGroup #จำนวนคนที่เกิน print("จำนวนคนเกินที่จะถูกบังคับเข้ากลุ่ม:", OverStudent) for Count in range(int(OverStudent)): RandomN = int(random.choice(studentnumber)) #สุ่มค่าจากตัวแปร AllGroup[Count].append(RandomN) #เก็บข้อมูลไปยังกลุ่ม studentnumber.remove(RandomN) #ลบตัวเลขออกจากตัวแปร #แสดงคนที่อยู่ในแต่ละกลุ่ม for y in range(int(NumOfGroup)): print("กลุ่ม ", y + 1, "(", len(AllGroup[y]), "คน) ได้แก่เลขที่:", AllGroup[y]) #เช็คข้อมูลที่เก็บไว้แต่ละกลุ่ม Output SearchG = int(input("ต้องการเช็คกลุ่มไหน: ")) print("กลุ่ม ", SearchG, "(", len(AllGroup[SearchG - 1]), "คน) ได้แก่เลขที่:", AllGroup[SearchG - 1])
RandomGroup_With_Python.ipynb
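The grouping notebook above draws one student at a time with random.choice and then assigns the leftover students to the first groups. An equivalent, somewhat shorter approach (a sketch in plain functions, not a drop-in replacement for the interactive input() flow) is to shuffle the full roster once and slice it into nearly equal groups:

import random

def random_groups(num_students: int, num_groups: int):
    """Shuffle student numbers 1..num_students and split them into
    num_groups groups whose sizes differ by at most one."""
    students = list(range(1, num_students + 1))
    random.shuffle(students)
    base, extra = divmod(num_students, num_groups)
    groups, start = [], 0
    for g in range(num_groups):
        size = base + (1 if g < extra else 0)   # first `extra` groups get one more
        groups.append(students[start:start + size])
        start += size
    return groups

for i, group in enumerate(random_groups(23, 5), start=1):
    print(f"Group {i} ({len(group)} students): {group}")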
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # #### prolegomena # + # --+----1----+----2----+----3----+----4----+----5----+----6----+----7----+----8 # %matplotlib inline # I need the time axis labels in italian, # it's usually better to do this at first from locale import setlocale, LC_TIME ; setlocale(LC_TIME, 'it_IT.utf8') from numpy import array # Astronomical computations will be done using ephem import ephem as x # Not every important city is included in ephem city database from ephem.cities import _city_data, city _city_data.update({ 'Monza':('45.577721','9.300896',160.0), 'Cabiate':('45.679350','9.165230',255.0), 'Meda':('45.661400','9.155734',223.0), }) # We'll need to fiddle a bit with the labeling of the time axis from matplotlib.ticker import MultipleLocator from matplotlib.dates import MonthLocator, DateFormatter # The preferred format for inline plots, alternatives are 'png' and 'svg' # %config InlineBackend.figure_format = 'png' # The time unit is a day, here we define day, hour, etc d, h, m, s = 1.0, 1.0/24, 1.0/24/60, 1.0/24/60/60 # - # # Moonlight # # ### Initialization # # We need an observer, or better two observers, the second one will be used behind the scenes to do some # computation. my_city = 'Cabiate' me = city(my_city) me2 = city(my_city) # Finally, we need a moon. moon = x.Moon() # ### "Vertical" velocity of the moon # # For a given date, i.e., moment in time, # # 1. i compute the position of the moon in the sky of the observer (me2) a little before and a little later, # 2. i extract the height of the moon (moon.alt) in the sky of the observer in these two instants and # 3. i approximate the vertical velocity with the usual finite differences formula. def velocity(date): me2.date=date-0.1*s moon.compute(me2) ; alt0 = moon.alt me2.date=date+0.1*s moon.compute(me2) ; alt1 = moon.alt return alt1/0.2/s - alt0/0.2/s # ### Day-by-day maximum heigth of the moon # # Given that the maximum heigth is reached when the moon is close to transit, after a bit of initialization, # # 1. the position of the moon is computed at the beginning of the day # 2. the transit time is extracted # 3. test if the moon transits during the day # 1. bracketing the transit time, use the default solver to find the time for which the vertical velocity is zero, # 2. save in a container the couple of values time, altitude of the moon (nb, the plotting routines expect that time is in a particular format, hence the `.datetime()` call) # 4. the date of the observer is incremented by one day (`me.date = me.date + d`), # 5. if the new date is in 2014 `break` otherwise repeat the cycle. # # The container (a Python `list`) is converted to an `array`, so that we can index it with a `[i,j]` notation. # + me.date = "2013/01/01 00:00:01" next_year = x.date("2014/01/01 00:00:01") container = [] while 1: moon.compute(me) mtt = moon.transit_time if mtt: time = x.Date(x.newton(velocity,mtt-0.2*h,mtt+0.2*h)) container.append( (time.datetime(), 180*moon.alt/x.pi)) me.date = me.date + d if me.date>next_year: break container=array(container) # - # ### The phases of the moon # # This is ad-hoc for 2013, because i know that the first phase is a last quarter, anyway... the dates of new, 1st q., full and last q. moons are stored in a list, together with an integer 0,..,3 denoting the phase. At the end, the list of moons is converted to an array. 
# + date = x.Date('2013/01/01') ; moons = [] while 1: date = x.next_last_quarter_moon(date) if date>next_year: break moons.append((date.datetime(),3)) date = x.next_new_moon(date) if date>next_year: break moons.append((date.datetime(),0)) date = x.next_first_quarter_moon(date) if date>next_year: break moons.append((date.datetime(),1)) date = x.next_full_moon(date) if date>next_year: break moons.append((date.datetime(),2)) moons = array(moons) # - # We want to superimpose the envelope curve with different symbols for each phase of the moon. For now # + i = 0 ; delendo = [] for m in moons: tm, phase = m while container[i,0]<tm: i = i+1 t0, alt0 = container[i-1,:]; t0 = x.Date(t0) t1, alt1 = container[ i ,:]; t1 = x.Date(t1) altm = alt0 + (x.Date(tm)-t0)*(alt1-alt0)/(t1-t0) delendo.append((tm,altm,phase)) moons = array(delendo) ; del delendo # - # When applying a test on some elements of an array, we obtain an array of boolean values # (i.e., true vs false) of the same length of the tested sequence. new = moons[:,2] == 0 full = moons[:,2] == 2 qrtr = moons[:,2] %2 == 1 # The important fact is that we can use these boolean arrays to index an array... e.g., # we print the `new` array and then we use it to print the first 3 new moons. print(new) print(moons[new][:3]) # ## Plotting # # ### Prepare the axis on which to plot # # - We want to plot 13 cycles of an almost periodic function, it's better to have an # elongated x-axis. # - The margins around the graph are by default a fixed ratio of the figure size and the # horizontal margins are hence too large, fix this. # - The range of ordinates is ~ 20 to 65 degrees so limits on the y axis would be # too tight, so we specify a range 0-90 degrees and, while at it, we give a precise # range to the x axis. # - By default, we have a label every two months, we want a label for every month and # the year in every label. # - By defalt, no minor ticks... we want a minor tick for every day. # # Show our current results. from matplotlib.pyplot import figure, close f = figure(figsize=(10,2.5)) ax = f.add_subplot(111) close() # + # size of the figure in inches #figsize(10,2.5) # in this figure there is one x-y graph #subplot(111) # save the current figure and the current x-y graph #f = gcf() ; ax = gca() ; close() # the margins of the x-y are fractions of the figure, # having increased the figsize we narrow the margins f.subplots_adjust(left=0.06, right=0.96, bottom=0.08, top=0.92) # adjust the limits of the axes ax.axis(ymin=0, ymax=90, xmin=x.Date("2013-01-01").datetime(), xmax=x.Date("2014/01/01").datetime()) ax.xaxis.set_minor_locator(MultipleLocator(1)) ax.xaxis.set_major_locator(MonthLocator(bymonth=range(13))) ax.xaxis.set_major_formatter(DateFormatter("%b %Y")) ax.set_axisbelow(True) ax.grid(c='#B0B0B0', linestyle='-', linewidth=0.1) show_all = True if show_all: f.show() # - # ### Enhancing the axis # # - The minor ticks on x are too large. # - The x labels almost run one into the other, choose a smaller font and, while # we are at it, change also the size of the y labels. # - I prefer y labels in vertical. 
# + ax.tick_params(axis='x', which='minor', length=4, width=0.2, color='#909090') [tik.set_size('xx-small') for tik in ax.get_xticklabels()] [tik.set_size('xx-small') for tik in ax.get_yticklabels()] [tik.set_rotation(90.0) for tik in ax.get_yticklabels()] if show_all: f.show() # - # ### Titles for the x-y graph and the individual axes # # I feel no need for a specific label for the whole x-axis, month names are good enough # to infere the meaning of the abscissae. # + # the title and a label for the ordinates ax.set_title( 'La Luna vista da %s (lat = %s, lon = %s, altezza = %5.1f mslm).'% (me.name,x.degrees(me.lat),x.degrees(me.lon), me.elevation), size='small') ax.set_ylabel('Inviluppo dell\'altezza lunare, in gradi', size='x-small') if show_all: f.show() # - # ### Plot the envelope of the moon altitude over the horizon # # We have a function that is defined only in a discrete set of points and we plot it as a continuous function with a very light color, then we superimpose small black dots in the position where we have found a local maximum. # + # plot the envelope of moon altitudes two times, first with a continuous line # & later with the smallest of the available dot typess ax.plot(container[:,0],container[:,1],'-',color='lightgray', linewidth=0.3) ax.plot(container[:,0],container[:,1],',',color="black") if show_all: f.show() # - # ### Plot the New Moons, etc # # First i plot differently colored circles, with a slight transparency so that # the underlying envelope curve is still visible and later i superimpose in the center of # each circle a small black dot to help the eye to exactly position each moon. # + # plot the postion of a) new moons, b) full moons and c) both first quarter and last quarter # of the moon # first time, with circles of different colors ax.plot(moons[new, 0],moons[new, 1],'o',markersize=5, color='#202090', alpha=0.75) ax.plot(moons[full,0],moons[full,1],'o',markersize=5, color="#ffff00", alpha=0.75) ax.plot(moons[qrtr,0],moons[qrtr,1],'o',markersize=5, color="lightgray", alpha=0.75) # second time, with small black dots ax.plot(moons[:,0],moons[:,1],'.k',markersize=2) if show_all: f.show() # - # #### What's up with these random circles? # # I know that the small circles represent the approximate maximum altitude of the moon when # the moon enters a new phase and i think this is almost evident, but an annotation is easy to place. # + xpos = x.Date('2013/07/1').datetime() ax.annotate('''I cerchi sul grafico indicano la posizione delle lune piene (cerchi gialli), delle lune nuove (cerchi blu) e dei quarti di luna (cerchi grigi).''', (xpos,12), size='x-small', ha='center', va='center', bbox=dict(boxstyle="round,pad=0.5",fc='white',ec='lightgray')) if show_all: f.show() # - # #### A gratuitous comment... # # It's in Italian, means that these two full moons are very high in the sky and, # consequently, we will have a sort of _"white nights"_ in the 3 days atound the full # moon date. 
# + ax.annotate('',(moons[-2,0],moons[-2,1]), xytext=(10./12.,0.85), textcoords='axes fraction', ha='right', arrowprops=dict(width=0.1,headwidth=4.0,shrink=0.15,color='gray')) ax.annotate(u'''Le due ultime lune piene di autunno saliranno molto alte nel cielo e le tre notti della luna piena saranno molto luminose.''', (moons[-6,0],moons[-6,1]), xytext=(10./12.,0.85), textcoords='axes fraction', ha='right', name='sans', size='xx-small', color='#404090', arrowprops=dict(width=0.1,headwidth=4.0,shrink=0.25,color='gray',), bbox={"boxstyle":"round,pad=0.5","fc":"#feffff","alpha":0.9,'ec':'#b0b0ff'}) f.show() # - # ### This fine plot deserves to be saved # # And it's just a single line of code! f.savefig('Moon_at_%s.pdf'%(me.name,))
nb/Moonlight2013.ipynb
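The phase bookkeeping in the Moonlight notebook is hard-coded for 2013, where the first event is a last quarter. A smaller, year-agnostic sketch of the same ephem calls — just listing the full moons between two dates — looks like this; it relies only on ephem.Date and ephem.next_full_moon, both already used above.

import ephem

def full_moons(start, end):
    """Return an ephem.Date for every full moon in [start, end)."""
    date = ephem.Date(start)
    stop = ephem.Date(end)
    moons = []
    while True:
        date = ephem.next_full_moon(date)
        if date >= stop:
            break
        moons.append(date)
    return moons

for d in full_moons('2013/01/01', '2014/01/01'):
    print(d)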
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.6.8 64-bit ('fmaEnv') # metadata: # interpreter: # hash: 3068ee8ee4c6209545bb32c3ad999d45863c7ccdc6e6adea820113d363fe4d7a # name: python3 # --- # # Preprocessing # # By: <NAME> # Date: February 10th, 2021 # # Note that for baseline-models, pretrained models will set the bar pretty high for our models. import os import librosa import utils import pandas as pd import numpy as np from sklearn.preprocessing import LabelEncoder from sklearn.neighbors import KNeighborsClassifier from sklearn.svm import SVC, LinearSVC #from sklearn.gaussian_process import GaussianProcessClassifier #from sklearn.gaussian_process.kernels import RBF from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier from sklearn.neural_network import MLPClassifier from sklearn.naive_bayes import GaussianNB from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis from sklearn.multiclass import OneVsRestClassifier # ### First, make sure your notebook is up to date. In addition, uninstall tornado and downgrade it to version 5.1.1 if you can't connect to the notebook using Python 3.6. Else, proceed. # # https://stackoverflow.com/questions/54955563/jupyter-notebook-cannot-connect-to-kernel base = "./data/fma_metadata" tracks = utils.load(os.path.join(base, "tracks.csv")) raw_tracks = utils.load(os.path.join(base, "raw_tracks")) tracks.head() raw_tracks.head() utils.LibrosaLoader
PlayAround/preprocess.ipynb
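The preprocessing notebook stops after loading the FMA metadata, but its imports suggest the intended next step: fit a handful of baseline classifiers. The sketch below assumes a numeric feature matrix X and label vector y are already available (for FMA these would typically come from the audio-features file and a genre column, which is an assumption here, not something loaded above); random placeholder data keeps the snippet runnable.

import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import LinearSVC
from sklearn.ensemble import RandomForestClassifier

# random placeholder data standing in for real audio features / genre labels
rng = np.random.default_rng(42)
X = rng.normal(size=(500, 20))
y = rng.integers(0, 8, size=500)

X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=0)

baselines = {
    'knn': KNeighborsClassifier(n_neighbors=5),
    'linear_svc': LinearSVC(),
    'random_forest': RandomForestClassifier(n_estimators=100, random_state=0),
}
for name, clf in baselines.items():
    clf.fit(X_train, y_train)
    print(name, clf.score(X_test, y_test))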
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="9Uc0NS5r_la2" #import libraries import glob from keras.models import Sequential, load_model import numpy as np import pandas as pd from keras.layers import Dense from sklearn.model_selection import train_test_split from sklearn.preprocessing import LabelEncoder, MinMaxScaler import matplotlib.pyplot as plt import keras as k from sklearn.metrics import accuracy_score # + colab={"base_uri": "https://localhost:8080/", "height": 224} id="dh8pXittGsqK" outputId="699fbfd3-61a7-4035-9746-54a56bd438c0" #Load the data df = pd.read_csv('kidney_disease.csv') #Print the first 5 rows df.head() # + colab={"base_uri": "https://localhost:8080/"} id="8RzTlJXgK0eO" outputId="048ee74b-1c48-44cc-fc9b-3b7518ea60ff" #Get the shape of the data (the number f rows & cols) df.shape # + id="0GxnS8GWN9CF" #Create a list of coumn names to keep columns_to_retain = ['sg', 'al', 'sc', 'hemo', 'pcv', 'wbcc', 'rbcc', 'htn', 'classification'] #Drop the coluns that are not in columns_to_retain df = df.drop( [col for col in df.columns if not col in columns_to_retain] , axis=1 ) #Drop the rows with na or missing values df = df.dropna(axis=0) # + colab={"base_uri": "https://localhost:8080/"} id="cWT_by6EPPG2" outputId="4981ebce-5540-4cd9-b451-2b309890c57b" #Transform the non-numeric data in the columns for column in df.columns: if df[column].dtype == np.number: continue df[column] = LabelEncoder().fit_transform( df[column] ) # + colab={"base_uri": "https://localhost:8080/", "height": 204} id="OvlbgwoMQDmk" outputId="60f70e01-fd47-466f-abc8-8df990413c2c" #Print the first 5 rows of the new cleaned data set df.head() # + id="W2_8qu3WRB4z" # + id="l0ySC8_ijgaO" df.replace({"yes":1,"no":0},inplace=True) df.replace({'ckd':0,"notckd":1},inplace=True) # + id="D93CKvKrkEf7" outputId="3f5c752b-f8d1-41fb-db63-3d87a3300492" colab={"base_uri": "https://localhost:8080/", "height": 204} df.head() # + id="bH1l9O--ICv4" #Split the data into independent (X) data set (the features) and dependent (y) data set (the target) X = df.drop(['classification'], axis=1) y = df['classification'] # + id="Rr-H_Ro3R8RT" #Feature Scaling #min-max scaler method scales the data set so that all the input features lie between 0 and 1 x_scaler = MinMaxScaler() x_scaler.fit(X) column_names = X.columns X[column_names] = x_scaler.transform(X) # + id="hw_gpeGVImNo" x_train,x_test,y_train,y_test=train_test_split(X,y,test_size=0.2,shuffle=True) # + id="KPlQcflaI39_" outputId="0bf711d6-c035-42ec-e8b2-93aa28c3e655" colab={"base_uri": "https://localhost:8080/"} from sklearn.ensemble import RandomForestClassifier model = RandomForestClassifier(random_state=10) model.fit(x_train,y_train) # + id="snBm41QCI-e4" outputId="90b5b6ed-8cf0-4c97-ca31-54f56b7d5601" colab={"base_uri": "https://localhost:8080/"} y_pred=model.predict(x_test) print(accuracy_score(y_test, y_pred)*100) # + id="1-3cI5QrJCow" outputId="1ebaa6ee-1e13-41e1-bd0b-e28afaad6e92" colab={"base_uri": "https://localhost:8080/"} model.predict([["1.020","1.0","1.2","15.4","44","1"]]) # + id="VcHlcyRCKtqy" import pickle file=open("kidney.pkl","wb") pickle.dump(model,file)
ipynb files/kidney_ml.ipynb
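For later reuse, the pickled kidney model expects the same inputs it was trained on: the retained columns, label-encoded where needed and scaled with the fitted MinMaxScaler (which the notebook does not pickle, so re-saving x_scaler is assumed). The sketch below reloads kidney.pkl and scores one hypothetical record; the column order follows the notebook's own predict call (sg, al, sc, hemo, pcv, htn) and the values are made up.

import pickle
import pandas as pd

# reload the persisted classifier written above as "kidney.pkl"
with open("kidney.pkl", "rb") as fh:
    clf = pickle.load(fh)

# one made-up patient record; columns follow the order used in training
# (sg, al, sc, hemo, pcv, htn).  In practice the values should also be
# transformed with the same fitted MinMaxScaler used during training,
# which would need to be pickled alongside the model.
sample = pd.DataFrame(
    [[1.020, 1.0, 1.2, 15.4, 44, 1]],
    columns=["sg", "al", "sc", "hemo", "pcv", "htn"],
)
print(clf.predict(sample))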
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import gym import numpy as np import matplotlib as plt env = gym.make('Acrobot-v1') env.reset() # + for _ in range(1000): env.render() env.step(env.action_space.sample()) env.close() # - # # Action and State # # Reinforcement Learning will learn a mapping of states to the optimal action to perform in that state by exploration, i.e. the agent explores the environment and takes actions based off rewards defined in the environment. [source](https://www.learndatasci.com/tutorials/reinforcement-q-learning-scratch-python-openai-gym/) # # - **Action**, input provided by the agent to the environment # Here, left, nothing or right represented as +1, 0 or -1) # # # - **State**, numeric representation of what the agent is observing at a particular time in the environment # Here, the state consists of the sin() and cos() of the two rotational joint angles and the joint angular velocities : [cos(theta1) sin(theta1) cos(theta2) sin(theta2) thetaDot1 thetaDot2]. For the first link, an angle of 0 corresponds to the link pointing downwards. The angle of the second link is relative to the angle of the first link. An angle of 0 corresponds to having the same angle between the two links. A state of [1, 0, 1, 0, ..., ...] means that both links point downwards. # + env.reset() # reset environment to a new, random state env.render() env.close() print("Action Space (number of input possibility by agent user) {}".format(env.action_space)) print("State Space (encoding of the curent state to be mapped) {}".format(env.observation_space)) # - # # Q-learning # # ![image.png](attachment:image.png) # # To define the maximum expected cumulative award for given pair with hyperparameters : # - learning rate # - discount factor # # The Q learning equation maps state-action pairs to a maximum with combination of immediate reward plus future rewards i.e. for new states learned value is reward plus future estimate of rewards. # # Adapt the Qlearning function # # ![image.png](attachment:image.png) # # **from [moutain_car exemple](https://gist.github.com/gkhayes/3d154e0505e31d6367be22ed3da2e955) # # # Determine size of discretized state space num_states = (env.observation_space.high - env.observation_space.low) * np.array([1, 1, 1, 1, 1, 1]) #multiplication du state incrementé num_states = np.round(num_states, 0).astype(int) + 1 num_states # Initialize Q table Q = np.random.uniform(low = -1, high = 1, size = (num_states[0], num_states[1], env.action_space.n)) Q # Initialize variables to track rewards reward_list = [] ave_reward_list = [] # # Hyperparameters (1/2) : # # # We define **epsilon**, the exploration rate of different possibilities (set to 1 at the beginning). # Then randomly, if **epsilon** is less than this random number, we will explore the possible path. 
# Start = big **epsilon** # Progressively = reduce the **epsilon** as the agent estimates the Q-values more precisely (the lowest the epsilon, the more chances to select the best option (overfit)) # # **min_eps** : # # **episodes** : # + # Initialize epsilon at 1 epsilon = 0.2 # the lowest the epsilon, the more chances to select the best option (overfit) - the lower the more chances to choose the next action at random (here 20% of random choice) min_eps = 0.05 episodes = 5000 # episodes : will reduce the impact of epsilon every run (handles the progress) # Calculate episodic reduction in epsilon reduction = (epsilon - min_eps) / episodes # - # # 3 basic steps of Qlearning : # # 1. Agent starts in a state (s1) takes an action (a1) and receives a reward (r1) # 2. Agent selects action by referencing Q-table with highest value (max) OR by random (epsilon, ε) # 3. Update q-values # # # Hyperparameters (2/2) : # # **learning**: lr or learning rate (alpha in the equation, α), can simply be defined as how much you accept the new value vs the old value. Above we are taking the difference between new and old and then multiplying that value by the learning rate. This value then gets added to our previous q-value which essentially moves it in the direction of our latest update. # # **discount**: (gamma in the equation, γ) The discount factor is used to balance immediate and future reward. We apply the discount to the future reward upon update. Typically this value can range anywhere from 0.8 to 0.99. # + learning= 0.2 # learning rate discount = 0.9 # discount rate # Run Q learning algorithm for i in range(episodes): # Initialize parameters done = False tot_reward, reward = 0,0 state = env.reset() # Discretize state state_adj = (state - env.observation_space.low) * np.ones((6,)) state_adj = np.round(state_adj, 0).astype(int) while done != True: # Render environment for last 5 episodes if i >= (episodes - 5): env.render() # Determine next action - epsilon greedy strategy if np.random.random() < 1 - epsilon: # if random inferior to 1-epsilon (epsilon has to be between 0.0001-0.999) action = np.argmax(Q[state_adj[0], state_adj[1]]) else: action = np.random.randint(0, env.action_space.n) # Get next state and reward state2, reward, done, info = env.step(action) # Discretize state2 state2_adj = (state2 - env.observation_space.low) * np.ones((6,)) state2_adj = np.round(state2_adj, 0).astype(int) #Allow for terminal states if done and state2[0] >= 0.5: Q[state_adj[0], state_adj[1], action] = reward # Adjust Q value for current state else: delta = learning*(reward + discount*np.max(Q[state2_adj[0],state2_adj[1]]) - Q[state_adj[0], state_adj[1],action]) Q[state_adj[0], state_adj[1],action] += delta # Update variables tot_reward += reward state_adj = state2_adj # Decay epsilon ==== reduce the epsilon as the agent estimates the Q-values more precisely if epsilon > min_eps: epsilon -= reduction # Track rewards reward_list.append(tot_reward) if (i+1) % 100 == 0: # every 100 episodes, get the averaged reward printed on the list ave_reward = np.mean(reward_list) ave_reward_list.append(ave_reward) reward_list = [] if (i+1) % 100 == 0: print('Episode {} Average Reward: {}'.format(i+1, ave_reward)) env.close() ave_reward_list # - reward_list
games_gymOpenAI/acrobot/.ipynb_checkpoints/Gym-acrobot-checkpoint.ipynb
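Stripped of the gym environment and the state discretisation above, the update rule described in the Q-learning markdown cells fits in a few lines. This is a minimal standalone version of the tabular update; the hyperparameter values are arbitrary and the tiny table is only for illustration.

import numpy as np

def q_update(Q, state, action, reward, next_state, lr=0.2, gamma=0.9):
    """One tabular Q-learning step:
    Q(s,a) += lr * (r + gamma * max_a' Q(s',a') - Q(s,a))."""
    td_target = reward + gamma * np.max(Q[next_state])
    Q[state, action] += lr * (td_target - Q[state, action])
    return Q

# toy example: 4 states, 2 actions
Q = np.zeros((4, 2))
Q = q_update(Q, state=0, action=1, reward=1.0, next_state=2)
print(Q)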
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="0b92bc6a-001" colab_type="text" # #1. Install Dependencies # First install the libraries needed to execute recipes, this only needs to be done once, then click play. # # + id="0b92bc6a-002" colab_type="code" # !pip install git+https://github.com/google/starthinker # + [markdown] id="0b92bc6a-003" colab_type="text" # #2. Get Cloud Project ID # To run this recipe [requires a Google Cloud Project](https://github.com/google/starthinker/blob/master/tutorials/cloud_project.md), this only needs to be done once, then click play. # # + id="0b92bc6a-004" colab_type="code" CLOUD_PROJECT = 'PASTE PROJECT ID HERE' print("Cloud Project Set To: %s" % CLOUD_PROJECT) # + [markdown] id="0b92bc6a-005" colab_type="text" # #3. Get Client Credentials # To read and write to various endpoints requires [downloading client credentials](https://github.com/google/starthinker/blob/master/tutorials/cloud_client_installed.md), this only needs to be done once, then click play. # # + id="0b92bc6a-006" colab_type="code" CLIENT_CREDENTIALS = 'PASTE CLIENT CREDENTIALS HERE' print("Client Credentials Set To: %s" % CLIENT_CREDENTIALS) # + [markdown] id="0b92bc6a-007" colab_type="text" # #4. Enter CM360 Report To BigQuery Parameters # Move existing CM report into a BigQuery table. # 1. Specify an account id. # 1. Specify either report name or report id to move a report. # 1. The most recent valid file will overwrite the table. # 1. Schema is pulled from the official CM specification. # Modify the values below for your use case, can be done multiple times, then click play. # # + id="0b92bc6a-008" colab_type="code" FIELDS = { 'auth_read': 'user', # Credentials used for reading data. 'auth_write': 'service', # Credentials used for writing data. 'account': '', # CM network id. 'report_id': '', # CM report id, empty if using name . 'report_name': '', # CM report name, empty if using id instead. 'dataset': '', # Dataset to be written to in BigQuery. 'table': '', # Table to be written to in BigQuery. 'is_incremental_load': False, # Clear data in destination table during this report's time period, then append report data to existing table. } print("Parameters Set To: %s" % FIELDS) # + [markdown] id="0b92bc6a-009" colab_type="text" # #5. Execute CM360 Report To BigQuery # This does NOT need to be modified unless you are changing the recipe, click play. 
# # + id="0b92bc6a-010" colab_type="code" from starthinker.util.configuration import Configuration from starthinker.util.configuration import execute from starthinker.util.recipe import json_set_fields USER_CREDENTIALS = '/content/user.json' TASKS = [ { 'dcm': { 'auth': 'user', 'report': { 'account': {'field': {'name': 'account','kind': 'integer','order': 2,'default': '','description': 'CM network id.'}}, 'report_id': {'field': {'name': 'report_id','kind': 'integer','order': 3,'default': '','description': 'CM report id, empty if using name .'}}, 'name': {'field': {'name': 'report_name','kind': 'string','order': 4,'default': '','description': 'CM report name, empty if using id instead.'}} }, 'out': { 'bigquery': { 'auth': 'user', 'dataset': {'field': {'name': 'dataset','kind': 'string','order': 5,'default': '','description': 'Dataset to be written to in BigQuery.'}}, 'table': {'field': {'name': 'table','kind': 'string','order': 6,'default': '','description': 'Table to be written to in BigQuery.'}}, 'header': True, 'is_incremental_load': {'field': {'name': 'is_incremental_load','kind': 'boolean','order': 7,'default': False,'description': "Clear data in destination table during this report's time period, then append report data to existing table."}} } } } } ] json_set_fields(TASKS, FIELDS) execute(Configuration(project=CLOUD_PROJECT, client=CLIENT_CREDENTIALS, user=USER_CREDENTIALS, verbose=True), TASKS, force=True)
colabs/dcm_to_bigquery.ipynb
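For a concrete run, the FIELDS dictionary above only needs real values before json_set_fields is applied. The values below are placeholders to illustrate the shape of a filled-in configuration, not real CM or BigQuery identifiers.

# hypothetical example values for the recipe parameters above
FIELDS = {
    'auth_read': 'user',
    'auth_write': 'service',
    'account': '1234567',            # placeholder CM network id
    'report_id': '',                 # empty because the report is looked up by name
    'report_name': 'my_cm_report',   # placeholder report name
    'dataset': 'marketing',          # destination BigQuery dataset
    'table': 'cm_report',            # destination BigQuery table
    'is_incremental_load': False,
}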
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Welcome to the first SimpleITK Notebook demo: # # SimpleITK Image Basics # # This document will give a brief orientation to the SimpleITK Image class. # # First we import the SimpleITK Python module. By convention our module is imported into the shorter and more pythonic "sitk" local name. # import matplotlib.pyplot as plt # %matplotlib inline import SimpleITK as sitk # ## Image Construction # # There are a variety of ways to create an image. All images' initial value is well defined as zero. image = sitk.Image(256, 128, 64, sitk.sitkInt16) image_2D = sitk.Image(64, 64, sitk.sitkFloat32) image_2D = sitk.Image([32,32], sitk.sitkUInt32) image_RGB = sitk.Image([128,128], sitk.sitkVectorUInt8, 3) # ### Pixel Types # # The pixel type is represented as an enumerated type. The following is a table of the enumerated list. # # <table> # <tr><td>sitkUInt8</td><td>Unsigned 8 bit integer</td></tr> # <tr><td>sitkInt8</td><td>Signed 8 bit integer</td></tr> # <tr><td>sitkUInt16</td><td>Unsigned 16 bit integer</td></tr> # <tr><td>sitkInt16</td><td>Signed 16 bit integer</td></tr> # <tr><td>sitkUInt32</td><td>Unsigned 32 bit integer</td></tr> # <tr><td>sitkInt32</td><td>Signed 32 bit integer</td></tr> # <tr><td>sitkUInt64</td><td>Unsigned 64 bit integer</td></tr> # <tr><td>sitkInt64</td><td>Signed 64 bit integer</td></tr> # <tr><td>sitkFloat32</td><td>32 bit float</td></tr> # <tr><td>sitkFloat64</td><td>64 bit float</td></tr> # <tr><td>sitkComplexFloat32</td><td>complex number of 32 bit float</td></tr> # <tr><td>sitkComplexFloat64</td><td>complex number of 64 bit float</td></tr> # <tr><td>sitkVectorUInt8</td><td>Multi-component of unsigned 8 bit integer</td></tr> # <tr><td>sitkVectorInt8</td><td>Multi-component of signed 8 bit integer</td></tr> # <tr><td>sitkVectorUInt16</td><td>Multi-component of unsigned 16 bit integer</td></tr> # <tr><td>sitkVectorInt16</td><td>Multi-component of signed 16 bit integer</td></tr> # <tr><td>sitkVectorUInt32</td><td>Multi-component of unsigned 32 bit integer</td></tr> # <tr><td>sitkVectorInt32</td><td>Multi-component of signed 32 bit integer</td></tr> # <tr><td>sitkVectorUInt64</td><td>Multi-component of unsigned 64 bit integer</td></tr> # <tr><td>sitkVectorInt64</td><td>Multi-component of signed 64 bit integer</td></tr> # <tr><td>sitkVectorFloat32</td><td>Multi-component of 32 bit float</td></tr> # <tr><td>sitkVectorFloat64</td><td>Multi-component of 64 bit float</td></tr> # <tr><td>sitkLabelUInt8</td><td>RLE label of unsigned 8 bit integers</td></tr> # <tr><td>sitkLabelUInt16</td><td>RLE label of unsigned 16 bit integers</td></tr> # <tr><td>sitkLabelUInt32</td><td>RLE label of unsigned 32 bit integers</td></tr> # <tr><td>sitkLabelUInt64</td><td>RLE label of unsigned 64 bit integers</td></tr> # </table> # # There is also `sitkUnknown`, which is used for undefined or erroneous pixel ID's. It has a value of -1. # # The 64-bit integer types are not available on all distributions. When not available the value is `sitkUnknown`. # # ### More Information about the Image class be obtained in the Docstring # # # SimpleITK classes and functions have the Docstrings derived from the C++ definitions and the Doxygen documentation. 
help(image)

# ## Accessing Attributes
#
# If you are familiar with ITK, then these methods will follow your expectations:

print(image.GetSize())
print(image.GetOrigin())
print(image.GetSpacing())
print(image.GetDirection())
print(image.GetNumberOfComponentsPerPixel())

# Note: The starting index of a SimpleITK Image is always 0. If the output of an ITK filter has a non-zero starting index, then the index will be set to 0, and the origin adjusted accordingly.
#
# The sizes of the image's dimensions have explicit accessors:

print(image.GetWidth())
print(image.GetHeight())
print(image.GetDepth())

# Since the dimension and pixel type of a SimpleITK image are determined at run-time, accessors are needed.

print(image.GetDimension())
print(image.GetPixelIDValue())
print(image.GetPixelIDTypeAsString())

# What is the depth of a 2D image?

print(image_2D.GetSize())
print(image_2D.GetDepth())

# What is the dimension and size of a Vector image?

print(image_RGB.GetDimension())
print(image_RGB.GetSize())
print(image_RGB.GetNumberOfComponentsPerPixel())

# For certain file types such as DICOM, additional information about the image is contained in the meta-data dictionary.

for key in image.GetMetaDataKeys():
    print("\"{0}\":\"{1}\"".format(key, image.GetMetaData(key)))

# ## Accessing Pixels
#
# There are the member functions ``GetPixel`` and ``SetPixel`` which provide an ITK-like interface for pixel access.

help(image.GetPixel)

print(image.GetPixel(0, 0, 0))
image.SetPixel(0, 0, 0, 1)
print(image.GetPixel(0, 0, 0))

print(image[0,0,0])
image[0,0,0] = 10
print(image[0,0,0])

# ## Conversion between numpy and SimpleITK

nda = sitk.GetArrayFromImage(image)
print(nda)

help(sitk.GetArrayFromImage)

nda = sitk.GetArrayFromImage(image_RGB)
img = sitk.GetImageFromArray(nda)
img.GetSize()

help(sitk.GetImageFromArray)

img = sitk.GetImageFromArray(nda, isVector=True)
print(img)

# ## The order of index and dimensions needs careful attention during conversion
#
# ITK's Image class does not have a bracket operator. It has a GetPixel method which takes an ITK Index object as an argument, which is an array ordered as ``(x,y,z)``. This is the convention that SimpleITK's Image class uses for the GetPixel method as well.
#
# In numpy, however, an array is indexed in the opposite order ``(z,y,x)``.

print(img.GetSize())
print(nda.shape)
print(nda.shape[::-1])

# ### Are we still dealing with an Image? Because I haven't seen one yet...
#
# While SimpleITK does not do visualization, it does contain a built-in ``Show`` method. This function writes the image out to disk and then launches a program for visualization. By default it is configured to use ImageJ, because it readily supports all the image types which SimpleITK has and loads them very quickly. However, it's easily customizable by setting environment variables.

sitk.Show(image)

# +
# sitk.Show?
# -

# By converting into a numpy array, matplotlib can be used for visualization, integrating into the scientific Python environment.

# %matplotlib inline
import matplotlib.pyplot as plt

z = 0
slice = sitk.GetArrayFromImage(image)[z,:,:]
plt.imshow(slice)
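# A small optional refinement of the matplotlib cell above (a sketch, nothing SimpleITK-specific):
# the extracted slice is a single-channel array, so an explicit grayscale colormap avoids
# matplotlib's default false-color rendering.
plt.imshow(slice, cmap='gray')
plt.colorbar()
plt.title('Slice z = {0}'.format(z))
plt.show()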
01_Image_Basics.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # --- # <style>div.container { width: 100% }</style> # <img style="float:left; vertical-align:text-bottom;" height="65" width="172" src="assets/PyViz_logo_wm_line.png" /> # <div style="float:right; vertical-align:text-bottom;"><h2>Tutorial 09. Operations and Pipelines</h2></div> # When interactively exploring a dataset you often end up interleaving visualization and analysis code. In HoloViews your visualization and your data are one and the same, so analysis and data transformations can be applied directly to the visualizable data. For that purpose HoloViews provides operations, which can be used to implement any analysis or data transformation you might want to do. Operations take a HoloViews Element and return another Element of either the same type or a new type, depending on the operation. We'll illustrate operations and pipelines using a variety of libraries: # # <div style="margin: 10px"> # <a href="http://holoviews.org"><img style="margin:8px; display:inline; object-fit:scale-down; max-height:150px" src="./assets/holoviews.png"/></a> # <a href="http://bokeh.pydata.org"><img style="margin:8px; display:inline; object-fit:scale-down; max-height:150px" src="./assets/bokeh.png"/></a> # <a href="http://datashader.org"><img style="margin:8px; display:inline; object-fit:scale-down; max-height:150px" src="./assets/datashader.png"/></a> # <a href="http://ioam.github.io/param"><img style="margin:8px; display:inline; object-fit:scale-down; max-height:150px" src="./assets/param.png"/></a><br><br> # <a href="http://pandas.pydata.org"><img style="margin:8px; display:inline; object-fit:scale-down; max-height:140px" src="./assets/pandas.png"/></a> # <a href="http://matplotlib.org"><img style="margin:8px; display:inline; object-fit:scale-down; max-height:150px" src="./assets/matplotlib_wm.png"/></a> # <a href="http://numpy.org"><img style="margin:8px; display:inline; object-fit:scale-down; max-height:150px" src="./assets/numpy.png"/></a> # </div> # # Since Operations know about HoloViews you can apply them to large collections of data collected in HoloMap and DynamicMap containers. Since operations work on both of these containers that means they can also be applied lazily. This feature allows us to chain multiple operations in a data analysis, processing, and visualization pipeline, e.g. to drive the operation of a dashboard. # # Pipelines built using DynamicMap and HoloViews operations are also useful for caching intermediate results and just-in-time computations, because they lazily (re)compute just the part of the pipeline that has changed. # + import time import param import numpy as np import holoviews as hv from holoviews.operation.timeseries import rolling, rolling_outlier_std from holoviews.operation.datashader import datashade, dynspread hv.extension('bokeh') # - # # Declare some data # In this example we'll work with a timeseries that stands in for stock-price data. 
We'll define a small function to generate a random, noisy timeseries, then define a ``DynamicMap`` that will generate a timeseries for each stock symbol: # + def time_series(T=1, N=100, mu=0.1, sigma=0.1, S0=20): """Parameterized noisy time series""" dt = float(T)/N t = np.linspace(0, T, N) W = np.random.standard_normal(size = N) W = np.cumsum(W)*np.sqrt(dt) # standard brownian motion X = (mu-0.5*sigma**2)*t + sigma*W S = S0*np.exp(X) # geometric brownian motion return S def load_symbol(symbol, **kwargs): return hv.Curve(time_series(N=10000), kdims=[('time', 'Time')], vdims=[('adj_close', 'Adjusted Close')]) stock_symbols = ['AAPL', 'FB', 'IBM', 'GOOG', 'MSFT'] dmap = hv.DynamicMap(load_symbol, kdims=['Symbol']).redim.values(Symbol=stock_symbols) # - # We will start by visualizing this data as-is: # %opts Curve [width=600] {+framewise} dmap # ## Applying an operation # Now let's start applying some operations to this data. HoloViews ships with two ready-to-use timeseries operations: the ``rolling`` operation, which applies a function over a rolling window, and a ``rolling_outlier_std`` operation that computes outlier points in a timeseries. Specifically, ``rolling_outlier_std`` excludes points less than one sigma (standard deviation) away from the rolling mean, which is just one example; you can trivially write your own operations that do whatever you like. # %opts Scatter (color='indianred') smoothed = rolling(dmap, rolling_window=30) outliers = rolling_outlier_std(dmap, rolling_window=30) smoothed * outliers # As you can see, the operations transform the ``Curve`` element into a smoothed version and a set of ``Scatter`` points containing the outliers both with a ``rolling_window`` of 30. Since we applied the operation to a ``DynamicMap``, the operation is lazy and only computes the result when it is requested. # + # Exercise: Apply the rolling and rolling_outlier_std operations changing the rolling_window and sigma parameters # - # ## Linking operations to streams # Instead of supplying the parameter values for each operation explicitly as a scalar value, we can also define a ``Stream`` that will let us update our visualization dynamically. By supplying a ``Stream`` with a ``rolling_window`` parameter to both operations, we can now generate our own events on the stream and watch our visualization update each time. # + rolling_stream = hv.streams.Stream.define('rolling', rolling_window=5) stream = rolling_stream() rolled_dmap = rolling(dmap, streams=[stream]) outlier_dmap = rolling_outlier_std(dmap, streams=[stream]) rolled_dmap * outlier_dmap # - for i in range(20, 200, 20): time.sleep(0.2) stream.event(rolling_window=i) # + # Exercise: Create a stream to control the sigma value and add it to the outlier operation, # then vary the sigma value and observe the effect # - # ## Defining operations # # Defining custom Operations is also very straightforward. For instance, let's define an ``Operation`` to compute the residual between two overlaid ``Curve`` Elements. All we need to do is subclass from the ``Operation`` baseclass and define a ``_process`` method, which takes the ``Element`` or ``Overlay`` as input and returns a new ``Element``. The residual operation can then be used to subtract the y-values of the second Curve from those of the first Curve. # + from holoviews.operation import Operation class residual(Operation): """ Subtracts two curves from one another. 
""" label = param.String(default='Residual', doc=""" Defines the label of the returned Element.""") def _process(self, element, key=None): # Get first and second Element in overlay el1, el2 = element.get(0), element.get(1) # Get x-values and y-values of curves xvals = el1.dimension_values(0) yvals1 = el1.dimension_values(1) yvals2 = el2.dimension_values(1) # Return new Element with subtracted y-values # and new label return el1.clone((xvals, yvals1-yvals2), vdims=[self.p.label]) # - # To see what that looks like in action let's try it out by comparing the smoothed and original Curve. residual_dmap = residual(rolled_dmap * dmap) residual_dmap # Since the stream we created is linked to one of the inputs of ``residual_dmap``, changing the stream values triggers updates both in the plot above and in our new residual plot. for i in range(20, 200, 20): time.sleep(0.2) stream.event(rolling_window=i) # ## Chaining operations # # Of course, since operations simply transform an Element in some way, operations can easily be chained. As a simple example, we will take the ``rolled_dmap`` and apply the ``datashading`` and ``dynspread`` operation to it to construct a datashaded version of the plot. As you'll be able to see, this concise specification defines a complex analysis pipeline that gets reapplied whenever you change the Symbol or interact with the plot -- whenever the data needs to be updated. # %%opts RGB [width=600 height=400] {+framewise} overlay = dynspread(datashade(rolled_dmap)) * outlier_dmap (overlay + residual_dmap).cols(1) # ## Visualizing the pipeline # To understand what is going on we will write a small utility that traverses the output we just displayed above and visualizes each processing step leading up to it. # + # %%opts RGB Curve [width=250 height=200] def traverse(obj, key, items=None): items = [] if items is None else items for inp in obj.callback.inputs[:1]: label = inp.callback.operation.name if isinstance(inp.callback, hv.core.OperationCallable) else 'price' if inp.last: items.append(inp[key].relabel(label)) if isinstance(inp, hv.DynamicMap): traverse(inp, key, items) return list(hv.core.util.unique_iterator(items))[:-1] hv.Layout(traverse(overlay, 'AAPL')).cols(4) # - # Reading from right to left, the original price timeseries is first smoothed with a rolling window, then datashaded, then each pixel is spread to cover a larger area. As you can see, arbitrarily many standard or custom operations can be defined to capture even very complex workflows so that they can be replayed dynamically as needed interactively.
notebooks/09_Operations_and_Pipelines.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Mapboxgl Python Library for location data visualization
#
# https://github.com/mapbox/mapboxgl-jupyter
#
# ### Requirements
#
# These examples require the installation of the following Python modules
#
# ```
# pip install mapboxgl
# pip install pandas
# ```

import pandas as pd
import os
from mapboxgl.utils import *
from mapboxgl.viz import *

# Load data from sample csv
data_url = 'https://raw.githubusercontent.com/mapbox/mapboxgl-jupyter/master/examples/points.csv'
df = pd.read_csv(data_url).round(3)
df.head(5)

# ## Set your Mapbox access token.
# Set a `MAPBOX_ACCESS_TOKEN` environment variable or copy/paste your token (see the short environment-variable sketch further below).
# If you do not have a Mapbox access token, sign up for an account at https://www.mapbox.com/
# If you already have an account, you can grab your token at https://www.mapbox.com/account/
# Must be a public token, starting with `pk`

token = '<KEY>'
print(token)

# ## Create a visualization from a Pandas dataframe

# Create a geojson file export from the current dataframe
test = df_to_geojson(df,
                     filename='points1.geojson',
                     properties=['Avg Medicare Payments', 'Avg Covered Charges', 'date'],
                     lat='lat',
                     lon='lon',
                     precision=3)

# +
# Generate data breaks using numpy quantiles and color stops from colorBrewer
measure = 'Avg Medicare Payments'
#color_breaks = [round(df[measure].quantile(q=x*0.1), 2) for x in range(1,9)]
#color_stops = create_color_stops(color_breaks, colors='YlGnBu')
color_breaks = [0,10,100,1000,10000]
color_stops = create_color_stops(color_breaks, colors='YlGnBu')

# Create the viz from the dataframe
viz = CircleViz(test,
                access_token=token,
                height='300px',
                center = (-95, 40),
                zoom = 3,
                )
# -

# ## Add labels to the viz

# +
# CircleViz??
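# The access-token section above mentions a MAPBOX_ACCESS_TOKEN environment variable as an
# alternative to pasting the token; a minimal sketch of that route (assuming the variable was
# exported in the shell before starting Jupyter) which could replace the hard-coded assignment
# in the token cell:
token = os.environ.get('MAPBOX_ACCESS_TOKEN', token)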
# -

viz.label_property = "Avg Medicare Payments"
viz.show()

# ## Change viz data property and color scale

# +
# Generate new data domain breaks and a new color palette from colorBrewer2
measure = 'Avg Covered Charges'
color_breaks = [round(df[measure].quantile(q=x*0.1), 1) for x in range(1,9)]
color_stops = create_color_stops(color_breaks, colors='YlOrRd')

# Show the viz
viz.color_property='Avg Covered Charges'
viz.color_stops=color_stops
viz.show()
# -

# ### Change the viz map style

viz.style_url='mapbox://styles/mapbox/dark-v9?optimize=true'
viz.show()

# ## Create a graduated circle viz based on two data properties

# +
# Generate data breaks and color stops from colorBrewer
measure_color = 'Avg Covered Charges'
color_breaks = [round(df[measure_color].quantile(q=x*0.1), 2) for x in range(1,9)]
color_stops = create_color_stops(color_breaks, colors='Spectral')

# Generate radius breaks from data domain and circle-radius range
measure_radius = 'Avg Medicare Payments'
radius_breaks = [round(df[measure_radius].quantile(q=x*0.1), 2) for x in range(1,9)]
radius_stops = create_radius_stops(radius_breaks, 0.5, 10)

# Create the viz
viz2 = GraduatedCircleViz('points1.geojson',
                          access_token=token,
                          color_property = "Avg Covered Charges",
                          color_stops = color_stops,
                          radius_property = "Avg Medicare Payments",
                          radius_stops = radius_stops,
                          center = (-95, 40),
                          zoom = 3,
                          opacity=0.75,
                          below_layer = 'waterway-label')
viz2.show()
# -

# ## Create a heatmap viz

# +
# Create a heatmap
measure = 'Avg Medicare Payments'
heatmap_color_stops = create_color_stops([0.01,0.25,0.5,0.75,1], colors='RdPu')
heatmap_radius_stops = [[0,1], [15, 40]]  # increase radius with zoom
color_breaks = [round(df[measure].quantile(q=x*0.1), 2) for x in range(1,9)]
color_stops = create_color_stops(color_breaks, colors='Spectral')
heatmap_weight_stops = create_weight_stops(color_breaks)

viz3 = HeatmapViz('points1.geojson',
                  access_token=token,
                  weight_property = "Avg Medicare Payments",
                  weight_stops = heatmap_weight_stops,
                  color_stops = heatmap_color_stops,
                  radius_stops = heatmap_radius_stops,
                  opacity = 0.9,
                  center = (-95, 40),
                  zoom = 3,
                  below_layer='waterway-label'
                  )
viz3.show()
# -

# ## Create a clustered circle map

# +
# Create a clustered circle map
color_stops = create_color_stops([1,10,50,100], colors='BrBG')

viz4 = ClusteredCircleViz('points1.geojson',
                          access_token=token,
                          color_stops = color_stops,
                          radius_stops = [[1,5], [10, 10], [50, 15], [100, 20]],
                          cluster_maxzoom = 10,
                          cluster_radius = 30,
                          opacity = 0.9,
                          center = (-95, 40),
                          zoom = 3
                          )
viz4.show()
# -

# # Save our viz to an HTML file for distribution
# ### Note
# The viz export contains a reference to the data in this visualization. Serve the data from the same directory as the HTML file to view your visualization.

with open('viz4.html', 'w') as f:
    f.write(viz4.create_html())

# ### Run exported HTML example
#
# Python 2: `python -m SimpleHTTPServer 8080`
#
# Python 3: `python3 -m http.server 8080`
#
# Now navigate your browser to `http://localhost:8080/viz4.html` to see the viz
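# A rough in-notebook alternative to the shell commands above, assuming port 8080 is free and
# that viz4.html sits in the notebook's working directory (it does, given the export cell above):
import http.server
import socketserver
import threading

handler = http.server.SimpleHTTPRequestHandler
httpd = socketserver.TCPServer(('', 8080), handler)
threading.Thread(target=httpd.serve_forever, daemon=True).start()
print('Serving http://localhost:8080/viz4.html')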
notebooks/point-viz-types-example.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### Fashion classification model # + # Version 1 # # Original lesson: # https://www.tensorflow.org/tutorials/keras/classification # + # Imports import tensorflow as tf from tensorflow import keras print(f"tf_version = {tf.__version__}") import numpy as np import matplotlib.pyplot as plt # + # Prepare data fashion_mnist = keras.datasets.fashion_mnist (train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data() print(train_images.shape) class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot'] # - # show sample plt.figure() plt.imshow(train_images[0]) plt.colorbar() plt.grid(False) plt.show() # normalize train_images = train_images / 255.0 test_images = test_images / 255.0 # + # Visual test for test_images plt.figure(figsize=(20, 20)) for i in range(25): plt.subplot(5,5,i+1) plt.xticks([]) plt.yticks([]) plt.imshow(train_images[i], cmap=plt.cm.binary) plt.xlabel(class_names[train_labels[i]], size=25) plt.grid(False) plt.show() # + model = keras.Sequential([ keras.layers.Flatten(input_shape=(28, 28)), keras.layers.Dense(100, activation='relu'), keras.layers.Dense(10, activation='softmax') ]) model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy']) # - # training & test model.fit(train_images, train_labels, epochs=10) test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=1) predictions = model.predict(test_images) def plot_image(i, predictions_array, true_label, img): predictions_array, true_label, img = predictions_array[i], true_label[i], img[i] plt.grid(False) plt.xticks([]) plt.yticks([]) plt.imshow(img, cmap=plt.cm.binary) predicted_label = np.argmax(predictions_array) if predicted_label == true_label: color = 'blue' else: color = 'red' plt.xlabel("{} {:2.0f}% ({})".format(class_names[predicted_label], 100*np.max(predictions_array), class_names[true_label]), color=color) def plot_value_array(i, predictions_array, true_label): predictions_array, true_label = predictions_array[i], true_label[i] plt.grid(False) plt.xticks([]) plt.yticks([]) thisplot = plt.bar(range(10), predictions_array, color="#777777") plt.ylim([0, 1]) predicted_label = np.argmax(predictions_array) thisplot[predicted_label].set_color('red') thisplot[true_label].set_color('blue') i = 12 plt.figure(figsize=(6,3)) plt.subplot(1,2,1) plot_image(i, predictions, test_labels, test_images) plt.subplot(1,2,2) plot_value_array(i, predictions, test_labels) plt.show() num_rows = 20 num_cols = 3 num_images = num_rows*num_cols plt.figure(figsize=(2*2*num_cols, 2*num_rows)) for i in range(num_images): plt.subplot(num_rows, 2*num_cols, 2*i+1) plot_image(i, predictions, test_labels, test_images) plt.subplot(num_rows, 2*num_cols, 2*i+2) plot_value_array(i, predictions, test_labels) plt.show() # + img = test_images[0] print(img.shape) img = (np.expand_dims(img,0)) print(img.shape) predictions_single = model.predict(img) print(predictions_single.argmax()) plot_value_array(0, predictions_single, test_labels) _ = plt.xticks(range(10), class_names, rotation=90)
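# A short wrap-up sketch: translate the single prediction above back into a class name and
# compare it with the ground-truth label (all names used here are defined earlier in this notebook).
predicted_class = class_names[int(np.argmax(predictions_single[0]))]
actual_class = class_names[test_labels[0]]
print("predicted = {}, actual = {}".format(predicted_class, actual_class))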
fashion.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <figure> # <IMG SRC="https://upload.wikimedia.org/wikipedia/commons/thumb/d/d5/Fachhochschule_Südwestfalen_20xx_logo.svg/320px-Fachhochschule_Südwestfalen_20xx_logo.svg.png" WIDTH=250 ALIGN="right"> # </figure> # # # Skriptsprachen # ### Sommersemester 2021 # Prof. Dr. <NAME> # + [markdown] slideshow={"slide_type": "-"} # # Beispiele für Pakete aus der Standardbibliothek (und Drittanbieter-Pakete) # In diesem Arbeitsblatt geht es um weiterführende Themen rund um die Programmiersprache Python. Zu der enormen Popularität von Python hat nicht zuletzt die Vielzahl an frei verfügbaren Paketen zu verschiedensten Anwendungsgebieten beigetragen. Durch den Einsatz bestehender Module lassen sich Entwicklungszeiten drastisch verkürzen. Zusammen mit der relativ leichten Erlernbarkeit, hat dies Python den Ruf eingebracht, eine sehr produktive Programmiersprache zu sein. # # In den folgenden Abschnitten werden die Themen Web-Zugriffe, Server-seitige Programmierung, Wissenschaftliches Rechnen und GUI-Programmierung kurz angerissen. Die Behandlung der Themen ist weder vollständig noch repräsentativ. Es soll lediglich darum gehen, Einblicke in verschiedene Aspekte zu vermitteln und Sie motivieren, im Selbststudium etwas tiefer in die vorgestellten, oder auch weitere Themen rund um Python einzusteigen. # - # ## 1. Installieren von Drittanbieter-Paketen # # Um Pakete von Drittanbietern zu installieren, sollten Sie in jedem Fall einen Paketmanager benutzen. Der am meisten verwendete Paketmanager für Python ist [pip](https://pip.pypa.io/en/stable/). Mit diesem Tool können Sie auf die Pakete zugreifen, die im _Python Package Index_ ([PyPI](https://pypi.org)) enthalten sind. Ab Version 3.4 ist pip bereits in der Python Standard-Distribution enthalten. # Um ein Paket zu installieren, führen Sie folgendes Kommando (auf der Kommandozeile) aus: # ```python # pip3 install <paketname> # ``` # So aufgerufen, versucht _pip_ das Paket _systemweit_ im Python-Installationspfad zu installieren. Falls Sie nicht als Administrator auf Ihrem Computer angemeldet sind, kann dies zu Problemen führen. Daher wird allgemein empfohlen, dass Nutzer zusätzliche Pakete innerhalb von _Virtuellen Umgebungen_ ([virtualenv](https://packaging.python.org/guides/installing-using-pip-and-virtualenv/)) zu installieren. # Eine Alternative dazu ist, neue Pakete in einem Dateipfad zu installieren, auf den der Benutzer vollen Zugriff hat. Im Python-Interpreter sind verschiedene Suchpfade bereits voreingestellt, in denen zur Laufzeit eines Programms nach verwendeten Paketen (siehe `import`-Anweisung) gesucht wird. Die entsprechenden Dateipfade sind natürlich vom dem verwendeten Betriebssystem abhängig und können z.B. wie folgt mit Python selbst gelistet werden: import sys for path in sys.path: print(path) # Um ein Paket zu installieren, können Sie prinzipiell `!pip install` aufrufen. # Dies hat aber einen Nachteil: Ihr Jupyter Notebook könnte eine Python Installation verwenden, die nicht die standard Python Installation auf Ihrem System ist. Der `pip`-Aufruf würde das Paket dann in diese Installation integrieren und Sie könnten es aus dem Notebook heraus nicht verwenden. # # Daher ist es besser, die Python-Version zu verwenden, die Ihr Notebook benutzt. 
# Unter welchem Programmpfad der Python-Interpreter abgelegt ist, den das Notebook verwendet, kann man ebenfalls über das Modul `sys` herausfinden. `sys.executable` lifert den kompletten Pfad zum Python-Kommando. # Dies ruft man über das `!` Zeichen in der *Shell* auf und verwendet die Option `-m` mit dem Modul `pip`. # Mit `-m` kann man ein Modul wie ein Skript ausführen. # Über das Kommando `install` kann dann ein Modul, z.B. `python-chess` installiert werden. import sys # !{sys.executable} -m pip install --user python-chess # Mit _pip_ können Sie bequem Python Pakete installieren und verwalten. _pip_ stößt aber an seine Grenzen, wenn man Abhängigkeiten installieren möchte, die nicht als Python Paket vorliegen. Beispiele dafür sind laufzeitoptimierte Bibliotheken für mathematisch/wissenschaftliche Zwecke, die oftmals in maschinennahen Code vorliegen (z.B. in C entwickelt und für eine entsprechende _Hardware Architektur_ kompiliert). # # Hier schaffen Python Distributionen Abhilfe, die auch diese Abhängigkeiten enthalten und bei Bedarf mitinstallieren. # [Anaconda](https://www.anaconda.com/distribution/) ist ein Beispiel für eine solche Distribution. Der Paketmanager in Anaconda heißt _conda_ und lässt sich ähnlich wie _pip_ bedienen. # ## 2. Bibliotheken einsetzen # Dank Pythons großer Entwickler-Community existiert eine riesige Menge an nützlichen Modulen, nicht nur in der umfangreichen Standardbibliothek sinder vor allem in den öffentlichen Repositories wie dem *Python Package Index*. # # Wenn Sie ein Drittanbieter-Paket wie *python-chess* verwenden, ist die zugehöre Dokumentation in der Regel über das Web verfügbar. import webbrowser url = 'https://python-chess.readthedocs.io/en/latest/' webbrowser.open(url) # **Achtung:** Wenn an dieser Stelle False erscheint, ist das ein Zeichen dafür, dass Sie die Funktion servereseitig nicht ausführen können. Laden Sie sich dieses Notebook herunter, und verwenden Sie Ihre lokale Python Installation. # Wenn Sie ein passendes Paket gefunden haben, können Sie dies häufig mit recht wenigen Schritten für Ihre Anwendung verwenden. # # Im Folgenden Beispiel wollen wir ein Simples Schachprogramm schreiben. Es soll einen menschlichen und einen Computer-Spieler geben. # Das Modul *python-chess* liefert fast alles, was man dazu benötigt. import chess board = chess.Board() board move = chess.Move.from_uci('e2e4') board.push(move) board # Schwieriger ist es an dieser Stelle, die Ein- und Ausgaben für das Modul entsprechend einzustellen. # Die Züge sollen schließlich nicht über den Quelltext des Programms eingegeben werden. # Außerdem ist es unübersichtlich, wenn jede neue Spielsituation in einer neuen, zusätzlichen Ausgabe erscheinen. # # Die Eingabe des Zuges können wir über die Python Eingabe-Funktion `input()` erledigen, die vom Benutzer eine Tastatureingabe abfragt. text = input() print("Sie haben " + text + " eingegeben!") # Um die Ausagben in einem Notebook schöner darzustellen kann man einige Funktionen aus dem Modul `IPython.display` verwenden: # # - `IPython.display.SVG` Stellt ein svg-Bild dar # - `clear_output` löscht die Ausgabe einer Code-Zelle import webbrowser url = 'https://ipython.readthedocs.io/en/stable/api/generated/IPython.display.html' webbrowser.open(url) # Um Züge darzustellen, kann man dann folgendes Muster verwenden. 
# + import chess, time from IPython.display import SVG, clear_output sizilianisch = ['e2e4','c7c5','g1f3','d7d6','d2d4','c5d4'] board = chess.Board() display(SVG(board._repr_svg_())) for zug in sizilianisch: time.sleep(1) board.push(chess.Move.from_uci(zug)) clear_output(wait=True) display(SVG(board._repr_svg_())) # - # Um nun ein sipmples Schachprogramm zu schreiben, ist die Funktion `list(board.legal_moves)` hilfreich. # Damit kann man eine Liste der aktuell auf dem Speilbrett gültigemn Züge berechen lassen. # # Nutzen Sie die beschribenen (und weitere) Funktionen, um ein Schachprogramm zu schreiben. # Es ist Ihnen überlassen, wie *schlau* Ihr Computerprogramm spielt und welchen Funktionsumfang es hat. # + deletable=false nbgrader={"cell_type": "code", "checksum": "1ddbda05f33f5d65cc09bf77ff9ad24b", "grade": true, "grade_id": "cell-aa291117e58eef45", "locked": false, "points": 0, "schema_version": 3, "solution": true, "task": false} import chess import time import random from IPython.display import SVG, clear_output board = chess.Board() # YOUR CODE HERE raise NotImplementedError() # - # ## 3. Web-Zugriffe mit der *Requests* Bibliothek # **Hinweis: Das Lehrbuch behandelt ausschließlich die *urllib* und zwar auf den Seiten 643-651.** # # [Requests](http://docs.python-requests.org/de/latest/user/quickstart.html) ist eine frei verfügbare Python Bibliothek zum Durchführen von HTTP-Anfragen. Im Vergleich zu der in der Standardbibliothek enthaltenen _urllib_ führt der Einsatz der _requests_ Bibliothek meistens zu etwas kürzerem und übersichtlicherem Code. # # Vom Funktionsumfang sind beide Bibliotheken sehr ähnlich. Sie erlauben den (lesenden und schreibenden) Zugriff auf URL-Ressourcen über verschiedene Methoden. # # Mittels `r = requests.get(<URL>)` kann eine Internetseite gelesen werden. Auf den Inhalt der Seite kann dann z.B. mit `r.text` (Inhalt als Schriftzeichen) oder `r.content` (Inhalt als Rohdaten/Bytes) zugegriffen werden. # Im folgenden Beispiel wird der aktuelle Börsenkurs einiger US-Unternehmen vom [Yahoo! Finance Portal](finance.yahoo.com) ausgelesen. # Die Börsen-Kürzel von *Apple*, *Facebook*, *Google*, *Netflix* und *Microsoft* stehen im Dictionary `stocks`. # Mit der `get`-Methode laden wir die entsprechende Seite unter Angabe des Query -Parameters `s` herunter. # # Den Inhalt (ASCII-Text) der Seite erhalten wor über das Attribut `page.text`. # Darauf wir ein [Regulärer Ausdruck](https://docs.python.org/3/library/re.html) angewendet, der im html-Code nach dem ersten Auftreten eines HTML-Tags `<span class="Trsdu` sucht. # Wir überspringen die nächsten Zeichen, bis zum abschließenden `>` des Tags. # Die nächsten Zeichen (bis ausschließlich dem abschließenden `</span>` markieren wir mit dem Regulären Ausdruck `(.*?)` als Gruppe. # # Mit `re.search(pattern,page.text)` finden wir das erste Auftreten des Musters im HTML-Text. # Aus der Fundstelle wird dann die erste Gruppe mit `group(1)` extrahiert und ausgegeben. # + import requests import re stocks = {'Apple': 'aapl', 'Facebook':'fb', 'Google':'goog', 'Netflix':'nflx', 'Microsoft':'msft'} for comp in stocks: r = requests.get("https://stocktwits.com/symbol/{}".format(stocks[comp])) regex = '<span class="st_3zYaKAL">(.*?)</span>' pattern = re.compile(regex) match = re.search(pattern,r.text) print("Der Kurs von {} ist ${}".format(comp, match.group(1))) # - # Das gleiche kann man auch über die _urllib_ aus der Standardbibliothek erreichen, allerdings ist der Code etwas komplexer. Hier sind z.B. 
einige zusätzliche Aufrufe nötig um den Zugriff auf eine verschlüsselte _https_ Seite zu ermöglichen. # + import urllib import ssl import re stocks = {'Apple': 'aapl', 'Facebook':'fb', 'Google':'goog', 'Netflix':'nflx', 'Microsoft':'msft'} for comp in stocks: url = "https://stocktwits.com/symbol/{}".format(stocks[comp]) ssl_handler = urllib.request.HTTPSHandler(context=ssl.SSLContext()) opener = urllib.request.build_opener(ssl_handler) urllib.request.install_opener(opener) resp = opener.open(url) page = resp.read().decode('utf-8') regex = '<span class="st_3zYaKAL">(.*?)</span>' pattern = re.compile(regex) match = re.search(pattern,page) print("Der Kurs von {} ist ${}".format(comp, match.group(1))) # - # Das Parsen von Web-Inhalten allein mit regulären Ausdrücken ist recht umständlich. # Hierfür gibt es besserer Bibliotheken, die die Verarbeitung von HTML oder XML Texten deutlich vereinfachen. # Die am verbreitetsten Bibliothek für diese Zwecke ist Beautiful Soup 4 (oder kurz *bs4*). # + import requests from bs4 import BeautifulSoup as bs stocks = {'Apple': 'aapl', 'Facebook':'fb', 'Google':'goog', 'Netflix':'nflx', 'Microsoft':'msft'} for comp in stocks: r = requests.get("https://finance.yahoo.com/q?s={}".format(stocks[comp])) soup = bs(r.content, 'lxml') stock = soup.select_one('.Mb\(-4px\)').text print("Der Kurs von {} ist ${}".format(comp, stock)) # - # Man kann die _requests_ Bibliothek auch verwenden um Binärdaten, wie z.B. Bilder, aus dem Internet zu laden. Im folgenden Beispiel laden wir eine _png_ Datei von _wikimedia.org_ und speichern sie lokal ab. # + import requests url = "https://upload.wikimedia.org/wikipedia/commons/thumb/8/87/Karl_Marx.png/478px-Karl_Marx.png" r = requests.get(url) if r.status_code == 200: try: f = open("image.png", 'wb') f.write(r.content) except: print("Irgendetwas ist schief gegangen!") from IPython.display import Image Image(filename = "image.png") # - # ### 4. Webserver-Entwicklung mit _Flask_ # **Hinweis: Das Lehrbuch verwendet _Django_ (Kapitel 40, Seiten 889-938) als Beispiel für die Web-Anwendungsentwicklung mit Python. Flask wird im Lehrbuch nicht behandelt.** # # Mit den Paketen _request_ oder _urllib_ können Sie Zugriffe auf Inhalte durchführen, die auf Web-Servern verfügbar sind. Sie können aber auch mit sehr einfachen Mitteln eigene Web-Anwendungen mit Python implementieren. Ein Paket dazu, dass sich in den letzten Jahren wachsender Beliebtheit erfreut, ist [_Flask_](http://flask.pocoo.org). # # Flask bezeichnet sich als Micro-Framework für Web-Anwendungen. Das Paket enthält einen elementaren Web-Server, der vor allem bei der Entwicklung der Web-Anwendungen benutzt wird. Im Produktiveinsatz können Flask-Anwendungen über nahezu beliebige Webserver bereitgestellt werden. # # Flask und seine Abhängigkeiten (z.B. die [_Template Engine_](https://de.wikipedia.org/wiki/Template-Engine) _Jinja2_ und die [_WSGI_](https://de.wikipedia.org/wiki/Web_Server_Gateway_Interface) Bibliothek _Werkzeug_) können bequem über den Paketmanager _pip_ installiert werden. (Hinweis: Die aktuelle flask Version scheint ein Problem mit Jupyter zu haben. [Hier](https://github.com/plotly/dash/issues/257) ist ein work-around beschrieben um das "Not writable"-Problem zu umgehen.) import sys # !{sys.executable} -m pip install --user python-chess flask # Mit dem Dekorator `route` kann eine Funktion an eine bestimmten URL-Pfad gebunden werden. Wird diese URL zugegriffen, so wird die entsprechende Funktion serverseitig ausgeführt. 
Der URL-Pfad kann auch Variablen enthalten, diese sind mit spitzen Klammern markiert (z.B. `<name>`). # + import webbrowser from flask import Flask app = Flask(__name__) @app.route('/') def hello(): return "Hello World!" @app.route('/<name>') def hello_name(name): return "Hallo %s!" % name webbrowser.open("http://127.0.0.1:5000/Heiner") # Der folgende Aufruf blockiert und muss über # Kernel->Interrupt gestoppt werden: app.run() # - # **Aufgabe (funktioniert nur mit lokaler Python Installation auf Ihrem PC/Laptop): Erweitern Sie die obige Flask-Applikation um eine Methode `umdrehen` die ein Wort "umgedreht" ausgibt. Die Methode soll über die URL `/umdrehen/<wort>` erreichbar sein.** # + deletable=false nbgrader={"cell_type": "code", "checksum": "163ba699bd0e3ac68281573b45ba33ff", "grade": true, "grade_id": "cell-bb64874728a4792a", "locked": false, "points": 0, "schema_version": 3, "solution": true, "task": false} from flask import Flask app = Flask(__name__) @app.route('/') def hello(): return "Hello World!" @app.route('/<name>') def hello_name(name): return "Hallo %s!" % name # YOUR CODE HERE raise NotImplementedError() app.run() # - # ### 5. Wissenschaftliches Rechnen mit Numpy, Scipy und Matplotlib # **Hinweis: Dieses Thema wird im Lehrbuch auf den Seiten 939-959 behandelt.** # # Python hat sich in den letzten Jahren als Standard-Programmiersprache in Bereichen des Wissenschaftlichen Rechnens und der Datenanalysen etabliert. Dies ist auch schon anhand der Vielzahl von Buchveröffentlichungen zu dem Thema zu erkennen. # Auf den ersten Blick erscheint der Einsatz von Python in diesem Bereich etwas unerwartet, denn ingenieursmäßige oder naturwissenschaftliche Anwendungen erfordern oft eine hohe Rechenleistung. Python, als interpretierte Programmiersprache ist in Punkto Performanz kompilierten Sprachen (wie etwa C/C++) normalerweise unterlegen. # Mehrere Aspekte sprechen allerdings für den Einsatz von Skriptsprachen im wissenschaftlichen Rechnen: # 1. Skriptsprachen erlauben häufig eine deutlich kompaktere und übersichtliche Programmstruktur. Bei Aufgaben, in denen es vor allem um eine korrekte und nachvollziehbare Implementierung eines algorithmischen Verfahrens geht, ist dies besonders wichtig. # 2. Der Umfang an (frei verfügbaren) Bibliotheken und Paketen für Python ist enorm, was Entwicklern die Arbeit ungemein erleichtert. Außerdem ist der Einsatz von Drittanbieter-Software sehr einfach. Pakete sind direkt auf allen Plattformen lauffähig und müssen nicht, wie in kompilierten Programmiersprachen, zunächst in Maschinencode übersetzt werden. # 3. Die laufzeitkritischen Elemente vieler Algorithmen lassen sich auf wenige *Standardroutinen* reduzieren. Für diese Routinen gibt es oft hoch-effiziente Implementationen, die sogar auf die speziellen Eigenschaften der vorliegen CPU optimiert werden. Sind solche Bibliotheken auf dem Computer verfügbar, so können sie von Python aus benutzt werden. Die rechenintensiven Teile eines Programms werden dann nicht mehr im Python Interpreter ausgeführt, sondern durch eine externe Bibliothek. Somit können die Performanz-Nachteile, die Python als interpretierte Sprache mitbringt, weitestgehend bereinigt werden. # # In der Vielzahl der verfügbaren Pakete für numerische Berechnungen mit Python gibt es einige Bibliotheken, die als quasi-Standard die Basis für viele Anwendungen und andere Pakete bilden: # # **NumPy** ist die elementare Python-Bibliothek für wissenschaftliches Rechnen. 
NumPy definiert Objekte für mehrdimensionale Arrays und Matrizen sowie mathematische Grundoperationen auf diesen Objekten. NumPy's "Datentypen" sind zwar eingeschränkter als die bekannten sequentiellen Typen in Python (*list*, *tuple*, etc.), dafür sind die Daten aber kompakter im Hauptspeicher abgelegt, so dass Operationen auf mehrdimensionalen Arrays effizienter durchgeführt werden können. Für Vektor- und Matrix-Operationen besitzt NumPy effiziente Implementierungen und benutzt, sofern auf dem Computer installiert, optimierte Bibliotheken für *Lineare Algebra* ([BLAS](https://de.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms) und [LAPACK](https://de.wikipedia.org/wiki/LAPACK)) # # **SciPy** ist eine Bibliothek von Mathematischen Algorithmen die größtenteils auf NumPy aufbauen. SciPy ist sehr umfangreich und enthält unter anderem Module zur numerischen Berechnung von Integralen, zum Lösen von Differentialgleichungen, zur Berechnung von Optimierungsproblemen, zur digitalen Signalverarbeitung und zur Datenvisualisierung. # # **Matplotlib** ist die Standard-Bibliothek zum Erstellen von (mathematischen) Diagrammen. Sie Syntax von matplotlib orientiert sich an den Diagramm-Funktionen von [Matlab](https://de.mathworks.com) was Entwicklern den Umstieg von dem kommerziellen Tool auf Python deutlich erleichtert. # ### NumPy # Im wissenschaftlichen Rechnen und in den datengetriebenen Wissenschaften sind Berechnungen mit Vektoren und Matrizen allgegenwärtig. # In NumPy werden diese mathematischen Datenstrukturen als n-dimensionale Arrays mit dem Datentyp `ndarray` abgebildet. Wenn Sie die NumPy-Bibliothek mittels `import numpy as np` eingebunden haben, können Sie ein NumPy Array mit der Funktion `np.array()` anlegen: import numpy as np x = np.array([1,2,3]) print(x, type(x)) # Es gibt auch den Datentyp `matrix` in NumPy. Dieser Typ ist von `ndarray` abgeleiteten. # Matrizen haben immer 2-dimensionale Struktur und Operatoren funktionieren etwas anders als bei "normalen" NumPy Arrays. # Um Missverständnisse zu vermeiden, werden wir im folgenden vornehmlich den Typ `ndarray` benutzen. # Ein `ndarray` kann aus Folgen von Zahlen gebildet werden. Dies sind üblicherweise Tupel oder Listen. Die Dokumentation zur Funktion `array` sagt, dass ein *Array-artiger* Parameter übergeben werden soll. Es ist also so, dass alle Objekte, *die NumPy zu einem Array konvertieren kann*, an dieser Stelle Funktionieren: a = np.array([1, 7, 1, 2]) b = np.array((1, 7, 1, 2)) print("a: %s" % a) print("b: %s" % b) # Auf einzelne Elemente von eindimensionalen Arrays greift man über einen "einfachen" Index in `[]`-Klammern zu. # Bei mehrdimensionalen Arrays werden die Zugriffe etwas komplizierter. b[2] # NumPy liefert auch einige Funktionen, um spezielle Arrays zu erzeugen. Über `arange` können z.B. Arrays über Zahlenfolgen gebildet werden: a = np.arange(8) a # Die Länge eines Arrays erhält man über das Attribut `size`: # a.size # Die Dimension wiederum, kann man mit dem Attribut `ndim` abfragen. Eindimensionalen Arrays haben die Dimension 1. Wir werden diese Arrays von nun an auch **Vektoren** nennen. Für zweidimensionale Arrays verwenden wir auch den Begriff **Matrix**. a.ndim # Als eine Art Kombination der Attribute `size` und `ndim` kann man `shape` verstehen. # Dieses Attribut liefert ein Tupel mit `ndim`-Elementen zurück, wobei das $i$-te Element die Größe der $i$-ten Dimension angibt. (Vielleicht fragen Sie sich, warum in dem Tupel `(8,)` das einzelne Komma steht? Das ist dazu da, die Schriftweise eindeutig zu halten. 
Ansonsten könnte man die Ausgabe mit einem `int` in Klammern verwechseln.) a.shape # Die Indizierung von NumPy Arrays beginnt immer bei der $0$. # Neben der Adressierung von konkreten Indizes gibt es noch weitere Zugriffsregeln: print(a[0]) # Das erste Element print(a[-1]) # Das letzte Element print(a[2:7]) # Die Elemente von Index 2 bis 7 (ausschließlich) print(a[2:7:2]) # Wie oben, nur mit einer Schrittweite von 2 print(a[::3]) # Alle Elemente mit einer Schrittweite von 3 # ### Mehrdimensionale Arrays # # Wie schon angesprochen, ist `ndarray` ein mehrdimensionaler Datentyp. Sie können also ohne Weiteres NumPy Arrays aus verschachtelten Listen oder Array erzeugen: a = np.arange(6) b = np.arange(6,12) c = np.arange(12,18) d = np.arange(18,24) A = np.array((a,b,c,d)) A # Dabei müssen aber immer alle niedrigeren Dimensionen voll besetzt sein, damit `np.array` ein "echtes" Array generieren kann: A = np.array([[ 0, 1, 2, 3, 4, 5], [ 6, 7, 8, 9, 10, 11], [12, 13, 14, 15, 16, 17], [18, 19, 20, 21, 22, 23]]) A # Passen die Größen der einzelnen Vektoren oder Matrizen nicht zusammen, so liefert die Funktion ein vermutlich ungewolltes Resultat. Im folgenden Beispiel, hat die 3. Zeile der Matrix nur 2 Elemente, und nicht 6 wie alle anderen. `np.array` legt daher ein eindimensionales Array mit Listen als Elemente an: B = np.array([[ 0, 1, 2, 3, 4, 5], [ 6, 7, 8, 9, 10, 11], [12, 13], [18, 19, 20, 21, 22, 23]]) B # Einzelne Elemente eines mehrdimensionalen `ndarrays` adressieren Sie mit einer Folge von Index-Klammern. `A[3][1]` z.B. liefert das zweite Element der vierten Zeile der Matrix. A[3][1] # Etwas komplizierter wird es, wenn wir nicht nur auf einzelne Werte, sondern ganze Bereiche einer Matrix zugreifen wollen. # Mit `[x:y]` greift man auf die Zeilen $X$ bis einschließlich $y-1$ zu. Der $x$-Wert kann auch weg gelassen werden, `[:2]` liefert z.B. die ersten 2 Zeilen der Matrix print(A[:3]) print(A[1:3]) # Auf einzelne Spalten der Matrix greift man über den Komma-Operator: print(A[:,3]) # Das ist in etwa so zu verstehen, dass das Komma die einzelnen Dimensionen voneinander abgrenzt. # Man nimmt also von der ersten Dimension alle Elemente (angegeben durch das Fehlen vonj Grenzen bei dem `:`-Operator) und von der zweiten Dimension nur die "dritten". # Das folgende Beispiel liefert von den Elementen der dritten Zeile die Elemente im Bereich der zweiten bis dritten Spalte. print(A[2,1:3]) # ### Arrays Anlegen # Wir haben bereits gesehen, wie man NumPy Arrays mit den Funktionen `array` und `arange` anlegen kann. # Es gibt aber noch weitere Methoden, mit denen Arrays angelegt werden können. # So kann man z.B. Arrays generieren, die nur aus Nullen oder Einsen bestehen np.zeros(9) np.ones((4,4)) # Die Methode `linspace(Start, Ende, Anzahl-Werte)` ist eine Erweiterung von `arange` mit der fortlaufende Folgen von Zahlen generiert werden können. Die Funktion liefert `Anzahl-Werte` Zahlen im Bereich `[Start,Ende]`. x = np.linspace(-1,1,20) x # Die Werte steigen bei `linspace` linear an. Falls Sie eine logarithmische Skalierung benötigen, können Sie die Funktion `logspace` verwenden. Dabei ist darauf zu achten, dass `Start` und `Ende` als Exponenten angenommen werden. `np.logspace(0,2,20)` etwa, generiert 20 Werte im Bereich 1 (10 hoch 0) bis 100 (10 hoch 2). # + start = 0 # 10^0 = 1 ende = 2 # 10^2 = 100 n = 20 np.logspace(0,2,20) # - # Wir haben gesehen, wie wir eindimensionale Arrays generieren können. # Oftmals benötigt man aber mehrdimensionale Arrays. 
# NumPy stellt einige Methoden bereit, um die Struktur von Arrays zu verändern. # Die Daten selbst, bleiben von diesen Operationen unverändert. # # Die wichtigsten Funktionen zum Umstrukturieren von Matrizen sind `reshape` und `flatten`. a = np.arange(20) b = a.reshape((4,5)) print("b als 4x5 Matrix:\n", b) b = b.reshape((5,4)) print("\nb als 5x4 Matrix:\n", b) # Eine Wichtige Operation in der Linearen Algebra ist das Transponieren von Matrizen. Dabei werden die Spalten und Zeilen der Matrix vertauscht. Die Werte in der Matrix bleiben gleich, werden aber in einer umgedrehten Rehenfolge durchlaufen. # In NumPy greift man auf die Transponierte Form eines Arrays über das Attribut `T` zu. b.T # Das Umstrukturieren und Transponieren funktioniert auch bei Arrays mit einer Dimension >2 a = np.arange(24).reshape((2,3,4)) a a = a.T a # Mit der Methode `flatten` kann man mehrdimensionale Arrys linearisieren. a.flatten() # ### Zufallszahlen # Zufallszahlen und die Erzeugung von bestimmten Wahrscheinlichkeitsverteilungen ist an vielen Stellen der Mathematik wichtig. # Das *Modul* `np.random` liefert Methoden um Zufallswerte und -verteilungen zu generieren. # # Wie es Ihnen vielleicht aus Sprachen wie C oder Java geläufig ist, köönen Sie auch in Python vor Benutzung des Zufallszahlengenerators mit einem Ausgangswert, dem sogenannten *seed*, initialisieren. Der Zufallszahlengenerator selbst ist *deterministisch*, d.h., er erzeugt zu einem seed immer die gleiche Folge von Zufallszahlen. np.random.seed(seed=1) np.random.random(4) np.random.random(5) np.random.seed(seed=1) np.random.random(5) # `random` liefert gleichverteilte Werte im Bereich `[0,1[`. # Wenn Sie normalverteilte (also nach der Gaußschen Normalverteilung verteilte) Werte benötigen, können Sie die Funktion `np.random.normal(loc, scale, size)` verwenden. Der Parameter `loc` bezeichnet den Erwartungswert und `scale` die Standardabweichung. Mit `size` können Sie die Anzahl der zu generierenden Werte angeben. np.random.normal(0.0, 4.0, 10) # Über ihre Namen, können Sie in Python auch nur einzelne Parameter angeben. Z.B. funktioniert auch der folgende Aufruf, in dem wir nur die Anzahl der Zahlen in der Funktion `normal` angeben. Für die Standardabweichung und die Varianz werden dann Default-Werte angenommen (0 bzw. 1). np.random.normal(size=20) # NumPy bietet auch einige elementare statistische Funktionen, z.B. für den Mittelwert (`mean`) oder die Standardabweichung (`std`). a = np.random.normal(3,7,10000) print("Erwartungswert: ", a.mean()) print("Standardabweichung: ", a.std()) # ### Operationen # # Wir haben nun sehr ausführlich betrachtet, wie man Arrays anlegt und mit Werten füllen kann. # Was wir bisher ausgelassen haben ist, wie man Operationen mit und auf NumPy Arrays durchführt. # Dies wollen wir nun nachholen. # # Wenn man mit Vektoren und Matrizen rechnet, unterscheidet man Skalar- und Matrix-Operationen. # Eine Skalar-Addition mit einem Vektor führt z.B. zu folgendem Resultat: np.arange(8) + 10 # Addieren wir 2 Vektoren, so werden alle Werte an ihrer jeweiligen Stelle miteinander addiert. np.arange(8) + np.arange(8) # Gleiches gilt für die Multiplikation np.arange(10) * 5 np.arange(8) * np.arange(8) # Arrays kann man auch mit Skalaren und Arrays vergleichen np.arange(8) > 2 np.arange(8) == (np.arange(8) *2) # Das Skalarprodukt (auch inneres Produkt genannt) ist eine eigene Form der Multiplikation zweier Vektoren. Dabei wird die Summe der Produkte aller Komponenten der beiden Vektoren. 
a = np.arange(5) print("a: ", a) b = np.arange(5)*2 print("b: ", b) c=a*b print("c = a*b: ", c) d=a.dot(b) print("d = a.b: ", d) # Die Summe aller Elemente eines Arrays bilden Sie mit der Funktion `sum`. np.arange(8).sum() # Darüberhinaus gibt es noch Operationen für Matrizen A = np.arange(20).reshape((4,5)) B = np.arange(20).reshape((4,5)) print("A+B:\n", A+B) print("A∘B:\n", A*B) # Beachten Sie, dass die Multiplikation mit dem `*`-Operator die elementweise Multiplikation ist. Diese Operation wird auch Hadamard-Produkt oder Schur-Produkt genannt. Bei der elementweisen Multiplikation müssen beide Matrizen dieselbe Struktur besitzen. # # Unter einer Matrixmultiplikation versteht man eine andere Operation. Zwei Matrizen $A$ und $B$ werden miteinander multipliziert, indem man sämtliche Skalarprodukte der Zeilenvektoren von $A$ mit den Spaltenvektoren von $B$ bildet. # Die Spaltenzahl von $A$ muss daher mit der Zeilenzahl von $B$ übereinstimmen. A = np.arange(20).reshape((4,5)) B = np.arange(20).reshape((5,4)) print("A⋅B:\n", A@B) # ### Warum ist NumPy effizient # Im folgenden wollen wir kurz analysieren, warum NumPy-Datentypen für Operationen auf großen Datensätzen besser geeignet sind, als die eingebauten Typen von Python. # Wir Vergleichen hier 2 Vektoren $X$ und $Y$: $X$ wird dabei als NumPy Array erzeugt, $Y$ ist ein reguläres Tupel-Objekt. Die Daten/Werte in $X$ und $Y$ sind aber gleich. import math N = 1000000 # X ist ein NumPy Array X = np.linspace(0,N-1,num=N)/N # Y Ist ein Tupel Y = tuple(y/N for y in range(0,N)) print(sum(X-Y)) # X und Y sind 'gleich' # Dass die unterschiedlichen Datentypen (im Beisiel, Tupel und NumPy Array) sehr unterschiedliche Speicherbedarfe haben, ist nicht ganz leicht nachzuprüfen. Zwar besitzt das Modul `sys` die Funktion `getsizeof`, welche auf beliebeige Objekte angewendet werden kann. Wenn man aber `getsizeof` auf ein Objekt eines Sequentiellen Datentyps anwendet, so werden nur die enthaltenen Objektreferenzen in die Berechnung der _Größe_ miteinbezogen; nicht die referenzierte Objekte selbst. Die folgende Funktion `deep_getsizeof` analysiert die Größe eines Objekts und exploriert dabei alle enthaltenen Objekte in rekursiever Weise. Damit erhält man den "echten" Speicherbedarf eines Objektes. from sys import getsizeof from collections.abc import Mapping, Container def deep_getsizeof(o, ids=None): if not ids: ids = set() d = deep_getsizeof if id(o) in ids: return 0 r = getsizeof(o) ids.add(id(o)) if isinstance(o, str) or isinstance(0, str): return r if isinstance(o, Mapping): return r + sum(d(k, ids) + d(v, ids) for k, v in o.iteritems()) if isinstance(o, Container): return r + sum(d(x, ids) for x in o) return r sX = deep_getsizeof(X) sY = deep_getsizeof(Y) print("NumPy Array X ist %d kByte groß." % (sX/1024)) print("Tupel Y ist %d kByte groß." % (sY/1024)) # Wenn Sie wissen möchten, welche mathematischen Bibliotheken NumPy intern verwendet, können Sie sich die entsprechenden Systempfade mit `np.__config__.show()` ausgeben lassen. np.__config__.show() # ### Matplotlib # Mit der Matplotlib Bibliothek können in Python mit recht einfachen Mitteln gutaussehende Grafiken erstellt werden. Der Funktionsumfang der Bibliothek ist sehr groß, daher werden wir Sie hier nur anhand einiger Beispiele vorstellen. Für die Darstellung spezieller Graphen gibt es viele Beispiele in der [Matplotlib Galerie](https://matplotlib.org/gallery/index.html). # # Denken Sie daran, zuerst die Bibliotheksfunktionen einzubindnen. 
import numpy as np import matplotlib as mpl import matplotlib.pyplot as plt # %matplotlib inline # #%matplotlib notebook # Die obigen `import` Anweisungen sind _boilerplate code_, also ein Textbaustein, den Sie immer in gleicher Form verwenden, wenn Sie mit _numpy_ und _matplotlib_ arbeiten. Auch die Abkürzungen der Modulnamen haben sich in dieser Form etabliert. # # `%matplotlib` hingegen ist eine _magic function_ in ipython. Mit diesen Funktionen lassen sich generelle Einstellungen für die interaktive shell vornehmen. Mit dem Parameter `inline` stellt man ein, das die Grafiken im interaktiven Modus direkt unter dem Code dargestellt werden. Die Option `notebook` ist eine erweiterte Variante mit interaktiven Elementen für Python Notebooks. # Die folgende Code-Zelle zeigt ein einfaches Beispiel, in dem eine Sinus- und eine Cosinus-Funktion mittels NumPy erzeugt und die Graphen der Funktionen mit dem Modul _pyplot_ aus dem Paket matplotlib dargestellt werden. x = np.linspace(0,2*np.pi) fig = plt.figure() plt.plot(x,np.sin(x),label="Sinus") plt.plot(x,np.cos(x),label="Cosinus") l_sine, l_cos = plt.gca().lines l_cos.set_linewidth(10) plt.legend(loc='lower left') plt.show() # Numpy stellt den Dekorator `numpy.vectorize` zur Vektorisierung von Funktionen zur Verfügung. Wird dieser Dekorator auf eine Funktion angewendet, so wird die Funktion zur Laufzeit auf alle Elemente der als Argumente übergebenen NumPy Arrays angewendet. Dieser Dekorator dient nicht unbedingt der Effizienz (intern ist der Dekorator als einfache Schleife über alle Elemente implementiert) erlaubt es aber, Funktionen mit skalaren Parametern auch auf Vektoren anzuwenden. # + @np.vectorize def vect_exp(x,y): return np.exp(x) * np.sin(y) print("Mit List Comprehension:") # %time A = tuple(math.exp(a)*math.sin(a) for a in Y) print("\nMit der map Funktion:") # %time B = tuple(map(lambda a,b: math.exp(a)*math.sin(b), Y, Y)) print("\nMit numpy Funktionen:") # %time C = np.exp(X)*np.sin(X) print("\nMit einer vektorisierten Funktion:") # %time D = vect_exp(X,X) print("\nTesten, ob die Arrays gleich sind:") if sum(B-C)==0.0 and sum(B-D)==0.0: print("OK") else: print("Der Fehler ist %e" % max(sum(B-C),sum(B-D))) # - # Matplotlib kann nicht nur Funktionsgraphen zeichnen, sondern bietet eine Fülle von verschiedenen Diagrammtypen. Eine gute Übersicht finden Sie [hier](https://matplotlib.org/gallery.html). Im folgenden Beispiel benutzen wir ein Histogramm um die Verteilung einer Zufallsvariablen darzustellen. Mit dem NumPy Modul _random_ generieren wir uns einen Vektor mit 20000 Einträgen auf Basis der Normal-Verteilung (auch Gauß-Verteilung genannt). Ein Histogramm ist ein Säulendiagramm, das darstellt, wie viele Elemente in einen bestimmten Wertebereich fallen. Der Parameter `bins` gibt an, in wie viele Bereiche die Darstellung aufgeteilt werden soll. Im Beispiel wollen wir also ein Sälendiagramm mit 200 Säulen zeichnen. Man erkennt im Diagramm die typische _Glockenkurve_ mit dem Erwartungswert (hier: 0) in der "Mitte". fig = plt.figure() N = 20000 W = np.random.standard_normal(size=N) plt.hist(W,bins=(N//100)) plt.show() # Zufallszahlen sind in vielen Bereichen des wissenschaftlichen Rechnens und der angewandten Mathematik (z.B. in der Finanzmathematik) wichtig. Häufig geht es darum, komplexe Prozesse zu simulieren, deren Ausgang von Wahrscheinlichkeiten abhängt. # Im nächsten Beispiel, generieren wir wieder Folgen von (normalverteilten) Zufallszahlen. 
Auf dieser Folge berechnen wir dann mit `numpy.cumsum` die kumulierte Summe (auch [Präfixsumme](https://de.wikipedia.org/wiki/Präfixsumme) genannt). Das bedeutet, wir berechnen für jede Position in der Folge die Summe aller Folgenglieder bis zu dieser Position. Dazu addieren wir noch einen Startwert. Da der Erwartungswert der Normalverteilung Null ist und die einzelnen Elemente der Folge unabhängig sind, ist auch der Erwartungswert der Summe gleich Null. # Wir sehen aber im Beispiel, dass sich einige der Zufallsprozesse extremer in positive oder negative Richtung entwickeln. fig = plt.figure() N = 100 Startwert=10 Runden=100 Mittelwert=0 for i in range(0,Runden): X = np.random.standard_normal(size=N) X = np.cumsum(X)+Startwert plt.plot(X) Mittelwert += np.average(X) Mittelwert /= Runden plt.show() Mittelwert # Wenn Sie diese Zufallsprozesse mathematisch etwas erweitern kommen Sie zu Modellen, die heutzutage von Banken und Finanzdienstleistern eingesetzt werden, um Optionspapiere zu bewerten. # # Auch wenn an dieser Stelle die Details des Beispiels nicht weiter behandelt werden, sehen Sie, dass der Code sehr übersichtlich ist. Das ist sowohl bei der Entwicklung, als auch beim Verstehen von Algorithmen sehr vorteilhaft. Python, mit den Erweiterungen NumPy, SciPy und Matplotlib, hat sich für Ingenieure und Wissenschaftler zu einer echten Alternative zu kommerziellen Tools wie etwa Matlab entwickelt. # fig = plt.figure() Laufzeit = 250 Drift = 0.0005 Volatilitaet = 0.01 Startpreis = 20 t = np.linspace(0, Laufzeit-1, Laufzeit) Endpreis = 0 Simulationen=200 for i in range(0,Simulationen): # Standard-Wiener-Prozess simuliert durch einen Gaußschen Random Walk W = np.random.standard_normal(size = Laufzeit) W = np.cumsum(W) # # Geometrische Brownsche Bewegung mit Drift X = (Drift-0.5*Volatilitaet**2)*t + Volatilitaet*W S = Startpreis*np.exp(X) plt.plot(t, S) Endpreis += S[-1] plt.plot(t, [Startpreis]*Laufzeit, lw=3, color='black') plt.show() print("Erwarteter Preis: %f" % (Endpreis/Simulationen)) # Das Paket **SciPy** liefert eine Reihe weiterer mathematischer Funktionen, die über den Umfang von NumPy hinaus gehen. # Ein relativ einfaches Beispiel ist das Ableiten von Funktionen mit der Methode `derivative` aus dem Module `scipy.misc`. Im Beispiel erzeugen wir eine Kubische Funktion $f(x)=x^3+x^2$ und stellen sie dann, zusammen mit ihrer ersten und zweiten Ableitung' mit der _matplotlib_ dar. import sys # !{sys.executable} -m pip install --user Scipy # + from scipy.misc import derivative def f(x): return x**3 + x**2 fig = plt.figure() X = np.linspace(-3,3) plt.plot(X,f(X),label="f",lw=3) plt.plot(X,derivative(f,X),label="f'") plt.plot(X,derivative(f,X,n=2),label="f''") plt.legend(loc='best',fontsize='large') plt.show() # - # ### Interaktion # Das die Plots direkt im Jupyter Notebook erscheinen ist sehr praktisch. # So können Sie Ihre Daten analysieren und direkt in nächsten Code-Zelle weiter bearbeiten. # # Jupyter bietet aber noch mehr Möglichkeiten, um auf Ausgaben einzuwirken. # Für IPython kibt es Zusatzmodule, die interaktive Widgets im Browser bereitstellen. # Mit diesen Widgets kann man den Code-Zellen Bedienelemente hinzufügen, mit denen der Code interaktiv gesteuert werden kann. # # Ein recht einfaches Bedienelement ist ein Schieberegler, mit sich ein skalarer Parameter einstellen lässt. # Ein solcher Slider lässt sich mit der Methode `interact` aus dem Modul `ipywidgets.widgets` leicht umsetzen. # `interact` ist dabei recht flexibel. 
# If the parameter is not a scalar but a Boolean, a checkbox is displayed instead.
# For a string, a text input field is shown accordingly.
#
# The method expects a function reference as its first argument, followed by the parameters of the function that should be made adjustable.

import webbrowser
url = 'https://jupyter-tutorial.readthedocs.io/de/latest/workspace/jupyter/ipywidgets/examples.html'
webbrowser.open(url)

# +
from ipywidgets.widgets import interact, interactive, fixed
from ipywidgets import widgets

def f(x):
    print(x)

interact(f, x=10)
# -

# By the way, `interact` can also be used as a decorator:

@interact
def f(x=10):
    print(x)

# In the following code cell we revisit the derivative example from above.
# Instead of a fixed function $f(x)=x^3+x^2$ we now assume a general polynomial function $f(x)=ax^3+bx^2+cx+d$.
# The parameters $a$ to $d$ are made interactively adjustable via individual sliders.

# +
# %matplotlib notebook
from scipy.misc import derivative

def g(a,b,c,d):
    def foo(x):
        return a*x**3 + b*x**2 + c*x +d
    return foo

def plotte_funktionen(a,b,c,d):
    fig = plt.figure()
    X = np.linspace(-3,3)
    f = g(a,b,c,d)
    plt.plot(X,f(X),label="f",lw=5)
    plt.plot(X,derivative(f,X),label="f'")
    plt.plot(X,derivative(f,X,n=2),label="f''")
    plt.legend(loc='best',fontsize='large')
    plt.draw()

interact(plotte_funktionen, a=1.0, b=1.0, c=1.0, d=1.0)
# -

# A final example returns to matrix computations with NumPy.
# We want to manipulate images and display them with matplotlib. To do so, we first load the grayscale image from the example in [Section 3](#3.-Web-Zugriffe-mit-der-Requests-Bibliothek) as the instance `img`. In a second step we convert the grayscale image into an RGB format by turning the gray value of each pixel into an array of 3 identical values (using the `stack` function).

# +
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
# %matplotlib inline

img=mpimg.imread('image.png')

# Turn the grayscale image into an RGB image.
# To do so, the "gray channel" has to be tripled
# (in the form of a 3-tuple):
rgb_img = np.stack((img,)*3, axis=-1)
imgplot = plt.imshow(rgb_img)
# -

# Now we can, for example, change the coloring of the images by setting individual color channels of the additive RGB (red-green-blue) color space to zero:
#
# <!-- ![](https://upload.wikimedia.org/wikipedia/commons/2/28/RGB_illumination.jpg) -->

# +
rg_img = np.copy(rgb_img)
# blue channel to zero -> yellow
rg_img[:,:,2] = 0

rb_img = np.copy(rgb_img)
# green channel to zero -> violet
rb_img[:,:,1] = 0

gb_img = np.copy(rgb_img)
# red channel to zero -> turquoise
gb_img[:,:,0] = 0

plt.figure()
plt.subplot(131)
plt.imshow(rg_img)
plt.gca().axes.get_xaxis().set_visible(False)
plt.gca().axes.get_yaxis().set_visible(False)
plt.subplot(132)
plt.imshow(rb_img)
plt.gca().axes.get_xaxis().set_visible(False)
plt.gca().axes.get_yaxis().set_visible(False)
plt.subplot(133)
plt.imshow(gb_img)
plt.gca().axes.get_xaxis().set_visible(False)
plt.gca().axes.get_yaxis().set_visible(False)
# -

# **Exercise: Create a new image by selecting regions (horizontal "stripes") from the arrays `rg_img`, `rb_img` and `gb_img` and combining them into a new image.**

# + deletable=false nbgrader={"cell_type": "code", "checksum": "ed0fbc235ba5c0b397eec8f5541abb55", "grade": true, "grade_id": "cell-7a2f19c119a3089c", "locked": false, "points": 0, "schema_version": 3, "solution": true, "task": false}
# YOUR CODE HERE
raise NotImplementedError()
# -

# ### 6. Graphical User Interfaces with Tkinter
# **Tkinter is covered on pages 791-839 of the textbook.**
#
# Several toolkits for programming graphical user interfaces exist for Python. The textbook gives a good overview of them on pages 791-793.
# The standard library includes the package Tkinter, which provides an object-oriented interface to Tk applications. Tk is a GUI toolkit that was originally designed for the scripting language _Tcl_.
#
# Programming graphical user interfaces is a complex topic that goes far beyond the scope of this course. Therefore only a minimal example is presented here, without further commentary.

import tkinter

class CtoF(tkinter.Frame):
    def __init__(self, master=None):
        super().__init__(master)
        self.pack()
        self.createWidgets()

    def createWidgets(self):
        self.celEntry = tkinter.Entry(self)
        self.celEntry.pack()
        self.cel = tkinter.DoubleVar()
        self.cel.set("Temperature in Celsius")
        self.celEntry["textvariable"] = self.cel

        self.ok = tkinter.Button(self)
        self.ok["text"] = "Close"
        self.ok["command"] = self.master.destroy
        self.ok.pack(side="right")

        self.conv = tkinter.Button(self)
        self.conv["text"] = "Convert"
        self.conv["command"] = self.convert
        self.conv.pack(side="right")

    def convert(self):
        self.cel.set(self.cel.get()*1.8+32)

root = tkinter.Tk()
app = CtoF(root)
app.mainloop()

# **Exercise (only works with a local Python installation on your PC/laptop): Write a GUI application with Tkinter that expects a string in a text field. Add a button that causes the string to appear reversed in the text field.**

# + deletable=false nbgrader={"cell_type": "code", "checksum": "7e97be6c7b6e5d3f962d0f01761512c3", "grade": true, "grade_id": "cell-fcd878b50d199a28", "locked": false, "points": 0, "schema_version": 3, "solution": true, "task": false}
import tkinter

class MyApp(tkinter.Frame):
    # YOUR CODE HERE
    raise NotImplementedError()
# -

root = tkinter.Tk()
app = MyApp(root)
app.mainloop()
p7/ex17_Standardbibliothek.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Image Pyramiding

import cv2
from matplotlib import pyplot as plot
import numpy as nm

# read the image and reverse the channel order (BGR -> RGB) for matplotlib
img = cv2.imread("apple.jpg")[:,:,::-1]
plot.imshow(img)
plot.title("Original Image")

# #### Dimension of Original Image

print(img.shape)

# ### Increasing Size

larger_img = cv2.pyrUp(img)
plot.imshow(larger_img)

# #### Dimension after using pyrUp

print(larger_img.shape)

# ### Decreasing Size

smaller_img = cv2.pyrDown(img)
plot.imshow(smaller_img)

# #### Dimension after using pyrDown

print(smaller_img.shape)
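
# ### Reconstruction Loss (Laplacian Level)

# `pyrDown` blurs the image and drops every other row and column, so going down one level and back up does not restore the original. The difference between the original and the reconstruction is what a Laplacian pyramid level stores. A minimal sketch, reusing `img` from above (the resize only guards against one-pixel size differences when a dimension is odd):

orig_img = img.copy()  # contiguous copy of the RGB image
down_img = cv2.pyrDown(orig_img)
up_img = cv2.pyrUp(down_img)
up_img = cv2.resize(up_img, (orig_img.shape[1], orig_img.shape[0]))
lost_detail = cv2.absdiff(orig_img, up_img)
plot.imshow(lost_detail)
plot.title("Detail lost by pyrDown followed by pyrUp")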
ImagePyramids/image_pyramiding.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: pytorch16 # language: python # name: pytorch16 # --- # + import argparse import easydict from utils.config import * from utils.count_model import * from agents import * from torchsummaryX import summary # + import random RandomListOfIntegers = [random.randint(0, 99999) for iter in range(5)] print(RandomListOfIntegers) # - # # 40 % drop # + total_best = [] total_history = [] for i, seed in enumerate(RandomListOfIntegers): config = easydict.EasyDict() config.exp_name = "vgg16_exp_cifar100_0" config.log_dir = os.path.join("experiments", config.exp_name, "logs/") config.load_file = os.path.join("experiments", "vgg16_exp_cifar100_0","checkpoints", "checkpoint.pth") config.cuda = True config.gpu_device = 0 config.seed = seed config.milestones = [10, 20,30,40] config.gamma = 0.9 config.img_size = 32 config.num_classes = 100 config.data_mode = "download" config.data_loader_workers = 4 config.pin_memory = True config.async_loading = True config.batch_size = 128 config.async_loading = True config.max_epoch = 100 torch.cuda.init() agent = globals()["VGG_BN_cifar"](config) agent.init_graph() agent.load_checkpoint(config.load_file) agent.compress(method = 'lasso',k=0.78) best,history = agent.train(specializing=False, freeze_conv=False,cosine_decay = False) summary(agent.model, torch.zeros((1, 3, 32, 32)).to(torch.device("cuda"))) # - summary(agent.model, torch.zeros((1, 3, 32, 32)).to(torch.device("cuda"))) print(sum(total_best)/5) print() for i in np.array(total_history).sum(0)/5: print(i) import matplotlib.pyplot as plt plt.plot(agent.lr_list) # # 60 % drop # + total_best = [] total_history = [] for i, seed in enumerate(RandomListOfIntegers): config = easydict.EasyDict() config.exp_name = "vgg16_exp_cifar100_0" config.log_dir = os.path.join("experiments", config.exp_name, "logs/") config.load_file = os.path.join("experiments", "vgg16_exp_cifar100_0","checkpoints", "checkpoint.pth") config.cuda = True config.gpu_device = 0 config.seed = seed config.milestones = [10, 20,30,40] config.gamma = 0.9 config.img_size = 32 config.num_classes = 100 config.data_mode = "download" config.data_loader_workers = 4 config.pin_memory = True config.async_loading = True config.batch_size = 128 config.async_loading = True config.max_epoch = 100 torch.cuda.init() agent = globals()["VGG_BN_cifar"](config) agent.init_graph() agent.load_checkpoint(config.load_file) agent.compress(method = 'lasso',k=0.65) best,history = agent.train(specializing=False, freeze_conv=False,cosine_decay = False) summary(agent.model, torch.zeros((1, 3, 32, 32)).to(torch.device("cuda"))) # - print(sum(total_best)/5) print() for i in np.array(total_history).sum(0)/5: print(i) import matplotlib.pyplot as plt plt.plot(agent.lr_list) # # 80 % drop # + total_best = [] total_history = [] for i, seed in enumerate(RandomListOfIntegers): config = easydict.EasyDict() config.exp_name = "vgg16_exp_cifar100_0" config.log_dir = os.path.join("experiments", config.exp_name, "logs/") config.load_file = os.path.join("experiments", "vgg16_exp_cifar100_0","checkpoints", "checkpoint.pth") config.cuda = True config.gpu_device = 0 config.seed = seed config.milestones = [10, 20,30,40] config.gamma = 0.9 config.img_size = 32 config.num_classes = 100 config.data_mode = "download" config.data_loader_workers = 4 config.pin_memory = True config.async_loading = True config.batch_size = 128 config.async_loading 
= True config.max_epoch = 100 torch.cuda.init() agent = globals()["VGG_BN_cifar"](config) agent.init_graph() agent.load_checkpoint(config.load_file) agent.compress(method = 'lasso',k=0.48) best,history = agent.train(specializing=False, freeze_conv=False,cosine_decay = False) summary(agent.model, torch.zeros((1, 3, 32, 32)).to(torch.device("cuda"))) # - print(sum(total_best)/5) print() for i in np.array(total_history).sum(0)/5: print(i) import matplotlib.pyplot as plt plt.plot(agent.lr_list)
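
# # Aggregating results across seeds
#
# The averages printed after each experiment (`sum(total_best)/5` and `np.array(total_history).sum(0)/5`) assume that each run's `best` and `history` were appended to `total_best` and `total_history` inside the seed loop, right after `agent.train(...)`. A small helper for that aggregation, shown here only as a sketch under that assumption:

# +
def average_over_seeds(total_best, total_history):
    # mean of the best metric across runs, and the element-wise mean of the per-epoch histories
    n_runs = len(total_best)
    return sum(total_best) / n_runs, np.array(total_history).sum(0) / n_runs
# -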
.ipynb_checkpoints/Test_vgg16_lasso_stepdecay-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] pycharm={"name": "#%% md\n"} # # Playground and experiments # # _This is not the notebook you want to read..._ # + pycharm={"name": "#%%\n"} import xarray from pathlib import Path import geopandas as gp from IPython.display import display from shapely.geometry import Point data_dir = Path("data") # + [markdown] pycharm={"name": "#%% md\n"} # ## What happens if we combine two DataArrays with different coordinates in a Dataset? # + pycharm={"name": "#%%\n"} da1 = xarray.open_rasterio(data_dir / "grassland_25.tiff") da2 = xarray.open_rasterio(data_dir / "small_woody_features_27.tiff") da3 = xarray.open_rasterio(data_dir / "tree_cover_density_25.tiff") # + pycharm={"name": "#%%\n"} print(da1) # + pycharm={"name": "#%%\n"} print(da2) # + pycharm={"name": "#%%\n"} ds = xarray.Dataset({"grass": da1, "swf": da2}) # + pycharm={"name": "#%%\n"} print(ds) # + [markdown] pycharm={"name": "#%% md\n"} # -> we get the union of coordinates in the Dataset # # ## What about identical coordinates? # + pycharm={"name": "#%%\n"} ds = xarray.Dataset({"grass": da1, "trees": da3}) # + pycharm={"name": "#%%\n"} print(ds) assert (ds.coords["x"].values == da1.coords["x"].values).all() assert (ds.coords["y"].values == da1.coords["y"].values).all() assert (ds.coords["x"].values == da3.coords["x"].values).all() assert (ds.coords["y"].values == da3.coords["y"].values).all() # + [markdown] pycharm={"name": "#%% md\n"} # -> we get one Dataset with the same coordinates as the input DataArrays # # ## Interpolate to join arrays with different coordinates # # This fails: Tries to interpolate on 'band', which has only one value. 
# + pycharm={"name": "#%%\n"} da2_resampled = da2.interp_like(da1) # + [markdown] pycharm={"name": "#%% md\n"} # Squeeze out 'band' dimension before resampling: # + pycharm={"name": "#%%\n"} da2_resampled = da2.squeeze('band').interp_like(da1, method='nearest') # + pycharm={"name": "#%%\n"} print(da2_resampled) assert (da2_resampled.coords["x"].values == da1.coords["x"].values).all() assert (da2_resampled.coords["y"].values == da1.coords["y"].values).all() # + [markdown] pycharm={"name": "#%% md\n"} # Now we can nicely merge all three DataArrays into one Dataset # + pycharm={"name": "#%%\n"} ds = xarray.Dataset({"grass": da1, "swf": da2_resampled, "trees": da3}) print(ds) assert (ds.coords["x"].values == da1.coords["x"].values).all() assert (ds.coords["y"].values == da1.coords["y"].values).all() # + pycharm={"name": "#%%\n"} ds.max() # + pycharm={"name": "#%%\n"} ds["grass"] # + pycharm={"name": "#%%\n"} # - # ### Build voronoi regions # # _Insert this into the demo after preprocessing the raster data_ # # + pycharm={"name": "#%%\n"} voronoi_polys, _ = geovoronoi.voronoi_regions_from_coords( coords=geovoronoi.points_to_coords(school_points["geometry"]), geo_shape=munich_df_metric.iloc[0]["geometry"] ) print(voronoi_polys) # + pycharm={"name": "#%%\n"} school_polys = gp.GeoSeries(voronoi_polys) # + pycharm={"name": "#%%\n"} school_polys.plot(figsize=(12, 10)) school_points.plot(ax=plt.gca(), color='red') # + pycharm={"name": "#%%\n"} da1.crs # + [markdown] pycharm={"name": "#%% md\n"} # # Experiments on accuracy of distances in difference CRSs # + pycharm={"name": "#%%\n"} from math import radians, cos, sin, asin, sqrt def haversine(lon1, lat1, lon2, lat2): lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2]) dlon = lon2 - lon1 dlat = lat2 - lat1 a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2 c = 2 * asin(sqrt(a)) r = 6371_000 # radius of Earth in m return c * r # + pycharm={"name": "#%%\n"} # Accurate (haversine) distance for 0.1 degree increment from lon=10, lat=50 print("0.1 degrees North:", haversine(10, 50, 10, 50.1)) print("0.1 degrees East:", haversine(10, 50, 10.1, 50)) # + pycharm={"name": "#%%\n"} # Project to EPSG 3857 and EPSG 25832, measure by coordinate difference lonlat = gp.GeoSeries([ Point(10, 50), Point(10, 50.1), Point(10.1, 50) ], crs="EPSG:4326") for p in ["3857", "25832"]: proj = lonlat.to_crs("EPSG:" + p) print(f"0.1 degrees North, delta in EPSG {p} coords:", proj.iloc[1].distance(proj.iloc[0])) print(f"0.1 degrees East, delta in EPSG {p} coords:", proj.iloc[2].distance(proj.iloc[0])) # + [markdown] pycharm={"name": "#%% md\n"} # -> EPSG 3857 is off by a lot on the East/West distance # # + pycharm={"name": "#%%\n"}
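# A back-of-the-envelope check of the distortion seen above: Web Mercator (EPSG 3857)
# stretches distances by roughly 1/cos(latitude), which at 50°N is about 1.56.
# This is only a rough sketch for illustration, reusing the haversine() helper defined above.
from math import cos, radians

scale_3857 = 1 / cos(radians(50))
print("Web Mercator scale factor at 50N:", scale_3857)
print("0.1 degrees East, haversine * factor:", haversine(10, 50, 10.1, 50) * scale_3857)
print("0.1 degrees North, haversine * factor:", haversine(10, 50, 10, 50.1) * scale_3857)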
playground.ipynb
# # Density test # Here, we compare the two unmatched networks by treating each as an Erdos-Renyi network # and simply compare their estimated densities. # ## The Erdos-Renyi (ER) model # The [**Erdos-Renyi (ER) model** # ](https://en.wikipedia.org/wiki/Erd%C5%91s%E2%80%93R%C3%A9nyi_model) # is one of the simplest network models. This model treats # the probability of each potential edge in the network occuring to be the same. In # other words, all edges between any two nodes are equally likely. # # ```{admonition} Math # Let $n$ be the number of nodes. We say that for all $(i, j), i \neq j$, with $i$ and # $j$ both running # from $1 ... n$, the probability of the edge $(i, j)$ occuring is: # # $$ P[A_{ij} = 1] = p_{ij} = p $$ # # Where $p$ is the the global connection probability. # # Each element of the adjacency matrix $A$ is then sampled independently according to a # [Bernoulli distribution](https://en.wikipedia.org/wiki/Bernoulli_distribution): # # $$ A_{ij} \sim Bernoulli(p) $$ # # For a network modeled as described above, we say it is distributed # # $$ A \sim ER(n, p) $$ # # ``` # # Thus, for this model, the only parameter of interest is the global connection # probability, $p$. This is sometimes also referred to as the **network density**. # ## Testing under the ER model # In order to compare two networks $A^{(L)}$ and $A^{(R)}$ under this model, we # simply need to compute these network densities ($p^{(L)}$ and $p^{(R)}$), and then # run a statistical test to see if these densities are significantly different. # # ```{admonition} Math # Under this # model, the total number of edges $m$ comes from a $Binomial(n(n-1), p)$ distribution, # where $n$ is the number of nodes. This is because the number of edges is the sum of # independent Bernoulli trials with the same probability. If $m^{(L)}$ is the number of # edges on the left # hemisphere, and $m^{(R)}$ is the number of edges on the right, then we have: # # $$m^{(L)} \sim Binomial(n^{(L)}(n^{(L)} - 1), p^{(L)})$$ # # and independently, # # $$m^{(R)} \sim Binomial(n^{(R)}(n^{(R)} - 1), p^{(R)})$$ # # To compare the two networks, we are just interested in a comparison of $p^{(L)}$ vs. # $p^{(R)}$. Formally, we are testing: # # $$H_0: p^{(L)} = p^{(R)}, \quad H_a: p^{(L)} \neq p^{(R)}$$ # # Fortunately, the problem of testing for equal proportions is well studied. # In our case, we will use Fisher's Exact test to run this test for the null and # alternative hypotheses above. 
# ``` # + import datetime import time import matplotlib.path import matplotlib.pyplot as plt import matplotlib.transforms import numpy as np import pandas as pd import seaborn as sns from giskard.plot import merge_axes, soft_axis_off from graspologic.simulations import er_np from matplotlib.collections import LineCollection from pkg.data import load_network_palette, load_node_palette, load_unmatched from pkg.io import FIG_PATH from pkg.io import glue as default_glue from pkg.io import savefig from pkg.plot import SmartSVG, networkplot_simple, set_theme from pkg.plot.er import plot_density from pkg.stats import erdos_renyi_test from pkg.utils import sample_toy_networks from svgutils.compose import Figure, Panel, Text from pkg.plot import draw_hypothesis_box, rainbowarrow DISPLAY_FIGS = True FILENAME = "er_unmatched_test" def gluefig(name, fig, **kwargs): savefig(name, foldername=FILENAME, **kwargs) glue(name, fig, figure=True) if not DISPLAY_FIGS: plt.close() def glue(name, var, **kwargs): default_glue(name, var, FILENAME, **kwargs) t0 = time.time() set_theme(font_scale=1.25) network_palette, NETWORK_KEY = load_network_palette() node_palette, NODE_KEY = load_node_palette() left_adj, left_nodes = load_unmatched("left") right_adj, right_nodes = load_unmatched("right") # + # describe ER model np.random.seed(8888) ps = [0.2, 0.4, 0.6] n_steps = len(ps) fig, axs = plt.subplots( 2, n_steps, figsize=(6, 3), gridspec_kw=dict(height_ratios=[2, 0.5]), constrained_layout=True, ) n = 18 for i, p in enumerate(ps): A = er_np(n, p) if i == 0: node_data = pd.DataFrame(index=np.arange(n)) ax = axs[0, i] networkplot_simple(A, node_data, ax=ax, compute_layout=i == 0) label_text = f"{p}" if i == 0: label_text = r"$p = $" + label_text ax.set_title(label_text, pad=10) fig.set_facecolor("w") ax = merge_axes(fig, axs, rows=1) soft_axis_off(ax) rainbowarrow(ax, (0.15, 0.5), (0.85, 0.5), cmap="Blues", n=100, lw=12) ax.set_xlim((0, 1)) ax.set_ylim((0, 1)) ax.set_xticks([]) ax.set_yticks([]) ax.set_xlabel("Increasing density") gluefig("er_explain", fig) # + A1, A2, node_data = sample_toy_networks() node_data["labels"] = np.ones(len(node_data), dtype=int) palette = {1: sns.color_palette("Set2")[2]} fig, axs = plt.subplots(2, 2, figsize=(6, 6), gridspec_kw=dict(wspace=0.7)) ax = axs[0, 0] networkplot_simple(A1, node_data, ax=ax) ax.set_title("Compute global\nconnection density") ax.set_ylabel( "Left", color=network_palette["Left"], size="large", rotation=0, ha="right", labelpad=10, ) ax = axs[1, 0] networkplot_simple(A2, node_data, ax=ax) ax.set_ylabel( "Right", color=network_palette["Right"], size="large", rotation=0, ha="right", labelpad=10, ) stat, pvalue, misc = erdos_renyi_test(A1, A2) ax = axs[0, 1] ax.text( 0.4, 0.2, r"$p = \frac{\# \ edges}{\# \ potential \ edges}$", ha="center", va="center", ) ax.axis("off") ax.set_title("Compare ER\nmodels") ax.set(xlim=(-0.5, 2), ylim=(0, 1)) ax = axs[1, 1] ax.axis("off") x = 0 y = 0.55 draw_hypothesis_box("er", -0.2, 0.8, ax=ax, fontsize="medium", yskip=0.2) gluefig("er_methods", fig) # - stat, pvalue, misc = erdos_renyi_test(left_adj, right_adj) glue("pvalue", pvalue, form="pvalue") # + n_possible_left = misc["possible1"] n_possible_right = misc["possible2"] glue("n_possible_left", n_possible_left) glue("n_possible_right", n_possible_right) density_left = misc["probability1"] density_right = misc["probability2"] glue("density_left", density_left, form="0.2g") glue("density_right", density_right, form="0.2g") n_edges_left = misc["observed1"] n_edges_right = misc["observed2"] 
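# -

# For intuition, the density comparison boils down to a 2x2 table of edges vs. potential non-edges for the two hemispheres. A minimal sketch of that test with `scipy.stats.fisher_exact`, using the counts computed above (shown for illustration only; `erdos_renyi_test` above is what this analysis actually uses):

# +
from scipy.stats import fisher_exact

table = [
    [n_edges_left, n_possible_left - n_edges_left],
    [n_edges_right, n_possible_right - n_edges_right],
]
_, fisher_pvalue = fisher_exact(table, alternative="two-sided")
print(fisher_pvalue)
# -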
# + coverage = 0.95 glue("coverage", coverage, form="2.0f%") plot_density(misc, palette=network_palette, coverage=coverage) gluefig("er_density", fig) # - # ## Reject bilateral symmetry under the ER model # # ```{glue:figure} fig:er_unmatched_test-er_density # :name: "fig:er_unmatched_test-er_density" # # Comparison of estimated densities for the left and right hemisphere networks. The # estimated density (probability of any edge across the entire network), $\hat{p}$, for # the left # hemisphere is ~{glue:text}`er_unmatched_test-density_left:0.3f`, while for the right # it is # ~{glue:text}`er_unmatched_test-density_right:0.3f`. Black lines denote # {glue:text}`er_unmatched_test-coverage_percentage`**%** # confidence intervals for this estimated parameter $\hat{p}$. The p-value for testing # the null hypothesis that these densities are the same is # {glue:text}`er_unmatched_test-pvalue:0.3g` (two # sided Fisher's exact test). # ``` # # {numref}`Figure {number} <fig:er_unmatched_test-er_density>` shows the comparison of # the network densities between the left and right hemisphere induced subgraphs. We see # that the density on the left is ~{glue:text}`er_unmatched_test-density_left:0.3f`, and # on the right it is ~{glue:text}`er_unmatched_test-density_right:0.3f`. To determine # whether this is a difference likely to be observed by chance under the ER model, # we ran a two-sided Fisher's exact test, which tests whether the success probabilities # between two independent binomials are significantly different. This test yields a # p-value of {glue:text}`er_unmatched_test-pvalue:0.3g`, suggesting that we have strong # evidence to reject this version of our hypotheis of bilateral symmetry. We note that # while the difference between estimated densities is not massive, this low p-value # results from the large sample size for this comparison. We note that there are # {glue:text}`er_unmatched_test-n_possible_left:,.0f` and # {glue:text}`er_unmatched_test-n_possible_right:,.0f` potential edges on the left and # right, # respectively, making the sample size for this comparison quite large. # # To our knowledge, when neuroscientists have considered the question of bilateral # symmetry, they have not meant such a simple comparison of proportions. In many ways, # the ER model is too simple to be an interesting description of connectome structure. # However, we note that *even the simplest network model* yields a significant # difference between brain hemispheres for this organism. It is unclear whether this # difference in densities is biological (e.g. a result of slightly differing rates of # development for this individual), an artifact of how the data was collected (e.g. # technological limitations causing slightly lower reconstruction rates on the left # hemisphere), or something else entirely. Still, the ER test results also provide # important considerations for other tests. Almost any network statistic (e.g. # clustering coefficient, number of triangles, etc), as well as many of the model-based # parameters we will consider in this paper, are strongly related to the network # density. Thus, if the densities are different, it is likely that tests based on any # of these other test statistics will also reject the null hypothesis. Thus, we will # need ways of telling whether an observed difference for these other tests could be # explained by this difference in density alone. 
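# The confidence intervals drawn in the figure can be approximated directly from the edge counts with a standard binomial proportion interval; a sketch using `statsmodels.stats.proportion.proportion_confint` (assuming statsmodels is available; this is an illustration and not necessarily the exact interval used by `plot_density`):

# +
from statsmodels.stats.proportion import proportion_confint

lower_left, upper_left = proportion_confint(n_edges_left, n_possible_left, alpha=1 - coverage)
lower_right, upper_right = proportion_confint(n_edges_right, n_possible_right, alpha=1 - coverage)
print(f"Left:  {density_left:.4f} ({lower_left:.4f}, {upper_left:.4f})")
print(f"Right: {density_right:.4f} ({lower_right:.4f}, {upper_right:.4f})")
# -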
# + FIG_PATH = FIG_PATH / FILENAME fontsize = 12 methods = SmartSVG(FIG_PATH / "er_methods.svg") methods.set_width(200) methods.move(10, 20) methods_panel = Panel( methods, Text("A) Density test methods", 5, 10, size=fontsize, weight="bold") ) density = SmartSVG(FIG_PATH / "er_density.svg") density.set_height(methods.height) density.move(10, 15) density_panel = Panel( density, Text("B) Density comparison", 5, 10, size=fontsize, weight="bold") ) density_panel.move(methods.width * 0.9, 0) fig = Figure( (methods.width + density.width) * 0.9, (methods.height) * 0.9, methods_panel, density_panel, ) fig.save(FIG_PATH / "composite.svg") fig # - elapsed = time.time() - t0 delta = datetime.timedelta(seconds=elapsed) print(f"Script took {delta}") print(f"Completed at {datetime.datetime.now()}")
docs/er_unmatched_test.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [default] # language: python # name: python2 # --- # # Dataset Exploration # # Here we'll be exploring how each of the features we have so far relates to the target variable "status" # ## Importing the dataset import pandas as pd startups = pd.read_csv('data/startups_2.csv', index_col=0) startups[:3] # ### Let's start exploring the numerical features # Let's see a heatmap chart of the average features for 'acquired' startups against the complete set of startups # + import seaborn as sns import matplotlib.pyplot as plt # %matplotlib inline def plot_avg_status_against_avg_total(df, status): startups_numeric = df.filter(regex=('(number_of|avg_).*|.*(funding_total_usd|funding_rounds|_at|status)')) startups_acquired = startups_numeric[startups_numeric['status'] == status] startups_numeric = startups_numeric.drop('status', 1) startups_acquired = startups_acquired.drop('status', 1) fig, ax = plt.subplots(figsize=(20,20)) ax.set_title(status+' startups heatmap') sns.heatmap((pd.DataFrame(startups_acquired.mean()).transpose() -startups_numeric.mean())/startups_numeric.std(ddof=0), annot=True, cbar=False, square=True, ax=ax) plot_avg_status_against_avg_total(startups, 'acquired') # - # The same for 'closed': plot_avg_status_against_avg_total(startups, 'closed') plot_avg_status_against_avg_total(startups, 'ipo') plot_avg_status_against_avg_total(startups, 'operating') # We can see some logic behavior here. Acquired startups tend to have high venture_funding_rounds and low seed_funding_rounds, while closed startups have few funding_rounds in general and relatively high angel_funding_rounds. # # Regarding the dates variables we also have logical results. Acquired and closed startups haven't had a funding for a higher amount of time. # While operating startups had a funding not so long ago when compared to the rest of the startups. 
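# To make the scale of these heatmaps concrete: each cell is the difference between the mean of a feature within one status group and the mean over all startups, expressed in units of the overall (population) standard deviation. A tiny worked example of that normalization with toy numbers (not taken from the dataset):

# +
toy = pd.DataFrame({'venture_funding_rounds': [0, 1, 1, 2, 4],
                    'status': ['closed', 'operating', 'operating', 'acquired', 'acquired']})

group_mean = toy[toy['status'] == 'acquired']['venture_funding_rounds'].mean()  # 3.0
overall_mean = toy['venture_funding_rounds'].mean()                             # 1.6
overall_std = toy['venture_funding_rounds'].std(ddof=0)                         # population std, as in the heatmap
print((group_mean - overall_mean) / overall_std)
# -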
# + # Produce a scatter matrix for each pair of features in the data #startups_funding_rounds = startups_numeric.filter(regex=('.*funding_total_usd')) #pd.scatter_matrix(startups_funding_rounds, alpha = 0.3, figsize = (14,8), diagonal = 'kde'); # - # ## Applying PCA to discover which features best explain the variance in the dataset # + from sklearn.decomposition import PCA import visuals as vs startups_numeric = startups.filter(regex=('(number_of|avg_).*|.*(funding_total_usd|funding_rounds|_at)')) # TODO: Apply PCA by fitting the good data with the same number of dimensions as features pca = PCA(n_components=4) pca.fit(startups_numeric) # Generate PCA results plot pca_results = vs.pca_results(startups_numeric, pca) startups_numeric[:3] # + good_data = startups_numeric import numpy as np dimensions = dimensions = ['Dimension {}'.format(i) for i in range(1,len(pca.components_)+1)] components = pd.DataFrame(np.round(pca.components_, 4), columns = good_data.keys()) components.index = dimensions components # - # The most important variables here are: # # Dimension1: funding_rounds, -last_funding_at, debt_financing_funding_rounds, venture_funding_rounds # # Dimension2: -funding_rounds, -last_funding_at, -seed_funding_rounds, venture_funding_rounds # # Dimension3: -last_funding_at, equity_crowdfunding_funding_rounds, -seed_funding_rounds # # Dimension4: last_funding_at, equity_crowdfunding_funding_rounds, seed_funding_rounds # ### Now I'll apply the same PCA algorithm, but just for startups with acquired status # + startups_numeric_acquired = startups.filter(regex=('(number_of|avg_).*|.*(funding_total_usd|funding_rounds|_at|status)')) startups_numeric_acquired = startups_numeric_acquired[startups_numeric_acquired['status'] == 'acquired'] startups_numeric_acquired = startups_numeric_acquired.drop('status', 1) pca = PCA(n_components=4) pca.fit(startups_numeric_acquired) # Generate PCA results plot pca_results = vs.pca_results(startups_numeric_acquired, pca) # - # Okay. We see now that some features tend to express more variance than others. # # We also see that funding_rounds variable tend to dominate against funding_total_usd values. # And also, that last_funding_at is a very expressing variable. # # # ### Let's start playing with non-numerical variables: dates and Categories #startups_numeric = df.filter(regex=('.*(funding_total_usd|funding_rounds|status)')) startups_non_numeric = startups.filter(regex=('^((?!(_acquisitions|_investments|_per_round|funding_total_usd|funding_rounds|_at)).)*$')) startups_non_numeric[:3] # ### Let's try some DecisionTrees for categories and see which performance we get. startups_non_numeric['status'].value_counts() startups_non_numeric['acquired'] = startups_non_numeric['status'].map({'operating': 0, 'acquired':1, 'closed':0, 'ipo':0}) startups_non_numeric = startups_non_numeric.drop('status', 1) startups_non_numeric[:3] from sklearn import tree def visualize_tree(tree_model, feature_names): """Create tree png using graphviz. Args ---- tree_model -- scikit-learn DecsisionTree. feature_names -- list of feature names. 
""" with open("dt.dot", 'w') as f: tree.export_graphviz(tree_model, out_file=f, feature_names=feature_names) command = ["dot", "-Tpng", "dt.dot", "-o", "dt.png"] try: subprocess.check_call(command) except: exit("Could not run dot, ie graphviz, to " "produce visualization") # + #import visuals_tree as vs_tree #vs_tree.ModelLearning(startups_non_numeric.drop(['acquired','state_code'], 1), startups_non_numeric['acquired']) from sklearn import tree from sklearn.cross_validation import cross_val_score from sklearn import tree from sklearn import grid_search from sklearn import preprocessing #clf = tree.DecisionTreeClassifier(random_state=0) #cross_val_score(clf, startups_non_numeric.drop(['acquired','state_code'], 1), startups_non_numeric['acquired'], cv=10) #Drop state_code feature features = startups_non_numeric.drop(['acquired','state_code'], 1) #Convert state_code feature to number #features = startups_non_numeric.drop(['acquired'], 1) #features['state_code'] = preprocessing.LabelEncoder().fit_transform(features['state_code']) #Convert state_code to dummy variables features = pd.get_dummies(startups_non_numeric.drop(['acquired'], 1), prefix='state', columns=['state_code']) #Merge numeric_features to non-numeric-features features_all = pd.concat([features, startups_numeric], axis=1, ignore_index=False) #features = features_all features = startups_numeric parameters = {'max_depth':range(5,20)} clf = grid_search.GridSearchCV(tree.DecisionTreeClassifier(), parameters, n_jobs=5, scoring='roc_auc') clf.fit(X=features, y=startups_non_numeric['acquired']) tree_model = clf.best_estimator_ print (clf.best_score_, clf.best_params_) print tree.export_graphviz(clf.best_estimator_, feature_names=list(features.columns)) # - import visuals_tree as vs_tree vs_tree = reload(vs_tree) vs_tree.ModelComplexity(features_all, startups_non_numeric['acquired']) # ### Only categories and states are not enough for making a good prediction. With that, maximum (roc_auc) of 0.64 was achieved. With attributes, a simple decisionTreeClassifier achieved 0.84 roc_auc. # ## Saving the dataset ready to be tested by different learning algorithms all = pd.concat([features_all, startups_non_numeric['acquired']], axis=1, ignore_index=False) all.to_csv('data/startups_3.csv') all_with_status = all.join(startups['status']) all_with_status_without_operating = all_with_status[all_with_status['status'] != 'operating'] all_with_status_without_operating.shape all_without_operating = all_with_status_without_operating.drop('status', 1) all_without_operating.to_csv('data/startups_not_operating_3.csv')
exploratory_code/3_dataset_exploration.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # + import os import pandas as pd import webbrowser import seaborn as sns sns.set() import plotly import plotly.plotly as py import plotly.graph_objs as go plotly.tools.set_credentials_file(username='sriharis', api_key='<KEY>') from DataExtractor import DataExtractor from DataUtils import * from VizTools import * # + # Set the path variables _path = os.path.join(os.getcwd(), "datasets/crime") db = 'crime_data' start_year = 2001 end_year = 2017 all_year_data = [] columns_to_select = ["Arrest", "Date", "Year", "crime_count", "day", "month", # "quarter", # "week_no" ] for year in range(start_year, end_year+1): # print("Pulling year:", year) cols = ''.join(s+", " for s in columns_to_select) query = "SELECT " + cols[:-2] + " FROM crime_" + str(year) + ";" # print(query) data_extractor = DataExtractor() year_df = data_extractor.read_db(db=db, query=query) print("Pulled", year_df.shape[0], "rows of data for year", year) # print(year_df.shape) all_year_data.append(year_df) # - # What are out columns? print_columns(all_year_data[0]) # + # How has crime behaved over the months for all these years? # Aggregate crime at a day level to check for trends agg_data = pd.DataFrame(columns=["Year", "month", "day", "Arrest", "crime_count"]) for year_data in all_year_data: year_agg = year_data.groupby(by=["Year", "month", "day"], as_index=False).sum() year_agg = year_agg[["Year", "month", "day", "Arrest", "crime_count"]] agg_data = agg_data.append(year_agg, ignore_index=True) def get_date_df(row): df = pd.DataFrame({ 'year':row["Year"], 'month':row["month"], 'day':row["day"], }, index=[0]) return df agg_data["Date"] = agg_data.apply(lambda row: pd.to_datetime(get_date_df(row)), axis=1) agg_data["day_name"] = agg_data.apply(lambda row: row["Date"].strftime("%A"), axis=1) agg_data.head() # + data = [] # Create traces for each year for year in agg_data["Year"].unique(): trace = go.Scatter( x = agg_data[agg_data["Year"] == year]["Date"], y = agg_data[agg_data["Year"] == year]["crime_count"], name = str(year) ) data.append(trace) py.iplot(data) # - # If we assume that crime behaves consistently through a year, let's plot distribution plots for 2017's months temp_data = agg_data[agg_data["Year"] == 2017][["Year", "month", "crime_count"]] temp_data["Year"] = temp_data["Year"].astype(float) temp_data["month"] = temp_data["month"].astype(float) temp_data["crime_count"] = temp_data["crime_count"].astype(float) plt.figure(figsize=(16, 10)) plt.title("Crime behaviour over 2017") g = sns.boxplot(x="month", y="crime_count", data=temp_data) # + # What about arrests made? arrest_map = {} for year_data in all_year_data: arrest_map[year_data["Year"].unique()[0]] = dict({ 0:year_data["Arrest"].value_counts()[0], 1:year_data["Arrest"].value_counts()[1], }) arrest_df = pd.DataFrame(arrest_map).transpose() arrest_df.columns = ["not_arrested", "arrested"] # But was there a percentage change? arrest_df["not_arrested_%"] = arrest_df.apply(lambda row: (row["not_arrested"] / (row["not_arrested"] + row["arrested"])) * 100, axis=1 ) arrest_df["arrested_%"] = arrest_df.apply(lambda row: 100-row["not_arrested_%"], axis=1) arrest_df.head() # - # How many were arrested? arrest_df[["arrested", "not_arrested"]].plot(kind='bar', stacked=True, figsize=(16,10)) # What about in percentage? Was there any improvement there? 
arrest_df[["arrested_%", "not_arrested_%"]].plot(kind='bar', stacked=True, figsize=(16,10), ylim=[0,120]) # But when do crimes happen? agg_month_dayname = agg_data.groupby(by=["month", "day_name"], as_index=False).sum() agg_month_dayname["crime_count_1000s"] = agg_month_dayname["crime_count"] / 1000 heatmap_data = agg_month_dayname.pivot("day_name", "month", "crime_count_1000s") col_palette = sns.diverging_palette(240, 1, n=20) plt.figure(figsize=(16, 10)) plt.title("Crime counts in thousands") g = sns.heatmap(data=heatmap_data, cmap=col_palette, annot=True) # and arrests? agg_month_dayname["arrest_1000s"] = agg_month_dayname["Arrest"] / 1000 heatmap_data = agg_month_dayname.pivot("day_name", "month", "arrest_1000s") col_palette = sns.diverging_palette(240, 1, n=20) plt.figure(figsize=(16, 10)) plt.title("Arrests in thousands") g = sns.heatmap(data=heatmap_data, cmap=col_palette, annot=True)
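
# The two heatmaps above show raw counts; to see when an incident is most likely to end in an arrest, the arrest count can be normalized by the crime count. A short follow-up sketch on the same aggregated frame:
agg_month_dayname["arrest_rate"] = agg_month_dayname["Arrest"] / agg_month_dayname["crime_count"]
heatmap_data = agg_month_dayname.pivot("day_name", "month", "arrest_rate")
plt.figure(figsize=(16, 10))
plt.title("Arrests per reported crime")
g = sns.heatmap(data=heatmap_data, cmap=col_palette, annot=True)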
CrimeDataAnalysis_Part_1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: venv # language: python # name: venv # --- # # Face Detection with OpenCv # **Import library's** import cv2 import numpy as np import matplotlib.pyplot as plt # %matplotlib inline # **Load the image** nadia = cv2.imread("/Users/neemiasbsilva/Downloads/Computer-Vision-with-Python/DATA/Nadia_Murad.jpg", 0) denis = cv2.imread("/Users/neemiasbsilva/Downloads/Computer-Vision-with-Python/DATA/Denis_Mukwege.jpg", 0) solvay = cv2.imread("/Users/neemiasbsilva/Downloads/Computer-Vision-with-Python/DATA/solvay_conference.jpg", 0) # show the nadia image plt.imshow(nadia, cmap='gray') # show the denis image plt.imshow(denis, cmap='gray') plt.imshow(solvay, cmap='gray') # **Load the Frontal Cascade Classifier** face_cascade = cv2.CascadeClassifier('/Users/neemiasbsilva/Downloads/Computer-Vision-with-Python/DATA/haarcascades/haarcascade_frontalface_default.xml') def detect_face(img): face_img = img.copy() face_rectangles = face_cascade.detectMultiScale(face_img) for (x, y, w, h) in face_rectangles: cv2.rectangle(face_img, (x, y), (x+w, y+h), (255, 0, 255), 10) return face_img result_denis = detect_face(denis) plt.imshow(result_denis, cmap='gray') result_nadia = detect_face(nadia) plt.imshow(result_nadia, cmap='gray') result_solvay = detect_face(solvay) plt.imshow(result_solvay, cmap='gray') # **Adjusting the Detect Face** def adj_detect_face(img): face_img = img.copy() face_rectangles = face_cascade.detectMultiScale(face_img, scaleFactor=1.2, minNeighbors=5) for (x, y, w, h) in face_rectangles: cv2.rectangle(face_img, (x, y), (x+w, y+h), (255, 0, 255), 10) return face_img result_solvay = adj_detect_face(solvay) plt.imshow(result_solvay, cmap='gray') # **Detect Eyes Cascade Classifier** eye_cascade = cv2.CascadeClassifier("/Users/neemiasbsilva/Downloads/Computer-Vision-with-Python/DATA/haarcascades/haarcascade_eye.xml") def detect_eyes(img): face_img = img.copy() eyes_rectangles = eye_cascade.detectMultiScale(face_img, scaleFactor=1.2, minNeighbors=5) for (x, y, w, h) in eyes_rectangles: cv2.rectangle(face_img, (x, y), (x+w, y+h), (255, 255, 255), 10) return face_img result_nadia_eyes = detect_eyes(nadia) plt.imshow(result_nadia_eyes, cmap='gray') # doesn't work because the region of eyes is the same of the region # of interest, for more details check the Viola Algorithms result_denis_eyes = detect_eyes(denis) plt.imshow(result_denis_eyes, cmap='gray')
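
# **Detecting Eyes inside Face Regions**

# A common way to make the eye cascade more reliable is to search for eyes only inside the face rectangles found by the face cascade, instead of scanning the whole image. A minimal sketch of that approach, reusing the cascades loaded above:

def detect_eyes_in_faces(img):
    face_img = img.copy()
    face_rectangles = face_cascade.detectMultiScale(face_img, scaleFactor=1.2, minNeighbors=5)
    for (x, y, w, h) in face_rectangles:
        # run the eye cascade only on the face region of interest
        roi = face_img[y:y+h, x:x+w]
        eye_rectangles = eye_cascade.detectMultiScale(roi, scaleFactor=1.2, minNeighbors=5)
        for (ex, ey, ew, eh) in eye_rectangles:
            cv2.rectangle(face_img, (x+ex, y+ey), (x+ex+ew, y+ey+eh), (255, 255, 255), 10)
    return face_img

result_denis_roi = detect_eyes_in_faces(denis)
plt.imshow(result_denis_roi, cmap='gray')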
face-detection.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Execution Plan # # In this notebook we try to understand Spark execution plans. We will use the weather example and analyse all the steps in order to get a better understanding. # # ## Exeuction Model of Spark # # In contrast to many other (mainly non-distributed) frameworks, Spark does not execute any transformation immediately, but only records the step and builds a so called execution plan. This plan is the basis for Sparks resilience against failure of individual nodes (since the result can be reconstructed from the execution plan), but also allows Spark to perform optimizations which span all transformation steps. # # Specifically with Spark DataFrames (as opposed to the more low level RDD interface), Spark uses an advanced optimizer. The general steps of query processing in response to an action (like a "show" or "save" action)" are always as follows: # 1. Parse logical execution plan # 2. Analyze logical execution plan and resolve all symbols (tables, columns, functions) # 3. Optimize logical execution plan # 4. Create physical execution plan by mapping all steps to RDD operations # # ## Relation to RDDs # Note that RDDs are only used in the very last step, although the general conception is that DataFrames sit on top of RDDs. But the point is, that a DataFrame first collects all transformations on a higher level of abstraction and RDDs only come into play in this very last step. # # Actually you can access an RDD of any DataFrame. BUT: This access will actually create the physical execution plan for this specific RDD. Before accessing this RDD it even didn't exist. This also means that using a DataFrames RDD actually is an optimization barrier. # # ## Weather Example # # In the following steps, we will try to understand how Spark executes a simplified version of the weather analysis including aggregations and joins. spark.conf.set("spark.sql.adaptive.enabled", False) # # 1. Load Data # # First we load the weather data, which consists of the measurement data and some station metadata. storageLocation = "s3://dimajix-training/data/weather" # ## 1.1 Load Measurements # # Measurements are stored in multiple directories (one per year) # + from pyspark.sql.functions import * from functools import reduce # Read in all years, store them in an Python array raw_weather_per_year = [spark.read.text(storageLocation + "/" + str(i)).withColumn("year", lit(i)) for i in range(2003,2015)] # Union all years together raw_weather = reduce(lambda l,r: l.union(r), raw_weather_per_year) # Display first 10 records raw_weather.limit(10).toPandas() # - # ### Extract Measurements # # Measurements were stored in a proprietary text based format, with some values at fixed positions. We need to extract these values with a simple `SELECT` statement. 
# + weather = raw_weather.select( col("year"), substring(col("value"),5,6).alias("usaf"), substring(col("value"),11,5).alias("wban"), substring(col("value"),16,8).alias("date"), substring(col("value"),24,4).alias("time"), substring(col("value"),42,5).alias("report_type"), substring(col("value"),61,3).alias("wind_direction"), substring(col("value"),64,1).alias("wind_direction_qual"), substring(col("value"),65,1).alias("wind_observation"), (substring(col("value"),66,4).cast("float") / lit(10.0)).alias("wind_speed"), substring(col("value"),70,1).alias("wind_speed_qual"), (substring(col("value"),88,5).cast("float") / lit(10.0)).alias("air_temperature"), substring(col("value"),93,1).alias("air_temperature_qual") ) weather.limit(10).toPandas() # - # ## 1.2 Load Station Metadata # # We also need to load the weather station meta data containing information about the geo location, country etc of individual weather stations. # + stations = spark.read \ .option("header", True) \ .csv(storageLocation + "/isd-history") # Display first 10 records stations.limit(10).toPandas() # - # ## 1.3 Perform Analysis # # Now for completeness sake, let's reperform the analysis (minimum and maximum temperature per year and country) using `JOIN` and `GROUP BY` operations. # + df = weather.join(stations, (weather.usaf == stations.USAF) & (weather.wban == stations.WBAN)) result = df.groupBy(df.CTRY, df.year).agg( min(when(df.air_temperature_qual == lit(1), df.air_temperature)).alias('min_temp'), max(when(df.air_temperature_qual == lit(1), df.air_temperature)).alias('max_temp'), min(when(df.wind_speed_qual == lit(1), df.wind_speed)).alias('min_wind'), max(when(df.wind_speed_qual == lit(1), df.wind_speed)).alias('max_wind') ) pdf = result.toPandas() pdf # - # # 2 Investigate Execution Plans # # Now that we have redone the whole analysis, let's try to understand how Spark actually executes these steps. In order to understand the whole aggregation, we start simple and add one step after the other and look how execution plans change. # ## 2.1 Reading Data # # The first step is to read in data. In order to start simple, we only load a single year into a DataFrame called `raw_weather_2003`. We can inspect the execution plan that would create the records of that DataFrame with the `explain()` method. raw_weather_2003 = spark.read.text(storageLocation + "/2003") ## YOUR CODE HERE # As we can see, the execution plan actually contains a single operation - reading data from disk. Note two things: # * The phyiscal execution plan has been created specifically for the `explain()` command. It is not stored in the DataFrame, the DataFrame only contains the basis for a *parsed logical plan* # * The plan is not executed, only printed to the console # # We can also inspect a more detailed execition plan, if we pass `True` to the `explain()` method as follows: # + ## YOUR CODE HERE # - # As you can see, the explanation now contains all four steps: # * Parsed logical execution plan. This directly corresponds to the operations as specified. # * Analyzed logical plan. This resolves all relations and columns and data types. # * Optimized logical plan. This plan is already optimized (we'll see some optimizations later) # * Physical execution plan. This maps all operations and transformations to RDD operations. # ## 2.2 Adding Columns # # Let's see how the execution plan changes if we add a new column. 
raw_weather_2003 = spark.read.text(storageLocation + "/2003").withColumn("year", lit(2003)) ## YOUR CODE HERE # ### Remarks # We see that a `Project` operation was inserted to all execution plans which is responsible for adding the `year` column. # ## 2.3 SELECT Operation # # Now let's perform an additional `SELECT` operation after adding the year. We do not add all columns yet in order to keep the output small and more readable. We will add more columns later when we really require them. weather_2003 = raw_weather_2003.select( col("year"), substring(col("value"),5,6).alias("usaf"), substring(col("value"),11,5).alias("wban") ) ## YOUR CODE HERE # ### Remarks # Here we see that the original parsed plan and analyzed plan actually contains two `Project` operations. Each of them corresponds to a single transformation (`withColumn` and `select`). But the optimizer merged these operations into a single one, thus simplifying execution. # ## 2.4 UNION Operation # # Just for completeness, let's see what a `UNION` operation does. We required it after loading all years into individual DataFrames. # + # Read in all years, store them in an Python array raw_weather_per_year = [spark.read.text(storageLocation + "/" + str(i)).withColumn("year", lit(i)) for i in range(2003,2015)] # Union all years together raw_weather = reduce(lambda l,r: l.union(r), raw_weather_per_year) # Print execution plan ## YOUR CODE HERE # - # ## 2.5 JOIN Operation # # The next operation we had to perform was a `JOIN` between the measurements and the station metadata. We will use only a single year instead of the unioned data to keep output small and thereby increase readability of the execution plans. df = ## YOUR CODE HERE # ### Remarks # Now a `JOIN` results in an interesting execution plan: # * Spark filters columns, since an inner JOIN require non-null values # * Filtering is actually pushed down before the projection. This reduces amount of data as soon as possible # * JOIN operation is performed in two steps: # * Load data and broadcast it to all nodes (`BroadcastExchange`) # * Perform the join (`BroadcastHashJoin`) # # In addition to the *broadcast join* Spark also supports a different join implementation - more on that later. # ### Implicit Filtering # # Actually let's have a look at what happens with a left outer join. This should not filter away `NULL` values on the left side: # + ## YOUR CODE HERE # - # ## 2.6 Aggregation # # Finally we want to perform an aggregation on the joined data. We need to restart from measurement extraction, since we did not extract all required columns so far. So we will perform the following steps # * Reuse `raw_weather_2003` which already contains the `year` column # * Extract all requirement measurements # * Join with stations metadata # * Perform grouped aggregation # Again we will only analyze the temperature, just to keep execution plans a little bit smaller. This means that some columns are missing, but the basic operations are all the same. 
# ### Extract Measurements weather_2003 = raw_weather_2003.select( col("year"), substring(col("value"),5,6).alias("usaf"), substring(col("value"),11,5).alias("wban"), substring(col("value"),16,8).alias("date"), substring(col("value"),24,4).alias("time"), substring(col("value"),42,5).alias("report_type"), substring(col("value"),61,3).alias("wind_direction"), substring(col("value"),64,1).alias("wind_direction_qual"), substring(col("value"),65,1).alias("wind_observation"), (substring(col("value"),66,4).cast("float") / lit(10.0)).alias("wind_speed"), substring(col("value"),70,1).alias("wind_speed_qual"), (substring(col("value"),88,5).cast("float") / lit(10.0)).alias("air_temperature"), substring(col("value"),93,1).alias("air_temperature_qual") ) ## YOUR CODE HERE # ### Join with Stations Metadata df = weather_2003.join(stations, (weather_2003.usaf == stations.USAF) & (weather_2003.wban == stations.WBAN)) ## YOUR CODE HERE # ### Perform Grouped Aggregation # + ## YOUR CODE HERE # - # ### Remarks # # Again we can see that Spark performs some simple but clever optiomizations: # * Projections only contains the columns required, not all available columns of df. The required columns are recursively *pushed up* the transformation chain from the last operation (grouped aggregation) to the first transformations # * The aggregation is performed in three steps: # * Partial aggregation (`HashAggregate` with `partial_...` functions) # * Shuffle (`Exchange hashpartitioning`) # * Final aggregation of partial results (`HashAggregate`) # ## 2.7 Sorting # # The last operation we like to analyze is sorting. To keep execution plans simple, we just sort the `stations` DataFrame by the stations IDs. # + ## YOUR CODE HERE # - # ### Remarks # # In order to have a globally sorted result, it is not enough to sort within each Spark partition. This implies that some kind of shuffle operation has to be executed. In contrast to all our previous examples, this time Spark uses a `rangepartitioning` by which it simply splits up all data according to the range of the sorting key. After that is done, records will be sorted independently within each partition. Since the ranges were non-overlapping this is enough for a global ordering covering all partitions.
pyspark-advanced/jupyter-execution/Execution Plan - Skeleton.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + id="N96HymjLB_gR"
import json

# + id="lJzBIB4JNmHf"
# create product_sales.json ('x' raises an error if the file already exists)
fd = open("product_sales.json",'x')
fd.close()

# + id="gTmt1cwHG81t"
sales=[]

# + colab={"base_uri": "https://localhost:8080/"} id="NohuYaILAJBj" outputId="a561a276-7da6-4ad9-968d-b5bbeb4411dc"
fd = open("products.json",'r')
r = fd.read()
fd.close()
products = json.loads(r)

print("Welcome to shop")
ui_prod = input("Enter the product Id: ")
ui_quan = int(input("Enter the quantity of the Product: "))

for i in products.keys():
    while ui_prod == i:
        if ui_quan <= products[i]['quan']:
            print("****************************************")
            print(" product:",products[ui_prod]['name'])
            print(" price:",products[ui_prod]['price'])
            print(" Expiry date:",products[ui_prod]['expiry'])
            print(" Manufacturing Brand:",products[ui_prod]['brand'])
            print("****************************************")
            print("----------BILLING DETAILS:----------")
            print("Total Amount: ",products[ui_prod]['price']*ui_quan)
            print("Enjoy Your Day")
            print("Thank You. Visit Again")
            print("**********************************")
            products[ui_prod]["quan"] = products[ui_prod]["quan"] - ui_quan
            num = len(sales)+1
            sales.append([num,ui_prod,products[ui_prod]['name'],ui_quan,products[ui_prod]['price']*ui_quan])
        else:
            print("Sorry. The available stock of the product you entered is:",products[i]['quan'],"\nPlease accept our apologies for this inconvenience")
        break

if ui_prod not in products.keys():
    print("Sorry. No product found for ID:",ui_prod,"\nPlease enter a valid product ID")

js = json.dumps(products)
fd = open("products.json",'w')
fd.write(js)
fd.close()

st = json.dumps(sales)
fd = open("sales.json","w")
fd.write(st)
fd.close()
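# -

# The purchase flow above assumes that `products.json` maps each product ID to a record with the keys `name`, `price`, `expiry`, `brand` and `quan`. A made-up example of that structure (illustration only; the IDs and values are not real inventory data):

# +
import json

sample_products = {
    "101": {"name": "Milk 1L", "price": 25, "expiry": "31-01-2023", "brand": "DairyCo", "quan": 40},
    "102": {"name": "Bread", "price": 30, "expiry": "15-01-2023", "brand": "BakeHouse", "quan": 25},
}

print(json.dumps(sample_products, indent=4))
# -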
Inventory_Management_Product_Purchase.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + language="bash"
#
# mkdir -p ../data/dataset1
# mkdir -p ../data/dataset2
#
# python3 ../utils/download_dataset.py
# python3 ../utils/convert_to_tfrecords.py

# +
import sys
import os

root_dir = os.path.split(os.getcwd())[0]
sys.path.append(root_dir)

from utils.configurations.config import Config
# -

# # Table of Contents
#
# >- [Data Ingestion](#Data-Ingestion)
# >-[ What is InteractiveContext?](#What-is-InteractiveContext?)
# >-[Output of the component](#Output-of-the-component)
# >-[what metadata store is for?](#what-metadata-store-is-for?)
# >>- [atrifacts Tables](#atrifacts-Tables)
# >>- [Contexts Tables](#Contexts-Tables)
# >>- [Executions Tables](#Executions-Tables)
# >- [Loding dataset from tf_records](#Loding-dataset-from-tf_records)
# >-[Configuration Options](#Configuration-Options)
# >>- [splitting](#splitting)
# >>- [If data is stored in spitted manner](#If-data-is-stored-in-spitted-manner)
# >>- [Span](#Span)
# >-[Add-ons](#Add-ons)

# ## Data Ingestion
#
# This pipeline component reads data files, or requests the data for our pipeline run from an external service (e.g., Google Cloud BigQuery), and outputs an artifact for the next step. Before passing the ingested dataset to the next component, we divide the available data into training and validation datasets (the split ratio and the number of splits are configurable) and then convert the datasets into TFRecord files containing the data represented as tf.Example data structures.

import warnings
warnings.filterwarnings('ignore', 'absl')

# +
import pprint
import shutil
import pandas as pd
from collections import defaultdict

import tensorflow as tf
from tfx.components import CsvExampleGen
from tfx.utils.dsl_utils import external_input
from tfx.orchestration.experimental.interactive.interactive_context \
    import InteractiveContext

pp = pprint.PrettyPrinter()
# -

from ml_metadata.metadata_store import metadata_store
from ml_metadata.proto import metadata_store_pb2

# ## What is InteractiveContext?
# The notebook is also used as an orchestrator to run the pipeline components manually. The InteractiveContext class is used in notebooks and lets us review the components' artifacts immediately.
#
# Once you have confirmed the full functionality of your pipeline components, you can convert your interactive pipeline into a production-ready pipeline by orchestrating it with Dataflow, Kubeflow, etc.
# +
pipeline_name = Config.PIPELINE_NAME
base_root = os.path.split(os.getcwd())[0]
pipeline_root = os.path.join(base_root, Config.PIPELINE_FOLDER)

beam_args = [
    '--runner=DirectRunner'
]

if not os.path.exists(pipeline_root):
    os.makedirs(pipeline_root)

context = InteractiveContext(pipeline_name = pipeline_name,
                             pipeline_root = pipeline_root,
                             beam_pipeline_args = beam_args)

# +
data_dir = os.path.join(root_dir, 'data', 'dataset1')
print(*os.listdir(data_dir), sep = '\n')
# -

# CsvExampleGen reads multiple CSV data files from a given directory and outputs the data in TFRecord format (the number of splits and the split ratio depend on the given configuration), which is then used by the downstream components.
#
#
# > note:
# configuring the split ratio, the number of splits and the span pattern is demonstrated in the 'Configuration Options' section

examples = external_input(data_dir)
example_gen = CsvExampleGen(input = examples)

# The cell below runs the component and shows the resulting artifact and its properties.
#
# The metadata of the run is shown in the Jupyter notebook, together with the outputs of the component, highlighting the storage locations of the training and the evaluation datasets.

context.run(example_gen)

# With its default configuration, the data ingestion component creates `train` and `eval` folders, and the data is split in a 2:1 ratio.

# + hide_input=true
example_gen_prop = example_gen.outputs['examples'].get()[0]

print('Artifact Location: ')
print(f'\t {example_gen_prop.uri}')
print()
print('Files: ')
print('\t train')
print(f'\t\t {os.listdir(os.path.join(example_gen_prop.uri, "train"))}')
print('\t eval')
print(f'\t\t {os.listdir(os.path.join(example_gen_prop.uri, "eval"))}')
# -

# ## Output of the component

# +
split_names = eval(example_gen_prop.split_names)
artifact = os.path.join(example_gen_prop.uri, split_names[0])
files = [os.path.join(artifact, i) for i in os.listdir(artifact)]

train = tf.data.TFRecordDataset(filenames = files, compression_type = 'GZIP')
# -

for data in train.take(1):
    serialized_example = data.numpy()
    example = tf.train.Example()
    example.ParseFromString(serialized_example)
    pp.pprint(example)

# ## what metadata store is for?
#
# The Metadata Store uses the following data model to record and retrieve metadata from the storage backend.
#
# - ArtifactType describes an artifact's type and its properties that are stored in the metadata store. You can register these types on-the-fly with the metadata store in code, or you can load them in the store from a serialized format. Once you register a type, its definition is available throughout the lifetime of the store.
# - An Artifact describes a specific instance of an ArtifactType, and its properties that are written to the metadata store.
# - An ExecutionType describes a type of component or step in a workflow, and its runtime parameters.
# - An Execution is a record of a component run or a step in an ML workflow and the runtime parameters. An execution can be thought of as an instance of an ExecutionType. Executions are recorded when you run an ML pipeline or step.
# - An Event is a record of the relationship between artifacts and executions. When an execution happens, events record every artifact that was used by the execution, and every artifact that was produced. These records allow for lineage tracking throughout a workflow. By looking at all events, MLMD knows what executions happened and what artifacts were created as a result. MLMD can then recurse back from any artifact to all of its upstream inputs.
# - A ContextType describes a type of conceptual group of artifacts and executions in a workflow, and its structural properties. For example: projects, pipeline runs, experiments, owners etc. # - A Context is an instance of a ContextType. It captures the shared information within the group. For example: project name, changelist commit id, experiment annotations etc. It has a user-defined unique name within its ContextType. # - An Attribution is a record of the relationship between artifacts and contexts. # - An Association is a record of the relationship between executions and contexts. # # # # For the execution tracking of the artifacts and the lineage tracking capabilities (for example, telling which model or statistics correspond to which dataset or pipeline run), we’ve to deal with Events, Contexts and Executions. # # - Events associate artifact_ids with execution_ids # - Executions only track type_ids and timestamps # - Contexts correlate type_ids with Pipeline runs and timestamp information # # The tables ExecutionProperty and ContextProperty contain extra data # - ExecutionProperties contain input and output configuration passed to each component, along with pipeline and step root directories, and IO locations of artifacts. # - ContextProperties associate context_ids with pipeline component names and timestamps # # + connection_config = context.metadata_connection_config store = metadata_store.MetadataStore(connection_config) base_dir = connection_config.sqlite.filename_uri.split('metadata.sqlite')[0] # + hide_input=false def display_properties(input): data = defaultdict(list) for artifact in input: properties = artifact.properties custom_properties = artifact.custom_properties for key, value in properties.items(): data['artifact id'].append(artifact.id) data['type_id'].append(artifact.type_id) data['name'].append(key) data['is_customproperty'].append(0) data['value'].append(value.string_value) for key, value in custom_properties.items(): data['artifact id'].append(artifact.id) data['type_id'].append(artifact.type_id) data['name'].append(key) data['is_customproperty'].append(1) data['value'].append(value.string_value) return pd.DataFrame(data) def display_types(types): table = {'id': [], 'name': []} for a_type in types: table['id'].append(a_type.id) table['name'].append(a_type.name.split('.')[-1]) return pd.DataFrame(data=table) def display_artifacts(store, artifacts): table = defaultdict(list) for a in artifacts: table['artifact id'].append(a.id) artifact_type = store.get_artifact_types_by_id([a.type_id])[0] table['type'].append(artifact_type.name) table['uri'].append(a.uri.replace(base_dir, './')) table['create_time_since_epoch'].append(a.create_time_since_epoch) table['last_update_time_since_epoch'].append(a.last_update_time_since_epoch) return pd.DataFrame(data=table) # + hide_input=false def display_context(store, artifacts): table = defaultdict(list) for a in artifacts: table['artifact id'].append(a.id) artifact_type = store.get_context_types_by_id([a.type_id])[0] table['type'].append(artifact_type.name) table['name'].append(a.name) table['create_time_since_epoch'].append(a.create_time_since_epoch) table['last_update_time_since_epoch'].append(a.last_update_time_since_epoch) return pd.DataFrame(data=table) def display_executions(store, artifacts): table = defaultdict(list) for a in artifacts: table['artifact id'].append(a.id) artifact_type = store.get_execution_types_by_id([a.type_id])[0] table['type'].append(artifact_type.name.split('.')[-1]) e_state = a.last_known_state if 
e_state == 2: table['last_known_state'].append('Running') elif e_state == 3: table['last_known_state'].append('Success') else: table['last_known_state'].append(e_state) table['create_time_since_epoch'].append(a.create_time_since_epoch) table['last_update_time_since_epoch'].append(a.last_update_time_since_epoch) return pd.DataFrame(data=table) # - # ### atrifacts Tables display_artifacts(store, store.get_artifacts()) display_types(store.get_artifact_types()) display_properties(store.get_artifacts()) # ### Contexts Tables display_context(store, store.get_contexts()) display_types(store.get_context_types()) display_properties(store.get_contexts()) # ### Executions Tables display_executions(store, store.get_executions()) display_properties(store.get_executions()) display_types(store.get_execution_types()) # ## Loding dataset from tf_records # Why TFRecord? # # If you are working with large datasets, using a binary file format for storage of your data can have a significant impact on the performance of your import pipeline and as a consequence on the training time of your model. Binary data takes up less space on disk, takes less time to copy and can be read much more efficiently from disk. This is especially true if your data is stored on spinning disks, due to the much lower read/write performance in comparison with SSDs. # # However, pure performance isn’t the only advantage of the TFRecord file format. It is optimized for use with Tensorflow in multiple ways. To start with, it makes it easy to combine multiple datasets and integrates seamlessly with the data import and preprocessing functionality provided by the library. Especially for datasets that are too large to be stored fully in memory this is an advantage as only the data that is required at the time (e.g. a batch) is loaded from disk and then processed. Another major advantage of TFRecords is that it is possible to store sequence data — for instance, a time series or word encodings — in a way that allows for very efficient and (from a coding perspective) convenient import of this type of data. # # [reference](https://www.quora.com/Is-it-especially-good-to-use-tfRecord-as-input-data-format-if-I-am-using-Keras-Tensorflow) # + from tfx.components import ImportExampleGen root_dir = os.path.split(os.getcwd())[0] data_dir = os.path.join(root_dir, 'data', 'dataset2') print(*os.listdir(data_dir), sep = '\n') # - # ImportExampleGen is used to load TFRecord files into the pipeline. # # It will make sense to load nlp data as TFRecord file were text corpora can snowball to a considerable size.To ingest such datasets efficiently, it is always recommend to converting the datasets as TFRecord or Apache Parquet representations. # # Image datasets from the image files has to be convert into TFRecord files, but # not to decode the images. Any decoding of highly compressed images only increases # the amount of disk space needed to store the intermediate tf.Example records. examples = external_input(data_dir) example_gen = ImportExampleGen(input=examples) context.run(example_gen) display_executions(store, store.get_executions()) display_properties(store.get_artifacts()) # ## Configuration Options # ### splitting # # Later in our pipeline, we will want to evaluate our machine learning model during the # training and test it during the model analysis step. Therefore, it is beneficial to split # the dataset into the required subsets. 
from tfx.proto import example_gen_pb2 # Configuring output as train, test and eval with 6:2:2 ration # The following cell is volentierly scripted to go under exception and complete the run succesfully after the exception accor. # # This is done to demonstrate the use of metadatastore when the execution of pipeline gone under some execption in production try: data_dir = os.path.join(os.pardir, "data/dataset") output = example_gen_pb2.Output( split_config=example_gen_pb2.SplitConfig(splits=[ example_gen_pb2.SplitConfig.Split(name='train', hash_buckets=6), example_gen_pb2.SplitConfig.Split(name='eval', hash_buckets=2), example_gen_pb2.SplitConfig.Split(name='test', hash_buckets=2)] )) examples = external_input(data_dir) example_gen = CsvExampleGen(input=examples, output_config=output) context.run(example_gen) except: data_dir = os.path.join(os.pardir, "data/dataset1") output = example_gen_pb2.Output( split_config=example_gen_pb2.SplitConfig(splits=[ example_gen_pb2.SplitConfig.Split(name='train', hash_buckets=6), example_gen_pb2.SplitConfig.Split(name='eval', hash_buckets=2), example_gen_pb2.SplitConfig.Split(name='test', hash_buckets=2)] )) examples = external_input(data_dir) example_gen = CsvExampleGen(input=examples, output_config=output) context.run(example_gen) folder = Config.PIPELINE_FOLDER # + magic_args="-s \"$folder\"" language="bash" # tree ../$1 # - # Exception run is marked with red color. you can find that last_known_state is marked as 'Running' using this you can trace the exception component and backtrack to find the reason for that happend # + def highlight(s): if s.last_known_state == 'Running': return ['background-color: red']*5 else: return ['background-color: white']*5 execution = display_executions(store, store.get_executions()) execution.style.apply(highlight, axis = 1) # + artifact_id = execution['artifact id'].loc[execution.last_known_state == 'Running'].values[0] def highlight(s): if s['artifact id'] == artifact_id: return ['background-color: lightblue']*5 elif s['artifact id'] == artifact_id + 1: return ['background-color: lightgreen']*5 else: return ['background-color: white']*5 execution_prop = display_properties(store.get_executions()) execution_prop = execution_prop.loc[(execution_prop['artifact id'] == artifact_id) | (execution_prop['artifact id'] == artifact_id+1)].sort_values(by=['name','artifact id']) execution_prop.style.apply(highlight, axis = 1) # - # ### If data is stored in spitted manner # # In some situations, we have already generated the subsets of the datasets externally, # and we would like to preserve these splits when we ingest the datasets. We can ach‐ # ieve this by providing an input configuration. 
# + example_gen_prop = example_gen.outputs['examples'].get()[0] shutil.copytree(example_gen_prop.uri, '../data/dataset3') # + from tfx.proto import example_gen_pb2 root_dir = os.path.split(os.getcwd())[0] data_dir = os.path.join(root_dir, 'data', 'dataset3') input = example_gen_pb2.Input(splits=[ example_gen_pb2.Input.Split(name='train', pattern='train/*'), example_gen_pb2.Input.Split(name='eval', pattern='eval/*'), example_gen_pb2.Input.Split(name='test', pattern='test/*') ]) examples = external_input(os.path.join(base_dir, data_dir)) example_gen = ImportExampleGen(input=examples, input_config=input) context.run(example_gen) # - execution_property = display_properties(store.get_executions()) execution_property.loc[execution_property['artifact id'] == max(execution_property['artifact id'])] # ### Span # # One of the significant use cases for machine learning pipelines is that we can update # our machine learning models when new data becomes available. For this scenario, # the ExampleGen component allows us to use spans. Think of a span as a snapshot of # data. Every hour, day, or week, a batch extract, transform, load (ETL) process could # make such a data snapshot and create a new span. # A span can replicate the existing data records. As shown in the following, export-1 # contains the data from the previous export-0 as well as newly created records # # We can now specify the patterns of the spans. The input configuration accepts a # {SPAN} placeholder, which represents the number (0, 1, 2, ...) shown in our folder # structure. With the input configuration, the ExampleGen component now picks up # the “latest” span. In our example, this would be the data available under folder # export-2 # + language="bash" # # mkdir -p ../data/dataset4/export-0 # mkdir -p ../data/dataset4/export-1 # mkdir -p ../data/dataset4/export-2 # # file_l_count=$(wc -l < ../data/dataset1/consumer_complaints_with_narrative.csv) # head -n $(( file_l_count/3 )) ../data/dataset1/consumer_complaints_with_narrative.csv >> ../data/dataset4/export-0/consumer_complaints_with_narrative_$(( file_l_count/3 )).csv # head -n $(( file_l_count/2)) ../data/dataset1/consumer_complaints_with_narrative.csv >> ../data/dataset4/export-1/consumer_complaints_with_narrative_$(( file_l_count/2 )).csv # cp ../data/dataset1/consumer_complaints_with_narrative.csv ../data/dataset4/export-2/consumer_complaints_with_narrative_$file_l_count.csv # # tree ../data/dataset4 # + base_dir = os.path.split(os.getcwd())[0] data_dir = os.path.join(base_dir, "data", "dataset4") input = example_gen_pb2.Input(splits=[ example_gen_pb2.Input.Split(pattern='export-{SPAN}/*') ]) examples = external_input(data_dir) example_gen = CsvExampleGen(input=examples, input_config=input) context.run(example_gen) # - execution_prperties = display_properties(store.get_executions()) temp_val = execution_prperties.loc[(execution_prperties['name'] == 'input_base') | (execution_prperties['name'] == 'span')] temp_val = temp_val.reset_index() temp_val.drop('index', axis = 1, inplace = True) temp_val = temp_val.sort_values(['artifact id', 'name']) # you can find that the span for current run is stored as 2 which means that ExampleGen component automaticaly fetched the current datafile from the given folder based on pattern configured in input_config temp_val.style.highlight_max(subset = ['value'], color = 'lightgreen', axis = 0) # ## Add-ons # ### Ingesting Data from avro or parquest file format # # #### from Avro-serialized data # # ``` # from tfx.components import 
FileBasedExampleGen
# from tfx.components.example_gen.custom_executors import avro_executor
# from tfx.utils.dsl_utils import external_input
# examples = external_input(avro_dir_path)
#
# example_gen = FileBasedExampleGen(
#     input=examples,
#     executor_class=avro_executor.Executor)
# ```
#
# #### from Parquet-serialized data
#
# ```
# from tfx.components.example_gen.custom_executors import parquet_executor
# example_gen = FileBasedExampleGen(
#     input=examples,
#     executor_class=parquet_executor.Executor)
# ```
# ### Ingesting Data from a Database
#
# #### from BigQuery
# ```
# from tfx.components import BigQueryExampleGen
# query = """
# SELECT * FROM `<project_id>.<database>.<table_name>`
# """
# example_gen = BigQueryExampleGen(query=query)
# ```
#
# >Note:
# In TFX versions greater than 0.22.0, the BigQueryExampleGen
# component needs to be imported from tfx.extensions.google_cloud_big_query:
# >```
# from tfx.extensions.google_cloud_big_query.example_gen import component as big_query_example_gen_component
# big_query_example_gen_component.BigQueryExampleGen(query=query)
# >```
#
# #### from Presto
# ```
# from proto import presto_config_pb2
# from presto_component.component import PrestoExampleGen
#
# query = """
# SELECT * FROM `<project_id>.<database>.<table_name>`
# """
# presto_config = presto_config_pb2.PrestoConnConfig(
#     host='localhost',
#     port=8080)
# example_gen = PrestoExampleGen(presto_config, query=query)
# ```
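# ### Converting an image folder to TFRecord files
#
# The note earlier in this notebook recommends storing image datasets as TFRecord files containing the still-encoded image bytes, without decoding them. A minimal sketch of such a conversion — `image_dir`, the output filename and the constant label are assumptions for illustration, not part of this project:
#
# ```
# import os
# import tensorflow as tf
#
# def image_to_example(path, label):
#     # Keep the encoded bytes as-is; do not decode the image.
#     image_bytes = tf.io.gfile.GFile(path, 'rb').read()
#     return tf.train.Example(features=tf.train.Features(feature={
#         'image_raw': tf.train.Feature(bytes_list=tf.train.BytesList(value=[image_bytes])),
#         'label': tf.train.Feature(int64_list=tf.train.Int64List(value=[label])),
#     }))
#
# with tf.io.TFRecordWriter('images.tfrecord') as writer:
#     for fname in os.listdir(image_dir):
#         example = image_to_example(os.path.join(image_dir, fname), label=0)
#         writer.write(example.SerializeToString())
# ```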
notebooks/Data Ingestion and mldatastore.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Setup # cd .. # + import torch.optim as optim import torch.nn as nn import torch import matplotlib.pyplot as plt import numpy as np import random import pickle import cv2 import time from neural_nets_library import training from tree_to_sequence.tree_decoder_batch import TreeDecoderBatch from tree_to_sequence.tree_to_tree_attention import TreeToTreeAttention from tree_to_sequence.tree_to_tree_attention_batch import TreeToTreeAttentionBatch from tree_to_sequence.program_datasets import * from functools import partial from math_expressions.translating_math_trees import math_tokens_short as math_tokens from tree_to_sequence.translating_trees import pretty_print_tree # - np.random.seed(3) torch.cuda.set_device(1) use_cuda = True image_width = 40 image_height = 64 one_hot = False binarize_output = True eos_token = True long_base_case = True output_as_seq = False num_samples = None max_num_children = 2 if binarize_output else 3 batch_size = 32 normalize_input = True # ## Make Dataset # + def split_dataset(data, split): all_trees = [] for img, tree in data: if not tree in all_trees: all_trees.append(tree) split_cutoff = int(len(all_trees) * split) first_split = all_trees[:split_cutoff] second_split = all_trees[split_cutoff:] first_data = [(img, tree) for img, tree in data if tree in first_split] second_data = [(img, tree) for img, tree in data if tree in second_split] return first_data, second_data def make_dset(data): return MathExprDatasetBatched(program_pairs=data, batch_size=batch_size, binarize_output=binarize_output, max_children_output=max_num_children, eos_token=eos_token, normalize=normalize_input, trim_factor=1, left_align=False, num_samples=num_samples) # + start = time. 
time() # Split into train/val/test sets # test_data = load_shuffled_data("math_expressions/test_data_short.pkl") all_data = load_shuffled_data("math_expressions/train_data_short.pkl") train_cutoff = 0.7 train_data, val_data = split_dataset(all_data, train_cutoff) print("Train set size: ", len(train_data)) print("Val set size: ", len(val_data)) # print("Test set size: ", len(test_data)) train_dset = make_dset(train_data) val_dset = make_dset(val_data) # test_dset = make_dset(test_data) max_size = max([tree.size() for batch in train_dset for tree in batch[1]]) print("max size", max_size) print(len(train_dset)) end = time.time() print("dataset generation took: seconds", end - start) # - # ## Visualizations def display_normally(pic, title=None): if not title is None: plt.title(title) plt.imshow(np.repeat(np.int0(pic)[:,:,np.newaxis]*255, 3, axis=2)) plt.show() # import matplotlib.pyplot as plt plt.rcParams['figure.figsize'] = [10, 10] # Print the dset for batched_img, batched_trees in train_dset[1:2]: for i in range(10): img = batched_img[i] tree = batched_trees[i] display_normally(img[0]) # plt.hist(img[0]) # plt.show() print("Img shape", img.shape) print("MIN", torch.min(img)) print("MAX", torch.max(img)) # pretty_print_tree(tree, math_tokens) # ## Make Model # + class ImageEncoder(nn.Module): def __init__(self, nchannels, nhidden, num_layers, num_cnn_layers, attention=True): super(ImageEncoder, self).__init__() # self.core = nn.Sequential(CNN_Sequence_Extractor(nchannels, num_cnn_layers), nn.LSTM(512, nhidden, num_layers, bidirectional=True)) self.core = nn.Sequential(CNN_Sequence_Extractor(nchannels, num_cnn_layers)) self.lstm = nn.LSTM(512, nhidden, num_layers, bidirectional=True) self.register_buffer('reverse_indices', torch.LongTensor(range(1, num_layers*2, 2))) self.attention = attention def forward(self, input, widths=None, training=True): # print("before core") # get_gpu_memory_map() # output, (all_hiddens, all_cell_state) = self.core(input) first_output = self.core(input) # print("after core") # get_gpu_memory_map() output, (all_hiddens, all_cell_state) = self.lstm(first_output) # print("after lstm") # get_gpu_memory_map() if widths is not None: output = nn.utils.rnn.pad_packed_sequence(output) forward_hiddens = all_hiddens.index_select(0, self.reverse_indices - 1) reverse_hiddens = all_hiddens.index_select(0, self.reverse_indices) #TODO: does this need a gradient del all_hiddens hiddens = torch.cat([forward_hiddens, reverse_hiddens], 2) del forward_hiddens del reverse_hiddens forward_cell_state = all_cell_state.index_select(0, self.reverse_indices - 1) reverse_cell_state = all_cell_state.index_select(0, self.reverse_indices) #TODO: does this need a gradient del all_cell_state cell_state = torch.cat([forward_cell_state, reverse_cell_state], 2) del forward_cell_state del reverse_cell_state if self.attention: if training: # get_gpu_memory_map() return output, hiddens.squeeze(0), cell_state.squeeze(0) # TODO: This is here b/c currently training is batched but testing isn't. Someday we should fix this return output.squeeze(1), hiddens.squeeze(0), cell_state.squeeze(0) else: return reverse_hiddens class CNN_Sequence_Extractor(nn.Module): def __init__(self, nchannels, num_layers, leakyRelu=False): super(CNN_Sequence_Extractor, self).__init__() assert(num_layers >= 7) # Size of the kernel (image filter) for each convolutional layer. ks = [3] * (num_layers - 1) + [2] # Amount of zero-padding for each convoutional layer. 
ps = [1] * (num_layers - 1) + [0] # The stride for each convolutional layer. The list elements are of the form (height stride, width stride). ss = [(2,2), (3,2)] + [(2,1) if i % 2 else (3,1) for i in range(num_layers - 2)] # Number of channels in each convolutional layer. nm = [64, 128, 245, 256] + [512] * (num_layers - 4) # Initializing the container for the modules that make up the neural network the neurel netowrk. cnn = [] # Represents a convolutional layer. The input paramter i signals that this is the ith convolutional layer. The user also has the option to set batchNormalization to True which will perform a batch normalization on the image after it has undergone a convoltuional pass. There is no output but this function adds the convolutional layer module created here to the sequential container, cnn. def convRelu(i, batchNormalization=False): nIn = nchannels if i == 0 else nm[i - 1] nOut = nm[i] cnn.append(nn.Conv2d(nIn, nOut, ks[i], ss[i], ps[i])) if batchNormalization: cnn.append(nn.BatchNorm2d(nOut)) else: cnn.append(nn.ReLU(True)) batch_norm_on = True # Creating the 7 convolutional layers for the model. convRelu(0) convRelu(1) convRelu(2, batch_norm_on) convRelu(3) convRelu(4, batch_norm_on) convRelu(5) convRelu(6, batch_norm_on) for layer in cnn: layer.cuda() self.cnn = cnn def forward(self, input, widths=None): for i, layer in enumerate(self.cnn): input = layer(input) # print("layer", i, type(layer)) # get_gpu_memory_map() output = input _, _, h, _ = output.size() assert h == 1, "the height of conv must be 1" output = output.squeeze(2) # [b, c, w] output = output.permute(2, 0, 1) #[w, b, c] if widths is not None: sorted_widths, idx = widths.sort(descending=True) output = output.index_select(1, idx) output = nn.utils.pack_padded_sequence(output, sorted_widths / 4) return output # - def reset_all_parameters_uniform(model, stdev): for param in model.parameters(): nn.init.uniform_(param, -stdev, stdev) # + eos_bonus = 1 if eos_token else 0 nclass = len(math_tokens) + 26*2 + 10 # TODO: FIGURE THIS OUT plot_every = 100 hidden_size = 64 #128 #256 embedding_size = 100 alignment_size = 50 n_channels = 1 num_layers = 1 # TODO: Later consider making this work for num_layers > 1 align_type = 1 num_cnn_layers = 7 encoder = ImageEncoder(n_channels, hidden_size, num_layers, num_cnn_layers, attention=True) decoder = TreeDecoderBatch(embedding_size, hidden_size*2, max_num_children, nclass=nclass) program_model = TreeToTreeAttentionBatch(encoder, decoder, hidden_size * 2, embedding_size, nclass=nclass, max_size=max_size, alignment_size=alignment_size, align_type=align_type, use_cuda=use_cuda) reset_all_parameters_uniform(program_model, 0.1) decoder.initialize_forget_bias(3) # - if use_cuda: program_model = program_model.cuda() # ## Training Setup optimizer = optim.Adam(program_model.parameters(), lr=0.001) #0.001 lr_scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, verbose=True, patience=500, factor=0.8) # + # Counts the number of matches between the prediction and target. 
def count_matches(prediction, target): matches = 0 if int(prediction.value) == int(target.value): matches += 1 for i in range(min(len(target.children), len(prediction.children))): matches += count_matches(prediction.children[i], target.children[i]) return matches # Program accuracy (1 if completely correct, 0 otherwise) def program_accuracy(prediction, target): target = target[0] if prediction.size() == count_matches(prediction, target) and \ prediction.size() == target.size(): return 1 else: return 0 # Calculate validation accuracy (this could either be program or token accuracy) def validation_criterion(prediction, target): return program_accuracy(prediction, target) # - program_model = torch.load("math_expressions/models/larger_batch_model") # + import subprocess def get_gpu_memory_map(): """Get the current gpu usage. Returns ------- usage: dict Keys are device ids as integers. Values are memory usage as integers in MB. """ result = subprocess.check_output( [ 'nvidia-smi', '--query-gpu=memory.used', '--format=csv,nounits,noheader' ], encoding='utf-8') # Convert lines into a dictionary gpu_memory = [int(x) for x in result.strip().split('\n')] gpu_memory_map = dict(zip(range(len(gpu_memory)), gpu_memory)) print("Current usage: %i of 11178" % gpu_memory_map[1]) # return gpu_memory_map get_gpu_memory_map() # - # ## Train # + best_model, train_plot_losses, train_plot_accuracies, _, _ = training.train_model_tree_to_tree( program_model, train_dset, optimizer, lr_scheduler=None, num_epochs=50, plot_every=plot_every, batch_size=1, print_every=50, validation_criterion=validation_criterion, validation_dset=val_dset, save_folder ="math_expressions/models", save_file="retrained_larger_batch", use_cuda=use_cuda, skip_output_cuda=False, tokens=math_tokens, save_current_only=True, input_tree_form=False) # - # ## Visualize Results # + plt.plot([x * plot_every for x in range(len(train_plot_losses))], train_plot_losses) plt.title("Loss") plt.show() plt.plot([x * plot_every for x in range(len(train_plot_accuracies))], train_plot_accuracies) plt.title("Accuracy") plt.show() # - img, tree = train_dset[0] tree = tree[0] pretty_print_tree(tree) pretty_print_tree(tree, math_tokens) # Check errors def view_errors(dataset, num): for input_tree, target_tree in dataset[:num]: input_tree = input_tree.cuda() target_tree = [actual_tree.cuda() for actual_tree in target_tree] program_model.eval() program_model.print_img_tree_example(input_tree, target_tree, math_tokens) view_errors(train_dset, 5) view_errors(val_dset, 5) # ## Test def cudafy_pair(pair): img_cuda = pair[0].cuda() tree_cuda = [tree.cuda() for tree in pair[1]] return (img_cuda, tree_cuda) # + val_dset_cuda = [cudafy_pair(pair) for pair in val_dset] program_model.eval() acc = training.test_model_tree_to_tree(program_model, val_dset_cuda, validation_criterion, use_cuda=False) print("accuracy", acc) # + train_dset_cuda = [cudafy_pair(pair) for pair in train_dset] program_model.eval() acc = training.test_model_tree_to_tree(program_model, train_dset_cuda, validation_criterion, use_cuda=False) print("accuracy", acc)
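# -
# The validation criterion above notes that token-level accuracy is an alternative to whole-program accuracy. A minimal sketch, reusing `count_matches`; this helper is an assumption for illustration and is not used by the training runs above:
# +
# Token accuracy: fraction of target nodes reproduced at the same tree position.
def token_accuracy(prediction, target):
    target = target[0]
    return count_matches(prediction, target) / target.size()
# -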
math_expressions/MathExprModel.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Test version of the soil exercise NGEN16: simple diffusion model. # Course: NGEN16-2019 Code: Python</p> # Author: <NAME> # This is a test version of a Jupyter notebook to present some principles of diffusion of $CO_2$ in the soil. The exercise also introduces Jupyter Notebook as an working environment. This notebook combines (explaining) text with coding cells that contain the simulation model. Actually, notebooks can be used for data analysis and eg. shared between users to work together. Think that it can also be used to publish data analysis or to present a 'work-report' for an assignment (see an introduction to the use in [Shen, 2014](https://www.nature.com/news/interactive-notebooks-sharing-the-code-1.16261)). # In this presentation we use the programming language Python which is the default for Jupyter notebooks. # <br> # The background of the diffusion model of the production and transport of $CO_2$ is described in the exercise document. This notebooks goes stepwise through the simulation and presents the results in graphs. You can use it and for example change settings to see what the effect is in the resuls. As described in the exercise document, the notebooks presents three sub-sections: first the simulation of soil temperature, then the simulation of $CO_2$ production and finally the calculation of flux between the soil layers, the $CO_2$ concentration in each layer and the efflux of $CO_2$ out of the soil. # # A programming language makes use of function libraries. So we start the calculation model with including some libraries for eg function as sinus and functions for drawing the figures. Also we define here the figure legend and line colors. As you can see we will make use of 8 layers in the top soil, including the surface # %matplotlib inline import numpy as np from matplotlib import pyplot as plt plt.style.use('classic') import numpy as np lines=['Surface','Layer1','Layer2','Layer3','Layer4','Layer5','Layer6','Layer7'] lincolor=['black','red', 'blue', 'orange','black','red', 'blue', 'green'] # <p><b> Soil temperature </b></p> # $CO_2$ production and flux are depending on soil temperature. We do not have actual measurements of the temperature for each layer, so we will estimate the temperature fluctuations following a sinus function around a constant average temperature with a given amplitude. We define a period of 10 days and a timestep of 300 s. # First we declare a number of variables and the midpoint of each layer in m below surface (below is expressed with the minus) TimeStep=300 #length of time step in seconds Day=10 #Number of days SecDay=86400 #Number of seconds in a day TotTime=Day*SecDay #Total number of seconds over 10 days TotSteps=int(TotTime/TimeStep)# Number of timesteps over the period defined as integer TempAvg=10 #average temperature over the 10 days A0=8 #Pre-set artificial amplitude in the temperature from average w=2*np.pi/SecDay a=1.5E-6 zd=np.sqrt(2*a/w) #damping depth z=[0.0,-0.05,-0.15,-0.25,-0.35,-0.45,-0.55,-0.65] #Midpoint of each 'soillayer' # The results will be stored as arrays. Here we define already arrays for the temperture and the produced CO2 (the second variable is for total production over all layers). Each array is a double array with layer and time step. Time is set to zero. 
t=0.0 #Time is set to zero layers= range (8) Temp=np.zeros((8,TotSteps)) SProd=np.zeros((8,TotSteps)) SProdTot=np.zeros(TotSteps) # In the next code section the temperature is calculated in a 'for' loop: with counter k going from 0 to 'Total Timestep' the temperature is calculated for each layer (i=layer, k=TimeStep). Compare with function in exercise text: for k in range(0,TotSteps): #wfor k is zero to TotSteps: for i in layers: # the temperature function Temp[i,k]=TempAvg+A0*np.exp(z[i]/zd)*np.sin(w*t+z[i]/zd) t=t+TimeStep # The result is plotted in figure 1: # + fig1=plt.figure(figsize=(12,8)) ax=plt.axes() plt.rc('lines', linewidth=2) for i in layers: plt.plot(Temp[i,:], color=lincolor[i], label=lines[i]) plt.legend(fontsize=10) plt.title("Soil temperature per layer") plt.ylabel("Temperature (°C)") plt.xlabel("Time step") ax.tick_params(axis='both', which='major', direction='out', labelsize=10) # - # <p><b>$CO_2$-production</b></p> Check the function for production of $CO_2$ in the exercise text. In the next code block the variables of the equation are given, as well as the SOM and root content of each layer (defined in an array) #Application of CO2 production based on Michaelis-Menten temperature function kSom=3.85e-6 #Decomposition rate organic matter mg g-1 s-1 kRoots=4.3e-5 #Respiration rate of roots mg g-1 s-1 energy=7.93E4 #Activation energy kJ mol-1 r=8.314 #Gas constant J K-1 mol-1 mSOM=[0, 3800, 2600, 1400, 500, 200, 100, 0] #Organic matter content in each layer in mg m-3 mRoots=[ 0, 578, 163, 58, 37, 5, 0, 0] #Root content in each layer in mg m-3 # In the following block the $CO_2$ production from roots and SOM per layer for each time step is calculated. The Michaelis Menten temperature part of the function is taken in two steps, just to read the equation more easily. The block after that presents the resulting figure. # + for k in range(TotSteps): for i in layers: # we split the temperature part into sections TempFrac1=energy/(r*(Temp[i,k]+273.15)) #Temperature from Celsius to Kelvin TempFrac2=((Temp[i,k]+273.15)-283.15)/283.15 TempFrac=np.exp(TempFrac1*TempFrac2) #Combine it into the Michaelis-Menten based equation for CO2 production from respiration and decomposition SProd[i,k]=(kRoots*mRoots[i]+kSom*mSOM[i])*TempFrac # Sum the production per layer to total production over all layers. #Production is per m3, so divided by 10 for a layer of 10 cm SProdTot[k]=SProdTot[k]+(SProd[i,k]/10) # - # Plot the resulting production per layer in figure 2: # + fig2=plt.figure(figsize=(12,8)) ax=plt.axes() plt.rc('lines', linewidth=2) for i in layers: plt.plot(SProd[i,:], color=lincolor[i], label=lines[i]) plt.legend(fontsize=10) ax.set_title('CO2 production per layer mg m-3 s-1') ax.set_ylabel('CO2 production in mg/m3') ax.set_xlabel('Time step') ax.tick_params(axis='both', which='major', labelsize=10) # - # <p><b>$CO_2$-flux</b></p>Next step is to calculate the $CO_2$ concentration per layer for each timestep. The concentration is the result of the concentration present from previous timestep, the production of $CO_2$ and the net flux out or in the layer from other layers during the actual timestep. First we calculate the diffusion coefficent for each layer depending on temperature and we define a number of start settings on $CO_2$ concentration: starting value at t=0 and keep contant concentration at the surface. 
# + #Settings for the CO2 concentration and flux calculation CO2= np.zeros((8,TotSteps)) #Definition CO2 concentration variable DiffCoeffSoil=np.zeros((8,TotSteps)) #Definition diffusion coefficient CO2 in soil depending on temperature and tortuosity # Estimation of diffusion coefficient for each layer and timestep DCO2Air=1.39e-5 #Diffusion coeff for CO2 in air AFPorosity=0.1515 #Porosity is set to 15.15% for k in range(TotSteps): #Calculate Diffusion coeff for each layer DiffCoeffSoil[0,k]=DCO2Air*(((Temp[0,k]+273.15)/273.15)**1.75) for i in range(1,8): DiffCoeffSoil[i,k]=0.66*AFPorosity*DCO2Air*(((Temp[i,k]+273.15)/273.15)**1.75) #Setting ambient concentration at surface to a constant value at 695 mg/m3 for k in range(TotSteps): CO2[0,k]=695 # Initial concentration is set to 695 mg/m3 for the first timestep in all layers for i in layers: CO2[i,0]=695 # - # In the next cell Fick's law for diffusion is applied. First the two average diffusion coefficients between the three mid-points of the adjoining layers (i-1, i and i+1) is estimated. Then the concentration differences between the layers and vertical distance. Then Fick's law gives the net flux of the layer with the two adjoining layers (FluxLayer). The deepest layer has the lower boundary as is this calculated separate. To present the net flux into the atmosphere, the flux from the top soil into the air layer is calculated once more. # + FluxLayer=np.zeros((8,TotSteps)) for k in range(1,TotSteps): for i in range(1,7): # for first soil layer to soillayer 6: last layer is treated separate due to boundary #First calculation of average diffusion coeff between the layers in this step, note approximation by division of 2 Dtm1=(DiffCoeffSoil[i,k]+DiffCoeffSoil[i-1,k])/2 #Average of Diff coeff between layer i and layer above i-1 Dtm2=(DiffCoeffSoil[i,k]+DiffCoeffSoil[i+1,k])/2 #Average of Diff coeff between layer i and layer below i+1 #Then calculation of concentration differences and thickness of layers (although set constant to 0.1 m) C1=CO2[i-1,k-1]-CO2[i,k-1] #difference in concentration layer i-1 and i C2=CO2[i,k-1]-CO2[i+1,k-1] #difference in concentration layer i and i+1 Z1=z[i-1]-z[i] # difference in depth between layers i-1 and i Z2=z[i]-z[i+1] # difference in depth between layers i and i+1 #Estimation of flux between layers from concentration differences of previous timestep #Net flux to/from the layer is the sum of the two fluxes from top and bottom of layer #Flux from Fick's law: diffusion coeff times ratio dc/dz; minus sign is for correct direction of flux FluxLayer[i,k]=-Dtm1*(C1/Z1)+Dtm2*(C2/Z2) # for lowest soillayer Dtm1=(DiffCoeffSoil[7,k]+DiffCoeffSoil[6,k])/2 FluxLayer[7,k]=-Dtm1*((CO2[6,k-1]-CO2[7,k-1])/(z[6]-z[7])) for i in range(1,8): # for all soil layers the concentration for actual timestep with assumption that # dz is set 10 cm just for now CO2[i,k]=CO2[i,k-1]-(FluxLayer[i,k]/0.1)*TimeStep + (SProd[i,k]*TimeStep) # estimation of net efflux from the top soil into air-layer Dtm1=(DiffCoeffSoil[0,k]+DiffCoeffSoil[1,k])/2 FluxLayer[1,k]=-Dtm1*((CO2[0,k-1]-CO2[1,k-1])/(z[0]-z[1])) # - # Results are presented in figure 3 and 4: $CO_2$ concentration per layer and net flux from the soil. 
# +
fig3=plt.figure(figsize=(12,8))
ax=plt.axes()
for i in layers:
    plt.plot(CO2[i,:], color=lincolor[i], label=lines[i])
plt.legend(fontsize=10)
ax.set_title("CO2 concentration per layer (mg m-3)")
ax.set_ylabel('CO2 concentration in mg/m3')
ax.set_xlabel('Time step')
ax.tick_params(axis='both', which='major', labelsize=10)
# -
# For comparison, the total production over all layers is given together with the net flux from the soil.
fig4=plt.figure(figsize=(12,8))
ax=plt.axes()
plt.plot(FluxLayer[1,:], color='Black', label='Surface Flux')
plt.plot(SProdTot[:], color='Red', label='Total production')
plt.legend()
ax.set_title("CO2 surface flux")
ax.set_ylabel("CO2 flux or production (mg m-2 s-1)")
ax.set_xlabel("Time step")
ax.tick_params(axis='both', which='major', labelsize=10)
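# As a quick consistency check, the cumulative surface efflux can be compared with the cumulative production over the whole period; the gap between the two curves corresponds to the CO2 that accumulates in (or is released from) the soil air during the simulation. A minimal sketch, assuming the sign convention above (positive flux = efflux to the atmosphere):
# +
# Cumulative CO2 leaving the soil vs. cumulative CO2 produced (both in mg m-2)
cum_efflux = np.cumsum(FluxLayer[1,:]) * TimeStep
cum_prod = np.cumsum(SProdTot) * TimeStep

fig5=plt.figure(figsize=(12,8))
ax=plt.axes()
plt.plot(cum_efflux, color='Black', label='Cumulative surface flux')
plt.plot(cum_prod, color='Red', label='Cumulative production')
plt.legend()
ax.set_title("Cumulative CO2 efflux and production")
ax.set_ylabel("Cumulative CO2 in mg m-2")
ax.set_xlabel("Time step")
ax.tick_params(axis='both', which='major', labelsize=10)
# -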
education/MSc_BSc/soil_exercise_ngen16/TestSoilT.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="Sz3j5dlzYWEM" colab_type="text" # # Clique Cover # # In graph theory, a clique cover or partition into cliques of a given undirected graph is a partition of the vertices of the graph into cliques, subsets of vertices within which every two vertices are adjacent. A minimum clique cover is a clique cover that uses as few cliques as possible. The minimum k for which a clique cover exists is called the clique cover number of the given graph. # # https://en.wikipedia.org/wiki/Clique_cover # + [markdown] id="FPWEQpF3YWEN" colab_type="text" # # Cost Function # When we color vertex $v$ with color $i$ we show using qubit as $x_{v,i}$. # Now we have the cost function # # $ \displaystyle H = A \sum_v \left( 1 - \sum_{i = 1}^n x_{v,i} \right)^2 + B \sum_{i=1}^n \left[ \frac {1}{2} \left( -1 + \sum_v x_{v,i} \right) \sum_v x_{v,i} - \sum_{(uv) \in E} x_{u,i}x_{v.i} \right]$ # # Expand it and we have, # # $ \displaystyle H = A \sum_v \left\{ -2 \sum_{i=1}^n x_{v,i} + \left(\sum_{i=1}^n x_{v,i}\right)^2 \right\} + B \sum_{i=1}^n \left\{ -\frac{1}{2} \sum_v x_{v,i} + \frac{1}{2}\left( \sum_v x_{v,i}\right)^2 - \sum_{(u,v) \in E} x_{u,i}x_{v,i}\right\}+ Const. $ # $ \displaystyle = A \sum_v \left( -2 \sum_{i=1}^n x_{v,i} + \sum_{i=1}^n x_{v,i}^2 + 2\mathop{ \sum \sum }_{i \neq j }^{n} x_{v,i}x_{v,j} \right) + B \sum_{i=1}^n \left\{ \frac{1}{2} \left(-\sum_v x_{v,i} + \sum_v x_{v,i}^2 + \mathop{\sum \sum}_{u \neq v}^{n} x_{u,i}x_{v,i} \right) - \sum_{(u,v) \in E} x_{u,i}x_{v,i}\right\}+ Const. $ # $ \displaystyle = A \sum_v \left( - \sum_{i=1}^n x_{v,i}^2 + 2\mathop { \sum \sum }_{i \neq j }^{n} x_{v,i}x_{v,j} \right) + B \sum_{i=1}^n \left( \frac{1}{2} \mathop{\sum \sum}_{u \neq v}^{n}x_{u,i}x_{v,i} - \sum_{(u,v) \in E} x_{u,i}x_{v,i}\right)+ Const. 
$ # + [markdown] id="-P_h2fICYWEO" colab_type="text" # # Solving QUBO # + id="E9k5tuN7Zd0C" colab_type="code" colab={} # !pip install blueqat # + id="biPn2HdLYWEP" colab_type="code" colab={} import blueqat.opt as wq import numpy as np def get_qubo(adjacency_matrix, n_color, A, B): graph_size = len(adjacency_matrix) qubo_size = graph_size * n_color qubo = np.zeros((qubo_size, qubo_size)) indices = [(u,v,i,j) for u in range(graph_size) for v in range(graph_size) for i in range(n_color) for j in range(n_color)] for u,v,i,j in indices: ui = u * n_color + i vj = v * n_color + j if ui > vj: continue if ui == vj: qubo[ui][vj] -= A if u == v and i != j: qubo[ui][vj] += A * 2 if u != v and i == j: qubo[ui][vj] += B * 0.5 if adjacency_matrix[u][v] > 0: qubo[ui][vj] -= B return qubo # + id="bXHjnVQMYWES" colab_type="code" colab={} def show_answer(q, graph_size, n_color): print(q) for v in range(graph_size): color = [] for i in range(n_color): index = v * n_color + i if q[index] > 0: color.append(i) print(f"vertex{v}'s color is {color}") # + id="2k9isCHQYWEV" colab_type="code" colab={} def calculate_H(q, adjacency_matrix, n_color, A, B): graph_size = len(adjacency_matrix) h_a = calculate_H_A(q, graph_size, n_color, A) h_b = calculate_H_B(q, adjacency_matrix, n_color, B) print(f"H = {h_a + h_b}") return h_a + h_b def calculate_H_A(q, graph_size, n_color, A): hamiltonian = 0 for v in range(graph_size): sum_x = 0 for i in range(n_color): index = v * n_color + i sum_x += q[index] hamiltonian += (1 - sum_x) ** 2 hamiltonian *= A print(f"H_A = {hamiltonian}") return hamiltonian def calculate_H_B(q, adjacency_matrix, n_color, B): graph_size = len(adjacency_matrix) hamiltonian = 0 for i in range(n_color): sum_x = 0 for v in range(graph_size): vi = v * n_color + i sum_x += q[vi] for u in range(graph_size): if u >= v: continue ui = u * n_color + i hamiltonian -= adjacency_matrix[u][v] * q[ui] * q[vi] hamiltonian += 0.5 * (-1 + sum_x) * sum_x hamiltonian *= B print(f"H_B = {hamiltonian}") return hamiltonian # + [markdown] id="jfxYM3GMYWEa" colab_type="text" # This time we have an example like below, # # ![005.png](https://github.com/mdrft/Wildqat/blob/master/examples_ja/img/005.png?raw=1) # + id="pTcc263zYWEc" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1717} outputId="e2650336-9354-43e5-fcb7-d324f785eeaa" adjacency_matrix = \ [ \ [0,1,1,0,0], \ [1,0,1,1,1], \ [1,1,0,1,0], \ [0,1,1,0,1], \ [0,1,0,1,0], \ ] n_color = 2 A = 0.1 B = 0.1 annealer = wq.opt() annealer.qubo = get_qubo(adjacency_matrix, n_color, A, B) for _ in range(10): q = annealer.sa() calculate_H(q, adjacency_matrix, n_color, A, B) show_answer(q, len(adjacency_matrix), n_color) print() # + [markdown] id="2L8nX_R9YWEi" colab_type="text" # When we have $H = 0$ these as answers.
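# + [markdown]
# Independently of the Hamiltonian value, a sampled solution can be checked directly against the adjacency matrix: every vertex must receive exactly one color, and every color class must form a clique. A minimal sketch of such a check (the helper name is an assumption, and `q` is the last sample returned by `annealer.sa()` above):
# +
def is_valid_clique_cover(q, adjacency_matrix, n_color):
    graph_size = len(adjacency_matrix)
    # Every vertex gets exactly one color.
    for v in range(graph_size):
        if sum(q[v * n_color + i] for i in range(n_color)) != 1:
            return False
    # Every pair of vertices sharing a color must be adjacent.
    for i in range(n_color):
        members = [v for v in range(graph_size) if q[v * n_color + i] > 0]
        for a in range(len(members)):
            for b in range(a + 1, len(members)):
                if adjacency_matrix[members[a]][members[b]] == 0:
                    return False
    return True

print(is_valid_clique_cover(q, adjacency_matrix, n_color))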
tutorial/305_cliquecover_en.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Environment (conda_datatest) # language: python # name: conda_datatest # --- # + [markdown] slideshow={"slide_type": "slide"} # # Data Checks # + [markdown] slideshow={"slide_type": "fragment"} # - Schema checks: # - Making sure that only the columns that are expected are provided. # - Making sure the data types are correct: `str`/`object`, `int`, `float32`, `float64`, `datetime`. # + [markdown] slideshow={"slide_type": "subslide"} # - Datum checks: # - Looking for missing values # - Ensuring that expected value ranges are correct # + [markdown] slideshow={"slide_type": "fragment"} # - Statistical checks: # - Visual check of data distributions. # - Correlations between columns. # - Statistical distribution checks. # + [markdown] slideshow={"slide_type": "slide"} # # Schema Checks # # Schema checks are all about making sure that the data columns that you want to have are all present, and that they have the expecte data types. # # We're going to use a few datasets from Boston's open data repository. Let's first take a look at Boston's annual budget data. # + slideshow={"slide_type": "subslide"} # %load_ext autoreload # %autoreload 2 # %matplotlib inline # %config InlineBackend.figure_format = 'retina' # + [markdown] slideshow={"slide_type": "subslide"} # ## A bit of basic `pandas` # + slideshow={"slide_type": "fragment"} import pandas as pd df = pd.read_csv('data/boston_budget.csv') df.head() # + [markdown] slideshow={"slide_type": "subslide"} # To get the columns of a DataFrame object `df`, call `df.columns`. This is a list-like object that can be iterated over. # + slideshow={"slide_type": "fragment"} df.columns # + [markdown] slideshow={"slide_type": "subslide"} # ## YAML Files # + [markdown] slideshow={"slide_type": "fragment"} # Describe data in a human-friendly & computer-readable format. # + [markdown] slideshow={"slide_type": "fragment"} # Structure: # # ```yaml # key1: value # key2: # - value1 # - value2 # - subkey1: # - value3 # ``` # + [markdown] slideshow={"slide_type": "subslide"} # Example YAML-formatted schema: # # ```yaml # filename: boston_budget.csv # columns: # - "Fiscal Year" # - "Service (cabinet)" # - "Department" # - "Program #" # ... # - "Fund" # - "Amount" # #``` # + [markdown] slideshow={"slide_type": "subslide"} # YAML-formatted text can be read as dictionaries. # + slideshow={"slide_type": "-"} spec = """ filename: boston_budget.csv columns: - "Fiscal Year" - "Service (Cabinet)" - "Department" - "Program #" - "Program" - "Expense Type" - "ACCT #" - "Expense Category (Account)" - "Fund" - "Amount" """ # + slideshow={"slide_type": "subslide"} import yaml metadata = yaml.load(spec) metadata # + [markdown] slideshow={"slide_type": "subslide"} # By having things YAML formatted, you preserve human-readability and computer-readability simultaneously. # + [markdown] slideshow={"slide_type": "subslide"} # Let's now switch roles, and pretend that we're on side of the "analyst" and are no longer the "data provider". # # How would you check that the columns match the spec? Basically, check that every element in `df.columns` is present inside the `metadata['columns']` list. 
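# + [markdown] slideshow={"slide_type": "subslide"}
# The next cell checks one direction: every column in the data appears in the spec. The reverse direction — every column promised by the spec actually shows up in the data — is worth checking as well. A minimal sketch, assuming the same `df` and `metadata` objects:
#
# ```python
# for col in metadata['columns']:
#     try:
#         assert col in df.columns
#     except AssertionError:
#         print(f'"{col}" in metadata but missing from data')
# ```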
# + slideshow={"slide_type": "subslide"} for col in df.columns: # print(col) try: assert col in metadata['columns'] except AssertionError: print(f'"{col}" not in metadata columns') # + [markdown] slideshow={"slide_type": "subslide"} # If there is even a slight mis-spelling, this kind of check will help you pinpoint where that is. Note how the "Amount" column is spelled with an extra space. Where would be the most human-oriented place to correct this? At the data provider stage. # + [markdown] slideshow={"slide_type": "subslide"} # ## Exercise # # Encode the aforementioned test into a test function named `test_data_columns`. It should only be concerned with the Boston Budget dataset, and should only test whether the columns match the YAML spec. # + slideshow={"slide_type": "subslide"} # Copy to test_datafuncs.py import yaml import pandas as pd def read_metadata(handle): with open(handle, 'r+') as f: metadata_str = ''.join(l for l in f.readlines()) return yaml.load(metadata_str) def test_data_columns(): metadata = read_metadata('data/metadata_budget.yml') df = pd.read_csv('data/boston_budget.csv') for col in df.columns: assert col in metadata['columns'], f'"{col}" not on metadata spec.' # + [markdown] slideshow={"slide_type": "subslide"} # It is a logical practice to keep one schema spec file per table provided to you. However, it is also possible to take advantage of YAML "documents" to keep multiple schema specs inside a single YAML file. # # The choice is yours - in cases where there are a lot of data files, it may make sense (for the sake of file-system sanity) to keep all of the specs in multiple files that represent logical groupings of data. # + [markdown] slideshow={"slide_type": "subslide"} # ## Exercise: Write `YAML` metadata spec. # # Put yourself in the shoes of a data provider. Take any file in the `data/` directory, and make a schema spec file for that file. # + [markdown] slideshow={"slide_type": "subslide"} # ## Exercise: Write test for metadata spec. # # Next, put yourself in the shoes of a data analyst. Take the schema spec file and write a test for it. # + [markdown] slideshow={"slide_type": "subslide"} # ## Exercise: Write meta-test. # # Now, let's go "meta". Write a "meta-test" that ensures that every CSV file in the `data/` directory has a schema file associated with it. (The function need not check each schema.) # + [markdown] slideshow={"slide_type": "subslide"} # ## Notes # # - Point: have trusted copy of schema apart from data file. YAML not necessarily only way. # - If no schema provided, manually create one; this is exploratory data analysis anyways - no effort wasted! # + [markdown] slideshow={"slide_type": "slide"} # # Datum Checks # # Now that we're done with the schema checks, let's do some sanity checks on the data as well. This is my personal favourite too, as some of the activities here overlap with the early stages of exploratory data analysis. # # We're going to switch datasets here, and move to a 'corrupted' version of the Boston Economic Indicators dataset. Its file path is: `./data/boston_ei-corrupt.csv`. # + slideshow={"slide_type": "subslide"} import pandas as pd import seaborn as sns sns.set_style('white') # %matplotlib inline df = pd.read_csv('data/boston_ei-corrupt.csv') df.head() # + [markdown] slideshow={"slide_type": "subslide"} # ### Demo: Visual Diagnostics # # We can use a package called `missingno`, which gives us a quick visual view of the completeness of the data. 
This is a good starting point for deciding whether you need to manually comb through the data or not. # + slideshow={"slide_type": "subslide"} # First, we check for missing data. import missingno as msno msno.matrix(df) # + [markdown] slideshow={"slide_type": "subslide"} # Immediately it's clear that there's a number of rows with empty values! Nothing beats a quick visual check like this one. # + [markdown] slideshow={"slide_type": "fragment"} # We can get a table version of this using another package called `pandas_summary`. # + slideshow={"slide_type": "subslide"} # We can do the same using pandas-summary. from pandas_summary import DataFrameSummary dfs = DataFrameSummary(df) dfs.summary() # + [markdown] slideshow={"slide_type": "subslide"} # `dfs.summary()` returns a Pandas DataFrame; can write tests for data completeness. # + [markdown] slideshow={"slide_type": "subslide"} # ## Exercise: Test for data completeness. # # Write a test that confirms that there's no missing data. # + slideshow={"slide_type": "fragment"} # Add this to test_datafuncs.py from pandas_summary import DataFrameSummary def test_data_completeness(df): df_summary = DataFrameSummary(df).summary() for col in df_summary.columns: assert df_summary.loc['missing', col] == 0, f'{col} has missing values' # + [markdown] slideshow={"slide_type": "subslide"} # ## Exercise: Test for value correctness. # # Next, we can sanity-check our data values. # # Basic checks: # - `val >= 0` (positive real numbers) # - `0 <= val <= 1` (fraction/rates/percentages) # # Let's write one test that encompasses the [0, 1] scenario. # + slideshow={"slide_type": "subslide"} def test_data_range(df, col): if col == 'labor_force_part_rate': # hard code one condition per column assert df[col].min() >= 0, "minimum value less than zero" assert df[col].max() <= 1, "maximum value greater than zero" test_data_range(df, 'labor_force_part_rate') # + [markdown] slideshow={"slide_type": "subslide"} # We can take the EDA portion further, by doing an empirical cumulative distribution plot for each data column. # + slideshow={"slide_type": "skip"} import numpy as np def compute_dimensions(length): """ Given an integer, compute the "square-est" pair of dimensions for plotting. Examples: - length: 17 => rows: 4, cols: 5 - length: 14 => rows: 4, cols: 4 """ sqrt = np.sqrt(length) floor = int(np.floor(sqrt)) ceil = int(np.ceil(sqrt)) if floor ** 2 >= length: return (floor, floor) elif floor * ceil >= length: return (floor, ceil) else: return (ceil, ceil) compute_dimensions(length=17) assert compute_dimensions(17) == (4, 5) assert compute_dimensions(16) == (4, 4) assert compute_dimensions(15) == (4, 4) assert compute_dimensions(11) == (3, 4) # + slideshow={"slide_type": "subslide"} # Next, let's visualize the empirical CDF for each column of data. import matplotlib.pyplot as plt def empirical_cumdist(data, ax, title=None): """ Plots the empirical cumulative distribution of values. """ x, y = np.sort(data), np.arange(1, len(data)+1) / len(data) ax.scatter(x, y) ax.set_title(title) data_cols = [i for i in df.columns if i not in ['Year', 'Month']] n_rows, n_cols = compute_dimensions(len(data_cols)) fig = plt.figure(figsize=(n_cols*3, n_rows*3)) from matplotlib.gridspec import GridSpec gs = GridSpec(n_rows, n_cols) for i, col in enumerate(data_cols): ax = plt.subplot(gs[i]) empirical_cumdist(df[col], ax, title=col) plt.tight_layout() plt.show() # + [markdown] slideshow={"slide_type": "slide"} # # Statistical Checks # # - Report on deviations from normality. 
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Normality?!
#
# - The Gaussian (Normal) distribution is commonly assumed in downstream statistical procedures, e.g. outlier detection.
# - We can test for normality by using a K-S test.
# + [markdown] slideshow={"slide_type": "subslide"}
# ## K-S test
#
# From Wikipedia:
#
# > In statistics, the Kolmogorov–Smirnov test (K–S test or KS test) is a nonparametric test of the equality of continuous, one-dimensional probability distributions that can be used to compare a sample with a reference probability distribution (one-sample K–S test), or to compare two samples (two-sample K–S test). It is named after <NAME> and <NAME>.
# + [markdown] slideshow={"slide_type": "subslide"}
# ![](https://upload.wikimedia.org/wikipedia/commons/c/cf/KS_Example.png)
# + slideshow={"slide_type": "subslide"}
from scipy.stats import ks_2samp
import numpy.random as npr

# Simulate a normal distribution with 10000 draws.
normal_rvs = npr.normal(size=10000)
result = ks_2samp(normal_rvs, df['labor_force_part_rate'].dropna())
result.pvalue < 0.05
# + slideshow={"slide_type": "subslide"}
fig = plt.figure()
ax = fig.add_subplot(111)
# Use the empirical_cumdist helper defined above to overlay the two ECDFs.
empirical_cumdist(normal_rvs, ax=ax)
empirical_cumdist(df['hotel_occup_rate'], ax=ax)
# -
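# + [markdown] slideshow={"slide_type": "subslide"}
# Following the same pattern as the earlier checks, the K-S comparison can be folded into the test suite. A minimal sketch, assuming we standardize the column first (otherwise the test mostly reflects scale rather than shape) and flag p-values below 0.05:
# + slideshow={"slide_type": "subslide"}
# A possible addition to test_datafuncs.py
from scipy.stats import ks_2samp
import numpy.random as npr

def test_normality(df, col, alpha=0.05):
    data = df[col].dropna()
    scaled = (data - data.mean()) / data.std()   # compare shape, not scale
    normal_rvs = npr.normal(size=10000)
    result = ks_2samp(normal_rvs, scaled)
    assert result.pvalue >= alpha, f'{col} deviates from a normal distribution (p={result.pvalue:.3g})'

# e.g. test_normality(df, 'labor_force_part_rate')
# -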
4-data-checks.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="view-in-github" # <a href="https://colab.research.google.com/github/lustraka/puzzles/blob/main/AoC2021/AoC_13.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="dDZWBB4Ll10C" # # Advent of Code Puzzles # [Advent of Code 2021](https://adventofcode.com/2021) | [reddit/adventofcode](https://www.reddit.com/r/adventofcode/) # + id="ri3O_1i3l2mo" import requests import pandas as pd import numpy as np import re from collections import Counter path = 'https://raw.githubusercontent.com/lustraka/puzzles/main/AoC2021/data/' # + [markdown] id="tdgyyTyplOSz" # ## [Day 15](https://adventofcode.com/2021/day/15): Chiton # ### Part I # - **Unknown**: The lowest risk of a path from the top left to the bottom right. # - **Data**: A map of risk level. # - **Condition**: # - Add up the risk levels of each position you enter. # - You cannot move diagonally. # - We don't need to store the shortest path. # # Dependecies to consider: # ```python # from collections import Counter, defaultdict, deque # from heapq import heappop, heappush # ``` # + id="96Rf04vymmW0" example = """1163751742 1381373672 2136511328 3694931569 7463417111 1319128137 1359912421 3125421639 1293138521 2311944581""" # + colab={"base_uri": "https://localhost:8080/"} id="ATVBTbQammRs" outputId="0fb44f6d-4af2-4500-c210-a6589138df14" from collections import defaultdict def parse(input): # Initialize an array map = [list(line) for line in input.split('\n')] map = np.array(map).astype(int) # Transform the array into an default dict mapdict = defaultdict(lambda: np.inf) for i in range(map.shape[0]): for j in range(map.shape[1]): mapdict[(i,j)] = map[i,j] # Return the default dict, the start position and the end position return mapdict, (0, 0), tuple(np.array(map.shape)-1) map, start, end = parse(example) len(map), start, end # + [markdown] id="xXYC-IF1TGwe" # - **Plan** (Implement [Dijkstra's algorithm](https://www.wikiwand.com/en/Dijkstra%27s_algorithm)): # - Create the vertex set `Q` # - Initialize the vertex `u` with the `start` # - Initialize the default dict `dist` that contains the current distances from the start to other vertices. 
# - For each neighbour `v` of `u`: # - calculate the distance `alt` to `v` through `u` # - if `alt` < `dist[v]` update `dist[v]` # - Repeat for the entire map # # + colab={"base_uri": "https://localhost:8080/"} id="e25asMtMo-xK" outputId="72406307-5004-44df-a985-03884a53f8a7" def solve_part1(input): map, start, end = parse(input) # Create vertex set Q Q = list(map.keys()) # Initialize the vertex u u = start # Initialize map of distances from start dist dist = defaultdict(lambda: np.inf) for k, _ in map.items(): dist[k] = np.inf dist[start] = 0 for i in range(end[0]+1): for j in range(end[1]+1): # Identify neighbours not yet visited neighbours = [(i+1,j),(i-1,j),(i,j+1),(i,j-1)] # Compute and update distances for v in neighbours: alt = dist[i,j] + map[v] if alt < dist[v]: dist[v] = alt Q.remove((i,j)) # Recalculate distances one more for j in range(end[1]+1): for i in range(end[0]+1): # Identify neighbours not yet visited neighbours = [(i+1,j),(i-1,j),(i,j+1),(i,j-1)] # Compute and update distances for v in neighbours: alt = dist[i,j] + map[v] if alt < dist[v]: dist[v] = alt # Recalculate distances one more for i in range(end[1]+1): for j in range(end[0]+1): # Identify neighbours not yet visited neighbours = [(i+1,j),(i-1,j),(i,j+1),(i,j-1)] # Compute and update distances for v in neighbours: alt = dist[i,j] + map[v] if alt < dist[v]: dist[v] = alt return dist, dist[end] dist, risk = solve_part1(example) risk # + colab={"base_uri": "https://localhost:8080/"} id="Mc1O8m5NmmMM" outputId="d804aafb-98d9-4167-d678-8b3cab59e119" r = requests.get(path+'AoC2021_15.txt') dist, risk = solve_part1(r.text[:-1]) risk # + [markdown] id="Yp_YsEmfqCkT" # `402` : That's not the right answer; your answer is too high. # # `399` : That's not the right answer; your answer is too high. # # `398` : The right solution! # + colab={"base_uri": "https://localhost:8080/"} id="BEXyVG2dr-DJ" outputId="6d39e6f3-a5ff-4724-c455-f7e21a2e0889" frag = [] for i in range(97,100): for j in range(97,100): frag.append(dist[i,j]) np.array(frag).reshape(3,3) # + colab={"base_uri": "https://localhost:8080/"} id="RQxCtc8rr9_4" outputId="24ef2351-865d-4a03-92e5-e999d9c9c1be" frag = [] for i in range(5): for j in range(5): frag.append(dist[i,j]) np.array(frag).reshape(5,5) # + [markdown] id="eGM2YqRql6U5" # This works but the value of (9,9) is 41 (not 40). Algorithm get lost in a maze somehow... # ```python # def dijkstra_iter(map, dist, u, end): # # Identify neighbours not yet visited # neighbours = [v for v in [(u[0]+1,u[1]),(u[0]-1,u[1]),(u[0],u[1]+1),(u[0],u[1]-1)] if v in Q] # # Compute and update distances # for v in neighbours: # alt = dist[u] + map[v] # if alt < dist[v]: # dist[v] = alt # # Search the next node # if neighbours: # min = neighbours[0] # else: # min = Q[0] # for v in neighbours[1:]: # if dist[v] < dist[min]: # min = v # # Remove u from Q and continue with min # Q.remove(u) # if min in Q: # u = min # else: # u = Q[0] # print(u, dist[u]) # return u # # while Q: # u = dijkstra_iter(map,dist,u,end) # if u == end: # print(dist[u]) # break # ``` # + [markdown] id="_Uv9R-IZbzUf" # ## [Day 14](https://adventofcode.com/2021/day/14): Extended Polymerization # ### Part I # - **Unknown**: A difference between the most and the least common element in the polymer after 10 steps of the process. # - **Data**: Polymer template and the pair insertion rules. # - **Condition**: # - The insertions all happen simultaneously. # - Inserted elements are not considered to be part of a pair until the next step. 
# - **Plan**: # - Implement the grammar algorithm # + id="_dQDdo49d6Zg" example = """NNCB CH -> B HH -> N CB -> H NH -> C HB -> C HC -> B HN -> C NN -> C BH -> H NC -> B NB -> B BN -> B BB -> N BC -> B CC -> N CN -> C""" # + colab={"base_uri": "https://localhost:8080/"} id="xK9f5mGwd6-I" outputId="9244c536-e88a-4467-c1ce-2875927fcf7f" def parse(input): g = input.split('\n') s = g[0] r = {} for i in range(2, len(g)): k,v = g[i].split(' -> ') r[k] = v return s, r s, r = parse(example) print(s) print(r) # + colab={"base_uri": "https://localhost:8080/"} id="iZ6J6HOEd64g" outputId="5b0c045c-16dc-4342-8be3-447c4d2a2609" r = requests.get(path+'AoC2021_14.txt') s, rules = parse(r.text[:-1]) print(s) print(len(rules), rules['KC']) # + colab={"base_uri": "https://localhost:8080/"} id="e4oH293jd6yQ" outputId="2f0e9c9b-9462-4784-c69f-c9d53bac4899" def polymerize(p, r): """Apply rules `r` to polymer `p`.""" p0 = list(p) # Initialize the new polymer p1 = [p0[0]] for i in range(len(p0)-1): p1.append(r.get(''.join([p0[i],p0[i+1]]),'')) p1.append(p0[i+1]) return "".join(p1) def solve_part1(data): p,r = parse(data) # Polymerize 10 times for i in range(10): p = polymerize(p,r) # Count elements c = Counter(p) # Sort elements e = sorted(c.items(), key=lambda x: x[1]) # Return the difference return e[-1][1]-e[0][1] solve_part1(example) # + colab={"base_uri": "https://localhost:8080/"} id="PAkoaIJ6gC5E" outputId="126f1d23-4a37-45f9-d25b-a58588982cb5" solve_part1(r.text[:-1]) # + [markdown] id="FKicNtKDd7m4" # ### Part II # - **Unknown**: A difference between the most and the least common element in the polymer after 40 steps of the process. # - **Data**: Same as in the part I. # - **Condition**: # - Same as i the part I. # - **Plan**: # - # # + id="0ezvW7RAgCqc" def solve_part2(data): p,r = parse(data) # Polymerize 40 times for i in range(40): p = polymerize(p,r) # Count elements c = Counter(p) # Sort elements e = sorted(c.items(), key=lambda x: x[1]) # Return the difference return e[-1][1]-e[0][1] solve_part2(example) # + id="14Ufsn7lgCnU" # Relace vyčerpala veškerou RAM a selhala # 2188189693529 # + [markdown] id="TnFAd8h1l8NI" # ## [Day 13](https://adventofcode.com/2021/day/13): Transparent Origami # ### Part I # - **Unknown**: The number of dots visible after completing just the first fold instruction on the transparent paper. 
# - **Data**: Coordinates of dots and folding instructions # - **Condition**: # - [0,0] represents the top-left coordinates # - `x` increases to the right, `y` increases downward # - `fold along y=...` instruction means folding the paper up # - `fold along x=xxx` instruction means folding the paper left # - **Plan**: # - Initialize the matrix # - Write a function for folding # - Count the dots after folding # # + id="HZXVSS9bl736" example = """6,10 0,14 9,10 0,3 10,4 4,11 6,0 6,12 4,1 0,13 10,12 3,4 3,0 8,4 1,10 2,14 8,10 9,0 fold along y=7 fold along x=5""" # + colab={"base_uri": "https://localhost:8080/"} id="Zui2ChCYW5S6" outputId="3620fb96-64df-46f9-c28a-12d02dec509d" def parse(input): lines = input.split('\n') dots = [] folds = [] for line in lines: if ',' in line: dots.append(np.array(line.split(',')).astype(int)) if '=' in line: fold = re.findall('[x,y]=\d+', line)[0].split('=') folds.append([fold[0],int(fold[1])]) return np.array(dots), folds dots, folds = parse(example) print(dots[:4]) print(folds) # + colab={"base_uri": "https://localhost:8080/"} id="LOgWQD6N7jss" outputId="afa5ddbb-672e-4dd3-ddb4-9bdae756a8d6" r = requests.get(path+'AoC2021_13.txt') dots, folds = parse(r.text) print(dots[-2:]) print(folds[-2:]) # + colab={"base_uri": "https://localhost:8080/"} id="78TkviHRW57Z" outputId="45d0ae46-1182-4f1f-f9f7-c317dcbdd8c2" pap = np.zeros(dots.max(axis=0)+1).astype(int) for dot in dots: pap[dot[0], dot[1]] = 1 pap # + [markdown] id="2jP3L-3lsV2t" # **Beware** of slicing vs. indexing. The expression `pap[6,10]` adresses one element [6,10] in the array, whereas the expression `pap[[6,10]]` addresses two rows [6, :] and [10, :] # + colab={"base_uri": "https://localhost:8080/"} id="ehNjG768r-FY" outputId="c805d0fb-1859-4c13-ee24-13ec0f4dfda8" print(dots[0]) print(pap[dots[0]]) print(np.stack((pap[6, :], pap[10, :]))) # + colab={"base_uri": "https://localhost:8080/"} id="DOREbAwomClj" outputId="0f98b420-2fd0-4f06-d285-bd61d2c01062" # Fold the paper up - indices y = 7 for i in range(y): print(i, 2*y-i) # + colab={"base_uri": "https://localhost:8080/"} id="8EfdquIgmCg7" outputId="580b54b9-2a8e-4caa-ed5b-a64e3dde430b" pap_y7 = pap[:,:y].copy() for i in range(y): pap_y7[:,i] += pap[:, 2*y-i] pap_y7.T # + colab={"base_uri": "https://localhost:8080/"} id="lNjddhbfW50p" outputId="5e5be121-9c70-4593-d36e-06f65e8998dd" pap_y7[pap_y7>0].shape # + colab={"base_uri": "https://localhost:8080/"} id="RFrcnUzluuxm" outputId="bd12593e-c9a0-4ab4-8257-905b72fcbe53" # Fold the paper left x = 5 pap_x5 = pap_y7[:x,:].copy() for i in range(x): pap_x5[i,:] += pap_y7[2*x-i, :] pap_x5.T # + colab={"base_uri": "https://localhost:8080/"} id="cEP0V4nbuuoe" outputId="11968091-9a7c-4a74-d779-8f91259cef0b" pap_x5[pap_x5>0].shape # + colab={"base_uri": "https://localhost:8080/"} id="q31wjf0Luukm" outputId="9c6d1440-56ce-4fb6-bc05-547f94fd9216" def solve_part1(input): dots, folds = parse(input) # Initialize the paper pap = np.zeros(dots.max(axis=0)+1).astype(int) for dot in dots: pap[dot[0], dot[1]] = 1 print('Initial shape:', pap.shape) # Calculate the first fold ax, val = folds[0][0], folds[0][1] print(ax, val) # Fold the paper left if ax == 'x': for i in range(val): pap[i, :] += pap[2*val-i, :] # Adjust shape of the paper pap = pap[:val, :] else: for i in range(val): pap[:, i] += pap[:, 2*val-i] # Adjust shape of the paper pap = pap[:, :val] print('Folded shape:', pap.shape) return pap[pap>0].shape[0] solve_part1(example) # + colab={"base_uri": "https://localhost:8080/"} id="DK_p17N67b3z" 
outputId="3071eac4-84d6-4f14-e878-febacbc98983" solve_part1(r.text) # + [markdown] id="oRHOjPg2W6W5" # ### Part II # - **Unknown**: Eight capital letters after folding the paper. # - **Data**: Same as in part I # - **Condition**: # - Same as in part I # - **Plan**: # - Calculate all folds and return transposed result # # + colab={"base_uri": "https://localhost:8080/"} id="qpuDu7iTAz60" outputId="e057bd6c-5d38-4073-98a8-7f1a6f858544" def solve_part2(input): dots, folds = parse(input) # Initialize the paper #pap = np.zeros(dots.max(axis=0)+1).astype(int) # Solve IndexError: index 894 is out of bounds for axis 1 with size 890 pap = np.zeros((1311, 895)).astype(int) for dot in dots: pap[dot[0], dot[1]] = 1 print('Initial shape:', pap.shape) # Calculate the folds for ax, val in folds: #print(ax, val) # Fold the paper left if ax == 'x': for i in range(val): pap[i, :] += pap[2*val-i, :] # Adjust shape of the paper pap = pap[:val, :] else: for i in range(val): pap[:, i] += pap[:, 2*val-i] # Adjust shape of the paper pap = pap[:, :val] #print('Folded shape:', pap.shape) return pap.T code = solve_part2(example) print(code) # + colab={"base_uri": "https://localhost:8080/"} id="_XqjgiXxA0dm" outputId="b276d890-6fd5-4da5-c23c-a2b284a043d8" code = solve_part2(r.text) print(code) # + colab={"base_uri": "https://localhost:8080/"} id="XKl3k7e0A0S0" outputId="71345572-3db8-402d-b505-c4402ae5bd1b" code = np.piecewise(code, [code==0, code>0], [0,1]).astype(str) code = np.piecewise(code, [code == '0', code == '1'], [' ', '#']) for j in range(5,41,5): print(code[:,j-5:j-1]) print() # + [markdown] id="SiKela2khchC" # Result: UEFZCUCJ # + id="k5KWlzgUflm7"
AoC2021/AoC_13.ipynb
// --- // jupyter: // jupytext: // text_representation: // extension: .scala // format_name: light // format_version: '1.5' // jupytext_version: 1.14.4 // kernelspec: // display_name: Scala 2.12 // language: scala // name: scala212 // --- // ![gt](https://www.osgeo.org/wp-content/uploads/GeoTrellis.png) import $ivy.`org.locationtech.geotrellis::geotrellis-raster:3.5.2` import $ivy.`org.slf4j:slf4j-simple:1.7.30` // ## RasterSource for Landsat // - [RasterSource Overview](https://geotrellis.github.io/geotrellis-workshop/docs/rastersource) // - [RasterSource ScalaDoc](https://geotrellis.github.io/scaladocs/latest/geotrellis/raster/RasterSource.html) import geotrellis.raster._ import geotrellis.raster.geotiff.GeoTiffRasterSource def func(arg: String) = arg // String => String val arg = { str => func(str) } // + // String => String // can we define similar strings to access rasters from S3? def assetUri(key: String): String = s"https://geotrellis-workshop.s3.amazonaws.com/$key" def bandUri(band: String): String = assetUri(s"landsat/LC81070352015218LGN00_$band.TIF") bandUri("string") // - val greenBand = GeoTiffRasterSource(bandUri("B3")) val redBand = GeoTiffRasterSource(bandUri("B4")) val nirBand = GeoTiffRasterSource(bandUri("B5")) val qaBand = GeoTiffRasterSource(bandUri("BQA")) redBand.metadata redBand.crs // ## Read Overview // // Landsat scenes in the `geotrellis-workshop` bucket have added overviews // // - `RasterSource.resolutions` // - `RasterSource.resample` // - Reading tiles, `Option` return // - Rendering tiles // - Fixing `NODATA` value // 7751, 7891 // Overviews: 3876x3946, 1938x1973, 969x987, 485x494, 243x247 redBand.resolutions // + import geotrellis.raster.resample._ import geotrellis.raster.io.geotiff._ val overview = redBand.resample( resampleTarget = TargetCellSize(CellSize(500,500)), method = NearestNeighbor, strategy = Auto() ) // + // val opt: Option[String] = None // opt.getOrElse("str") // + // Option // Some(value) // None val result: Option[Raster[MultibandTile]] = overview.read() val raster: Raster[MultibandTile] = result.get val tile: Tile = raster.tile.band(0) // - Image(tile.renderPng().bytes) val histogram = tile.histogram // histogram.values() // + val colorMap = ColorRamps.BlueToRed.toColorMap(histogram) colorMap.colors // - // why the blue background? Image(tile.withNoData(Some(0)).renderPng(colorMap).bytes) // ## Read GeoJSON // GeoTrellis uses [circe](https://circe.github.io/circe/) library to parse and write JSON. 
// // All we have to do is provide Encoders/Decoders for GIS types like [MultiPolygon](https://github.com/locationtech/geotrellis/blob/1a2ea84f7a15d790a13a75ede0fecee351ac4a7e/vector/src/main/scala/geotrellis/vector/io/json/GeometryFormats.scala#L157-L173) // + import _root_.io.circe._ import _root_.io.circe.syntax._ import geotrellis.vector._ val json = scala.io.Source.fromURL(assetUri("gadm36/JPN_1_Chiba.geojson")).mkString Text(json) // - json.parseJson // + import scala.util.Try val chibaAoi = json.parseJson.as[MultiPolygon].right.get // Option // Some // None // Either // Right(v: Int) // Left(e: String) // - chibaAoi.asJson.spaces4 // ## Read Window from Landsat // - Using [Proj4J](https://github.com/locationtech/proj4j) // // - [MultiPolygon Reproject ScalaDoc](https://geotrellis.github.io/scaladocs/latest/geotrellis/vector/reproject/Implicits$ReprojectMutliPolygon.html) // - [MultiPolygon Reproject Implicit Method](https://github.com/locationtech/geotrellis/blob/2f8348ac299d889282b7e6d379eed4696ece1dd7/vector/src/main/scala/geotrellis/vector/reproject/Implicits.scala#L89) chibaAoi.extent redBand.crs redBand.read(chibaAoi.extent) // Oh no, we read None! // + import geotrellis.proj4._ val chibaAoiUtm = chibaAoi.reproject(LatLng, greenBand.crs) // - val chibaUtmExtent = chibaAoiUtm.extent val chibaRedRaster = redBand.read(chibaUtmExtent).get val chibaRedBand = chibaRedRaster.tile.band(0).withNoData(Some(0)) Image(chibaRedBand.renderPng(colorMap).bytes) // ## Rasterize AOI // Lets verify that AOI has been reprojected correctly by rasterizing it onto the Landsat scene // + val chibaMask: MutableArrayTile = chibaRedBand.mutable chibaRedRaster.rasterExtent.foreach(chibaAoiUtm) { (x, y) => chibaMask.set(x, y, Short.MaxValue) } // WARNING: this worked, but we just mutated the chibaRedBand Tile! Image(chibaMask.renderPng(colorMap).bytes) // - // ## Mask Clouds using QA Layer val qaTile: Tile = qaBand.read(chibaUtmExtent).get.tile.band(0).withNoData(Some(0)) def maskClouds(tile: Tile): Tile = tile.combine(qaTile) { (v: Int, qa: Int) => val isCloud = qa & 0x8000 val isCirrus = qa & 0x2000 if(isCloud > 0 || isCirrus > 0) { NODATA } else { v } } Image(maskClouds(chibaRedBand).renderPng(colorMap).bytes) // ## Compute NDVI def ndvi (r: Double, ir: Double) : Double = { if (isData(r) && isData(ir)) { (ir - r) / (ir + r) } else { // https://github.com/locationtech/geotrellis/blob/master/raster/src/main/scala/geotrellis/raster/package.scala#L104-L111 doubleNODATA } } val chibaNirBand = nirBand.read(chibaUtmExtent).get.tile.band(0) // + // .convert(FloatConstantNoDataCellType) // Landsat tiles are stored as Short (0 - 32767), NDVI should be Float (-1.0 .. 1.0) val red = maskClouds(chibaRedBand).convert(FloatConstantNoDataCellType) val nir = maskClouds(chibaNirBand).convert(FloatConstantNoDataCellType) // - val chibaNdvi = red.combineDouble(nir) { (r, ir) => if (isData(r) && isData(ir)) { (ir - r) / (ir + r) } else { // https://github.com/locationtech/geotrellis/blob/master/raster/src/main/scala/geotrellis/raster/package.scala#L104-L111 doubleNODATA } } // + val ndviColorMap = ColorMap.fromStringDouble( "0:ffffe5ff;0.1:f7fcb9ff;0.2:d9f0a3ff;0.3:addd8eff;0.4:78c679ff;0.5:41ab5dff;0.6:238443ff;0.7:006837ff;1:004529ff" ).get Image(chibaNdvi.renderPng(ndviColorMap).bytes) // - val geotiff = GeoTiff(chibaNdvi, chibaRedRaster.extent, redBand.crs) geotiff.write("ndvi.tif") Image(tile.withNoData(Some(0)).hillshade(CellSize(500, 500)).renderPng())
notebooks/landsat-ndvi.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Program that adds two tuples element by element

tupla_a = (1, 2, 3, 4, 5)
tupla_b = (6, 7, 8, 9, 10)
for i in range(len(tupla_a)):
    w = tupla_a[i] + tupla_b[i]
    print(w)

# Dot product of two vectors

x_a = int(input("x coordinate of vector A: "))
y_a = int(input("y coordinate of vector A: "))
z_a = int(input("z coordinate of vector A: "))
vector_a = [x_a, y_a, z_a]
print("vector A = ", vector_a)
x_b = int(input("x coordinate of vector B: "))
y_b = int(input("y coordinate of vector B: "))
z_b = int(input("z coordinate of vector B: "))
vector_b = [x_b, y_b, z_b]
print("vector B = ", vector_b)
# Element-wise products; their sum is the dot product A.B
vector_res = []
for x in range(len(vector_a)):
    vector_res.insert(x, vector_a[x] * vector_b[x])
print("A.B = ", sum(vector_res))

# Encrypt a name

nombre = list(input("name to encrypt: "))
nombre
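
# The last cell above only reads the name to encrypt. The exercise does not fix a
# particular cipher, so the code below is just one possible way to finish it,
# assuming a simple Caesar-style letter shift is acceptable.

def encrypt(name, shift=3):
    # Shift each unaccented ASCII letter `shift` places in the alphabet;
    # any other character is left unchanged.
    result = []
    for ch in name:
        if 'a' <= ch <= 'z' or 'A' <= ch <= 'Z':
            base = ord('A') if ch.isupper() else ord('a')
            result.append(chr((ord(ch) - base + shift) % 26 + base))
        else:
            result.append(ch)
    return ''.join(result)

encrypt("Ana")  # -> 'Dqd'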
Tarea Tuplas, Listas, Diccionarios.ipynb
// --- // jupyter: // jupytext: // text_representation: // extension: .groovy // format_name: light // format_version: '1.5' // jupytext_version: 1.14.4 // kernelspec: // display_name: Groovy // language: groovy // name: groovy // --- // ## FillHoles Op // + //load ImageJ // %classpath config resolver imagej.public https://maven.imagej.net/content/groups/public // %classpath add mvn net.imagej imagej 2.0.0-rc-67 //create ImageJ object ij = new net.imagej.ImageJ() // - // This `Op` fills in any holes (defined as a group of "off" pixels surrounded "on" pixels on all sides) in a binary image. The `Op` is called as follows: ij.op().help("fillHoles") // Note the parameters here: // // * `RandomAccessibleInterval out`: the **(optional)** output image. // * `RandomAccessibleInterval in1`: the input image // * `Shape structElement`: an **optional** parameter that defines how large of a space the `Op` searches when filling the holes. When the `Op` finds an "off" value that it determines is not an edge it will attempt to fill that "off" pixel and any neighboring values (where the `Neighborhood` is defined by this `Shape`) that are also "off" and not separated from the current value by an "on" pixel. Most of the time no value needs to be passed through, so thus we leave this parameter out of the notebook. // // We need to get a binary image to perform any morphological operations. Let's get a grayscale image and then binarize it using [the Huang Threshold Op](../threshold/threshold.ipynb#Huang-Thresholding): // + input_full = ij.scifio().datasetIO().open("http://imagej.net/images/blobs.gif") //input_full has 3 channels. We only need one. input = ij.op().run("hyperSliceView", input_full, 2, 0) binaryInput = ij.op().run("threshold.huang", input) ij.notebook().display(binaryInput) // - // Now that we have our image, let's fill in all of these holes. Note that the blobs that are on the edges of the image aren't really "holes" (since they are not surrounded by "on" pixels on all sides", thus they will not be filled in: // + import net.imglib2.algorithm.neighborhood.RectangleShape output = ij.op().run("fillHoles", null, binaryInput) ij.notebook().display(output) // - // To show the difference, let's [stack](../transform/stackView.ipynb) the images to see the difference: // + import net.imglib2.img.Img list = new ArrayList<Img>() list.add(binaryInput) list.add(output) stacked = ij.op().run("stackView", list) ij.notebook().display(stacked) // - // Note the yellow regions where no blobs are found, as well as the green regions, where the holes ("off" in the red image) are now filled in ("on" in the green image)
notebooks/1-Using-ImageJ/Ops/morphology/fillHoles.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (TensorFlow 2.1 Python 3.6 CPU Optimized) # language: python # name: python3__SAGEMAKER_INTERNAL__arn:aws:sagemaker:us-east-1:081325390199:image/tensorflow-2.1-cpu-py36 # --- # # Lab: Bring your own custom container with Amazon SageMaker # ## Overview # Here, we’ll show how to package a simple Python example which showcases the decision tree algorithm from the widely used scikit-learn machine learning package. The example is purposefully fairly trivial since the point is to show the surrounding structure that you’ll want to add to your own code so you can train and host it in Amazon SageMaker. # # The ideas shown here will work in any language or environment. You’ll need to choose the right tools for your environment to serve HTTP requests for inference, but good HTTP environments are available in every language these days. # # In this example, we use a single image to support training and hosting. This is easy because it means that we only need to manage one image and we can set it up to do everything. Sometimes you’ll want separate images for training and hosting because they have different requirements. Just separate the parts discussed below into separate Dockerfiles and build two images. Choosing whether to have a single image or two images is really a matter of which is more convenient for you to develop and manage. # # If you’re only using Amazon SageMaker for training or hosting, but not both, there is no need to build the unused functionality into your container. # # # ## Building the container # Docker provides a simple way to package arbitrary code into an image that is totally self-contained完全独立的. Once you have an image, you can use Docker to run a container based on that image. Running a container is just like running a program on the machine except that the container creates a fully self-contained environment for the program to run. Containers are isolated from each other and from the host environment, so the way you set up your program is the way it runs, no matter where you run it. # # Amazon SageMaker uses Docker to allow users to train and deploy arbitrary algorithms. # ### Parts of the container # In the container directory are all the components you need to package the sample algorithm for Amazon SageMager: # ``` # . # |-- Dockerfile # |-- build_and_push.sh # `-- decision_trees # |-- nginx.conf # |-- predictor.py # |-- serve # |-- train # `-- wsgi.py # ``` # Let’s discuss each of these in turn: # # - Dockerfile describes how to build your Docker container image. More details below: # - build_and_push.sh is a script that uses the Dockerfile to build your container images and then pushes it to ECR. We’ll invoke the commands directly later in this notebook, but you can just copy and run the script for your own algorithms. # - decision_trees is the directory which contains the files that will be installed in the container. # - local_test is a directory that shows how to test your new container on any computer that can run Docker, including an Amazon SageMaker notebook instance. Using this method, you can quickly iterate using small datasets to eliminate any structural bugs before you use the container with Amazon SageMaker. We’ll walk through local testing later in this notebook. # # In this simple application, we only install five files in the container. 
# # The files that we’ll put in the container are: # # - nginx.conf is the configuration file for the nginx front-end. Generally, you should be able to take this file as-is保持原样. # - predictor.py is the program that actually implements the Flask web server and the decision tree predictions for this app. You’ll want to customize the actual prediction parts to your application. Since this algorithm is simple, we do all the processing here in this file, but you may choose to have separate files for implementing your custom logic. # - serve is the program started when the container is started for hosting. It simply launches the gunicorn server(Gunicorn“ Green Unicorn”是Python Web服务器网关接口HTTP服务器。它是从Ruby的Unicorn项目移植而来的pre-fork worker模型。Gunicorn服务器与许多Web框架广泛兼容,这些Web框架实现简单,占用服务器资源少且速度相当快。) which runs multiple instances of the Flask app defined in predictor.py. You should be able to take this file as-is. # - train is the program that is invoked when the container is run for training. You will modify this program to implement your training algorithm. # - wsgi.py is a small wrapper used to invoke the Flask app. You should be able to take this file as-is. Web服务器网关接口 (Web Server Gateway Interface)是为Python语言定义的Web服务器和Web应用程序或框架之间的一种简单而通用的接口。 # # In summary, the two files you will probably want to change for your application are train and predictor.py # ### The Dockerfile # The Dockerfile describes the image that we want to build. You can think of it as describing the complete operating system installation of the system that you want to run. A Docker container running is quite a bit lighter than a full operating system, because it takes advantage of Linux on the host machine for the basic operations. # # For the Python science stack, we will start from a standard Ubuntu installation and run the normal tools to install the things needed by scikit-learn. Finally, we add the code that implements our specific algorithm to the container and set up the right environment to run under. # cell 00 # Async client for amazon services using botocore and aiohttp/asyncio. # https://github.com/aio-libs/aiobotocore # !pip install --upgrade aiobotocore # + import zipfile file_dir = 'scikit_bring_your_own.zip' # 解压文件 zip_File = zipfile.ZipFile(file_dir,'r') # 获取压缩文件中的内容 zip_list = zip_File.namelist() #打印详细信息 # zzz = zip_File.printdir() #打印列表信息 # print(zip_list) #解压到当前文件夹中 zip_File.extractall(r'.') # + # cell 01 # # !unzip scikit_bring_your_own.zip # 移动以下两个文件 【scikit_bring_your_own/data/】 【scikit_bring_your_own/container/】 到指定的位置 # !mv scikit_bring_your_own/data/ ./lab03_data/ # !mv scikit_bring_your_own/container/ ./lab03_container/ # 删除 scikit_bring_your_own # !rm -rf scikit_bring_your_own # 查看 Dockerfile 文件内容 # !cat lab03_container/Dockerfile # - # ## Building and registering the container # cell 02 # !pip install sagemaker-studio-image-build # > *In the next cell, if you run into IAM permission issue related to CodeBuild, make sure that you follow the steps outlined in the instructions* # !pwd # + # # %%sh # cell 03 # !cd lab03_container # 为以下两个文件【train】 【serve】 加入可执行权限 # !chmod +x lab03_container/decision_trees/train # !chmod +x lab03_container/decision_trees/serve # 构建一个docker镜像 # !sm-docker build ./lab03_container --repository sagemaker-decision-trees:latest # - # ## Using the container # Here we specify a bucket to use and the role that will be used for working with SageMaker. 
# + # cell 04 # S3 prefix prefix = 'DEMO-scikit-byo-iris' # Define IAM role import boto3 import re import os import numpy as np import pandas as pd from sagemaker import get_execution_role role = get_execution_role() role # - # The session remembers our connection parameters to SageMaker. We’ll use it to perform all of our SageMaker operations. # + # cell 05 import sagemaker as sage from time import gmtime, strftime # 创建一个Session, 管理与 Amazon SageMaker API 和任何其他所需 AWS 服务的交互。 sess = sage.Session() # - # When training large models with huge amounts of data, you’ll typically use big data tools, like Amazon Athena, AWS Glue, or Amazon EMR, to create your data in S3. For the purposes of this example, we’re using some the classic Iris dataset, which we have included. # # We can use use the tools provided by the SageMaker Python SDK to upload the data to a default bucket. # + # cell 06 WORK_DIRECTORY = 'lab03_data' # 上传数据到指定的目录 data_location = sess.upload_data(WORK_DIRECTORY, key_prefix=prefix) data_location # - # In order to use SageMaker to fit our algorithm, we’ll create an Estimator that defines how to use the container to train. This includes the configuration we need to invoke SageMaker training: # # - The container name. This is constructed as in the shell commands above. # - The role. As defined above. # - The instance count which is the number of machines to use for training. # - The instance type which is the type of machine to use for training. # - The output path determines where the model artifact will be written. # - The session is the SageMaker session object that we defined above. # # Then we use `fit()` on the estimator to train against the data that we uploaded above. # + # cell 07 account = sess.boto_session.client('sts').get_caller_identity()['Account'] region = sess.boto_session.region_name image = '{}.dkr.ecr.{}.amazonaws.com/sagemaker-decision-trees:latest'.format(account, region) tree = sage.estimator.Estimator(image, role, instance_count=1, instance_type='ml.c4.2xlarge', output_path="s3://{}/output".format(sess.default_bucket()), sagemaker_session=sess) file_location = data_location + '/iris.csv' tree.fit(file_location) # - # ## Hosting your model # You can use a trained model to get real time predictions using HTTP endpoint. Follow these steps to walk you through the process. # # Deploying the model to SageMaker hosting just requires a deploy call on the fitted model. This call takes an instance count, instance type, and optionally serializer and deserializer functions. These are used when the resulting predictor is created on the endpoint. # cell 08 from sagemaker.serializers import CSVSerializer predictor = tree.deploy(initial_instance_count=1, instance_type='ml.m4.xlarge', serializer=CSVSerializer()) # ## Preparing test data to run inferences # In order to do some predictions, we’ll extract some of the data we used for training and do predictions against it. This is, of course, bad statistical practice, but a good way to see how the mechanism works. 
# +
# cell 09
# Read the CSV data with pandas, with no header row
shape=pd.read_csv("lab03_data/iris.csv", header=None)

# Show 3 randomly sampled rows
shape.sample(3)

# +
# cell 10
# drop the label column in the training set
# Remove column 0, which holds the label; inplace=True applies the drop to the dataframe itself.
shape.drop(shape.columns[[0]],axis=1,inplace=True)

shape.sample(3)

# +
# cell 11
import itertools

# Build two helper lists a and b with list comprehensions
a = [50*i for i in range(3)]
b = [40+i for i in range(10)]

# Combine them with itertools.product to generate indices in the ranges 40~49, 90~99 and 140~149
indices = [i+j for i,j in itertools.product(a,b)]

# iloc selects rows by position; these rows are used as test data
test_data=shape.iloc[indices[:-1]]
test_data
# -

# ## Run predictions
#
# Prediction is as easy as calling predict with the predictor we got back from deploy and the data we want to do predictions with. The serializers take care of doing the data conversions for us.

# +
# cell 12
# Run the prediction and print the result.
print(predictor.predict(test_data.values).decode('utf-8'))
# -

# ## Cleanup
# After completing the lab, use these steps to [delete the endpoint](https://docs.aws.amazon.com/sagemaker/latest/dg/ex1-cleanup.html) or run the following code
#

# cell 13
# Delete the endpoint that was created
sess.delete_endpoint(predictor.endpoint_name)

# +
# cell 14
# Remove the lab03_container and lab03_data directories
# # !rm -rf lab03_container lab03_data
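
# For reference, the `predictor.py` described at the top of this lab implements a
# small Flask app. The cell below is only an illustrative sketch of that general
# shape, not the file shipped in `lab03_container/decision_trees/`: SageMaker
# hosting health-checks the container with `GET /ping` and sends inference requests
# to `POST /invocations`, and the real container loads its trained model from
# `/opt/ml/model`.

# +
import io

import flask
import pandas as pd

app = flask.Flask(__name__)
model = None  # the real predictor.py loads the trained decision tree from /opt/ml/model


@app.route('/ping', methods=['GET'])
def ping():
    # Report healthy only when a model has been loaded.
    status = 200 if model is not None else 404
    return flask.Response(response='\n', status=status, mimetype='application/json')


@app.route('/invocations', methods=['POST'])
def invocations():
    # The lab sends CSV rows; decode them, predict, and return CSV predictions.
    data = pd.read_csv(io.StringIO(flask.request.data.decode('utf-8')), header=None)
    predictions = model.predict(data)
    out = io.StringIO()
    pd.DataFrame({'results': predictions}).to_csv(out, header=False, index=False)
    return flask.Response(response=out.getvalue(), status=200, mimetype='text/csv')
# -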
lab3/bring-custom-container.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import numpy as np

num_user = 943
num_movie = 1682
# num_ratings = 100000
all_mean = 0
# user_mean = np.zeros(num_user,dtype=int)
# movie_mean = np.zeros(num_movie,dtype=int)

def load_data(filename):
    '''Load training data from dataset'''
    f = open('dataset/ml-100k/' + filename, 'rt')
    t = 0
    ratings = np.zeros((num_user, num_movie), dtype=int).reshape(num_user, num_movie)
    for line in f.readlines():
        user, movie, rating = [int(x) for x in line.split()[:3]]
        if t % 10000 == 0:
            print('Loading %6d case: ' % t, user, movie, rating)
        ratings[user - 1, movie - 1] = rating
        t += 1
    print('Loading complete.')
    print(np.size(ratings))
    return ratings
# -

def cal_mean():
    '''Calculate the global, per-user and per-movie mean ratings (zeros mark missing ratings)'''
    global all_mean, user_mean, movie_mean
    all_mean = np.mean(ratings[ratings != 0])
    user_mean = sum(ratings.T) / sum((ratings != 0).T)
    movie_mean = sum(ratings) / sum((ratings != 0))
    print(np.isnan(user_mean).any(), np.isnan(movie_mean).any())
    # Replace NaN (users/movies with no ratings) by all_mean.
    user_mean = np.where(np.isnan(user_mean), all_mean, user_mean)
    movie_mean = np.where(np.isnan(movie_mean), all_mean, movie_mean)
    print(np.isnan(user_mean).any(), np.isnan(movie_mean).any())
    print('Mean rating of all movies is ', round(all_mean, 2))

def predict_naive(user_id, movie_id):
    '''Naive baseline prediction: movie mean + user mean - global mean'''
    # if np.isnan(movie_mean[movie_id]):
    #     prediction = round(user_mean[user_id]);
    # else:
    prediction = round(movie_mean[movie_id] + user_mean[user_id] - all_mean, 2)
    return prediction

def rmse(predictions, targets):
    '''Root mean squared error between predictions and targets'''
    return np.sqrt(np.mean(np.square(predictions - targets)))

def test(filename):
    global predictions, targets
    f = open('dataset/ml-100k/' + filename, 'rt')
    predictions = []
    targets = []
    for line in f.readlines():
        user, movie, real_rating = [int(x) for x in line.split()[:3]]
        guess_rating = predict_naive(user - 1, movie - 1)
        predictions.append(guess_rating)
        targets.append(real_rating)
    predictions = np.array(predictions, dtype=np.double)
    targets = np.array(targets, dtype=np.double)
    loss = rmse(predictions, targets)
    print('Loss = ', round(loss, 2))

ratings = load_data('u1.base')
# all_ratings = load_data('u.data')
cal_mean()
test('u1.test')

movie_mean[1000:1682]

t = np.array([1, np.nan, 3])
print(np.isnan(t).any())
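
# As a quick sanity check of the baseline formula (movie mean + user mean - global
# mean), here is a tiny hand-made ratings matrix, not part of the MovieLens data;
# zeros mark missing ratings.

toy = np.array([[5, 3, 0],
                [4, 0, 1]], dtype=float)

toy_all_mean = toy[toy != 0].mean()                                              # 3.25
toy_user_mean = np.array([toy[u][toy[u] != 0].mean() for u in range(2)])         # [4.0, 2.5]
toy_movie_mean = np.array([toy[:, m][toy[:, m] != 0].mean() for m in range(3)])  # [4.5, 3.0, 1.0]

# Baseline guess for user 1, movie 1 (a rating that is missing above):
print(toy_movie_mean[1] + toy_user_mean[1] - toy_all_mean)  # 3.0 + 2.5 - 3.25 = 2.25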
MovieLens.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import pandas as pd

df = pd.read_csv('Casos_Diarios_Estado_Nacional_Defunciones_20210621.csv')
df

# Here we remove the national aggregate ("Nacional") row so that only states remain
df = df.query("nombre != 'Nacional'")
df

df.nlargest(10, 'poblacion')

mpstates = df.nlargest(10, 'poblacion')

# The 10 most populated states in Mexico are...
mpstates

df2 = pd.read_csv('Casos_Diarios_Municipio_Defunciones_20210621.csv')
df2

df2.nlargest(10, 'poblacion')

mpmunicip = df2.nlargest(10, 'poblacion')

# The 10 most populated municipalities in Mexico are...
mpmunicip
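
# A possible next step (sketch only): cumulative deaths per 100k inhabitants for the
# ten most populated states. This assumes that every column other than the key,
# 'poblacion' and 'nombre' columns holds a daily count, and that the key column is
# named 'cve_ent' - check df.columns before relying on it.

daily_cols = [c for c in mpstates.columns if c not in ('cve_ent', 'poblacion', 'nombre')]
per_100k = (mpstates[daily_cols].sum(axis=1) / mpstates['poblacion']) * 100_000
per_100k.index = mpstates['nombre']
per_100k.sort_values(ascending=False)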
Notebooks/prev/Week1_A1_AVB.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # default_exp example_mining.unsupervised.traceability.approach.cisco # - # # Neural Unsupervised Approaches for SE Traceability [approach] # # > This module is dedicated to evaluate word2vec/doc2vec or any neural unsupervised approaches on traceability datasets. Consider to Copy the entire notebook for a new and separeted empirical evaluation. # > # > Author: @danaderp April 2020 # This copy is for Cisco purposes. It was adapted to process private github data from cisco. # + #TODO # http://www.ashukumar27.io/similarity_functions/ # https://www.kdnuggets.com/2017/08/comparing-distance-measurements-python-scipy.html # https://towardsdatascience.com/importance-of-distance-metrics-in-machine-learning-modelling-e51395ffe60d # https://www.kdnuggets.com/2019/01/comparison-text-distance-metrics.html # - #hide # #! pip install seaborn # #! pip install sklearn # #!pip install pyprg # !pip install pyemd # export # Imports import numpy as np import gensim import pandas as pd from itertools import product from random import sample import functools import os from enum import Enum, unique, auto #export from datetime import datetime import seaborn as sns #export import logging logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO) #export from sklearn.metrics import precision_recall_curve from sklearn.metrics import plot_precision_recall_curve from sklearn.metrics import auc import matplotlib.pyplot as plt from prg import prg from pandas.plotting import scatter_matrix from pandas.plotting import lag_plot import math as m import random as r import collections from sklearn.metrics.pairwise import cosine_similarity #export from gensim.models import WordEmbeddingSimilarityIndex from gensim.similarities import SparseTermSimilarityMatrix from gensim import corpora #https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.cosine.html #export from scipy.spatial import distance from scipy.stats import pearsonr #export from sklearn.metrics import average_precision_score from sklearn.metrics import roc_curve from sklearn.metrics import roc_auc_score from sklearn.metrics import confusion_matrix import ds4se as ds # ## Confusion Matrix ##TODO Move the confusion matrix to SupervisedVectorEvaluation y_score_threshold = [0 if elem<=0.8 else 1 for elem in supevisedEval.y_score] #Hardcoded 0.7 Threshold #TODO a Variation threshold analysis tn, fp, fn, tp = confusion_matrix(supevisedEval.y_test, y_score_threshold).ravel() (tn, fp, fn, tp) # ## Precision-Racall-Gain # Based on the library here: [link](https://github.com/meeliskull/prg/tree/master/Python_package). # The area under traditional PR curves can easily favour models with lower expected F1 score than others, and so the use of Precision-Recall-Gain curves will result in better model selection [(Flach & Kull, 2015)](http://people.cs.bris.ac.uk/~flach//PRGcurves/). # One might choose PRG if there is little interest in identifying false negatives [(from Blog)](https://medium.com/@alexabate/i-did-something-boring-so-you-dont-have-to-9140ca46c84d). supevisedEval.Compute_precision_recall_gain() # ## Compute the average precision score¶ # Precision is a metric that quantifies the number of correct positive predictions made. 
# # Recall is a metric that quantifies the number of correct positive predictions made out of all positive predictions that could have been made. supevisedEval.Compute_avg_precision_same_plot() # ## Compute ROC Curve # An ROC curve (or receiver operating characteristic curve) is a plot that summarizes the performance of a binary classification model on the positive class [(see Blog)](https://machinelearningmastery.com/roc-curves-and-precision-recall-curves-for-imbalanced-classification/). # # Use ROC when both classes detection is equally important — When we want to give equal weight to both classes prediction ability we should look at the ROC curve [link](https://towardsdatascience.com/what-metrics-should-we-use-on-imbalanced-data-set-precision-recall-roc-e2e79252aeba). supevisedEval.Compute_roc_curve() # ## Compute distribution of similarities word2vec #Basic Statistics filter_metrics = supevisedEval.df_filtered #word2vec.df_ground_link filter_metrics.describe() filter_metrics.shape scatter_matrix(filter_metrics, alpha=0.2, figsize=(12, 12), diagonal='kde') # Lag plots are used to check if a data set or time series is random. Random data should not exhibit any structure in the lag plot. Non-random structure implies that the underlying data are not random. The lag argument may be passed, and when lag=1 the plot is essentially data[:-1] vs. data[1:]. lag_plot(filter_metrics[[SimilarityMetric.WMD_sim]]) lag_plot(filter_metrics[DistanceMetric.WMD]) # calculate model precision-recall curve sim = np.array(filter_metrics[SimilarityMetric.SCM_sim]) #SimilarityMetric.SCM_sim #SimilarityMetric.WMD_sim filter_metrics.hist(column=[SimilarityMetric.WMD_sim,DistanceMetric.WMD,SimilarityMetric.SCM_sim, DistanceMetric.SCM],color='k',bins=50,figsize=[10,5],alpha=0.5) errors = filter_metrics[[SimilarityMetric.WMD_sim,DistanceMetric.WMD,SimilarityMetric.SCM_sim, DistanceMetric.SCM]].std() print(errors) filter_metrics[[SimilarityMetric.WMD_sim,DistanceMetric.WMD,SimilarityMetric.SCM_sim, DistanceMetric.SCM]].plot.kde() filter_metrics[SimilarityMetric.WMD_sim].plot.kde() filter_metrics[SimilarityMetric.WMD_sim].plot.hist(density=True) # Histogram will now be normalized filter_metrics[SimilarityMetric.SCM_sim].plot.kde() filter_metrics[SimilarityMetric.SCM_sim].plot.hist(density=True) # Histogram will now be normalized filter_metrics[DistanceMetric.WMD].plot.kde() filter_metrics[DistanceMetric.WMD].plot.hist(density=True) filter_metrics[DistanceMetric.SCM].plot.kde() filter_metrics[DistanceMetric.SCM].plot.hist(density=True) filter_metrics.hist(by='Linked?',column=SimilarityMetric.WMD_sim ,figsize=[10, 5],bins=80) filter_metrics.hist(by='Linked?',column=SimilarityMetric.SCM_sim ,figsize=[10, 5],bins=80) filter_metrics.hist(by='Linked?',column=DistanceMetric.WMD,figsize=[10, 5],bins=80) filter_metrics.hist(by='Linked?',column=DistanceMetric.SCM,figsize=[10, 5],bins=80) boxplot = filter_metrics.boxplot(by='Linked?',column=[SimilarityMetric.WMD_sim,DistanceMetric.WMD,SimilarityMetric.SCM_sim, DistanceMetric.SCM],figsize=[7, 7]) # ## Entropy Plots filter_metrics_01 = filter_metrics.copy() filter_metrics_01.dropna(inplace=True) filter_metrics_01[EntropyMetric.MSI_I] def compute_spearman_corr(filter_metrics_01, columns = [EntropyMetric.MSI_I,SimilarityMetric.SCM_sim] ): df_correlation = filter_metrics_01.copy() correlation = df_correlation[columns].corr(method='spearman') #correlation = df_correlation.corr(method='spearman') return correlation[columns[0]].values[1] # Minimum Shared Entropy and Word Distance x1 = 
filter_metrics_01.plot.scatter( x=EntropyMetric.MSI_I, y=SimilarityMetric.WMD_sim, c='DarkBlue', s=1, title = 'SCM-Entropy Correlation {%.2f}' % compute_spearman_corr(filter_metrics_01) ) x1 = filter_metrics_01.plot.scatter( x=EntropyMetric.MSI_X, y=SimilarityMetric.WMD_sim, c='DarkBlue', s=1, title = 'SCM-Extropy Correlation {%.2f}' % compute_spearman_corr(filter_metrics_01,[EntropyMetric.MSI_X,SimilarityMetric.SCM_sim] ) ) filter_metrics_linked = filter_metrics_01[filter_metrics_01['Linked?'] == 1].copy() filter_metrics_nonlinked = filter_metrics_01[filter_metrics_01['Linked?'] == 0].copy() x2 = filter_metrics_01[filter_metrics_01['Linked?'] == 1].plot.scatter( x=EntropyMetric.MSI_I, y=SimilarityMetric.SCM_sim, c='Red', s=1, title = 'Liked SCM-Entropy Correlation {%.2f}' % compute_spearman_corr(filter_metrics_linked) ) #x2.text(0,0,'test') x2_ = filter_metrics_nonlinked.plot.scatter( x=EntropyMetric.MSI_I, y=SimilarityMetric.SCM_sim, c='DarkBlue', s=1, title = 'non-Linked SCM-Entropy Correlation {%.2f}' % compute_spearman_corr(filter_metrics_nonlinked) ) #Information levels vs semantics fig, ax = plt.subplots() filter_metrics_01.plot.scatter( x = EntropyMetric.MSI_I, y = EntropyMetric.MSI_X, c = SimilarityMetric.SCM_sim, #figsize = [12, 6], title = 'Information-Semantic Interactions SCM', colormap = 'viridis', ax = ax, s=1 ) ax.set_xlabel("Minimum Shared Entropy") ax.set_ylabel("Minimum Shared Extropy") #Separated by ground truth Links! fig, ax = plt.subplots() filter_metrics_01[filter_metrics_01['Linked?'] == 1].plot.scatter( x = EntropyMetric.MSI_I, y = EntropyMetric.MSI_X, c = SimilarityMetric.SCM_sim, #figsize = [12, 6], title = 'Information-Semantic Interactions SCM Linked', colormap = 'viridis', ax = ax, s=1 ) ax.set_xlabel("Minimum Shared Entropy") ax.set_ylabel("Minimum Shared Extropy") # + #Separated by ground truth NonLinked! 
fig, ax = plt.subplots() filter_metrics_01[filter_metrics_01['Linked?'] == 0].plot.scatter( x = EntropyMetric.MSI_I, y = EntropyMetric.MSI_X, c = SimilarityMetric.SCM_sim, #figsize = [6, 5], title = 'Information-Semantic Interactions SCM non-Linked', colormap = 'viridis', ax = ax, s=1 ) ax.set_xlabel("Minimum Shared Entropy") ax.set_ylabel("Minimum Shared Extropy") # - ax7 = filter_metrics_01.plot.scatter( x = EntropyMetric.MSI_X, y = EntropyMetric.MSI_I, c = SimilarityMetric.SCM_sim, #figsize = [12, 6], title = 'Information-Semantic Interactions SCM', colormap = 'viridis', s=1 ) ax7.set_xlabel("Minimum Shared Extropy") ax7.set_ylabel("Minimum Shared Entropy") fig, ax = plt.subplots() filter_metrics_01.plot.scatter( x = EntropyMetric.MSI_I, y = EntropyMetric.MSI_X, c = SimilarityMetric.WMD_sim, #figsize = [12, 6], title = 'Information-Semantic Interactions WMD', colormap = 'viridis', ax = ax ) ax.set_xlabel("Minimum Shared Entropy") ax.set_ylabel("Minimum Shared Extropy") fig, ax = plt.subplots() filter_metrics_01[filter_metrics_01['Linked?'] == 1].plot.scatter( x = EntropyMetric.MSI_I, y = EntropyMetric.MSI_X, c = SimilarityMetric.WMD_sim, #figsize = [12, 6], title = 'Information-Semantic Interactions WMD Linked', colormap = 'viridis', ax = ax ) ax.set_xlabel("Minimum Shared Entropy") ax.set_ylabel("Minimum Shared Extropy") fig, ax = plt.subplots() filter_metrics_01[filter_metrics_01['Linked?'] == 0].plot.scatter( x = EntropyMetric.MSI_I, y = EntropyMetric.MSI_X, c = SimilarityMetric.WMD_sim, #figsize = [12, 6], title = 'Information-Semantic Interactions WMD non-Linked', colormap = 'viridis', ax = ax ) ax.set_xlabel("Minimum Shared Entropy") ax.set_ylabel("Minimum Shared Extropy") filter_metrics.head() # # Artifacts Similarity with Doc2Vec # Try to reproduce the same empirical evaluation like here: [link](https://arxiv.org/pdf/1507.07998.pdf). Pay attention to: # - Accuracy vs. Dimensionality (we can replace accuracy for false positive rate or true positive rate) # - Visualize paragraph vectors using t-sne # - Computing Cosine Distance and Similarity. More about similarity [link](https://www.kdnuggets.com/2017/08/comparing-distance-measurements-python-scipy.html) #path_to_trained_model": 'test_data/models/pv/conv/[doc2vec-Py-Java-PVDBOW-500-20E-1592609630.689167].model', #"path_to_trained_model": 'test_data/models/pv/conv/[doc2vec-Py-Java-Wiki-PVDBOW-500-20E[15]-1592941134.367976].model', path_to_trained_model = 'test_data/models/[doc2vec-Py-Java-PVDBOW-500-20E-8k-1594572857.17191].model' def doc2vec_params(): return { "vectorizationType": VectorizationType.doc2vec, "linkType": LinkType.req2tc, "system": 'libest', "path_to_trained_model": path_to_trained_model, "source_path": '/tf/main/benchmarking/traceability/testbeds/nltk/[libest-pre-req].csv', "target_path": '/tf/main/benchmarking/traceability/testbeds/nltk/[libest-pre-tc].csv', "system_path": '/tf/main/benchmarking/traceability/testbeds/nltk/[libest-pre-all].csv', "saving_path": 'test_data/', "names": ['Source','Target','Linked?'] } doc2vec_params = doc2vec_params() doc2vec_params #Export class Doc2VecSeqVect(BasicSequenceVectorization): def __init__(self, params): super().__init__(params) self.new_model = gensim.models.Doc2Vec.load( params['path_to_trained_model'] ) self.new_model.init_sims(replace=True) # Normalizes the vectors in the word2vec class. 
self.df_inferred_src = None self.df_inferred_trg = None self.dict_distance_dispatcher = { DistanceMetric.COS: self.cos_scipy, SimilarityMetric.Pearson: self.pearson_abs_scipy, DistanceMetric.EUC: self.euclidean_scipy, DistanceMetric.MAN: self.manhattan_scipy } def distance(self, metric_list, link): '''Iterate on the metrics''' ν_inferredSource = list(self.df_inferred_src[self.df_inferred_src['ids'].str.contains(link[0])]['inf-doc2vec']) w_inferredTarget = list(self.df_inferred_trg[self.df_inferred_trg['ids'].str.contains(link[1])]['inf-doc2vec']) dist = [ self.dict_distance_dispatcher[metric](ν_inferredSource,w_inferredTarget) for metric in metric_list] logging.info("Computed distances or similarities "+ str(link) + str(dist)) return functools.reduce(lambda a,b : a+b, dist) #Always return a list def computeDistanceMetric(self, links, metric_list): '''It is computed the cosine similarity''' metric_labels = [ self.dict_labels[metric] for metric in metric_list] #tracking of the labels distSim = [[link[0], link[1], self.distance( metric_list, link )] for link in links] #Return the link with metrics distSim = [[elem[0], elem[1]] + elem[2] for elem in distSim] #Return the link with metrics return distSim, functools.reduce(lambda a,b : a+b, metric_labels) def InferDoc2Vec(self, steps=200): '''Activate Inference on Target and Source Corpus''' self.df_inferred_src = self.df_source.copy() self.df_inferred_trg = self.df_target.copy() self.df_inferred_src['inf-doc2vec'] = [self.new_model.infer_vector(artifact.split(),steps=steps) for artifact in self.df_inferred_src['text'].values] self.df_inferred_trg['inf-doc2vec'] = [self.new_model.infer_vector(artifact.split(),steps=steps) for artifact in self.df_inferred_trg['text'].values] logging.info("Infer Doc2Vec on Source and Target Complete") # ### Testing Doc2Vec SequenceVectorization doc2vec = Doc2VecSeqVect(params = doc2vec_params) #[step1]Apply Doc2Vec Inference doc2vec.InferDoc2Vec(steps=200) doc2vec.df_inferred_src.head(2) #test_inferDoc2Vec_trg = inferDoc2Vec(df_target) #test_inferDoc2Vec_trg.head() doc2vec.df_inferred_trg.head(2) pearsonr(doc2vec.df_inferred_trg['inf-doc2vec'][0], doc2vec.df_inferred_trg['inf-doc2vec'][0]) #[step 2]NonGroundTruth Computation metric_l = [DistanceMetric.EUC,DistanceMetric.COS,DistanceMetric.MAN]# , SimilarityMetric.Pearson] doc2vec.ComputeDistanceArtifacts( sampling=False, samples = 50, metric_list = metric_l ) doc2vec.df_nonground_link.head() #[step 3]Saving Non-GroundTruth Links doc2vec.SaveLinks() #Loading Non-GroundTruth Links (change the timestamp with the assigned in the previous step) df_nonglinks_doc2vec = LoadLinks(timestamp=1594653325.258415, params=doc2vec_params) df_nonglinks_doc2vec.head() #[step 4]GroundTruthMatching Testing path_to_ground_truth = '/tf/main/benchmarking/traceability/testbeds/groundtruth/english/[libest-ground-req-to-tc].txt' doc2vec.MatchWithGroundTruth(path_to_ground_truth) doc2vec.df_ground_link #[step 5]Saving GroundTruth Links doc2vec.SaveLinks(grtruth = True) #Loading Non-GroundTruth Links (change the timestamp with the assigned in the previous step) df_glinks_doc2vec = LoadLinks(timestamp=1594653350.19946, params=doc2vec_params, grtruth = True) df_glinks_doc2vec.head() # # Approach Evaluation and Interpretation (doc2vec) #supervisedEvalDoc2vec = SupervisedVectorEvaluation(doc2vec, similarity=SimilarityMetric.EUC_sim) #supervisedEvalDoc2vec = SupervisedVectorEvaluation(doc2vec, similarity=SimilarityMetric.COS_sim) supervisedEvalDoc2vec = SupervisedVectorEvaluation(doc2vec, 
similarity=SimilarityMetric.MAN_sim) supervisedEvalDoc2vec.y_test supervisedEvalDoc2vec.y_score supervisedEvalDoc2vec.Compute_precision_recall_gain() supervisedEvalDoc2vec.Compute_avg_precision() supervisedEvalDoc2vec.Compute_roc_curve() # ## Compute distribution of similarities doc2vec #Basic Statistics filter_doc2vec = doc2vec.df_ground_link filter_doc2vec.describe() lag_plot(filter_doc2vec[[SimilarityMetric.EUC_sim]]) lag_plot(filter_doc2vec[DistanceMetric.EUC]) filter_doc2vec.hist(column=[SimilarityMetric.EUC_sim,DistanceMetric.EUC],color='k',bins=50,figsize=[10,5],alpha=0.5) #Separate distance from similarity analysis here errors = filter_doc2vec[[SimilarityMetric.EUC_sim,DistanceMetric.EUC]].std() print(errors) filter_doc2vec[[SimilarityMetric.EUC_sim,DistanceMetric.EUC]].plot.kde() filter_doc2vec.hist(by='Linked?',column=SimilarityMetric.EUC_sim,figsize=[10, 5],bins=80) filter_doc2vec.hist(by='Linked?',column=DistanceMetric.EUC,figsize=[10, 5],bins=80) #separate the distance from the similarity plot boxplot = filter_doc2vec.boxplot(by='Linked?',column=[SimilarityMetric.EUC_sim,DistanceMetric.EUC],figsize=[10, 5]) boxplot = filter_doc2vec.boxplot(by='Linked?',column=[SimilarityMetric.EUC_sim],figsize=[10, 5]) # ## Combining Doc2vec and Word2vec # Please check this post for futher detatils [link](https://stats.stackexchange.com/questions/217614/intepreting-doc2vec-cosine-similarity-between-doc-vectors-and-word-vectors) # ! nbdev_build_docs #<-------- [Activate when stable] # ! nbdev_build_lib from nbdev.export import notebook2script notebook2script() # + # #! pip install -e . # - from ds4se.mgmnt.prep.conv import *
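
# As a standalone illustration of how the distance columns above relate to the
# similarity columns: scipy returns a distance, and a bounded similarity can be
# derived from it. The conversion used inside `BasicSequenceVectorization` is not
# shown in this notebook, so the `1/(1+d)` form below is only one common choice,
# not necessarily the one the library applies.

# +
from scipy.spatial import distance

v = np.array([0.1, 0.3, 0.6])
w = np.array([0.2, 0.2, 0.6])

d_cos = distance.cosine(v, w)      # cosine distance = 1 - cosine similarity
d_euc = distance.euclidean(v, w)
d_man = distance.cityblock(v, w)   # Manhattan (city block / L1) distance

print('cosine similarity   :', 1 - d_cos)
print('euclidean similarity:', 1 / (1 + d_euc))
print('manhattan similarity:', 1 / (1 + d_man))
# -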
nbs/examples/examples_3.2_mining.unsupervised.traceability.approach.cisco.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] deletable=true editable=true # # Talks markdown generator for academicpages # # Takes a TSV of talks with metadata and converts them for use with [academicpages.github.io](academicpages.github.io). This is an interactive Jupyter notebook ([see more info here](http://jupyter-notebook-beginner-guide.readthedocs.io/en/latest/what_is_jupyter.html)). The core python code is also in `talks.py`. Run either from the `markdown_generator` folder after replacing `talks.tsv` with one containing your data. # # TODO: Make this work with BibTex and other databases, rather than Stuart's non-standard TSV format and citation style. # + deletable=true editable=true import pandas as pd import os # - # ## Data format # # The TSV needs to have the following columns: title, type, url_slug, venue, date, location, talk_url, description, with a header at the top. Many of these fields can be blank, but the columns must be in the TSV. # # - Fields that cannot be blank: `title`, `url_slug`, `date`. All else can be blank. `type` defaults to "Talk" # - `date` must be formatted as YYYY-MM-DD. # - `url_slug` will be the descriptive part of the .md file and the permalink URL for the page about the paper. # - The .md file will be `YYYY-MM-DD-[url_slug].md` and the permalink will be `https://[yourdomain]/talks/YYYY-MM-DD-[url_slug]` # - The combination of `url_slug` and `date` must be unique, as it will be the basis for your filenames # # This is how the raw file looks (it doesn't look pretty, use a spreadsheet or other program to edit and create). # !cat talks.tsv # ## Import TSV # # Pandas makes this easy with the read_csv function. We are using a TSV, so we specify the separator as a tab, or `\t`. # # I found it important to put this data in a tab-separated values format, because there are a lot of commas in this kind of data and comma-separated values can get messed up. However, you can modify the import statement, as pandas also has read_excel(), read_json(), and others. # + deletable=true editable=true talks = pd.read_csv("talks.tsv", sep="\t", header=0) talks # - # ## Escape special characters # # YAML is very picky about how it takes a valid string, so we are replacing single and double quotes (and ampersands) with their HTML encoded equivilents. This makes them look not so readable in raw format, but they are parsed and rendered nicely. # + deletable=true editable=true html_escape_table = { "&": "&amp;", '"': "&quot;", "'": "&apos;" } def html_escape(text): if type(text) is str: return "".join(html_escape_table.get(c,c) for c in text) else: return "False" # - # ## Creating the markdown files # # This is where the heavy lifting is done. This loops through all the rows in the TSV dataframe, then starts to concatentate a big string (```md```) that contains the markdown for each type. It does the YAML metadata first, then does the description for the individual page. 
# + deletable=true editable=true
loc_dict = {}

for row, item in talks.iterrows():

    md_filename = str(item.date) + "-" + item.url_slug + ".md"
    html_filename = str(item.date) + "-" + item.url_slug
    year = item.date[:4]

    md = "---\ntitle: \"" + item.title + '"\n'
    md += "collection: talks" + "\n"

    if len(str(item.type)) > 3:
        md += 'type: "' + item.type + '"\n'
    else:
        md += 'type: "Talk"\n'

    md += "permalink: /talks/" + html_filename + "\n"

    if len(str(item.venue)) > 3:
        md += 'venue: "' + item.venue + '"\n'

    # The date field is required for every talk, so the check here looks at item.date
    if len(str(item.date)) > 3:
        md += "date: " + str(item.date) + "\n"

    if len(str(item.location)) > 3:
        md += 'location: "' + str(item.location) + '"\n'

    md += "---\n"

    if len(str(item.talk_url)) > 3:
        md += "\n[More information here](" + item.talk_url + ")\n"

    if len(str(item.description)) > 3:
        md += "\n" + html_escape(item.description) + "\n"

    md_filename = os.path.basename(md_filename)
    #print(md)

    with open("../_talks/" + md_filename, 'w') as f:
        f.write(md)
# -

# These files are in the talks directory, one directory below where we're working from.

# + deletable=true editable=true
# !ls ../_talks

# + deletable=true editable=true
# !cat ../_talks/2013-03-01-tutorial-1.md
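
# As an optional follow-up, the rules stated at the top (title, url_slug and date
# must not be blank, date must be YYYY-MM-DD, and the url_slug + date combination
# must be unique) can be checked programmatically before writing any files. The
# cell below is only a sketch of such a check.

# + deletable=true editable=true
import re

date_pattern = re.compile(r"^\d{4}-\d{2}-\d{2}$")

problems = []
for row, item in talks.iterrows():
    if pd.isna(item.title) or not str(item.title).strip():
        problems.append((row, "missing title"))
    if pd.isna(item.url_slug) or not str(item.url_slug).strip():
        problems.append((row, "missing url_slug"))
    if not date_pattern.match(str(item.date)):
        problems.append((row, "date is not YYYY-MM-DD"))
if talks.duplicated(subset=["url_slug", "date"]).any():
    problems.append(("all", "url_slug + date combinations are not unique"))

problems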
markdown_generator/talks.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="cedf868076a2" # ##### Copyright 2020 The Cirq Developers # + cellView="form" id="906e07f6e562" #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # + [markdown] id="166af89a7bc3" # # Neutral Atom Device Class # + [markdown] id="416c50754585" # <table class="tfo-notebook-buttons" align="left"> # <td> # <a target="_blank" href="https://www.example.org/cirq/tutorials/educators/neutral_atom"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on QuantumLib</a> # </td> # <td> # <a target="_blank" href="https://colab.research.google.com/github/quantumlib/Cirq/blob/master/docs/tutorials/educators/neutral_atom.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> # </td> # <td> # <a target="_blank" href="https://github.com/quantumlib/Cirq/blob/master/docs/tutorials/educators/neutral_atom.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> # </td> # <td> # <a href="https://storage.googleapis.com/tensorflow_docs/Cirq/docs/tutorials/educators/neutral_atom.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> # </td> # </table> # + [markdown] id="bNlUyYQcgMF-" # This notebook provides an introductiong to making circuits that are compatable with neutral atom devices in Cirq. The NeutralAtomDevice class is available starting with release 0.5.0. # + id="GTjMbjyAfJCK" # install release containing NeutralAtomDevice and IonDevice classes # !pip install cirq~=0.5.0 --quiet # + [markdown] id="Z0X2AWrhrcHR" # Let's get some imports out of the way and construct an instance of the NeutralAtomDevice class. We won't worry about the details of the constructor arguments just yet, we just want an instance of the class for demonstration purposes. # + id="On6Wrh3XhSPO" import cirq import numpy as np from matplotlib import pyplot as plt ms = cirq.Duration(nanos=10**6) us = cirq.Duration(nanos=10**3) neutral_device = cirq.NeutralAtomDevice(measurement_duration = 5*ms, gate_duration = 100*us, control_radius = 2, max_parallel_z = 3, max_parallel_xy = 3, max_parallel_c = 3, qubits = [cirq.GridQubit(row, col) for col in range(3) for row in range(3)]) # + [markdown] id="v3YMaPw1hfV_" # ## NeutralAtomDevice # Disclaimer: As with any proposed architecture for quantum computing, several research groups around the world are working towards a device based on neutral atom qubits. Each research group has a different approach, such as using different atomic species or working with a different number of dimensions of atomic qubit arrays. As such, the NeutralAtomDevice class will not accurately reflect all such devices. The class is based on the two dimensional cesium array at the University of Wisconsin-Madison in the research group of <NAME>. 
# Development of this device is being pursued as part of a strategic partnership between the University of Wisconsin-Madison and ColdQuanta.
#
#
# ---
#
# ### Native Gate Set
# The gates supported by the NeutralAtomDevice class can be placed in three categories:
#
#
# 1. Single qubit rotations about the Z axis
# 2. Single qubit rotations about an arbitrary axis in the X-Y plane (I refer to these as XY gates in this tutorial)
# 3. Controlled gates: CZ, CNOT, CCZ, CCNOT (TOFFOLI)
#
#
# For the single qubit rotations, any rotation angle is fine. However, for the controlled gates, the rotation must be a multiple of $\pi$ due to the physical implementation of the gates. Cirq supports raising gates to arbitrary exponents; in order to satisfy the restriction on the controlled gate rotation, the exponent of a controlled gate must be an integer.
#
#
# You can specify the connectivity of the controlled gates via the control_radius argument of the NeutralAtomDevice constructor. This argument specifies the maximum distance between pairs of atoms acted on by a controlled gate.
#
#
# The neutral atom scheme for controlled gates also allows for multiple controls, so long as every pair of atoms acted on by the controlled gate is close enough to each other. The NeutralAtomDevice class does not currently support gates with more than two controls, but we hope to implement support for them in the near term.
#
#
# ---
#
#
# Some examples of gates in Cirq that the device supports are given below.

# + id="Hotk4cHCpXCV"
# Single qubit Z rotation by pi/5 radians
neutral_device.validate_gate(cirq.Rz(np.pi/5))

# Single qubit rotation about an axis in the X-Y plane
# (phase_exponent and exponent are given in half-turns, i.e. units of pi)
neutral_device.validate_gate(cirq.PhasedXPowGate(phase_exponent=np.pi/3, exponent=np.pi/7))

# Controlled gate with integer exponent
neutral_device.validate_gate(cirq.CNOT)

# Controlled-NOT gate with two controls
neutral_device.validate_gate(cirq.TOFFOLI)

# + [markdown] id="nc4zLydMsrkV"
# Some examples of gates in Cirq that the device does not support are given below. If uncommented, these calls raise errors explaining why the gates are not valid for a NeutralAtomDevice.

# + id="ChT4QK7TsabR"
# Controlled gate with non-integer exponent (rotation angle must be a multiple of pi)
# neutral_device.validate_gate(cirq.CZ**1.5)

# + id="UPKPh0XMs7zh"
# Hadamard gates rotate about an axis in the X-Z plane, which isn't compatible with our single qubit rotations
# neutral_device.validate_gate(cirq.H)

# + [markdown] id="7QvsNStFtXll"
# ### Constructor
#
# Let's take a look at the constructor for the NeutralAtomDevice class. There are a number of arguments needed to fully parameterize a device. The specific values of these numbers are expected to improve over time for any physical device, so rather than hard-coding how the device(s) look at one specific point in time, the constructor lets the user loosen or tighten the constraints as they see fit.
#
# ---
#
# Gate duration variables: Depending on the specific implementation of the quantum gate, there may be a different amount of time needed to perform various actions. These arguments accept Duration and timedelta objects.
#
# * measurement_duration: How long it takes for the device to measure a qubit
#
# * gate_duration: The maximum amount of time it takes to execute a quantum gate
#
# ---
#
# Parallelism limitations: Neutral atom devices implement quantum gates in one of two ways. One method is to hit the entire qubit array with microwaves to simultaneously act on every qubit.
# This method implements global XY gates, which take up to 100 microseconds to perform. Alternatively, we can shine laser light on some fraction of the array. Gates of this type typically take around 1 microsecond to perform. This method can act on one or more qubits at a time, up to some limit dictated by the available laser power and the beam steering system used to address the qubits. Each category in the native gate set has its own limit.
#
#
# * max_parallel_z: The maximum number of single qubit Z-axis rotations that can be applied in parallel
#
# * max_parallel_xy: The maximum number of single qubit XY rotations that can be applied in parallel
#
# * max_parallel_c: The maximum number of atoms that can be affected by controlled gates simultaneously
#
# Z-axis rotations and XY rotations use different light, so they have independent constraints. However, controlled gates make use of the light used to perform single qubit rotations in addition to extra laser frequencies. As such, the max_parallel_c argument is bounded above by the lesser of max_parallel_z and max_parallel_xy.
#
# ---
#
# Other variables:
#
# * qubits: A list of the qubits on the device. The only supported qubit type is GridQubit.
#
# * control_radius: Every pair of atoms acted on by a controlled gate must be within this maximum distance of each other for the gate to succeed. The distance between qubits is measured using their row and column values as coordinates.
#
#
# ---
#
# The example device below has the following properties:
#
# * The device is a 3x3 grid of qubits
# * Measurements take 5 ms
# * Gates may take as long as 100 us if we utilize global microwave gates. Otherwise, a more reasonable bound would be 1 us.
# * Controlled gates have next-nearest neighbor connectivity (control_radius of 2)
# * A maximum of 3 qubits may be simultaneously acted on by any gate category

# + id="PPYYsBZr2UTD"
ms = cirq.Duration(nanos=10**6)
us = cirq.Duration(nanos=10**3)
neutral_device = cirq.NeutralAtomDevice(measurement_duration = 5*ms,
                                        gate_duration = 100*us,
                                        control_radius = 2,
                                        max_parallel_z = 3,
                                        max_parallel_xy = 3,
                                        max_parallel_c = 3,
                                        qubits = [cirq.GridQubit(row, col) for col in range(3) for row in range(3)])

# + [markdown] id="Ug-2oBPPtZzU"
# ### Moment/Circuit Rules
#
# Now that we know how to parameterize a NeutralAtomDevice, we can discuss some examples of valid and invalid moments and circuits. All operations in a moment are treated as if they are performed simultaneously, which leaves us with a few constraints we need to abide by.
#
#
# 1. We need to respect the maximum number of parallel gates for any gate type, as discussed above
# 2. All instances of gates in the same category in the same moment must be identical
# 3. Since controlled gates make use of all the types of light used to make gates, controlled gates cannot be applied in parallel with other gate types
# 4. Qubits acted on by different controlled gates in parallel must be farther apart than control_radius, so that the entanglement mechanism doesn't cause the gates to interfere with one another
# 5. Measurements must be terminal
#
#
#
# ---
#
# Some examples of valid moments are given below. We know these moments are valid because the circuit accepted them: every moment appended to a circuit built on this device has to pass the neutral_device validation methods.
# + id="Nr-rfUgOtDxE" # Moment/Circuit Examples moment_circ = cirq.Circuit(device=neutral_device) qubits = [cirq.GridQubit(row, col) for col in range(3) for row in range(3)] # Three qubits affected by a Z gate in parallel with Three qubits affected # by an XY gate operation_list_one = cirq.Z.on_each(*qubits[:3])+cirq.X.on_each(*qubits[3:6]) valid_moment_one = cirq.Moment(operation_list_one) moment_circ.append(valid_moment_one) # A TOFFOLI gate on three qubits that are close enough to eachother operation_list_two = [cirq.TOFFOLI.on(*qubits[:3])] valid_moment_two = cirq.Moment(operation_list_two) moment_circ.append(valid_moment_two) print(moment_circ) # + [markdown] id="l6jE7NnKtjQU" # Frequently, you aren't explicitly constructing moments and adding them into circuits. You may more frequently find yourself directly appending a list of operations into the circuit. Let's look at the result if we attempt this method with a global operation. # + id="u8oA9R5aCbEP" global_circuit = cirq.Circuit(device=neutral_device) global_list_of_operations = cirq.X.on_each(*qubits) global_circuit.append(global_list_of_operations) print(global_circuit) # + [markdown] id="ZPd3emooFqa5" # That doesn't look right! It looks like the insertion method tried to place the gates in one at a time and decided it needed three moments to satisfy the request. There are two ways of getting around this in Cirq. One is to manually construct the moment as above. # + id="FxmKkfCEF744" global_moment_circuit = cirq.Circuit(device=neutral_device) global_moment = cirq.Moment(cirq.X.on_each(*qubits)) global_moment_circuit.append(global_moment) print(global_moment_circuit) # + [markdown] id="3E-tqsmlGKjD" # Another way to achieve this task is the ParallelGateOperation class. Most Operation objects correspond to a single gate acting on some qubits. The ParallelGateOperation class corresponds to multiple copies of a single gate acting on some qubits in parallel. Since it is a single operation, the default insertion methods won't break it up into multiple moments when adding it into the circuit! # + id="OIZux5rFGrLs" parallel_gate_op_circuit = cirq.Circuit(device=neutral_device) parallel_gate_op = cirq.ParallelGateOperation(cirq.X,qubits) parallel_gate_op_circuit.append(parallel_gate_op) print(parallel_gate_op_circuit) # + [markdown] id="1byvHcWvHx-D" # ###Grover's Algorithm # Now that we have the details out of the way, we can take a look at what implementing a real quantum algorithm might look like on a NeutralAtomDevice. Since the current limit on having multiple controls on a gate is two, lets look at the three qubit Grover search. For this problem, there is some special target state out of the $N = 2^n$ possible basis states that we'd like to identify. We have access to a quantum oracle that applies a phase to this state relative to every other basis state. The algorithm succeeds if we measure the desired state when we make a measurement on all the qubits after the circuit is run. # # # --- # # Classically, this corresponds to an unordered database search with a database of size $N=2^n$. On average, it will take $N/2$ applications of a classical oracle to find the desired state in the database. With Grover's algorithm, it is possible to succeed with high probability in $O(\sqrt N)$ applications of the quantum oracle. 
#
#
# ---
#
# Grover's algorithm works by initially preparing the quantum state
# $$|s\rangle = \frac{1}{\sqrt{N}}\sum_{x=0}^{N-1}|x\rangle.$$
#
# This is an equal superposition of every state in the computational basis, and it can be obtained by applying a Hadamard gate or a $Y^{1/2}$ gate to each qubit of a register initialized to $|0\rangle$. After this initial state is prepared, Grover's algorithm consists of applying the quantum oracle followed by a diffusion operator.
#
# The quantum oracle places a relative phase on the target state we want to measure, and the diffusion operator uses that phase difference to amplify the probability of measuring the target state relative to the other states in the computational basis.
# The diffusion operator is given by $U_s = 2 |s\rangle \langle s| - I.$
#
#
# ---
#
#
# We can calculate the probability of success as a function of the number of repetitions $R$ of the oracle and diffusion operator. The result is
# $$P_{\mathrm{success}} = \sin^2\!\left((2R+1)\arcsin\left(\frac{1}{\sqrt{N}}\right)\right),$$
# with the maximum probability of success occurring when $R \approx \pi \sqrt{N}/4.$
#
#
# ---
#
# We know what these operators look like in Dirac notation, but now we have to compile them into the language of quantum gates. We'll start with the oracle. We can construct the oracle out of controlled-Z gates and NOT (X) gates. A controlled-Z gate applies a $\pi$ phase shift to the state where each qubit is in the $|1\rangle$ state. We can select a different state to receive the phase shift by sandwiching the controlled-Z gate with X gates on any of the qubits whose desired measurement outcome is $|0\rangle$. We can implement a generator for the oracle operator with the code below.

# + id="uQ50bqZKZXVf"
def oracle(qubits, key_bits):
    yield (cirq.X(q) for (q, bit) in zip(qubits, key_bits) if not bit)
    yield cirq.CCZ(*qubits)
    yield (cirq.X(q) for (q, bit) in zip(qubits, key_bits) if not bit)

# Try changing the key to see the relationship between
# the placement of the X gates and the key
key = (1, 0, 1)
qubits = [cirq.GridQubit(0, col) for col in range(3)]
oracle_example_circuit = cirq.Circuit.from_ops(oracle(qubits, key))
print(oracle_example_circuit)

# + [markdown] id="hOCGBzJ9dviO"
# A quick search for how to implement the diffusion operator provides us with the implementation below.

# + id="63Y-GsLiH8Nm"
def diffusion_operator(qubits):
    yield cirq.H.on_each(*qubits)
    yield cirq.X.on_each(*qubits)
    yield cirq.CCZ(*qubits)
    yield cirq.X.on_each(*qubits)
    yield cirq.H.on_each(*qubits)

qubits = [cirq.GridQubit(0, col) for col in range(3)]
diffusion_circuit = cirq.Circuit.from_ops(diffusion_operator(qubits))
print(diffusion_circuit)

# + [markdown] id="yo4E2JSoe00x"
# If we put it all together and note that for the three qubit case the optimal number of applications of the oracle and diffusion operator is two, we get the circuit below.
# + id="TSfFAmk2e_Q8" def initial_hadamards(qubits): yield cirq.H.on_each(*qubits) uncompiled_circuit = cirq.Circuit() key = (1,0,1) qubits = [cirq.GridQubit(0,0),cirq.GridQubit(0,1),cirq.GridQubit(0,2)] uncompiled_circuit.append(initial_hadamards(qubits)) uncompiled_circuit.append(oracle(qubits,key)) uncompiled_circuit.append(diffusion_operator(qubits)) uncompiled_circuit.append(oracle(qubits,key)) uncompiled_circuit.append(diffusion_operator(qubits)) print(uncompiled_circuit) # + [markdown] id="iTmPFGsahCh-" # This circuit does implement Grover's algorithm, but it doesn't work for our NeutralAtomDevice class because of the Hadamard gates. The Hadamard gates show up in the Diffusion Operator steps and the initial "Hadamard Everything" step. We can simplify the Diffusion Operator definition and the initial Hadamard step by decomposing the Hadamard gate into products of XY gates. # # # * $H=X Y^{1/2}$ # * $H=Y^{-1/2} X$ # * $H=Y^{1/2}Z$ # # Using these two rules, we can instead make a neutral atom comptable version of the circuit given below. # + id="HsjMeVNkie2b" def neutral_atom_initial_step(qubits): yield cirq.ParallelGateOperation(cirq.Y**(1/2), qubits) def neutral_atom_diffusion_operator(qubits): yield cirq.ParallelGateOperation(cirq.Y**(1/2), qubits) yield cirq.CCZ(*qubits) yield cirq.ParallelGateOperation(cirq.Y**(-1/2), qubits) ms = cirq.Duration(nanos=10**6) us = cirq.Duration(nanos=10**3) qubits = [cirq.GridQubit(row, col) for col in range(3) for row in range(1)] three_qubit_device = cirq.NeutralAtomDevice(measurement_duration = 5*ms, gate_duration = us, control_radius = 2, max_parallel_z = 3, max_parallel_xy = 3, max_parallel_c = 3, qubits=qubits) key = (0,1,0) compiled_grover_circuit = cirq.Circuit(device=three_qubit_device) compiled_grover_circuit.append(neutral_atom_initial_step(qubits)) compiled_grover_circuit.append(oracle(qubits,key)) compiled_grover_circuit.append(neutral_atom_diffusion_operator(qubits)) compiled_grover_circuit.append(oracle(qubits,key)) compiled_grover_circuit.append(neutral_atom_diffusion_operator(qubits)) print(compiled_grover_circuit) # + [markdown] id="zBfHYu7lfwbY" # Let's quickly verify that this implementation of the algorithm on our device actually works by altering the number of times we apply the oracle and diffusion operators. We would expect to reproduce the analytical result given above. 
# + id="MUC33tMYgJQV" def grover_circuit_with_n_repetitions(n, key): ms = cirq.Duration(nanos=10**6) us = cirq.Duration(nanos=10**3) qubits = [cirq.GridQubit(row, col) for col in range(3) for row in range(1)] three_qubit_device = cirq.NeutralAtomDevice(measurement_duration = 5*ms, gate_duration = us, control_radius = 2, max_parallel_z = 3, max_parallel_xy = 3, max_parallel_c = 3, qubits=qubits) grover_circuit = cirq.Circuit(device=three_qubit_device) grover_circuit.append(neutral_atom_initial_step(qubits)) for repetition in range(n): grover_circuit.append(oracle(qubits,key)) grover_circuit.append(neutral_atom_diffusion_operator(qubits)) return grover_circuit success_probabilities = [] key = (0,1,1) N = 2**3 #Convert key from binary to a base 10 number diag = sum(2**(2-count) for (count, val) in enumerate(key) if val) num_points = 10 for repetitions in range(num_points): test_circuit = grover_circuit_with_n_repetitions(repetitions, key) sim = cirq.Simulator() result = sim.simulate(test_circuit) rho = result.density_matrix_of(qubits) success_probabilities.append(np.real(rho[diag][diag])) plt.scatter(range(num_points), success_probabilities, label="Simulation") x = np.linspace(0, num_points, 1000) y = np.sin((2*x+1)*np.arcsin(1/np.sqrt(N)))**2 plt.plot(x, y, label="Theoretical Curve") plt.title("Probability of Success Vs. Number of Oracle-Diffusion Operators") plt.ylabel("Probability of Success") plt.xlabel("Number of Times Oracle and Diffusion Operators are Applied") plt.legend(loc='upper right') plt.show() # + [markdown] id="oSPDCZPCmugO" # The results match! And we actually see that if we can afford to increase the number of times we apply the oracle and diffusion operators to six, we can get an improved probability of success.
docs/tutorials/educators/neutral_atom.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Exploring the voicing of two repeated roman numeral annotations # *If you are asked to harmonize a progression like* # # ``` # I IV | V V | I || # ``` # # *Should you change the voicing of the two* `V` *chords, or can you just repeat the same one*? import music21 from urllib.request import urlopen import re # # Getting the data # The analyzed Bach chorales are located in the [KernScores](http://kern.ccarh.org/) website. # # They can be visualized interactively in the [Verovio Humdrum Viewer](https://verovio.humdrum.org/). # # They can also be scraped with a single query to the Kern Scores server: https://kern.humdrum.org/data?l=BachChoralesAnalyzed # # I used the last approach to collect the data for this experiment. bachChoralesURL = 'https://kern.humdrum.org/data?l=BachChoralesAnalyzed' choralesData = urlopen(bachChoralesURL).read().decode('iso-8859-1') # This is a hack to force music21 into reading the **harm spines choralesData2 = re.sub(r'(\*MM[0-9]+\n)', r"\1*staff1/2/3/4\t*\t*staff1\t*staff2\t*staff3\t*staff4\n", choralesData) # Semicolons confuse the **harm parser choralesData2 = choralesData2.replace(';', '') chorales = re.split(r'!!!!SEGMENT: (chor[0-9]+)\.krn\n', choralesData2) # The first string is empty chorales = chorales[1:] # + out = music21.stream.Stream() sameVoicing = [] for name, chorale in zip(chorales[0::2], chorales[1::2]): print(name) s = music21.converter.parseData(chorale, format='humdrum') rna = {rn.offset: rn for rn in s.flat.getElementsByClass("RomanNumeral")} offsets = list(rna.keys()) pairs = zip(list(rna.values())[:-1], list(rna.values())[1:]) filename = name for idx, pair in enumerate(pairs): rn1, rn2 = pair offs1 = offsets[idx] offs2 = offsets[idx + 1] if rn1.key == rn2.key and rn1.figure == rn2.figure: notes1 = list( s.flat.getElementsByOffset( offs1, mustBeginInSpan=False, classList="Note" ) ) notes1 = [n.pitch.nameWithOctave for n in notes1] notes2 = list( s.flat.getElementsByOffset( offs2, mustBeginInSpan=False, classList="Note" ) ) notes2 = [n.pitch.nameWithOctave for n in notes2] isSameVoicing = notes1 == notes2 sameVoicing.append(isSameVoicing) print( "\t{}\t{}\t{}\t{}\t{}".format( offsets[idx], notes1, offsets[idx + 1], notes2, isSameVoicing, ) ) chord1 = music21.chord.Chord(notes1, quarterLength=2) chord1.style.color = "green" if isSameVoicing else "red" chord1.addLyric(f"{offs1}") chord1.addLyric(filename) chord2 = music21.chord.Chord(notes2, quarterLength=2) chord2.addLyric(f"{offs2}") chord2.style.color = "green" if isSameVoicing else "red" out.append(chord1) out.append(chord2) filename = "" same = sum(sameVoicing) / len(sameVoicing) print( """ Number of consecutive identical roman numerals: {} Same voicing: {:.2f}% Different voicing: {:.2f}% """.format( len(sameVoicing), same * 100.0, (1 - same) * 100.0 ) ) out.insert(0, music21.clef.Treble8vbClef()) # - # In the table above, offsets are given in quarter notes from the beginning of the score (starting from 0.0). # # In all instances, `chord1` and `chord2` are contiguous, express the same chord (i.e., same roman numeral in the same key context), and have been annotated by a human expert. # Show some of the scores out.show() # In the figure above, red-colored pairs represent a change in the voicing between the two contiguous annotations of the same roman numeral. 
#
# Green-colored pairs show examples in which the voicing was identical in the two contiguous annotations.

# Generally, when a voice-leading algorithm finds two contiguous instances of the same roman numeral, it should be safe to assume that the voicing should change, for example, by changing the position of the three upper voices and/or the octave of the bass.
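# As a rough sketch of what such a voicing change could look like in code (the helper below and its example pitches are hypothetical, not taken from the corpus): keep the bass note, move the lowest of the three upper voices up an octave, and re-sort the upper voices.

from music21 import chord, pitch

def revoice(pitch_names):
    """Return an alternative voicing of the same pitch classes: same bass, upper voices repositioned."""
    bass, *upper = [pitch.Pitch(name) for name in pitch_names]
    upper[0] = upper[0].transpose('P8')   # lift the lowest upper voice by an octave
    upper.sort(key=lambda p: p.midi)      # keep the upper voices in ascending order
    return chord.Chord([bass] + upper)

# e.g., a G major chord voiced (G2, D4, G4, B4) becomes (G2, G4, B4, D5)
print(revoice(['G2', 'D4', 'G4', 'B4']))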
content/post/repeatedharmonies/.ipynb_checkpoints/index-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Preprocessing
# 1. For individual observers, entries for paintings that were never presented are discarded.
# 2. All entries are combined into a single [results_combined.csv](results_combined.csv) table.
# 3. For all entries with an estimate (_i.e._, Confidence>0), the last estimate is used to compute the relative location of the light source both in polar and in Cartesian coordinate systems. Results of the preprocessing are saved to [results_with_estimates.csv](results_with_estimates.csv).

# importing prerequisites
from ast import literal_eval
import os

import numpy as np
import pandas as pd

# ### Loading all observers into a single data frame and saving it to 'results_combined.csv'

# +
data_folder= 'Data'
files= ['01KSC94w', '02SEF89m', '03SSK93m', '04IKB95w', '05SKL94w', '06MMN92m', '07HHA96w']

results= None
for current_file in files:
    # loading data for an individual observer
    current_observer= pd.read_csv(os.path.join(data_folder, current_file+".csv"), sep=';', decimal='.', encoding='utf8')

    # discarding any entry for a painting that was not presented (no Confidence report)
    current_observer= current_observer[current_observer.Confidence.notnull()]

    # due to a programming error, the trial timestamp column is absent for some observers
    if not 'TrialTimestamp' in current_observer.columns.values:
        print(current_file)
        current_observer['TrialTimestamp']= np.nan

    # merging tables
    if results is None:
        results= current_observer
    else:
        results= pd.concat([results, current_observer])

# resetting index (multiple index entries were combined during merging)
results= results.reset_index(drop=True)

# saving
results.to_csv(os.path.join('Data', 'results_combined.csv'), sep=';', index_label=False, decimal= '.', index=False, encoding='utf8')
# -

# ### Extracting an estimate as an angle in polar coordinates and as dx/dy components

# +
### computing estimate both in polar and in cartesian coordinate systems
results['dx']= np.nan # randomly inverted in ~50% of trials
results['true_dx']= np.nan
results['dy']= np.nan
results['angle']= np.nan # randomly flipped around 0 deg in ~50% of trials
results['true_angle']= np.nan

for iRow in results.index:
    if results.Confidence[iRow]>0: # 0 confidence means lack of estimate
        current_estimate= literal_eval(results.Estimates[iRow])
        results.set_value(iRow, 'dx', -(current_estimate[-1]['End'][0]- current_estimate[-1]['Start'][0]))
        results.set_value(iRow, 'dy', current_estimate[-1]['End'][1]- current_estimate[-1]['Start'][1])
        results.set_value(iRow, 'angle', np.arctan2(results.dy[iRow], results.dx[iRow]))

        if results.FlippedLR[iRow]:
            results.set_value(iRow, 'true_dx', -results.dx[iRow])
        else:
            results.set_value(iRow, 'true_dx', results.dx[iRow])
        results.set_value(iRow, 'true_angle', np.arctan2(results.dy[iRow], results.true_dx[iRow]))

## converting polar angle estimates so that 1) 0 degrees is at 12 o'clock, 2) they are within -pi..pi range, and 3) angles are counted CLOCKWISE
for angle_type in ['angle', 'true_angle']:
    # 1) 0 degrees is at 12 o'clock
    results.loc[results.Confidence>0, angle_type]= results.loc[results.Confidence>0, angle_type]-np.pi/2

    # 2) estimates are within -pi..pi range
    results.loc[(results.Confidence>0) & (results[angle_type]<-np.pi), angle_type]= results.loc[(results.Confidence>0) & (results[angle_type]<-np.pi), angle_type]+2*np.pi

    # 3) angles are
counted clockwise results.loc[results.Confidence>0, angle_type]= -results.loc[results.Confidence>0, angle_type] ## adding a degrees version results['angle_deg']= np.degrees(results.angle) results['true_angle_deg']= np.degrees(results.true_angle) # saving results.to_csv(os.path.join('Data', 'results_with_estimates.csv'), sep=';', decimal='.', index_label=False, index=False, encoding='utf8')
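# A small worked example (illustration only, not using the experimental data) of the angle convention above: an estimate with dx = 1, dy = 1 sits at 45 degrees counterclockwise from the x-axis in the standard convention, and after the three conversion steps it ends up at +45 degrees in the clockwise-from-12-o'clock convention.

# +
import numpy as np

example_dx, example_dy= 1.0, 1.0                    # hypothetical estimate components
example_angle= np.arctan2(example_dy, example_dx)   # standard polar angle (0 at 3 o'clock, counterclockwise)
example_angle= example_angle-np.pi/2                # 1) 0 degrees is at 12 o'clock
if example_angle<-np.pi:                            # 2) keep the value within the -pi..pi range
    example_angle= example_angle+2*np.pi
example_angle= -example_angle                       # 3) angles are counted clockwise
print(np.degrees(example_angle))                    # 45.0
# -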
0p. Preprocessing - combines observers and extracts estimates.ipynb