https://twitter.com/iitenki_moruten/status/1467683477474930688
# Original: https://github.com/moruten/julia-code/blob/1dafca3e1a4e3b36445c2263e440f6e4056b90aa/2021-12-6-test-no1.ipynb using Plots using Random using Distributions using QuadGK #============関数定義============================================# function action(x) 0.5*x*x end function deriv_action(x) x end function hamiltonian(x,p) action(x) + 0.5*p*p end function HMC_update(x,Nt,dt) #backup x_old = x p = rand(Normal(0,1)) #check H_ini = hamiltonian(x,p) x = molecular_dynamics!(x,p,Nt,dt) H_fin = hamiltonian(x,p) r = rand() ΔH = H_fin-H_ini if r < exp(-ΔH) return x,1 #accept else return x_old,0 end end function molecular_dynamics!(x,p,Nt,dt) force = 0.0 #1/2step x = x + p * 0.5*dt #1~Nt-1 step for j=1:(Nt-1) force = deriv_action(x) p = p - dt * force x = x + dt * p end #Nt step p = p - dt * deriv_action(x) x = x + 0.5 * dt * p return x end #============関数終わり============================================# #============計算=========================================================================# #セットアップ Ntest = 300000 Nt = 20 dt = 1.0/Nt conf_vec = zeros(Ntest) accept_count = 0 ret = 0 x = 0.0 sumxx = 0.0 sumx = 0.0 #計算 for i=1:Ntest x,ret = HMC_update(x,Nt,dt) accept_count += ret conf_vec[i] = x sumx += x sumxx += x*x end println("P(accept) = $(accept_count/Ntest)") println("<x> = $(sumx/Ntest)") println("<xx> = $(sumxx/Ntest)") #=======確認=============================================================================# xr = range(-5,5,length=1000) f1(x) = exp(-0.5*x^2) f2(x) = exp(-x^2) Z1,error1 = quadgk(f1,-Inf,Inf) Z2,error2 = quadgk(f2,-Inf,Inf) g1(x) = f1(x)/Z1 g2(x) = f2(x)/Z2 histogram(conf_vec,norm=:true,label="data") plot!(xr,[g1.(xr) g2.(xr)],lw=3,label=["exp(-0.5x^2)/Z1" "exp(-x^2)/Z2"]) #========================================================================================# # Revised 1 using Plots using Random using Distributions using QuadGK #============関数定義============================================# function action(x) 0.5*x*x end function deriv_action(x) x end function hamiltonian(x,p) action(x) + 0.5*p*p end function HMC_update(x,Nt,dt) #backup x_old = x p = rand(Normal(0,1)) #check H_ini = hamiltonian(x,p) x, p = molecular_dynamics!(x,p,Nt,dt) # <========== ここ H_fin = hamiltonian(x,p) r = rand() ΔH = H_fin-H_ini if r < exp(-ΔH) return x,1 #accept else return x_old,0 end end function molecular_dynamics!(x,p,Nt,dt) force = 0.0 #1/2step x = x + p * 0.5*dt #1~Nt-1 step for j=1:(Nt-1) force = deriv_action(x) p = p - dt * force x = x + dt * p end #Nt step p = p - dt * deriv_action(x) x = x + 0.5 * dt * p return x, p # <========== ここ end #============関数終わり============================================# #============計算=========================================================================# #セットアップ Ntest = 300000 Nt = 20 dt = 1.0/Nt conf_vec = zeros(Ntest) accept_count = 0 ret = 0 x = 0.0 sumxx = 0.0 sumx = 0.0 #計算 for i=1:Ntest x,ret = HMC_update(x,Nt,dt) accept_count += ret conf_vec[i] = x sumx += x sumxx += x*x end println("P(accept) = $(accept_count/Ntest)") println("<x> = $(sumx/Ntest)") println("<xx> = $(sumxx/Ntest)") #=======確認=============================================================================# xr = range(-5,5,length=1000) f1(x) = exp(-0.5*x^2) f2(x) = exp(-x^2) Z1,error1 = quadgk(f1,-Inf,Inf) Z2,error2 = quadgk(f2,-Inf,Inf) g1(x) = f1(x)/Z1 g2(x) = f2(x)/Z2 histogram(conf_vec,norm=:true,label="data") plot!(xr,[g1.(xr) g2.(xr)],lw=3,label=["exp(-0.5x^2)/Z1" "exp(-x^2)/Z2"]) #========================================================================================# # 
Revised 1 - second test using Plots using Random using Distributions using QuadGK #============関数定義============================================# function action(x) 3(x^2 - 1)^2 end function deriv_action(x) 6x*(x^2 - 1) end function hamiltonian(x,p) action(x) + 0.5*p*p end function HMC_update(x,Nt,dt) #backup x_old = x p = rand(Normal(0,1)) #check H_ini = hamiltonian(x,p) x, p = molecular_dynamics!(x,p,Nt,dt) # <========== ここ H_fin = hamiltonian(x,p) r = rand() ΔH = H_fin-H_ini if r < exp(-ΔH) return x,1 #accept else return x_old,0 end end function molecular_dynamics!(x,p,Nt,dt) force = 0.0 #1/2step x = x + p * 0.5*dt #1~Nt-1 step for j=1:(Nt-1) force = deriv_action(x) p = p - dt * force x = x + dt * p end #Nt step p = p - dt * deriv_action(x) x = x + 0.5 * dt * p return x, p # <========== ここ end #============関数終わり============================================# #============計算=========================================================================# #セットアップ Ntest = 300000 Nt = 20 dt = 1.0/Nt conf_vec = zeros(Ntest) accept_count = 0 ret = 0 x = 0.0 sumxx = 0.0 sumx = 0.0 #計算 for i=1:Ntest x,ret = HMC_update(x,Nt,dt) accept_count += ret conf_vec[i] = x sumx += x sumxx += x*x end println("P(accept) = $(accept_count/Ntest)") println("<x> = $(sumx/Ntest)") println("<xx> = $(sumxx/Ntest)") #=======確認=============================================================================# xr = range(-2,2,length=1000) f(x) = exp(-action(x)) Z,error1 = quadgk(f,-Inf,Inf) g(x) = f(x)/Z histogram(conf_vec,norm=:true,label="data") plot!(xr,g.(xr),lw=3,label="exp(-action(x))/Z", legend=:outertop) #========================================================================================# # Revised 2 using Plots using Random using Distributions using QuadGK #============関数定義============================================# function action(x) x^2/2 end function deriv_action(x) x end function hamiltonian(x,p) action(x) + 0.5*p*p end function HMC_update(x,Nt,dt) #backup x_old = x p = rand(Normal(0,1)) #check H_ini = hamiltonian(x,p) x, p = molecular_dynamics!(x,p,Nt,dt) # <========== ここ H_fin = hamiltonian(x,p) r = rand() ΔH = H_fin-H_ini if r < exp(-ΔH) return x,1 #accept else return x_old,0 end end function molecular_dynamics!(x,p,Nt,dt) p -= deriv_action(x) * dt/2 x += p * dt for j in 2:Nt p -= deriv_action(x) * dt x += p * dt end p -= deriv_action(x) * dt/2 return x, p # <========== ここ end #============関数終わり============================================# #============計算=========================================================================# #セットアップ Ntest = 300000 Nt = 20 dt = 1.0/Nt conf_vec = zeros(Ntest) accept_count = 0 ret = 0 x = 0.0 sumxx = 0.0 sumx = 0.0 #計算 for i=1:Ntest x,ret = HMC_update(x,Nt,dt) accept_count += ret conf_vec[i] = x sumx += x sumxx += x*x end println("P(accept) = $(accept_count/Ntest)") println("<x> = $(sumx/Ntest)") println("<xx> = $(sumxx/Ntest)") #=======確認=============================================================================# xr = range(-5,5,length=1000) f(x) = exp(-action(x)) Z,error1 = quadgk(f,-Inf,Inf) g(x) = f(x)/Z histogram(conf_vec,norm=:true,label="data",alpha=0.3,bin=100) plot!(xr,g.(xr),lw=3,label="exp(-action(x))/Z", legend=:outertop) #========================================================================================# # Revised 2 - second test using Plots using Random using Distributions using QuadGK #============関数定義============================================# function action(x) 3(x^2 - 1)^2 end function deriv_action(x) 6x*(x^2 - 1) end function hamiltonian(x,p) 
action(x) + 0.5*p*p end function HMC_update(x,Nt,dt) #backup x_old = x p = rand(Normal(0,1)) #check H_ini = hamiltonian(x,p) x, p = molecular_dynamics!(x,p,Nt,dt) # <========== ここ H_fin = hamiltonian(x,p) r = rand() ΔH = H_fin-H_ini if r < exp(-ΔH) return x,1 #accept else return x_old,0 end end function molecular_dynamics!(x,p,Nt,dt) p -= deriv_action(x) * dt/2 x += p * dt for j in 2:Nt p -= deriv_action(x) * dt x += p * dt end p -= deriv_action(x) * dt/2 return x, p # <========== ここ end #============関数終わり============================================# #============計算=========================================================================# #セットアップ Ntest = 300000 Nt = 20 dt = 1.0/Nt conf_vec = zeros(Ntest) accept_count = 0 ret = 0 x = 0.0 sumxx = 0.0 sumx = 0.0 #計算 for i=1:Ntest x,ret = HMC_update(x,Nt,dt) accept_count += ret conf_vec[i] = x sumx += x sumxx += x*x end println("P(accept) = $(accept_count/Ntest)") println("<x> = $(sumx/Ntest)") println("<xx> = $(sumxx/Ntest)") #=======確認=============================================================================# xr = range(-2,2,length=1000) f(x) = exp(-action(x)) Z,error1 = quadgk(f,-Inf,Inf) g(x) = f(x)/Z histogram(conf_vec,norm=:true,label="data",alpha=0.3,bin=100) plot!(xr,g.(xr),lw=3,label="exp(-action(x))/Z", legend=:outertop) #========================================================================================#
P(accept) = 0.8680966666666666 <x> = -0.004413428035654437 <xx> = 0.8885940396252682
MIT
0025/HMC revised.ipynb
genkuroki/public
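A quick sanity check on the printed moments above: for the harmonic action $S(x)=x^2/2$ the exact values are $\langle x\rangle = 0$ and $\langle x^2\rangle = \int x^2 e^{-x^2/2}\,dx \,/ \int e^{-x^2/2}\,dx = 1$. The reported $\langle x\rangle \approx -0.004$ is consistent, while $\langle x^2\rangle \approx 0.889$ sits noticeably below 1; a bias of that kind is what one would expect from the original cell, where `molecular_dynamics!` returns only `x`, so `H_fin` is evaluated with the stale momentum and the accept/reject step uses the wrong ΔH. The "Revised" cells return `(x, p)` (the lines marked `# <========== ここ`, i.e. "here") precisely to fix this; which cell produced this particular output is not indicated.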
Analysis - exp51 - DQN with a conv net. First tuning attempt.
import os import csv import numpy as np import torch as th from glob import glob from pprint import pprint import matplotlib import matplotlib.pyplot as plt %matplotlib inline %config InlineBackend.figure_format = 'retina' import seaborn as sns sns.set(font_scale=1.5) sns.set_style('ticks') matplotlib.rcParams.update({'font.size': 16}) matplotlib.rc('axes', titlesize=16) from notebook_helpers import load_params from notebook_helpers import load_monitored from notebook_helpers import join_monitored from notebook_helpers import score_summary def load_data(path, run_index=(0, 20)): runs = range(run_index[0], run_index[1]+1) exps = [] for r in runs: file = os.path.join(path, "run_{}_monitor.csv".format(int(r))) try: mon = load_monitored(file) except FileNotFoundError: mon = None exps.append(mon) return exps
_____no_output_____
MIT
notebooks/wythoff_exp51.ipynb
CoAxLab/azad
Load data
path = "/Users/qualia/Code/azad/data/wythoff/exp51/" exp_51 = load_data(path, run_index=(0, 99)) print(len(exp_51)) pprint(exp_51[1].keys()) pprint(exp_51[1]['score'][:20])
dict_keys(['file', 'episode', 'loss', 'score']) [0.0851063829787234, 0.07476635514018691, 0.07692307692307693, 0.06923076923076923, 0.06896551724137931, 0.06535947712418301, 0.07317073170731707, 0.06976744186046512, 0.06593406593406594, 0.06030150753768844, 0.06944444444444445, 0.07234042553191489, 0.06938775510204082, 0.06640625, 0.06439393939393939, 0.06593406593406594, 0.06382978723404255, 0.06688963210702341, 0.0664451827242525, 0.0673076923076923]
MIT
notebooks/wythoff_exp51.ipynb
CoAxLab/azad
Plots. All parameter summary. How's it look overall? Timecourse
plt.figure(figsize=(6, 3)) for r, mon in enumerate(exp_51): if mon is not None: _ = plt.plot(mon['episode'], mon['score'], color='black') _ = plt.ylim(0, 1) _ = plt.ylabel("Optimal score") _ = plt.tight_layout() _ = plt.xlabel("Episode")
_____no_output_____
MIT
notebooks/wythoff_exp51.ipynb
CoAxLab/azad
Histograms of final values
data = [] plt.figure(figsize=(6, 3)) for r, mon in enumerate(exp_51): if mon is not None: data.append(np.max(mon['score'])) _ = plt.hist(data, bins=5, range=(0,1), color='black') _ = plt.xlabel("Max score") _ = plt.ylabel("Count") _ = plt.tight_layout() data = [] plt.figure(figsize=(6, 3)) for r, mon in enumerate(exp_51): if mon is not None: data.append(np.mean(mon['score'])) _ = plt.hist(data, bins=5, range=(0,1), color='black') _ = plt.xlabel("Mean score") _ = plt.ylabel("Count") _ = plt.tight_layout()
_____no_output_____
MIT
notebooks/wythoff_exp51.ipynb
CoAxLab/azad
03: Generate counts. This script takes a directory of `.csv` files containing entity counts by month in the following format:
```csv
,2012-01,2012-02
meat,1011.0,873.0
salt,805.0,897.0
chicken,694.0,713.0
```
It sums the counts from all files, keeps only the `N` most common records, and calculates the variance, scaled by the average. This helps select a more "interesting" subset of entities with the most variance over time. The result is the set of most variant entities (minus the most frequent, which tend to be less interesting), and it can be used to create an interactive [bar chart race visualization](https://public.flourish.studio/visualisation/1532208/).
INPUT_DIR = "./counts" # directory of counts file(s) created in the previous step OUTPUT_FILE = "./output_counts.csv" # path to output file MOST_COMMON = 10_000 # number of most common entities to keep DROP_MOST_FREQUENT = 10 # number of most frequent entities to drop N_TOTAL = 50 # number of results to export !pip install pandas import csv from collections import Counter, defaultdict from pathlib import Path import pandas as pd def read_csv(file_): counts = Counter() for row in csv.DictReader(file_): term = row[""] for year, freq in row.items(): if year != "" and freq: counts[(term, year)] = int(float(freq)) return counts def prune_rows(counts_by_term, n): totals = Counter() for term, counts in counts_by_term.items(): if "Total" in counts: total = counts["Total"] else: total = sum(counts.values()) totals[term] = total pruned = defaultdict(dict) for term, _ in totals.most_common(n): pruned[term] = counts_by_term[term] return pruned def sum_counts(directory, n=10000): directory = Path(directory) counts = Counter() for path in directory.glob("**/*.csv"): with path.open("r", encoding="utf8") as file_: counts.update(read_csv(file_)) by_term = defaultdict(Counter) for (term, month), freq in counts.items(): by_term[term][month] = freq records = prune_rows(by_term, n) months = set() for term, counts in records.items(): months.update(counts.keys()) fields = ["Term"] + list(sorted(months)) rows = [] for term, month_freqs in records.items(): month_freqs["Term"] = term for month in months: month_freqs.setdefault(month, 0.0) rows.append(month_freqs) return pd.DataFrame.from_records(rows, index="Term", columns=fields) def sort_by_frequency(df): most_common = df.sum(axis=1) most_common.sort_values(ascending=False, inplace=True) return df.loc[most_common.index] def drop_most_frequent(df, n): return sort_by_frequency(df)[n:] def get_most_variant(df, n, mean_weight=False): cvars = df.var(axis=1) if mean_weight: cvars = cvars / df.mean(axis=1) cvars = cvars.sort_values(ascending=False) return df.loc[cvars.index][:n] DF = sum_counts(INPUT_DIR, MOST_COMMON) DF SUBSET = drop_most_frequent(DF, DROP_MOST_FREQUENT) SUBSET = get_most_variant(SUBSET, N_TOTAL, mean_weight=True)[:200] SUBSET = sort_by_frequency(SUBSET) SUBSET = SUBSET.cumsum(axis=1) SUBSET SUBSET.to_csv(OUTPUT_FILE)
_____no_output_____
MIT
ner-food-ingredients/03_Generate_counts.ipynb
hertelm/projects
Downloads the odc-colab Python module and runs it to set up ODC.
!wget -nc https://raw.githubusercontent.com/ceos-seo/odc-colab/master/odc_colab.py from odc_colab import odc_colab_init odc_colab_init(install_odc_gee=True)
_____no_output_____
Apache-2.0
notebooks/02.09.Colab_Mission_Coincidences.ipynb
gamedaygeorge/odc-colab
Downloads an existing index and populates the new ODC environment with it.
from odc_colab import populate_db populate_db()
_____no_output_____
Apache-2.0
notebooks/02.09.Colab_Mission_Coincidences.ipynb
gamedaygeorge/odc-colab
Mission Coincidences. This notebook finds coincident acquisition regions for three missions: Landsat-8, Sentinel-2, and Sentinel-1. Each of these missions has a different orbit and revisit rate, so coincident pairs (two missions at the same location on the same day) are not that common, and coincident triplets (all three missions at the same location on the same day) are extremely rare. These coincidences are quite valuable for comparing datasets for calibration and validation purposes, or for providing viable locations for a combined product analysis. Load Data Cube Configuration and Import Utilities
# Load Data Cube Configuration from odc_gee import earthengine dc = earthengine.Datacube(app='Mission_Coincidences') # Import Utilities from IPython.display import display_html from utils.data_cube_utilities.clean_mask import landsat_qa_clean_mask import matplotlib.pyplot as plt import numpy as np import pandas as pd import sys import xarray as xr
_____no_output_____
Apache-2.0
notebooks/02.09.Colab_Mission_Coincidences.ipynb
gamedaygeorge/odc-colab
Create new functions to display data output and find coincidences * `display_side_by_side`: A method [found here](https://stackoverflow.com/a/44923103) for displaying Pandas DataFrames next to each other in one output. * `find_coincidence`: A helper method that returns the various intersections of dates for the three products. * `s1_rgb`: Generates an RGB image from a Sentinel-1 dataset.
def display_side_by_side(*args, index=True): html_str='' for df in args: if index: html_str+=df.to_html() else: html_str+=df.to_html(index=False) display_html(html_str.replace('table','table style="display:inline"'),raw=True) def find_coincidence(ls_index, s1_index, s2_index): return {'LS⋂S2': ls_index.intersection(s2_index).values, 'LS⋂S1': ls_index.intersection(s1_index).values, 'S2⋂S1': s2_index.intersection(s1_index).values, 'LS⋂S2⋂S1': ls_index.intersection(s2_index).intersection(s1_index).values} def s1_rgb(ds, rrange=(-25, 0), grange=(-30,-5), brange=(0,15)): r = ds.vv g = ds.vh b = r - g # Clip the data to remove extreme outliers r = np.clip(r, rrange[0], rrange[1]) g = np.clip(g, grange[0], grange[1]) b = np.clip(b, brange[0], brange[1]) # Normalize the data to improve colors r = (r-r.min())/(r.max()-r.min()) g = (g-g.min())/(g.max()-g.min()) b = (b-b.min())/(b.max()-b.min()) # Name the bands r.name = 'vv' g.name = 'vh' b.name = 'vv/vh' return xr.merge((r,g,b))
_____no_output_____
Apache-2.0
notebooks/02.09.Colab_Mission_Coincidences.ipynb
gamedaygeorge/odc-colab
Analysis parameters * `latitude`: The latitude extents for the analysis area. * `longitude`: The longitude extents for the analysis area. * `time`: The time window for the analysis (Year-Month).
# MODIFY HERE # Select the center of an analysis region (lat_long) # Adjust the surrounding box size (box_size) around the center (in degrees) # Remove the comment tags (#) below to change the sample location # Barekese Dam, Ghana, Africa lat_long = (6.846, -1.709) box_size_deg = 0.05 # Calculate the latitude and longitude bounds of the analysis box latitude = (lat_long[0]-box_size_deg/2, lat_long[0]+box_size_deg/2) longitude = (lat_long[1]-box_size_deg/2, lat_long[1]+box_size_deg/2) time = ('2019-1', '2019-12') # The code below renders a map that can be used to view the region. from utils.data_cube_utilities.dc_display_map import display_map display_map(latitude,longitude)
_____no_output_____
Apache-2.0
notebooks/02.09.Colab_Mission_Coincidences.ipynb
gamedaygeorge/odc-colab
Load partial datasets. Load only the dates, coordinates, and scene classification values (if available) for determining cloud coverage.
# Define the product details to load in the next code block platforms = {'LANDSAT_8': dict(product=f'ls8_google',latitude=latitude,longitude=longitude), 'SENTINEL-1': dict(product=f's1_google',group_by='solar_day'), 'SENTINEL-2': dict(product=f's2_google',group_by='solar_day')} # Load Landsat 8 data including times and pixel_qa (cloud cover) ls_dataset = dc.load(measurements=['pixel_qa'], time=time, **platforms['LANDSAT_8']) # Load Sentinel-2 data including times and scl (cloud cover) s2_dataset = dc.load(like=ls_dataset, measurements=['scl'], time=time, **platforms['SENTINEL-2']) # Load Basic Sentinel-1 data with only time slice details s1_dataset = dc.load(like=ls_dataset, measurements=[], time=time, **platforms['SENTINEL-1'])
_____no_output_____
Apache-2.0
notebooks/02.09.Colab_Mission_Coincidences.ipynb
gamedaygeorge/odc-colab
Cloud Masking. Create cloud masks for the optical data (Landsat-8 and Sentinel-2).
ls_clean_mask = landsat_qa_clean_mask(ls_dataset, platform='LANDSAT_8') s2_clean_mask = (s2_dataset.scl != 0) & (s2_dataset.scl != 1) & \ (s2_dataset.scl != 3) & (s2_dataset.scl != 8) & \ (s2_dataset.scl != 9) & (s2_dataset.scl != 10)
/content/utils/data_cube_utilities/clean_mask.py:278: UserWarning: Please specify a value for `collection`. Assuming data is collection 1. warnings.warn('Please specify a value for `collection`. Assuming data is collection 1.') /content/utils/data_cube_utilities/clean_mask.py:283: UserWarning: Please specify a value for `level`. Assuming data is level 2. warnings.warn('Please specify a value for `level`. Assuming data is level 2.')
Apache-2.0
notebooks/02.09.Colab_Mission_Coincidences.ipynb
gamedaygeorge/odc-colab
Display a table of scenes. Filter optical data by cloud cover.
# MODIFY HERE # Percent of clean pixels in the optical images. # The default is 80% which will yield mostly clear scenes percent_clean = 80 # Display the dates and cloud information for the available scenes ls_df = pd.DataFrame(list(zip(ls_dataset.time.values.astype('datetime64[D]'), [round(mask.mean().item()*100, 2) for mask in ls_clean_mask], [mask.sum().item() for mask in ls_clean_mask])), columns=['Landsat 8 Date', 'clean_pixel_percent', 'clean_pixel_count'])\ .query(f'clean_pixel_percent >= {percent_clean}') s2_df = pd.DataFrame(list(zip(s2_dataset.time.values.astype('datetime64[D]'), [round(mask.mean().item()*100, 2) for mask in s2_clean_mask], [mask.sum().item() for mask in s2_clean_mask])), columns=['Sentinel-2 Date', 'clean_pixel_percent', 'clean_pixel_count'])\ .query(f'clean_pixel_percent >= {percent_clean}') s1_df = pd.DataFrame(list(s1_dataset.time.values.astype('datetime64[D]')), columns=['Sentinel-1 Date']) display_side_by_side(ls_df, s2_df, s1_df)
_____no_output_____
Apache-2.0
notebooks/02.09.Colab_Mission_Coincidences.ipynb
gamedaygeorge/odc-colab
Coincidences. Find the coincidence dates for the datasets using the filtered data from the previous section.
ls_index = pd.Index(ls_df['Landsat 8 Date'].values) s2_index = pd.Index(s2_df['Sentinel-2 Date'].values) s1_index = pd.Index(s1_df['Sentinel-1 Date'].values) # List the double and triple coincidences args = [pd.DataFrame(val, columns=[key]) for key, val in find_coincidence(ls_index, s1_index, s2_index).items()] display_side_by_side(*args, index=False)
_____no_output_____
Apache-2.0
notebooks/02.09.Colab_Mission_Coincidences.ipynb
gamedaygeorge/odc-colab
Plot a single time selection to view the scene details. Select and plot a time from the coincidence results listed above.
# MODIFY HERE # Select a time from the table above. time_selection = '2019-01-22' # Define the plotting bands for each image on the specified date s1 = s2 = ls = None if ls_dataset.time.dt.floor('D').isin(np.datetime64(time_selection)).sum(): ls = dc.load(measurements=['red', 'green', 'blue'], time=time_selection, **platforms['LANDSAT_8']) if s2_dataset.time.dt.floor('D').isin(np.datetime64(time_selection)).sum(): s2 = dc.load(like=ls_dataset, measurements=['red', 'green', 'blue'], time=time_selection, **platforms['SENTINEL-2']) if s1_dataset.time.dt.floor('D').isin(np.datetime64(time_selection)).sum(): s1 = dc.load(like=ls_dataset, measurements=['vv', 'vh'], time=time_selection, **platforms['SENTINEL-1']) # Plot sample images for the specified date. # Based on the selected date, there will be either 2 or 3 images shown below. fig, ax = plt.subplots(2, 2, figsize=(ls_dataset.longitude.size/ls_dataset.latitude.size*16,16)) if ls: ls.isel(time=0).to_array().plot.imshow(ax=ax[0][0], vmin=0, vmax=2000) ax[0][0].set_title('Landsat 8') ax[0][0].xaxis.set_visible(False), ax[0][0].yaxis.set_visible(False) if s2: s2.isel(time=0).to_array().plot.imshow(ax=ax[0][1], vmin=0, vmax=2000) ax[0][1].set_title('Sentinel-2') ax[0][1].xaxis.set_visible(False), ax[0][1].yaxis.set_visible(False) if s1: s1_rgb(s1.isel(time=0)).to_array().plot.imshow(ax=ax[1][0]) ax[1][0].set_title('Sentinel-1') ax[1][0].xaxis.set_visible(False), ax[1][0].yaxis.set_visible(False) ax[1][1].axis('off');
_____no_output_____
Apache-2.0
notebooks/02.09.Colab_Mission_Coincidences.ipynb
gamedaygeorge/odc-colab
Discretization. In this notebook, you will deal with continuous state and action spaces by discretizing them. This will enable you to apply reinforcement learning algorithms that are designed to work only with discrete spaces. 1. Import the Necessary Packages
import sys import gym import numpy as np import pandas as pd import matplotlib.pyplot as plt # Set plotting options %matplotlib inline plt.style.use('ggplot') np.set_printoptions(precision=3, linewidth=120)
_____no_output_____
MIT
discretization/Discretization_Solution.ipynb
Jeromeschmidt/Udacity-Deep-Reinforcement-Nanodegree
2. Specify the Environment, and Explore the State and Action Spaces. We'll use [OpenAI Gym](https://gym.openai.com/) environments to test and develop our algorithms. These simulate a variety of classic as well as contemporary reinforcement learning tasks. Let's use an environment that has a continuous state space, but a discrete action space.
# Create an environment and set random seed env = gym.make('MountainCar-v0') env.seed(505);
_____no_output_____
MIT
discretization/Discretization_Solution.ipynb
Jeromeschmidt/Udacity-Deep-Reinforcement-Nanodegree
Run the next code cell to watch a random agent.
state = env.reset() score = 0 for t in range(200): action = env.action_space.sample() env.render() state, reward, done, _ = env.step(action) score += reward if done: break print('Final score:', score) env.close()
Final score: -200.0
MIT
discretization/Discretization_Solution.ipynb
Jeromeschmidt/Udacity-Deep-Reinforcement-Nanodegree
In this notebook, you will train an agent to perform much better! For now, we can explore the state and action spaces, as well as sample them.
# Explore state (observation) space print("State space:", env.observation_space) print("- low:", env.observation_space.low) print("- high:", env.observation_space.high) # Generate some samples from the state space print("State space samples:") print(np.array([env.observation_space.sample() for i in range(10)])) # Explore the action space print("Action space:", env.action_space) # Generate some samples from the action space print("Action space samples:") print(np.array([env.action_space.sample() for i in range(10)]))
Action space: Discrete(3) Action space samples: [1 0 1 2 0 2 0 1 1 2]
MIT
discretization/Discretization_Solution.ipynb
Jeromeschmidt/Udacity-Deep-Reinforcement-Nanodegree
3. Discretize the State Space with a Uniform Grid. We will discretize the space using a uniformly-spaced grid. Implement the following function to create such a grid, given the lower bounds (`low`), upper bounds (`high`), and number of desired `bins` along each dimension. It should return the split points for each dimension, which will be 1 less than the number of bins. For instance, if `low = [-1.0, -5.0]`, `high = [1.0, 5.0]`, and `bins = (10, 10)`, then your function should return the following list of 2 NumPy arrays: ```[array([-0.8, -0.6, -0.4, -0.2, 0.0, 0.2, 0.4, 0.6, 0.8]), array([-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0])]``` Note that the ends of `low` and `high` are **not** included in these split points. It is assumed that any value below the lowest split point maps to index `0` and any value above the highest split point maps to index `n-1`, where `n` is the number of bins along that dimension.
def create_uniform_grid(low, high, bins=(10, 10)): """Define a uniformly-spaced grid that can be used to discretize a space. Parameters ---------- low : array_like Lower bounds for each dimension of the continuous space. high : array_like Upper bounds for each dimension of the continuous space. bins : tuple Number of bins along each corresponding dimension. Returns ------- grid : list of array_like A list of arrays containing split points for each dimension. """ # TODO: Implement this grid = [np.linspace(low[dim], high[dim], bins[dim] + 1)[1:-1] for dim in range(len(bins))] print("Uniform grid: [<low>, <high>] / <bins> => <splits>") for l, h, b, splits in zip(low, high, bins, grid): print(" [{}, {}] / {} => {}".format(l, h, b, splits)) return grid low = [-1.0, -5.0] high = [1.0, 5.0] create_uniform_grid(low, high) # [test]
Uniform grid: [<low>, <high>] / <bins> => <splits> [-1.0, 1.0] / 10 => [-0.8 -0.6 -0.4 -0.2 0. 0.2 0.4 0.6 0.8] [-5.0, 5.0] / 10 => [-4. -3. -2. -1. 0. 1. 2. 3. 4.]
MIT
discretization/Discretization_Solution.ipynb
Jeromeschmidt/Udacity-Deep-Reinforcement-Nanodegree
Now write a function that can convert samples from a continuous space into their equivalent discretized representation, given a grid like the one you created above. You can use the [`numpy.digitize()`](https://docs.scipy.org/doc/numpy-1.9.3/reference/generated/numpy.digitize.html) function for this purpose. Assume the grid is a list of NumPy arrays containing the following split points: ```[array([-0.8, -0.6, -0.4, -0.2, 0.0, 0.2, 0.4, 0.6, 0.8]), array([-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0])]``` Here are some potential samples and their corresponding discretized representations:
```
[-1.0 , -5.0] => [0, 0]
[-0.81, -4.1] => [0, 0]
[-0.8 , -4.0] => [1, 1]
[-0.5 ,  0.0] => [2, 5]
[ 0.2 , -1.9] => [6, 3]
[ 0.8 ,  4.0] => [9, 9]
[ 0.81,  4.1] => [9, 9]
[ 1.0 ,  5.0] => [9, 9]
```
**Note**: There may be off-by-one differences in binning due to floating-point inaccuracies when samples are close to grid boundaries, but that is alright.
def discretize(sample, grid): """Discretize a sample as per given grid. Parameters ---------- sample : array_like A single sample from the (original) continuous space. grid : list of array_like A list of arrays containing split points for each dimension. Returns ------- discretized_sample : array_like A sequence of integers with the same number of dimensions as sample. """ # TODO: Implement this return list(int(np.digitize(s, g)) for s, g in zip(sample, grid)) # apply along each dimension # Test with a simple grid and some samples grid = create_uniform_grid([-1.0, -5.0], [1.0, 5.0]) samples = np.array( [[-1.0 , -5.0], [-0.81, -4.1], [-0.8 , -4.0], [-0.5 , 0.0], [ 0.2 , -1.9], [ 0.8 , 4.0], [ 0.81, 4.1], [ 1.0 , 5.0]]) discretized_samples = np.array([discretize(sample, grid) for sample in samples]) print("\nSamples:", repr(samples), sep="\n") print("\nDiscretized samples:", repr(discretized_samples), sep="\n")
Uniform grid: [<low>, <high>] / <bins> => <splits> [-1.0, 1.0] / 10 => [-0.8 -0.6 -0.4 -0.2 0. 0.2 0.4 0.6 0.8] [-5.0, 5.0] / 10 => [-4. -3. -2. -1. 0. 1. 2. 3. 4.] Samples: array([[-1. , -5. ], [-0.81, -4.1 ], [-0.8 , -4. ], [-0.5 , 0. ], [ 0.2 , -1.9 ], [ 0.8 , 4. ], [ 0.81, 4.1 ], [ 1. , 5. ]]) Discretized samples: array([[0, 0], [0, 0], [1, 1], [2, 5], [5, 3], [9, 9], [9, 9], [9, 9]])
MIT
discretization/Discretization_Solution.ipynb
Jeromeschmidt/Udacity-Deep-Reinforcement-Nanodegree
4. Visualization. It might be helpful to visualize the original and discretized samples to get a sense of how much error you are introducing.
import matplotlib.collections as mc def visualize_samples(samples, discretized_samples, grid, low=None, high=None): """Visualize original and discretized samples on a given 2-dimensional grid.""" fig, ax = plt.subplots(figsize=(10, 10)) # Show grid ax.xaxis.set_major_locator(plt.FixedLocator(grid[0])) ax.yaxis.set_major_locator(plt.FixedLocator(grid[1])) ax.grid(True) # If bounds (low, high) are specified, use them to set axis limits if low is not None and high is not None: ax.set_xlim(low[0], high[0]) ax.set_ylim(low[1], high[1]) else: # Otherwise use first, last grid locations as low, high (for further mapping discretized samples) low = [splits[0] for splits in grid] high = [splits[-1] for splits in grid] # Map each discretized sample (which is really an index) to the center of corresponding grid cell grid_extended = np.hstack((np.array([low]).T, grid, np.array([high]).T)) # add low and high ends grid_centers = (grid_extended[:, 1:] + grid_extended[:, :-1]) / 2 # compute center of each grid cell locs = np.stack(grid_centers[i, discretized_samples[:, i]] for i in range(len(grid))).T # map discretized samples ax.plot(samples[:, 0], samples[:, 1], 'o') # plot original samples ax.plot(locs[:, 0], locs[:, 1], 's') # plot discretized samples in mapped locations ax.add_collection(mc.LineCollection(list(zip(samples, locs)), colors='orange')) # add a line connecting each original-discretized sample ax.legend(['original', 'discretized']) visualize_samples(samples, discretized_samples, grid, low, high)
/usr/local/Cellar/jupyterlab/1.2.4/libexec/lib/python3.7/site-packages/IPython/core/interactiveshell.py:3319: FutureWarning: arrays to stack must be passed as a "sequence" type such as list or tuple. Support for non-sequence iterables such as generators is deprecated as of NumPy 1.16 and will raise an error in the future. exec(code_obj, self.user_global_ns, self.user_ns)
MIT
discretization/Discretization_Solution.ipynb
Jeromeschmidt/Udacity-Deep-Reinforcement-Nanodegree
Now that we have a way to discretize a state space, let's apply it to our reinforcement learning environment.
# Create a grid to discretize the state space state_grid = create_uniform_grid(env.observation_space.low, env.observation_space.high, bins=(10, 10)) state_grid # Obtain some samples from the space, discretize them, and then visualize them state_samples = np.array([env.observation_space.sample() for i in range(10)]) discretized_state_samples = np.array([discretize(sample, state_grid) for sample in state_samples]) visualize_samples(state_samples, discretized_state_samples, state_grid, env.observation_space.low, env.observation_space.high) plt.xlabel('position'); plt.ylabel('velocity'); # axis labels for MountainCar-v0 state space
/usr/local/Cellar/jupyterlab/1.2.4/libexec/lib/python3.7/site-packages/IPython/core/interactiveshell.py:3319: FutureWarning: arrays to stack must be passed as a "sequence" type such as list or tuple. Support for non-sequence iterables such as generators is deprecated as of NumPy 1.16 and will raise an error in the future. exec(code_obj, self.user_global_ns, self.user_ns)
MIT
discretization/Discretization_Solution.ipynb
Jeromeschmidt/Udacity-Deep-Reinforcement-Nanodegree
You might notice that if you have enough bins, the discretization doesn't introduce too much error into your representation. So we may now be able to apply a reinforcement learning algorithm (like Q-Learning) that operates on discrete spaces. Give it a shot to see how well it works! 5. Q-Learning. Provided below is a simple Q-Learning agent. Implement the `preprocess_state()` method to convert each continuous state sample to its corresponding discretized representation.
class QLearningAgent: """Q-Learning agent that can act on a continuous state space by discretizing it.""" def __init__(self, env, state_grid, alpha=0.02, gamma=0.99, epsilon=1.0, epsilon_decay_rate=0.9995, min_epsilon=.01, seed=505): """Initialize variables, create grid for discretization.""" # Environment info self.env = env self.state_grid = state_grid self.state_size = tuple(len(splits) + 1 for splits in self.state_grid) # n-dimensional state space self.action_size = self.env.action_space.n # 1-dimensional discrete action space self.seed = np.random.seed(seed) print("Environment:", self.env) print("State space size:", self.state_size) print("Action space size:", self.action_size) # Learning parameters self.alpha = alpha # learning rate self.gamma = gamma # discount factor self.epsilon = self.initial_epsilon = epsilon # initial exploration rate self.epsilon_decay_rate = epsilon_decay_rate # how quickly should we decrease epsilon self.min_epsilon = min_epsilon # Create Q-table self.q_table = np.zeros(shape=(self.state_size + (self.action_size,))) print("Q table size:", self.q_table.shape) def preprocess_state(self, state): """Map a continuous state to its discretized representation.""" # TODO: Implement this return tuple(discretize(state, self.state_grid)) def reset_episode(self, state): """Reset variables for a new episode.""" # Gradually decrease exploration rate self.epsilon *= self.epsilon_decay_rate self.epsilon = max(self.epsilon, self.min_epsilon) # Decide initial action self.last_state = self.preprocess_state(state) self.last_action = np.argmax(self.q_table[self.last_state]) return self.last_action def reset_exploration(self, epsilon=None): """Reset exploration rate used when training.""" self.epsilon = epsilon if epsilon is not None else self.initial_epsilon def act(self, state, reward=None, done=None, mode='train'): """Pick next action and update internal Q table (when mode != 'test').""" state = self.preprocess_state(state) if mode == 'test': # Test mode: Simply produce an action action = np.argmax(self.q_table[state]) else: # Train mode (default): Update Q table, pick next action # Note: We update the Q table entry for the *last* (state, action) pair with current state, reward self.q_table[self.last_state + (self.last_action,)] += self.alpha * \ (reward + self.gamma * max(self.q_table[state]) - self.q_table[self.last_state + (self.last_action,)]) # Exploration vs. exploitation do_exploration = np.random.uniform(0, 1) < self.epsilon if do_exploration: # Pick a random action action = np.random.randint(0, self.action_size) else: # Pick the best action from Q table action = np.argmax(self.q_table[state]) # Roll over current state, action for next step self.last_state = state self.last_action = action return action q_agent = QLearningAgent(env, state_grid)
Environment: <TimeLimit<MountainCarEnv<MountainCar-v0>>> State space size: (10, 10) Action space size: 3 Q table size: (10, 10, 3)
MIT
discretization/Discretization_Solution.ipynb
Jeromeschmidt/Udacity-Deep-Reinforcement-Nanodegree
Let's also define a convenience function to run an agent on a given environment. When calling this function, you can pass in `mode='test'` to tell the agent not to learn.
def run(agent, env, num_episodes=20000, mode='train'): """Run agent in given reinforcement learning environment and return scores.""" scores = [] max_avg_score = -np.inf for i_episode in range(1, num_episodes+1): # Initialize episode state = env.reset() action = agent.reset_episode(state) total_reward = 0 done = False # Roll out steps until done while not done: state, reward, done, info = env.step(action) total_reward += reward action = agent.act(state, reward, done, mode) # Save final score scores.append(total_reward) # Print episode stats if mode == 'train': if len(scores) > 100: avg_score = np.mean(scores[-100:]) if avg_score > max_avg_score: max_avg_score = avg_score if i_episode % 100 == 0: print("\rEpisode {}/{} | Max Average Score: {}".format(i_episode, num_episodes, max_avg_score), end="") sys.stdout.flush() return scores scores = run(q_agent, env)
Episode 13900/20000 | Max Average Score: -137.36
MIT
discretization/Discretization_Solution.ipynb
Jeromeschmidt/Udacity-Deep-Reinforcement-Nanodegree
The best way to analyze whether your agent was learning the task is to plot the scores, which should generally increase as the agent goes through more episodes.
# Plot scores obtained per episode plt.plot(scores); plt.title("Scores");
_____no_output_____
MIT
discretization/Discretization_Solution.ipynb
Jeromeschmidt/Udacity-Deep-Reinforcement-Nanodegree
If the scores are noisy, it might be difficult to tell whether your agent is actually learning. To find the underlying trend, you may want to plot a rolling mean of the scores. Let's write a convenience function to plot both raw scores as well as a rolling mean.
def plot_scores(scores, rolling_window=100): """Plot scores and optional rolling mean using specified window.""" plt.plot(scores); plt.title("Scores"); rolling_mean = pd.Series(scores).rolling(rolling_window).mean() plt.plot(rolling_mean); return rolling_mean rolling_mean = plot_scores(scores)
_____no_output_____
MIT
discretization/Discretization_Solution.ipynb
Jeromeschmidt/Udacity-Deep-Reinforcement-Nanodegree
You should observe the mean episode scores go up over time. Next, you can freeze learning and run the agent in test mode to see how well it performs.
# Run in test mode and analyze scores obtained test_scores = run(q_agent, env, num_episodes=100, mode='test') print("[TEST] Completed {} episodes with avg. score = {}".format(len(test_scores), np.mean(test_scores))) _ = plot_scores(test_scores)
_____no_output_____
MIT
discretization/Discretization_Solution.ipynb
Jeromeschmidt/Udacity-Deep-Reinforcement-Nanodegree
It's also interesting to look at the final Q-table that is learned by the agent. Note that the Q-table is of size MxNxA, where (M, N) is the size of the state space, and A is the size of the action space. We are interested in the maximum Q-value for each state, and the corresponding (best) action associated with that value.
def plot_q_table(q_table): """Visualize max Q-value for each state and corresponding action.""" q_image = np.max(q_table, axis=2) # max Q-value for each state q_actions = np.argmax(q_table, axis=2) # best action for each state fig, ax = plt.subplots(figsize=(10, 10)) cax = ax.imshow(q_image, cmap='jet'); cbar = fig.colorbar(cax) for x in range(q_image.shape[0]): for y in range(q_image.shape[1]): ax.text(x, y, q_actions[x, y], color='white', horizontalalignment='center', verticalalignment='center') ax.grid(False) ax.set_title("Q-table, size: {}".format(q_table.shape)) ax.set_xlabel('position') ax.set_ylabel('velocity') plot_q_table(q_agent.q_table)
_____no_output_____
MIT
discretization/Discretization_Solution.ipynb
Jeromeschmidt/Udacity-Deep-Reinforcement-Nanodegree
6. Modify the Grid. Now it's your turn to play with the grid definition and see what gives you optimal results. Your agent's final performance is likely to get better if you use a finer grid, with more bins per dimension, at the cost of higher model complexity (more parameters to learn).
# TODO: Create a new agent with a different state space grid state_grid_new = create_uniform_grid(env.observation_space.low, env.observation_space.high, bins=(20, 20)) q_agent_new = QLearningAgent(env, state_grid_new) q_agent_new.scores = [] # initialize a list to store scores for this agent # Train it over a desired number of episodes and analyze scores # Note: This cell can be run multiple times, and scores will get accumulated q_agent_new.scores += run(q_agent_new, env, num_episodes=50000) # accumulate scores rolling_mean_new = plot_scores(q_agent_new.scores) # Run in test mode and analyze scores obtained test_scores = run(q_agent_new, env, num_episodes=100, mode='test') print("[TEST] Completed {} episodes with avg. score = {}".format(len(test_scores), np.mean(test_scores))) _ = plot_scores(test_scores) # Visualize the learned Q-table plot_q_table(q_agent_new.q_table)
_____no_output_____
MIT
discretization/Discretization_Solution.ipynb
Jeromeschmidt/Udacity-Deep-Reinforcement-Nanodegree
7. Watch a Smart Agent
state = env.reset() score = 0 for t in range(200): action = q_agent_new.act(state, mode='test') env.render() state, reward, done, _ = env.step(action) score += reward if done: break print('Final score:', score) env.close()
_____no_output_____
MIT
discretization/Discretization_Solution.ipynb
Jeromeschmidt/Udacity-Deep-Reinforcement-Nanodegree
Lecture 08: Basic data analysis [Download on GitHub](https://github.com/NumEconCopenhagen/lectures-2021)[](https://mybinder.org/v2/gh/NumEconCopenhagen/lectures-2021/master?urlpath=lab/tree/08/Basic_data_analysis.ipynb) 1. [Combining datasets (merging and concatenating)](Combining-datasets-(merging-and-concatenating)) 2. [Fetching data using an API](Fetching-data-using-an-API) 3. [Split-apply-combine](Split-apply-combine) 4. [Summary](Summary)
import numpy as np import pandas as pd import datetime import pandas_datareader # install with `pip install pandas-datareader` import pydst # install with `pip install git+https://github.com/elben10/pydst` import matplotlib.pyplot as plt plt.style.use('seaborn-whitegrid') from matplotlib_venn import venn2 # `pip install matplotlib-venn`
C:\Users\gmf123\Anaconda3\envs\new\lib\site-packages\pandas_datareader\compat\__init__.py:7: FutureWarning: pandas.util.testing is deprecated. Use the functions in the public API at pandas.testing instead. from pandas.util.testing import assert_frame_equal
MIT
web/08/Basic_data_analysis.ipynb
Jovansam/lectures-2021
1. Combining datasets (merging and concatenating) When **combining datasets** there are a few crucial concepts: 1. **Concatenate (append)**: "stack" rows (observations) on top of each other. This works if the datasets have the same columns (variables). 2. **Merge**: the two datasets have different variables, but may or may not have the same observations. There are **different kinds of merges** depending on which observations you want to keep: 1. **Outer join (one-to-one)**: Keep observations which are in *either* or in *both* datasets. 2. **Inner join (one-to-one)**: Keep observations which are in *both* datasets. 3. **Left join (many-to-one)**: Keep observations which are in the *left* dataset or in *both* datasets. Keeping observations which are not in both datasets will result in **missing values** for the variables coming from the dataset in which the observation does not exist (a small toy illustration of the three merge types follows the data-loading cell below). **Read data:**
empl = pd.read_csv('../07/data/RAS200_long.csv') # .. -> means one folder up inc = pd.read_csv('../07/data/INDKP107_long.csv') area = pd.read_csv('../07/data/area.csv')
_____no_output_____
MIT
web/08/Basic_data_analysis.ipynb
Jovansam/lectures-2021
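Before turning to the real data, here is a minimal toy sketch of the three merge types just described. The frames, labels, and values are invented purely for illustration:
```python
import pandas as pd

# Two tiny datasets sharing the key column 'municipality'
empl_toy = pd.DataFrame({'municipality': ['A', 'B', 'C'], 'empl_rate': [0.70, 0.65, 0.72]})
inc_toy = pd.DataFrame({'municipality': ['B', 'C', 'D'], 'income': [250, 300, 280]})

# outer: keep A, B, C and D (missing values where a key is absent from one dataset)
print(pd.merge(empl_toy, inc_toy, on='municipality', how='outer'))

# inner: keep only B and C, which appear in both datasets
print(pd.merge(empl_toy, inc_toy, on='municipality', how='inner'))

# left: keep A, B, C from the left dataset (income is NaN for A)
print(pd.merge(empl_toy, inc_toy, on='municipality', how='left'))
```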
1.1 Concatenating datasets. Suppose we have two datasets that have the same variables and we just want to concatenate them.
empl.head(5) N = empl.shape[0] A = empl.loc[empl.index < N/2,:] # first half of observations B = empl.loc[empl.index >= N/2,:] # second half of observations print(f'A has shape {A.shape} ') print(f'B has shape {B.shape} ')
A has shape (495, 3) B has shape (495, 3)
MIT
web/08/Basic_data_analysis.ipynb
Jovansam/lectures-2021
**Concatenation** is done using the command `pd.concat([df1, df2])`.
C = pd.concat([A,B]) print(f'C has shape {C.shape} (same as the original empl, {empl.shape})')
C has shape (990, 3) (same as the original empl, (990, 3))
MIT
web/08/Basic_data_analysis.ipynb
Jovansam/lectures-2021
1.2 Merging datasets Two datasets with **different variables**: `empl` and `inc`. **Central command:** `pd.merge(empl, inc, on=['municipality','year'], how=METHOD)`. 1. The keyword `on` specifies the **merge key(s)**. They uniquely identify observations in both datasets (at least in one of them for sure). 2. The keyword `how` specifies the **merge method** (taking values such as `'outer'`, `'inner'`, or `'left'`). **Look at datasets:**
print(f'Years in empl: {empl.year.unique()}') print(f'Municipalities in empl = {len(empl.municipality.unique())}') print(f'Years in inc: {inc.year.unique()}') print(f'Municipalities in inc = {len(inc.municipality.unique())}')
Years in empl: [2008 2009 2010 2011 2012 2013 2014 2015 2016 2017] Municipalities in empl = 99 Years in inc: [2004 2005 2006 2007 2008 2009 2010 2011 2012 2013 2014 2015 2016 2017] Municipalities in inc = 98
MIT
web/08/Basic_data_analysis.ipynb
Jovansam/lectures-2021
**Find differences:**
diff_y = [y for y in inc.year.unique() if y not in empl.year.unique()] print(f'years in inc data, but not in empl data: {diff_y}') diff_m = [m for m in empl.municipality.unique() if m not in inc.municipality.unique()] print(f'municipalities in empl data, but not in inc data: {diff_m}')
years in inc data, but not in empl data: [2004, 2005, 2006, 2007] municipalities in empl data, but not in inc data: ['Christiansø']
MIT
web/08/Basic_data_analysis.ipynb
Jovansam/lectures-2021
**Conclusion:** `inc` has more years than `empl`, but `empl` has one municipality that is not in `inc`.
plt.figure() v = venn2(subsets = (4, 4, 10), set_labels = ('empl', 'inc')) v.get_label_by_id('100').set_text('Christiansø') v.get_label_by_id('010').set_text('2004-07') v.get_label_by_id('110').set_text('common observations') plt.show()
_____no_output_____
MIT
web/08/Basic_data_analysis.ipynb
Jovansam/lectures-2021
Outer join: union
plt.figure() v = venn2(subsets = (4, 4, 10), set_labels = ('empl', 'inc')) v.get_label_by_id('100').set_text('included') v.get_label_by_id('010').set_text('included') v.get_label_by_id('110').set_text('included') plt.title('outer join') plt.show() outer = pd.merge(empl,inc,on=['municipality','year'],how='outer') print(f'Number of municipalities = {len(outer.municipality.unique())}') print(f'Number of years = {len(outer.year.unique())}')
Number of municipalities = 99 Number of years = 14
MIT
web/08/Basic_data_analysis.ipynb
Jovansam/lectures-2021
We see that the **outer join** includes rows that exist in either dataframe and therefore includes missing values:
I = (outer.year.isin(diff_y)) | (outer.municipality.isin(diff_m)) outer.loc[I, :].head(15)
_____no_output_____
MIT
web/08/Basic_data_analysis.ipynb
Jovansam/lectures-2021
Inner join
plt.figure() v = venn2(subsets = (4, 4, 10), set_labels = ('empl', 'inc')) v.get_label_by_id('100').set_text('dropped'); v.get_patch_by_id('100').set_alpha(0.05) v.get_label_by_id('010').set_text('dropped'); v.get_patch_by_id('010').set_alpha(0.05) v.get_label_by_id('110').set_text('included') plt.title('inner join') plt.show() inner = pd.merge(empl,inc,how='inner',on=['municipality','year']) print(f'Number of municipalities = {len(inner.municipality.unique())}') print(f'Number of years = {len(inner.year.unique())}')
Number of municipalities = 98 Number of years = 10
MIT
web/08/Basic_data_analysis.ipynb
Jovansam/lectures-2021
We see that the **inner join** does not contain any rows that are not in both datasets.
I = (inner.year.isin(diff_y)) | (inner.municipality.isin(diff_m)) inner.loc[I, :].head(15)
_____no_output_____
MIT
web/08/Basic_data_analysis.ipynb
Jovansam/lectures-2021
Left join In my work, I most frequently use the **left join**. It is also known as a *many-to-one* join. * **Left dataset:** `inner`: many observations of a given municipality (one per year). * **Right dataset:** `area`: at most one observation per municipality and a new variable (km2).
inner_with_area = pd.merge(inner, area, on='municipality', how='left') inner_with_area.head(5) print(f'inner has shape {inner.shape}') print(f'area has shape {area.shape}') print(f'merge result has shape {inner_with_area.shape}') plt.figure() v = venn2(subsets = (4, 4, 10), set_labels = ('inner', 'area')) v.get_label_by_id('100').set_text('included:\n no km2'); v.get_label_by_id('010').set_text('dropped'); v.get_patch_by_id('010').set_alpha(0.05) v.get_label_by_id('110').set_text('included:\n with km2') plt.title('left join') plt.show()
_____no_output_____
MIT
web/08/Basic_data_analysis.ipynb
Jovansam/lectures-2021
**Intermezzo:** Finding the non-overlapping observations
not_in_area = [m for m in inner.municipality.unique() if m not in area.municipality.unique()] not_in_inner = [m for m in area.municipality.unique() if m not in inner.municipality.unique()] print(f'There are {len(not_in_area)} municipalities in inner that are not in area. They are:') print(not_in_area) print('') print(f'There is {len(not_in_inner)} municipalities in area that are not in inner. They are:') print(not_in_inner) print('')
There are 0 municipalities in inner that are not in area. They are: [] There is 1 municipalities in area that are not in inner. They are: ['Christiansø']
MIT
web/08/Basic_data_analysis.ipynb
Jovansam/lectures-2021
**Check that km2 is never missing:**
inner_with_area.km2.isnull().mean()
_____no_output_____
MIT
web/08/Basic_data_analysis.ipynb
Jovansam/lectures-2021
Alternative function for left joins: `df.join()` To use the left-join function `df.join()`, we must first set the index. Technically, we do not need this here, but if you ever need to join on more than one variable, `df.join()` requires you to work with indices, so we might as well learn it now (a small sketch of such a multi-key join follows this example).
inner.set_index('municipality', inplace=True) area.set_index('municipality', inplace=True) final = inner.join(area) print(f'final has shape: {final.shape}') final.head(5)
final has shape: (980, 4)
MIT
web/08/Basic_data_analysis.ipynb
Jovansam/lectures-2021
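As mentioned above, `df.join()` becomes genuinely useful when joining on more than one variable. A small toy sketch with invented frames indexed by (municipality, year):
```python
import pandas as pd

left = pd.DataFrame({'municipality': ['A', 'A', 'B'],
                     'year': [2016, 2017, 2016],
                     'empl': [0.70, 0.72, 0.64]}).set_index(['municipality', 'year'])

right = pd.DataFrame({'municipality': ['A', 'B'],
                      'year': [2016, 2016],
                      'inc': [250, 260]}).set_index(['municipality', 'year'])

# join matches rows on the shared (municipality, year) MultiIndex; how='left' is the default
joined = left.join(right)
print(joined)  # inc is NaN for ('A', 2017), which only exists in the left frame
```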
1.3 Other programming languages **SQL** (including SAS *proc sql*) SQL is one of the most powerful database languages and many other programming languages embed a version of it. For example, SAS has the `proc SQL`, where you can use SQL syntax. SQL is written in statements such as * **left join** `select * from empl left join inc on empl.municipality = inc.municipality and empl.year = inc.year`* **outer join** `select * from empl full outer join inc on empl.municipality = inc.municipality and empl.year = inc.year` **STATA** In Stata, the command `merge` nests many of the commands mentioned above. You specify `merge 1:1` for a one-to-one merge or `merge m:1` or `merge 1:m` for many-to-one or one-to-many merges, and you do not use `merge m:m` (until you are quite advanced). 2. Fetching data using an API API stands for **Application Programming Interface**. An API is an interface through which we can directly ask for and **receive data from an online source**. We will be using packages for this and will not look at what is going on underneath. 1. We use `pandas_datareader` to access many common **international online data** sources (install with `pip install pandas-datareader`)2. For **Statistics Denmark**, Jakob Elben has written the `pydst` package (install with `pip install git+https://github.com/elben10/pydst`) Fetching data from an API requires an **internet connection** and works directly without saving data to your hard disc (unless you ask Python to do so afterwards). You can use it to automate tasks such as fetching the most recent data, doing some calculations and outputting it in the same manner. This can be useful e.g. for quarterly reports. **Pros:** Automatic; smart; everything is done from Python (so no need to remember steps in between). **Cons:** The connection can be slow or drop out, which may lead to errors. If e.g. 100 students simultaneously fetch data (during, say, a lecture), the host server may not be able to service all the requests and may drop out. > The raw output data from an API could look like this: https://stats.oecd.org/SDMX-JSON/data/NAAG. It is a log list of non-human-readable gobledygook in the so-called "JSON" format. 2.1 Import data from Denmark Statistics **Setup:**
Dst = pydst.Dst(lang='en') # setup data loader with the langauge 'english'
_____no_output_____
MIT
web/08/Basic_data_analysis.ipynb
Jovansam/lectures-2021
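One way to deal with the cons listed above (a slow or dropped connection) is to cache a fetched table locally and only hit the API when the cache is missing. A small sketch reusing the `Dst` loader from the setup cell and the `INDKP107` query used later in this notebook; the cache file name is an arbitrary choice:
```python
import os
import pandas as pd

CACHE = 'indkp107_cache.csv'  # hypothetical local cache file

if os.path.exists(CACHE):
    inc_api = pd.read_csv(CACHE)  # offline: reuse the saved copy
else:
    variables = {'OMRÅDE': ['*'], 'ENHED': ['110'], 'KOEN': ['M', 'K'],
                 'TID': ['*'], 'UDDNIV': ['65'], 'INDKOMSTTYPE': ['100']}
    inc_api = Dst.get_data(table_id='INDKP107', variables=variables)  # online fetch
    inc_api.to_csv(CACHE, index=False)  # save so later runs do not need the connection
```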
Data from DST are organized into: 1. **Subjects:** indexed by numbers. Use `Dst.get_subjects()` to see the list. 2. **Tables:** with names like "INDKP107". Use `Dst.get_tables(subjects=['X'])` to see all tables in a subject. **Data is extracted** with `Dst.get_data(table_id = 'NAME', variables = DICT)`. **Subjects:** With `Dst.get_subjects()` we can list all subjects.
Dst.get_subjects()
_____no_output_____
MIT
web/08/Basic_data_analysis.ipynb
Jovansam/lectures-2021
**Tables:** With `get_tables()`, we can list all tables under a subject.
tables = Dst.get_tables(subjects=['04']) tables
_____no_output_____
MIT
web/08/Basic_data_analysis.ipynb
Jovansam/lectures-2021
**Variable in a dataset:**
tables[tables.id == 'INDKP107'] indk_vars = Dst.get_variables(table_id='INDKP107') indk_vars
_____no_output_____
MIT
web/08/Basic_data_analysis.ipynb
Jovansam/lectures-2021
**Values of variable in a dataset:**
indk_vars = Dst.get_variables(table_id='INDKP107') for id in ['ENHED','KOEN','UDDNIV','INDKOMSTTYPE']: print(id) values = indk_vars.loc[indk_vars.id == id,['values']].values[0,0] for value in values: print(f' id = {value["id"]}, text = {value["text"]}')
ENHED id = 101, text = People with type of income (number) id = 110, text = Amount of income (DKK 1.000) id = 116, text = Average income for all people (DKK) id = 121, text = Average income for people with type of income (DKK) KOEN id = MOK, text = Men and women, total id = M, text = Men id = K, text = Women UDDNIV id = 10, text = 10 BASIC SCHOOL 8-10 grade id = 26, text = 20+25 UPPER SECONDARY SCHOOL id = 35, text = 35 VOCATIONAL EDUCATION id = 40, text = 40 SHORT-CYCLE HIGHER EDUCATION id = 61, text = 50+60 MEDIUM-CYCLE HIGHER EDUCATION, BACHLEOR id = 65, text = 65 LONG-CYCLE HIGHER EDUCATION id = 9, text = Not stated INDKOMSTTYPE id = 100, text = 1 Disposable income (2+30-31-32-35) id = 105, text = 2 Pre-tax Income, total (3+7+22+26+29) id = 110, text = 3 Primary income (4+5+6) id = 115, text = 4 Wages and salaries etc., total id = 120, text = 5 Entrepreneurial income, total id = 125, text = 6 Received fees subject to labour market contributions id = 130, text = 7 Public transfer incomes(8+14+19) id = 135, text = 8 Unemployment and cash benefits (9+10+11+12+13) id = 140, text = 9 Unemployment benefits id = 145, text = 10 Other benefits from unemployment funds id = 150, text = 11 Cash benefits id = 155, text = 12 Job training & Limited employment benefits id = 160, text = 13 Sickness- & parental leave id = 165, text = 14 Other transfers(15+16+17+18) id = 170, text = 15 Public educational grants id = 175, text = 16 Housing benefits id = 180, text = 17 Child benefits id = 185, text = 18 Green check id = 190, text = 19 Public pensions(20+21) id = 195, text = 20 Early retirement pay id = 200, text = 21 Disability and old age pensions id = 205, text = 22 Private pensions(23+24+25) id = 210, text = 23 Public servants pension id = 215, text = 24 Pension from the ATP (Labour Market Supplementary Pension Scheme) id = 220, text = 25 Labour market and private pensions (Annuities only) id = 225, text = 26 Capital income, gross (27+28) id = 230, text = 27 Interest received id = 235, text = 28 Other property income (From stocks etc.) id = 240, text = 29 Other personal income id = 245, text = 30 Imputed rent id = 250, text = 31 Interest expenses id = 255, text = 32 Tax, total (33+34) id = 260, text = 33 Income taxes id = 265, text = 34 Labour market contributions etc. id = 270, text = 35 Paid alimonies id = 275, text = Equivalised Disposable income id = 280, text = Land tax home owners id = 285, text = Land tax, tenants id = 290, text = Taxable income
MIT
web/08/Basic_data_analysis.ipynb
Jovansam/lectures-2021
**Get data:**
variables = {'OMRÅDE':['*'],'ENHED':['110'],'KOEN':['M','K'],'TID':['*'],'UDDNIV':['65'],'INDKOMSTTYPE':['100']}
inc_api = Dst.get_data(table_id = 'INDKP107', variables=variables)
inc_api.head(5)
_____no_output_____
MIT
web/08/Basic_data_analysis.ipynb
Jovansam/lectures-2021
2.2 FRED (Federal Reserve Economic Data)

**GDP data** for the US
start = datetime.datetime(2005,1,1)
end = datetime.datetime(2017,1,1)
gdp = pandas_datareader.data.DataReader('GDP', 'fred', start, end)
gdp.head(10)
_____no_output_____
MIT
web/08/Basic_data_analysis.ipynb
Jovansam/lectures-2021
**Finding data:**

1. go to https://fred.stlouisfed.org
2. search for employment
3. click first link
4. table name is next to header

**Fetch:**
empl_us = pandas_datareader.data.DataReader('PAYEMS', 'fred', datetime.datetime(1939,1,1), datetime.datetime(2018,12,1))
_____no_output_____
MIT
web/08/Basic_data_analysis.ipynb
Jovansam/lectures-2021
**Plot:**
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
empl_us.plot(ax=ax)
ax.legend(frameon=True)
ax.set_xlabel('')
ax.set_ylabel('employment (US)');
_____no_output_____
MIT
web/08/Basic_data_analysis.ipynb
Jovansam/lectures-2021
2.3 World Bank indicators: `wb`

**Finding data:**

1. go to https://data.worldbank.org/indicator/
2. search for GDP
3. variable name ("NY.GDP.PCAP.KD") is in the URL

**Fetch GDP:**
from pandas_datareader import wb
wb_gdp = wb.download(indicator='NY.GDP.PCAP.KD', country=['SE','DK','NO'], start=1990, end=2017)
wb_gdp = wb_gdp.rename(columns = {'NY.GDP.PCAP.KD':'GDP'})
wb_gdp = wb_gdp.reset_index()
wb_gdp.head(5)
wb_gdp.info()
<class 'pandas.core.frame.DataFrame'> RangeIndex: 84 entries, 0 to 83 Data columns (total 3 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 country 84 non-null object 1 year 84 non-null object 2 GDP 84 non-null float64 dtypes: float64(1), object(2) memory usage: 2.1+ KB
MIT
web/08/Basic_data_analysis.ipynb
Jovansam/lectures-2021
**Problem:** Unfortunately, it turns out that the dataframe has stored the variable year as an "object", meaning in practice that it is a string. Country is an object because it is a string, but that cannot be helped. Fortunately, GDP is a float (i.e. a number). Let's convert year to make it an integer:
wb_gdp.year = wb_gdp.year.astype(int)
wb_gdp.info()
<class 'pandas.core.frame.DataFrame'> RangeIndex: 84 entries, 0 to 83 Data columns (total 3 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 country 84 non-null object 1 year 84 non-null int32 2 GDP 84 non-null float64 dtypes: float64(1), int32(1), object(1) memory usage: 1.8+ KB
MIT
web/08/Basic_data_analysis.ipynb
Jovansam/lectures-2021
**Fetch employment-to-population ratio:**
wb_empl = wb.download(indicator='SL.EMP.TOTL.SP.ZS', country=['SE','DK','NO'], start=1990, end=2017)
wb_empl.rename(columns = {'SL.EMP.TOTL.SP.ZS':'employment_to_pop'}, inplace=True)
wb_empl.reset_index(inplace = True)
wb_empl.year = wb_empl.year.astype(int)
wb_empl.head(3)
_____no_output_____
MIT
web/08/Basic_data_analysis.ipynb
Jovansam/lectures-2021
**Merge:**
# Note: this reuses the name wb, shadowing the pandas_datareader wb module imported above.
wb = pd.merge(wb_gdp, wb_empl, how='outer', on = ['country','year'])
wb.head(5)
_____no_output_____
MIT
web/08/Basic_data_analysis.ipynb
Jovansam/lectures-2021
3. Split-apply-combine

One of the most useful skills to learn is **the split-apply-combine process**. For example, we may want to compute the average employment rate within a municipality over time and calculate whether the employment rate in each year is above or below the average. We calculate this variable using a split-apply-combine procedure:

1. **split**: divide the dataset into units (one for each municipality)
2. **apply**: compute the average employment rate for each unit
3. **combine**: merge this new variable back onto the original dataset

(A minimal self-contained sketch of this pattern follows the example data below.)

3.1 Groupby

**Example data:**
empl = empl.sort_values(['municipality','year']) # sort by first municipality then year
empl.head(5)
_____no_output_____
MIT
web/08/Basic_data_analysis.ipynb
Jovansam/lectures-2021
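As a self-contained illustration of the three split-apply-combine steps described above, the sketch below uses synthetic numbers and made-up municipality names (not the actual employment data):

import pandas as pd

toy = pd.DataFrame({'municipality': ['A', 'A', 'B', 'B'],
                    'year': [2010, 2011, 2010, 2011],
                    'e': [0.70, 0.74, 0.60, 0.58]})

# split: group by municipality; apply: mean within each group;
# combine: broadcast the group mean back onto the original rows
toy['e_mean'] = toy.groupby('municipality')['e'].transform('mean')
toy['above_mean'] = toy.e > toy.e_mean
toy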
Use **groupby** to calculate **within means**:
empl.groupby('municipality')['e'].mean().head(5)
_____no_output_____
MIT
web/08/Basic_data_analysis.ipynb
Jovansam/lectures-2021
**Custom functions** can be specified by using the `lambda` notation. E.g., average change:
empl.groupby('municipality')['e'].apply(lambda x: x.diff(1).mean()).head(5)
_____no_output_____
MIT
web/08/Basic_data_analysis.ipynb
Jovansam/lectures-2021
Or:
myfun = lambda x: np.mean(x[1:]-x[:-1])
empl.groupby('municipality')['e'].apply(lambda x: myfun(x.values)).head(5)
_____no_output_____
MIT
web/08/Basic_data_analysis.ipynb
Jovansam/lectures-2021
**Plot statistics**: Dispersion in employment rate across Danish municipalities over time.
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
empl.groupby('year')['e'].std().plot(ax=ax,style='-o')
ax.set_ylabel('std. dev.')
ax.set_title('std. dev. across municipalities in the employment rate');
_____no_output_____
MIT
web/08/Basic_data_analysis.ipynb
Jovansam/lectures-2021
3.2 Split-Apply-Combine

**Goal:** Calculate within municipality difference to mean employment rate.

**1. Split**:
e_grouped = empl.groupby('municipality')['e']
_____no_output_____
MIT
web/08/Basic_data_analysis.ipynb
Jovansam/lectures-2021
**2. Apply:**
e_mean = e_grouped.mean() # mean employment rate
e_mean.head(10)
_____no_output_____
MIT
web/08/Basic_data_analysis.ipynb
Jovansam/lectures-2021
Change name of series:
e_mean.name = 'e_mean' # necessary for join
_____no_output_____
MIT
web/08/Basic_data_analysis.ipynb
Jovansam/lectures-2021
**3. Combine:**
empl_ = empl.set_index('municipality').join(e_mean, how='left')
empl_['diff'] = empl_.e - empl_.e_mean
empl_.xs('Copenhagen')
_____no_output_____
MIT
web/08/Basic_data_analysis.ipynb
Jovansam/lectures-2021
**Plot:**
municipalities = ['Copenhagen','Roskilde','Lejre']

fig = plt.figure()
ax = fig.add_subplot(1,1,1)
for m in municipalities:
    empl_.xs(m).plot(x='year',y='diff',ax=ax,label=m)
ax.legend(frameon=True)
ax.set_ylabel('difference to mean')
_____no_output_____
MIT
web/08/Basic_data_analysis.ipynb
Jovansam/lectures-2021
With `agg()`

**Agg:** returns the same value for all observations in a group.
empl_ = empl.copy()

# a. split-apply
e_mean = empl_.groupby('municipality')['e'].agg(lambda x: x.mean())
e_mean.name = 'e_mean'

# b. combine
empl_ = empl_.set_index('municipality').join(e_mean, how='left')
empl_['diff'] = empl_.e - empl_.e_mean
empl_.xs('Copenhagen')
_____no_output_____
MIT
web/08/Basic_data_analysis.ipynb
Jovansam/lectures-2021
**Note:** Same result!

With `transform()`

**Transform:** can produce different values across observations in a group.
empl_ = empl.copy()
empl_['diff'] = empl_.groupby('municipality')['e'].transform(lambda x: x - x.mean())
empl_.set_index('municipality').xs('Copenhagen')
_____no_output_____
MIT
web/08/Basic_data_analysis.ipynb
Jovansam/lectures-2021
Advanced agromanagement with PCSE/WOFOST

This notebook will demonstrate how to implement advanced agromanagement options with PCSE/WOFOST.

Allard de Wit, April 2018

For the example we will assume that data files are in the data directory within the directory where this notebook is located. This will be the case if you downloaded the notebooks from github.

**Prerequisites for running this notebook**

Several packages need to be installed for running PCSE/WOFOST:

1. PCSE and its dependencies. See the [PCSE user guide](http://pcse.readthedocs.io/en/stable/installing.html) for more information;
2. The `pandas` module for processing and storing WOFOST output;
3. The `matplotlib` module for plotting results.

Finally, you need a working internet connection.
%matplotlib inline
import os, sys
import matplotlib
matplotlib.style.use("ggplot")
import matplotlib.pyplot as plt
import pandas as pd
import yaml

import pcse
from pcse.models import Wofost71_WLP_FD
from pcse.fileinput import CABOFileReader, YAMLCropDataProvider
from pcse.db import NASAPowerWeatherDataProvider
from pcse.util import WOFOST71SiteDataProvider
from pcse.base import ParameterProvider

data_dir = os.path.join(os.getcwd(), "data")

print("This notebook was built with:")
print("python version: %s " % sys.version)
print("PCSE version: %s" % pcse.__version__)
This notebook was built with: python version: 3.7.5 (default, Oct 31 2019, 15:18:51) [MSC v.1916 64 bit (AMD64)] PCSE version: 5.4.2
MIT
06_advanced_agromanagement_with_PCSE.ipynb
albertdaniell/wofost_kalro
Input requirements

For running PCSE/WOFOST (and PCSE models in general), you need three types of inputs:

1. Model parameters that parameterize the different model components. These parameters usually consist of a set of crop parameters (or multiple sets in case of crop rotations), a set of soil parameters and a set of site parameters. The latter provide ancillary parameters that are specific for a location.
2. Driving variables represented by weather data which can be derived from various sources.
3. Agromanagement actions which specify the farm activities that will take place on the field that is simulated by PCSE.

Reading model parameters

In this example, we will derive the model parameters from different sources. First of all, the crop parameters will be read from my [github repository](https://github.com/ajwdewit/WOFOST_crop_parameters) using the `YAMLCropDataProvider`. Next, the soil parameters will be read from a classical CABO input file using the `CABOFileReader`. Finally, the site parameters can be defined directly using the `WOFOST71SiteDataProvider` which provides sensible defaults for site parameters. However, PCSE models expect a single set of parameters and therefore they need to be combined using the `ParameterProvider`:
crop = YAMLCropDataProvider()
soil = CABOFileReader(os.path.join(data_dir, "soil", "ec3.soil"))
site = WOFOST71SiteDataProvider(WAV=100,CO2=360)
parameterprovider = ParameterProvider(soildata=soil, cropdata=crop, sitedata=site)
crop = YAMLCropDataProvider()
_____no_output_____
MIT
06_advanced_agromanagement_with_PCSE.ipynb
albertdaniell/wofost_kalro
Reading weather data

For reading weather data we will use the `ExcelWeatherDataProvider` with a locally stored Excel weather file; the `NASAPowerWeatherDataProvider` imported above could be used instead to retrieve weather data over the internet.
from pcse.fileinput import ExcelWeatherDataProvider

weatherfile = os.path.join(data_dir, 'meteo', 'nl1.xlsx')
weatherdataprovider = ExcelWeatherDataProvider(weatherfile)
_____no_output_____
MIT
06_advanced_agromanagement_with_PCSE.ipynb
albertdaniell/wofost_kalro
Defining agromanagement with timed events

Defining agromanagement needs a bit more explanation because agromanagement is a relatively complex piece of PCSE. The agromanagement definition for PCSE is written in a format called `YAML`; for a thorough discussion have a look at the [Section on Agromanagement](https://pcse.readthedocs.io/en/stable/reference_guide.html#the-agromanager) in the PCSE manual.

For the current example the agromanagement definition looks like this:

    Version: 1.0
    AgroManagement:
    - 2006-01-01:
        CropCalendar:
            crop_name: sugarbeet
            variety_name: Sugarbeet_603
            crop_start_date: 2006-03-31
            crop_start_type: emergence
            crop_end_date: 2006-10-20
            crop_end_type: harvest
            max_duration: 300
        TimedEvents:
        -   event_signal: irrigate
            name: Irrigation application table
            comment: All irrigation amounts in cm
            events_table:
            - 2006-07-10: {amount: 10, efficiency: 0.7}
            - 2006-08-05: {amount: 5, efficiency: 0.7}
        StateEvents: null

The agromanagement definition starts with `Version:` indicating the version number of the agromanagement file, while the actual definition starts after the label `AgroManagement:`. Next a date must be provided which sets the start date of the campaign (and the start date of the simulation). Each campaign is defined by zero or one CropCalendars and zero or more TimedEvents and/or StateEvents. The CropCalendar defines the crop type, date of sowing, date of harvesting, etc., while the Timed/StateEvents define actions that are either connected to a date or to a model state.

In the current example, the campaign starts on 2006-01-01 and there is a crop calendar for sugar beet starting on 2006-03-31 with a harvest date of 2006-10-20. Next there are timed events defined for applying irrigation at 2006-07-10 and 2006-08-05. The current example has no state events. For a thorough description of all possibilities see the section on AgroManagement in the Reference Guide.

Loading the agromanagement definition from a file can be done with the `YAMLAgroManagementReader`. However for this example, we can just as easily define it here and parse it directly with the YAML parser. In this case we can directly use the section after the `AgroManagement:` label.
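For completeness, loading the same definition from a file would look roughly like the sketch below; the file name is a made-up placeholder, only the reader class comes from PCSE.

from pcse.fileinput import YAMLAgroManagementReader

# Hypothetical file containing the YAML agromanagement definition shown above.
agro_file = os.path.join(data_dir, "agro", "sugarbeet_timed_irrigation.agro")
agromanagement_from_file = YAMLAgroManagementReader(agro_file)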
yaml_agro = """
- 2006-01-01:
    CropCalendar:
        crop_name: sugarbeet
        variety_name: Sugarbeet_603
        crop_start_date: 2006-03-31
        crop_start_type: emergence
        crop_end_date: 2006-10-20
        crop_end_type: harvest
        max_duration: 300
    TimedEvents:
    -   event_signal: irrigate
        name: Irrigation application table
        comment: All irrigation amounts in cm
        events_table:
        - 2006-07-10: {amount: 10, efficiency: 0.7}
        - 2006-08-05: {amount: 5, efficiency: 0.7}
    StateEvents: null
"""
agromanagement = yaml.load(yaml_agro)
_____no_output_____
MIT
06_advanced_agromanagement_with_PCSE.ipynb
albertdaniell/wofost_kalro
Starting and running WOFOST

We now have all parameters, weather data and agromanagement information available to start WOFOST and run a simulation.
wofost = Wofost71_WLP_FD(parameterprovider, weatherdataprovider, agromanagement)
wofost.run_till_terminate()
_____no_output_____
MIT
06_advanced_agromanagement_with_PCSE.ipynb
albertdaniell/wofost_kalro
Getting and visualizing results

Next, we can easily get the output from the model using the get_output() method and turn it into a pandas DataFrame:
output = wofost.get_output()
df = pd.DataFrame(output).set_index("day")
df.tail()
_____no_output_____
MIT
06_advanced_agromanagement_with_PCSE.ipynb
albertdaniell/wofost_kalro
Finally, we can visualize the results from the pandas DataFrame with a few commands:
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(16,8))
df['LAI'].plot(ax=axes[0], title="Leaf Area Index")
df['SM'].plot(ax=axes[1], title="Root zone soil moisture")
fig.autofmt_xdate()
_____no_output_____
MIT
06_advanced_agromanagement_with_PCSE.ipynb
albertdaniell/wofost_kalro
Defining agromanagement with state events

Connecting events to development stages

It is also possible to connect irrigation events to state variables instead of dates. A logical approach is to connect an irrigation event to a development stage instead of a date; in this way changes in the sowing date will be automatically reflected in changes in the irrigation events. For this we need to change the definition of the agromanagement as below:

    Version: 1.0
    AgroManagement:
    - 2006-01-01:
        CropCalendar:
            crop_name: sugarbeet
            variety_name: Sugarbeet_603
            crop_start_type: emergence
            crop_end_date: 2006-10-20
            crop_end_type: harvest
            max_duration: 300
        TimedEvents: null
        StateEvents:
        -   event_signal: irrigate
            event_state: DVS
            zero_condition: rising
            name: Irrigation application table
            comment: All irrigation amounts in cm
            events_table:
            - 0.9: {amount: 10, efficiency: 0.7}
            - 1.5: {amount: 5, efficiency: 0.7}
    - 2006-11-20: null

In this case the irrigation events are connected to the state DVS and occur when the simulated DVS crosses the values 0.9 and 1.5. Note that there are two additional parameters: `event_state`, which defines the state to which the event is connected, and `zero_condition`, which specifies the condition under which the state event fires; see the explanation [here](http://pcse.readthedocs.org/en/latest/code.html#agromanagement). Finally, also note that there must be an "empty trailing campaign" defined which specifies that the campaign starting at 2006-01-01 ends at 2006-11-20. Otherwise PCSE cannot determine the end of the simulation period; see also the link above for an explanation.

Again, we will define the agromanagement directly on the command line and parse it with YAML.
yaml_agro = """
- 2006-01-01:
    CropCalendar:
        crop_name: sugarbeet
        variety_name: Sugarbeet_603
        crop_start_date: 2006-03-31
        crop_start_type: emergence
        crop_end_date: 2006-10-20
        crop_end_type: harvest
        max_duration: 300
    TimedEvents: null
    StateEvents:
    -   event_signal: irrigate
        event_state: DVS
        zero_condition: rising
        name: Irrigation application table
        comment: All irrigation amounts in cm
        events_table:
        - 0.9: {amount: 10, efficiency: 0.7}
        - 1.5: {amount: 5, efficiency: 0.7}
- 2006-11-20: null
"""
agromanagement = yaml.load(yaml_agro)
_____no_output_____
MIT
06_advanced_agromanagement_with_PCSE.ipynb
albertdaniell/wofost_kalro
Again we run the model with all inputs but a changed agromanagement and plot the results
wofost2 = Wofost71_WLP_FD(parameterprovider, weatherdataprovider, agromanagement)
wofost2.run_till_terminate()
output2 = wofost2.get_output()
df2 = pd.DataFrame(output2).set_index("day")

fig2, axes2 = plt.subplots(nrows=1, ncols=2, figsize=(16,8))
df2['LAI'].plot(ax=axes2[0], title="Leaf Area Index")
df2['SM'].plot(ax=axes2[1], title="Root zone soil moisture")
fig2.autofmt_xdate()
_____no_output_____
MIT
06_advanced_agromanagement_with_PCSE.ipynb
albertdaniell/wofost_kalro
Connecting events to soil moisture levels

The logical approach is to connect irrigation events to the stress levels that are experienced by the crop. In this case we connect the irrigation event to the state variable soil moisture (SM) and define the agromanagement like this:

    Version: 1.0
    AgroManagement:
    - 2006-01-01:
        CropCalendar:
            crop_name: sugarbeet
            variety_name: Sugarbeet_603
            crop_start_date: 2006-03-31
            crop_start_type: emergence
            crop_end_date: 2006-10-20
            crop_end_type: harvest
            max_duration: 300
        TimedEvents: null
        StateEvents:
        -   event_signal: irrigate
            event_state: SM
            zero_condition: falling
            name: Irrigation application table
            comment: All irrigation amounts in cm
            events_table:
            - 0.2: {amount: 10, efficiency: 0.7}
    - 2006-11-20:

Note that in this case the `zero_condition` is `falling` because we only want the event to trigger when the SM goes below the specified level (0.2). If we had set `zero_condition` to `either` it would trigger twice: the first time when the soil moisture gets exhausted and the second time because of the irrigation water added.
yaml_agro = """
- 2006-01-01:
    CropCalendar:
        crop_name: sugarbeet
        variety_name: Sugarbeet_603
        crop_start_date: 2006-03-31
        crop_start_type: emergence
        crop_end_date: 2006-10-20
        crop_end_type: harvest
        max_duration: 300
    TimedEvents: null
    StateEvents:
    -   event_signal: irrigate
        event_state: SM
        zero_condition: falling
        name: Irrigation application table
        comment: All irrigation amounts in cm
        events_table:
        - 0.2: {amount: 10, efficiency: 0.7}
- 2006-11-20: null
"""
agromanagement = yaml.load(yaml_agro)

wofost3 = Wofost71_WLP_FD(parameterprovider, weatherdataprovider, agromanagement)
wofost3.run_till_terminate()
output3 = wofost3.get_output()
df3 = pd.DataFrame(output3).set_index("day")

fig3, axes3 = plt.subplots(nrows=1, ncols=2, figsize=(16,8))
df3['LAI'].plot(ax=axes3[0], title="Leaf Area Index")
df3['SM'].plot(ax=axes3[1], title="Volumetric soil moisture")
fig3.autofmt_xdate()
_____no_output_____
MIT
06_advanced_agromanagement_with_PCSE.ipynb
albertdaniell/wofost_kalro
Showing the differences in irrigation events
============================================

We combine the `SM` column from the different data frames in a new dataframe and plot the results to see the effect of the differences in agromanagement.
df_all = pd.DataFrame({"by_date": df.SM, "by_DVS": df2.SM, "by_SM": df3.SM}, index=df.index)

fig4, axes4 = plt.subplots(nrows=1, ncols=1, figsize=(14,12))
df_all.plot(ax=axes4, title="differences in irrigation approach")
axes4.set_ylabel("volumetric soil moisture")  # the plotted series are soil moisture, not irrigation amounts
fig4.autofmt_xdate()
_____no_output_____
MIT
06_advanced_agromanagement_with_PCSE.ipynb
albertdaniell/wofost_kalro
Adjusting the sowing date with the AgroManager and making multiple runs
==============================================

The most straightforward way of adjusting the sowing date is by editing the crop management definition in YAML format directly. Here we put a placeholder `{crop_start_date}` at the point where the crop start date is defined in the YAML format. We can then use string formatting operations to insert a new date and use `yaml.load` to load the definition in yaml directly. Note that we need double curly brackets (`{{` and `}}`) at the events table so that Python does not treat them as a placeholder.
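The brace escaping can be confusing, so here is a tiny stand-alone illustration of how `str.format` treats single versus double curly brackets (the values are made up):

template = "crop_start_date: {crop_start_date}, events_table: {{amount: 10, efficiency: 0.7}}"
print(template.format(crop_start_date="2006-03-31"))
# -> crop_start_date: 2006-03-31, events_table: {amount: 10, efficiency: 0.7}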
agromanagement_yaml = """
- 2006-01-01:
    CropCalendar:
        crop_name: sugarbeet
        variety_name: Sugarbeet_603
        crop_start_date: {crop_start_date}
        crop_start_type: emergence
        crop_end_date: 2006-10-20
        crop_end_type: harvest
        max_duration: 300
    TimedEvents: null
    StateEvents:
    -   event_signal: irrigate
        event_state: SM
        zero_condition: falling
        name: Irrigation application table
        comment: All irrigation amounts in cm
        events_table:
        - 0.2: {{amount: 10, efficiency: 0.7}}
- 2006-11-20:
"""
_____no_output_____
MIT
06_advanced_agromanagement_with_PCSE.ipynb
albertdaniell/wofost_kalro
The main loop for making several WOFOST runs
import datetime as dt

sdate = dt.date(2006,3,1)
step = 10

# Loop over six different start dates
results = []
for i in range(6):
    # get new start date
    csdate = sdate + dt.timedelta(days=i*step)
    # update agromanagement with new start date and load it with yaml.load
    tmp = agromanagement_yaml.format(crop_start_date=csdate)
    agromanagement = yaml.load(tmp)
    # run wofost and collect output
    wofost = Wofost71_WLP_FD(parameterprovider, weatherdataprovider, agromanagement)
    wofost.run_till_terminate()
    output = wofost.get_output()
    df = pd.DataFrame(output).set_index("day")
    results.append(df)
_____no_output_____
MIT
06_advanced_agromanagement_with_PCSE.ipynb
albertdaniell/wofost_kalro
Plot the results for the different runs and variables
colors = ['k','r','g','b','m','y']
fig5, axes5 = plt.subplots(nrows=6, ncols=2, figsize=(16,30))
for c, df in zip(colors, results):
    for key, axis in zip(df.columns, axes5.flatten()):
        df[key].plot(ax=axis, title=key, color=c)
fig5.autofmt_xdate()
_____no_output_____
MIT
06_advanced_agromanagement_with_PCSE.ipynb
albertdaniell/wofost_kalro
Wind Turbine combined with Heat Pump and Water Tank

In this example the heat demand is supplied by a wind turbine in combination with a heat pump and a water tank that stores hot water with a standing loss.
import pypsa
import pandas as pd
from pyomo.environ import Constraint

network = pypsa.Network()

network.set_snapshots(pd.date_range("2016-01-01 00:00","2016-01-01 03:00", freq="H"))

network.add("Bus", "0", carrier="AC")
network.add("Bus", "0 heat", carrier="heat")

network.add("Carrier", "wind")
network.add("Carrier", "heat")

network.add("Generator", "wind turbine",
            bus="0",
            carrier="wind",
            p_nom_extendable=True,
            p_max_pu=[0.,0.2,0.7,0.4],
            capital_cost=500)

network.add("Load", "heat demand",
            bus="0 heat",
            p_set=20.)

# NB: Heat pump has changing efficiency (properly the Coefficient of Performance, COP)
# due to changing ambient temperature
network.add("Link", "heat pump",
            bus0="0",
            bus1="0 heat",
            efficiency=[2.5,3.,3.2,3.],
            capital_cost=1000,
            p_nom_extendable=True)

network.add("Store", "water tank",
            bus="0 heat",
            e_cyclic=True,
            e_nom_extendable=True,
            standing_loss=0.01)

network.lopf(network.snapshots)

pd.DataFrame({attr: network.stores_t[attr]["water tank"] for attr in ["p","e"]})

pd.DataFrame({attr: network.links_t[attr]["heat pump"] for attr in ["p0","p1"]})

network.stores.loc[["water tank"]].T

network.generators.loc[["wind turbine"]].T
_____no_output_____
MIT
examples/notebooks/power-to-heat-water-tank.ipynb
martacki/PyPSA
Replication of Carneiro, Heckman, & Vytlacil's (2011) *Local Instrumental Variables* approach

In this notebook, I reproduce the semiparametric results from

> Carneiro, P., Heckman, J. J., & Vytlacil, E. J. (2011). [Estimating marginal returns to education.](https://pubs.aeaweb.org/doi/pdfplus/10.1257/aer.101.6.2754) *American Economic Review, 101*(6), 2754-81.

The authors analyze the returns to college for white males born between 1957 and 1963 using data from the National Longitudinal Survey of Youth 1979. The authors provide some [replication material](https://pubs.aeaweb.org/doi/pdfplus/10.1257/aer.101.6.2754) on their website but do not include geographic identifiers. Therefore, we make use of a mock data set merging background characteristics and local data randomly.

In a future update, the semiparametric estimation method will be included in the open-source package *grmpy* for the simulation and estimation of the generalized Roy model in Python. Currently, *grmpy* is limited to the estimation of a parametric normal version of the generalized Roy model. For more, see the [online documentation](https://grmpy.readthedocs.io/en/develop/).

0) Imports
import numpy as np
from tutorial_semipar_auxiliary import plot_semipar_mte
from grmpy.estimate.estimate import fit

import warnings
warnings.filterwarnings('ignore')

%load_ext autoreload
%autoreload 2
_____no_output_____
MIT
promotion/grmpy_tutorial_notebook/tutorial_semipar_notebook.ipynb
OpenSourceEconomics/grmpy
1) The LIV Framework

The method of Local Instrumental Variables (LIV) is based on the generalized Roy model, which is characterized by the following equations:

\begin{align*} &\textbf{Potential Outcomes} & & \textbf{Choice} &\\ & Y_1 = \beta_1 X + U_{1} & & I = Z \gamma - V &\\ & Y_0 = \beta_0 X + U_{0} & & D_i = \left\{\begin{array}{ll}1 & if \ I > 0 \\0 & if \ I \leq 0\\\end{array}\right. &&&&\\& \textbf{Observed Outcome} &&&\\& Y = D Y_1 + (1-D) Y_0 &&&\end{align*}

We work with the linear-in-the-parameters version of the generalized Roy model:

\begin{align}E(Y|X = \overline{x}, P(Z) = p) = \overline{x} \beta_0 + p \overline{x} (\beta_1 - \beta_0) + K(p),\end{align}

where $K(p) = E(U_1 - U_0 | D = 1, P(Z) = p)$ is a nonlinear function of $p$ that captures heterogeneity along the unobservable resistance to treatment $u_D$. In addition, assume that $(X, Z)$ is independent of $\{U_1, U_0, V\}$. Then, the MTE is

1) additively separable in $X$ and $U_D$, which means that the shape of the MTE is independent of $X$, and
2) identified over the common support of $P(Z)$, unconditional on $X$.

The propensity score $P(Z)$ denotes the probability of going to university ($D=1$), and its common support plays a crucial role for the identification of the MTE. The common support is defined as the intersection of the support of $P(Z)$ given $D = 1$ and the support of $P(Z)$ given $D = 0$, i.e., those evaluations of $P(Z)$ for which we obtain positive frequencies in both subsamples. We will plot it below. The larger the common support, the larger the region over which the MTE is identified.

The LIV estimator, $\Delta^{LIV}$, is derived as follows (Heckman and Vytlacil [2001](https://www.aeaweb.org/articles?id=10.1257/aer.91.2.107), [2005](https://www.jstor.org/stable/3598865?seq=1#page_scan_tab_contents)):

\begin{equation}\begin{split}\Delta^{LIV} (\overline{x}, u_D) &= \frac{\partial E(Y|X = \overline{x}, P(Z) = p)}{\partial p} \bigg\rvert_{p = u_D} \\& \\&= \overline{x}(\beta_1 - \beta_0) + E(U_1 - U_0 | U_D = u_D) \\&\\& = \underbrace{\overline{x}(\beta_1 - \beta_0)}_{\substack{observable \\ component}} + \underbrace{\frac{\partial K}{\partial p} \bigg\rvert_{p = u_D}}_{\substack{k(p): \ unobservable \\ component}} = MTE(\overline{x}, u_D)\end{split}\end{equation}

Since we do not make any assumption about the functional form of the unobservables, we estimate $k(p)$ non-parametrically. In particular, $k(p)$ is the first derivative of a locally quadratic kernel regression.

2) The Initialization File

For the semiparametric estimation, we need information on the following sections:

* __ESTIMATION__: Specify the dependent variable (wage) and the indicator variable (treatment dummy) of the input data frame. For the estimation of the propensity score $P(Z)$, we choose a probability model, here logit. Furthermore, we select 30 bins to determine the common support in the treated and untreated subsamples. For the locally quadratic regression, we follow the specification of [Carneiro et al. (2011)](https://pubs.aeaweb.org/doi/pdfplus/10.1257/aer.101.6.2754) and choose a bandwidth of 0.322. The respective gridsize for the locally quadratic regression is set to 500. [Fan and Marron (1994)](https://www.tandfonline.com/doi/abs/10.1080/10618600.1994.10474629) find that a gridsize of 400 is a good default for graphical analysis. Since the data set is large (1785 observations) and the kernel regression function has a very low runtime, we increase the gridsize to 500.
Setting it to the default or increasing it even more does not affect the final MTE. Note that the MTE identified by LIV consists of two components: $\overline{x}(\beta_1 - \beta_0)$ (which does not depend on $P(Z) = p$) and $k(p)$ (which does depend on $p$). The latter is estimated nonparametrically. The key "ps_range" in the initialization file specifies the interval over which $k(p)$ is estimated. After the data outside the overlapping support are trimmed, the locally quadratic kernel estimator uses the remaining data to predict $k(p)$ over the entire "ps_range" specified by the user. If "ps_range" is larger than the common support, *grmpy* extrapolates the values for the MTE outside this region. Technically speaking, interpretations of the MTE are only valid within the common support. Here, we set "ps_range" to [0.005, 0.995]. The other parameters in this section are set by default and, normally, do not need to be changed.

* __TREATED, UNTREATED, CHOICE__: In these sections, the variables of the outcome equations (treated, untreated) and of the college decision (choice) are specified.

* __DIST__: The distribution of the unobservables is not of relevance in the semiparametric approach and can be ignored.
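As an aside, the locally quadratic kernel regression that delivers the derivative $k'(p)$ can be sketched in a few lines of numpy. This is only a toy illustration on synthetic data with a Gaussian kernel, not grmpy's actual implementation; the bandwidth and grid simply mirror the numbers discussed above.

import numpy as np

rng = np.random.default_rng(0)
n = 1000
p = rng.uniform(0.01, 0.99, n)                              # pseudo propensity scores
y = 0.5 * np.sin(3 * p) + rng.normal(scale=0.05, size=n)    # stand-in for the residualized outcome K(p) + noise

def local_quadratic_derivative(p, y, grid, bandwidth=0.322):
    # Kernel-weighted quadratic fit around each grid point; the linear coefficient is k'(u).
    deriv = np.empty_like(grid)
    for i, u in enumerate(grid):
        w = np.exp(-0.5 * ((p - u) / bandwidth) ** 2)       # Gaussian kernel weights
        Z = np.column_stack([np.ones_like(p), p - u, (p - u) ** 2])
        beta = np.linalg.solve(Z.T @ (Z * w[:, None]), Z.T @ (w * y))
        deriv[i] = beta[1]
    return deriv

grid = np.linspace(0.005, 0.995, 500)                       # mirrors ps_range and gridsize
k_prime = local_quadratic_derivative(p, y, grid)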
%%file files/tutorial_semipar.yml
---
ESTIMATION:
  file: data/aer-replication-mock.pkl
  dependent: wage
  indicator: state
  semipar: True
  show_output: True
  logit: True
  nbins: 30
  bandwidth: 0.322
  gridsize: 500
  trim_support: True
  reestimate_p: False
  rbandwidth: 0.05
  derivative: 1
  degree: 2
  ps_range: [0.005, 0.995]

TREATED:
  order:
    - exp
    - expsq
    - lwage5
    - lurate
    - cafqt
    - cafqtsq
    - mhgc
    - mhgcsq
    - numsibs
    - numsibssq
    - urban14
    - lavlocwage17
    - lavlocwage17sq
    - avurate
    - avuratesq
    - d57
    - d58
    - d59
    - d60
    - d61
    - d62
    - d63

UNTREATED:
  order:
    - exp
    - expsq
    - lwage5
    - lurate
    - cafqt
    - cafqtsq
    - mhgc
    - mhgcsq
    - numsibs
    - numsibssq
    - urban14
    - lavlocwage17
    - lavlocwage17sq
    - avurate
    - avuratesq
    - d57
    - d58
    - d59
    - d60
    - d61
    - d62
    - d63

CHOICE:
  params:
    - 1.0
  order:
    - const
    - cafqt
    - cafqtsq
    - mhgc
    - mhgcsq
    - numsibs
    - numsibssq
    - urban14
    - lavlocwage17
    - lavlocwage17sq
    - avurate
    - avuratesq
    - d57
    - d58
    - d59
    - d60
    - d61
    - d62
    - d63
    - lwage5_17numsibs
    - lwage5_17mhgc
    - lwage5_17cafqt
    - lwage5_17
    - lurate_17
    - lurate_17numsibs
    - lurate_17mhgc
    - lurate_17cafqt
    - tuit4c
    - tuit4cnumsibs
    - tuit4cmhgc
    - tuit4ccafqt
    - pub4
    - pub4numsibs
    - pub4mhgc
    - pub4cafqt

DIST:
  params:
    - 0.1
    - 0.0
    - 0.0
    - 0.1
    - 0.0
    - 1.0
Overwriting files/tutorial_semipar.yml
MIT
promotion/grmpy_tutorial_notebook/tutorial_semipar_notebook.ipynb
OpenSourceEconomics/grmpy
Note that I do not include a constant in the __TREATED, UNTREATED__ section. The reason for this is that in the semiparametric setup, $\beta_1$ and $\beta_0$ are determined by running a Double Residual Regression without an intercept:

$$ e_Y = e_X \beta_0 \ + \ e_{X \ \times \ p} (\beta_1 - \beta_0) \ + \ \epsilon $$

where $e_X$, $e_{X \ \times \ p}$, and $e_Y$ are the residuals of a local linear regression of $X$, $X \times p$, and $Y$ on $\widehat{P}(Z)$.

We now proceed to our replication.

3) Estimation

Conduct the estimation based on the initialization file.
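To make the Double Residual Regression step concrete, here is a rough, self-contained sketch on purely synthetic data. All names and numbers are made up, and grmpy residualizes with local linear smoothers; the simple Nadaraya-Watson residualization below is used only for brevity.

import numpy as np

rng = np.random.default_rng(0)
n = 500
p = rng.uniform(0.05, 0.95, n)                 # estimated propensity scores P(Z)
X = rng.normal(size=(n, 2))                    # covariates (no constant)
Xp = X * p[:, None]                            # interactions X * p
y = X @ np.array([1.0, -0.5]) + Xp @ np.array([0.3, 0.2]) + 0.5 * np.sin(3 * p) + rng.normal(scale=0.1, size=n)

def residualize(v, p, h=0.1):
    # Residuals of a kernel (Nadaraya-Watson) regression of v on p.
    w = np.exp(-0.5 * ((p[:, None] - p[None, :]) / h) ** 2)
    fitted = (w * v[None, :]).sum(axis=1) / w.sum(axis=1)
    return v - fitted

e_y = residualize(y, p)
e_X = np.column_stack([residualize(X[:, j], p) for j in range(X.shape[1])])
e_Xp = np.column_stack([residualize(Xp[:, j], p) for j in range(Xp.shape[1])])

# OLS without an intercept: the coefficient vector stacks [beta_0, beta_1 - beta_0].
coef, *_ = np.linalg.lstsq(np.column_stack([e_X, e_Xp]), e_y, rcond=None)
print(coef)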
rslt = fit('files/tutorial_semipar.yml', semipar=True)
Logit Regression Results ============================================================================== Dep. Variable: y No. Observations: 1747 Model: Logit Df Residuals: 1717 Method: MLE Df Model: 29 Date: Mon, 12 Oct 2020 Pseudo R-squ.: 0.2858 Time: 21:29:55 Log-Likelihood: -864.74 converged: True LL-Null: -1210.8 Covariance Type: nonrobust LLR p-value: 4.178e-127 ==================================================================================== coef std err z P>|z| [0.025 0.975] ------------------------------------------------------------------------------------ const 288.3699 151.012 1.910 0.056 -7.609 584.349 cafqt -6.4256 5.019 -1.280 0.200 -16.263 3.411 cafqtsq 0.3348 0.072 4.665 0.000 0.194 0.476 mhgc -0.2733 2.146 -0.127 0.899 -4.480 3.933 mhgcsq 0.0180 0.007 2.624 0.009 0.005 0.031 numsibs -0.4059 2.403 -0.169 0.866 -5.116 4.304 numsibssq 0.0012 0.011 0.104 0.917 -0.021 0.024 urban14 0.3387 0.140 2.418 0.016 0.064 0.613 lavlocwage17 -54.1077 29.111 -1.859 0.063 -111.164 2.948 lavlocwage17sq 2.6770 1.420 1.885 0.059 -0.107 5.461 avurate -0.0936 0.633 -0.148 0.882 -1.334 1.146 avuratesq 0.0139 0.049 0.286 0.775 -0.081 0.109 d57 0.3166 0.251 1.262 0.207 -0.175 0.808 d58 0.3065 0.253 1.214 0.225 -0.189 0.802 d59 -0.2110 0.251 -0.840 0.401 -0.703 0.281 d60 0.0341 0.237 0.144 0.886 -0.430 0.498 d61 0.0863 0.238 0.362 0.717 -0.381 0.553 d62 0.2900 0.224 1.293 0.196 -0.150 0.730 d63 -0.0237 0.239 -0.099 0.921 -0.492 0.444 lwage5_17numsibs 0.0170 0.237 0.072 0.943 -0.448 0.482 lwage5_17mhgc 0.0050 0.214 0.023 0.981 -0.414 0.424 lwage5_17cafqt 0.7582 0.498 1.521 0.128 -0.219 1.735 lwage5_17 -1.5203 2.738 -0.555 0.579 -6.887 3.846 lurate_17 -0.1394 0.248 -0.563 0.573 -0.625 0.346 lurate_17numsibs -0.0028 0.020 -0.140 0.888 -0.042 0.037 lurate_17mhgc 0.0074 0.019 0.386 0.700 -0.030 0.045 lurate_17cafqt -0.0174 0.044 -0.394 0.693 -0.104 0.069 tuit4c 0.0114 0.060 0.191 0.849 -0.105 0.128 tuit4cnumsibs 0.0039 0.005 0.806 0.420 -0.006 0.013 tuit4cmhgc -0.0008 0.005 -0.167 0.867 -0.010 0.008 tuit4ccafqt -0.0041 0.010 -0.398 0.690 -0.024 0.016 pub4 0.4641 0.873 0.532 0.595 -1.247 2.175 pub4numsibs 0.0451 0.074 0.611 0.541 -0.100 0.190 pub4mhgc -0.0408 0.069 -0.594 0.553 -0.176 0.094 pub4cafqt -0.0164 0.164 -0.100 0.920 -0.338 0.305 ==================================================================================== Common support lies beteen: 0.05361584898356705 and 0.9670786072336018
MIT
promotion/grmpy_tutorial_notebook/tutorial_semipar_notebook.ipynb
OpenSourceEconomics/grmpy
The rslt dictionary contains information on the estimated parameters and the final MTE.
list(rslt)
_____no_output_____
MIT
promotion/grmpy_tutorial_notebook/tutorial_semipar_notebook.ipynb
OpenSourceEconomics/grmpy
Before plotting the MTE, let's see what else we can learn. For instance, we can account for the variation in $X$. Note that we divide the MTE by 4 to investigate the effect of one additional year of college education.
np.min(rslt['mte_min']) / 4, np.max(rslt['mte_max']) / 4
_____no_output_____
MIT
promotion/grmpy_tutorial_notebook/tutorial_semipar_notebook.ipynb
OpenSourceEconomics/grmpy
Next we plot the MTE based on the estimation results. As shown in the figure below, the replicated MTE comes very close to the original, but its 90 percent confidence bands are wider. This is due to the use of a mock data set which merges basic and local variables randomly. The bootstrap method, which is used to estimate the confidence bands, is sensitive to the discrepancies in the data.
mte, quantiles = plot_semipar_mte(rslt, 'files/tutorial_semipar.yml', nbootstraps=250)
_____no_output_____
MIT
promotion/grmpy_tutorial_notebook/tutorial_semipar_notebook.ipynb
OpenSourceEconomics/grmpy
Multivariate Logistic Regression Demo

_Source: 🤖[Homemade Machine Learning](https://github.com/trekhleb/homemade-machine-learning) repository_

> ☝Before moving on with this demo you might want to take a look at:
> - 📗[Math behind the Logistic Regression](https://github.com/trekhleb/homemade-machine-learning/tree/master/homemade/logistic_regression)
> - ⚙️[Logistic Regression Source Code](https://github.com/trekhleb/homemade-machine-learning/blob/master/homemade/logistic_regression/logistic_regression.py)

**Logistic regression** is the appropriate regression analysis to conduct when the dependent variable is dichotomous (binary). Like all regression analyses, logistic regression is a predictive analysis. It is used to describe data and to explain the relationship between one dependent binary variable and one or more nominal, ordinal, interval or ratio-level independent variables.

Logistic regression is used when the dependent variable (target) is categorical. For example:

- To predict whether an email is spam (`1`) or not (`0`).
- Whether an online transaction is fraudulent (`1`) or not (`0`).
- Whether a tumor is malignant (`1`) or not (`0`).

> **Demo Project:** In this example we will train a classifier for handwritten digits (0-9).
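Before diving into the demo, a minimal sketch of the core building block, the logistic (sigmoid) function and the resulting decision rule; the parameter values below are made up purely for illustration:

import numpy as np

def sigmoid(z):
    # Logistic function: maps any real number into the interval (0, 1).
    return 1.0 / (1.0 + np.exp(-z))

theta = np.array([0.5, -1.2, 2.0])    # made-up parameters (bias + 2 features)
x = np.array([1.0, 0.3, 0.8])         # one example, with a leading 1 for the bias term
probability = sigmoid(theta @ x)      # estimated P(y = 1 | x)
prediction = int(probability >= 0.5)  # classify as 1 when the probability exceeds 0.5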
# To make debugging of logistic_regression module easier we enable imported modules autoreloading feature.
# By doing this you may change the code of logistic_regression library and all these changes will be available here.
%load_ext autoreload
%autoreload 2

# Add project root folder to module loading paths.
import sys
sys.path.append('../..')
_____no_output_____
MIT
notebooks/logistic_regression/multivariate_logistic_regression_demo.ipynb
pugnator-12/homemade-machine-learning
Import Dependencies

- [pandas](https://pandas.pydata.org/) - library that we will use for loading and displaying the data in a table
- [numpy](http://www.numpy.org/) - library that we will use for linear algebra operations
- [matplotlib](https://matplotlib.org/) - library that we will use for plotting the data
- [math](https://docs.python.org/3/library/math.html) - math library that we will use to calculate square roots etc.
- [logistic_regression](https://github.com/trekhleb/homemade-machine-learning/blob/master/homemade/logistic_regression/logistic_regression.py) - custom implementation of logistic regression
# Import 3rd party dependencies.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import math

# Import custom logistic regression implementation.
from homemade.logistic_regression import LogisticRegression
_____no_output_____
MIT
notebooks/logistic_regression/multivariate_logistic_regression_demo.ipynb
pugnator-12/homemade-machine-learning
Load the Data

In this demo we will be using a sample of the [MNIST dataset in CSV format](https://www.kaggle.com/oddrationale/mnist-in-csv/home). Instead of using the full dataset with 60000 training examples, we will use a reduced dataset of just 10000 examples that we will also split into training and testing sets.

Each row in the dataset consists of 785 values: the first value is the label (a number from 0 to 9) and the remaining 784 values (a 28x28 pixel image) are the pixel values (each a number from 0 to 255).
# Load the data.
data = pd.read_csv('../../data/mnist-demo.csv')

# Print the data table.
data.head(10)
_____no_output_____
MIT
notebooks/logistic_regression/multivariate_logistic_regression_demo.ipynb
pugnator-12/homemade-machine-learning
Plot the Data

Let's peek at the first 25 rows of the dataset and display them as images to see examples of the digits we will be working with.
# How many numbers to display.
numbers_to_display = 25

# Calculate the number of cells that will hold all the numbers.
num_cells = math.ceil(math.sqrt(numbers_to_display))

# Make the plot a little bit bigger than default one.
plt.figure(figsize=(10, 10))

# Go through the first numbers in a training set and plot them.
for plot_index in range(numbers_to_display):
    # Extract digit data.
    digit = data[plot_index:plot_index + 1].values
    digit_label = digit[0][0]
    digit_pixels = digit[0][1:]

    # Calculate image size (remember that each picture has square proportions).
    image_size = int(math.sqrt(digit_pixels.shape[0]))

    # Convert image vector into the matrix of pixels.
    frame = digit_pixels.reshape((image_size, image_size))

    # Plot the number matrix.
    plt.subplot(num_cells, num_cells, plot_index + 1)
    plt.imshow(frame, cmap='Greys')
    plt.title(digit_label)
    plt.tick_params(axis='both', which='both', bottom=False, left=False, labelbottom=False, labelleft=False)

# Plot all subplots.
plt.subplots_adjust(hspace=0.5, wspace=0.5)
plt.show()
_____no_output_____
MIT
notebooks/logistic_regression/multivariate_logistic_regression_demo.ipynb
pugnator-12/homemade-machine-learning
Split the Data Into Training and Test Sets

In this step we will split our dataset into _training_ and _testing_ subsets (in proportion 80/20%).

The training dataset will be used for training our model. The testing dataset will be used for validating the model. All data from the testing dataset will be new to the model, so we can check how accurate its predictions are.
# Split data set on training and test sets with proportions 80/20.
# Function sample() returns a random sample of items.
pd_train_data = data.sample(frac=0.8)
pd_test_data = data.drop(pd_train_data.index)

# Convert training and testing data from Pandas to NumPy format.
train_data = pd_train_data.values
test_data = pd_test_data.values

# Extract training/test labels and features.
num_training_examples = 6000
x_train = train_data[:num_training_examples, 1:]
y_train = train_data[:num_training_examples, [0]]

x_test = test_data[:, 1:]
y_test = test_data[:, [0]]
_____no_output_____
MIT
notebooks/logistic_regression/multivariate_logistic_regression_demo.ipynb
pugnator-12/homemade-machine-learning
Init and Train Logistic Regression Model

> ☝🏻This is the place where you might want to play with model configuration.

- `max_iterations` - the maximum number of iterations that the gradient descent algorithm will use to find the minimum of the cost function. Low numbers may prevent gradient descent from reaching the minimum. High numbers will make the algorithm work longer without improving its accuracy.
- `regularization_param` - parameter that will fight overfitting. The higher the parameter, the simpler the model will be.
- `polynomial_degree` - the degree of additional polynomial features (`x1^2 * x2, x1^2 * x2^2, ...`). More features make the decision boundary more curved, which allows you to curve the predictions.
- `sinusoid_degree` - the degree of sinusoid parameter multipliers of additional features (`sin(x), sin(2*x), ...`). This will allow you to curve the predictions by adding a sinusoidal component to the prediction curve.
- `normalize_data` - boolean flag that indicates whether data normalization is needed or not.
# Set up logistic regression parameters.
max_iterations = 10000  # Max number of gradient descent iterations.
regularization_param = 10  # Helps to fight model overfitting.
polynomial_degree = 0  # The degree of additional polynomial features.
sinusoid_degree = 0  # The degree of sinusoid parameter multipliers of additional features.
normalize_data = True  # Whether we need to normalize data to make it more uniform or not.

# Init logistic regression instance.
logistic_regression = LogisticRegression(x_train, y_train, polynomial_degree, sinusoid_degree, normalize_data)

# Train logistic regression.
(thetas, costs) = logistic_regression.train(regularization_param, max_iterations)
_____no_output_____
MIT
notebooks/logistic_regression/multivariate_logistic_regression_demo.ipynb
pugnator-12/homemade-machine-learning
Print Training Results

Let's see what the model parameters (thetas) look like. For each digit class (from 0 to 9) we've just trained a set of 784 parameters (one theta for each image pixel). These parameters represent the importance of every pixel for recognizing a specific digit.
# Print thetas table.
pd.DataFrame(thetas)

# How many numbers to display.
numbers_to_display = 9

# Calculate the number of cells that will hold all the numbers.
num_cells = math.ceil(math.sqrt(numbers_to_display))

# Make the plot a little bit bigger than default one.
plt.figure(figsize=(10, 10))

# Go through the thetas and print them.
for plot_index in range(numbers_to_display):
    # Extract digit data (skip the bias term in the first column).
    digit_pixels = thetas[plot_index][1:]

    # Calculate image size (remember that each picture has square proportions).
    image_size = int(math.sqrt(digit_pixels.shape[0]))

    # Convert image vector into the matrix of pixels.
    frame = digit_pixels.reshape((image_size, image_size))

    # Plot the number matrix.
    plt.subplot(num_cells, num_cells, plot_index + 1)
    plt.imshow(frame, cmap='Greys')
    plt.title(plot_index)
    plt.tick_params(axis='both', which='both', bottom=False, left=False, labelbottom=False, labelleft=False)

# Plot all subplots.
plt.subplots_adjust(hspace=0.5, wspace=0.5)
plt.show()
_____no_output_____
MIT
notebooks/logistic_regression/multivariate_logistic_regression_demo.ipynb
pugnator-12/homemade-machine-learning