| path (stringlengths 7-265) | concatenated_notebook (stringlengths 46-17M) |
|---|---|
examples/Molecule_mapping_beta_pictoris.ipynb | ###Markdown
Molecule mapping of the $\beta$ Pictoris system In this tutorial we will simulate an observation of the $\beta$ Pictoris system with a Medium-Resolution Integral Field Spectrograph similar to VLT/SINFONI or VLT/ERIS. After simulating the observation we will detect both planets using the molecular mapping technique from Hoeijmakers et al. 2018.
###Code
#------ manually fix import
import sys
import os
import pathlib
module_path = os.path.abspath(os.path.join('..'))
sys.path.append(module_path)
#------
import hcipy as hp
import numpy as np
import matplotlib.pyplot as plt
import astropy.units as u
from ObservationSimulator import *
from scipy.ndimage import gaussian_filter1d
###Output
_____no_output_____
###Markdown
First, we define the wavelength grid that we observe. In our case, we simulate the wavelength sampling of the K-band setting of VLT/SINFONI.
###Code
spectral_resolution = 5000
wavelengths = np.arange(1.929, 2.472, 0.25e-3)*u.micron
###Output
_____no_output_____
###Markdown
Sources. Now we have to define the sources that we observe. Here, we simulate a star with the properties of $\beta$ Pictoris and the two planets. We also include the sky background, as that may be important in the K-band.
###Code
#Star
star = Star(wavelengths, distance=19.44*u.pc, radius=1.8*u.R_sun,
mass=1.75*u.M_sun, radial_velocity=20*u.km/u.s, temperature=8000*u.K)
#Planets
beta_pic_b = Planet(wavelengths, host=star, orbital_phase=-np.pi/4*u.rad,
sma=9.9*u.au, radius=1.46*u.R_jup, inclination=90*u.deg, temperature=1700*u.K)
beta_pic_c = Planet(wavelengths, host=star, orbital_phase=np.pi/2*u.rad,
sma=2.7*u.au, radius=1.2*u.R_jup, inclination=90*u.deg, temperature=1200*u.K)
#Sky background
sky_background = Background(wavelengths)
sky_spec = get_sky_emission(fov=0.8*u.arcsec)
sky_background.spectral_model = InterpolatedSpectrum(sky_spec.wavelengths, sky_spec,
spectral_resolution=spectral_resolution)
###Output
_____no_output_____
###Markdown
Now we define the spectral models of the star and the planets. We use a PHOENIX spectrum for the star and BT-Settl models for the planets.
###Code
star_spec = get_PHOENIX_spectrum(temperature=star.temperature, log_g=4)
star.spectral_model = InterpolatedSpectrum(star_spec.wavelengths, star_spec, spectral_resolution=spectral_resolution)
beta_pic_b_spec = get_BT_SETTL_spectrum(temperature = beta_pic_b.temperature, log_g=4)
beta_pic_b.spectral_model = InterpolatedSpectrum(beta_pic_b_spec.wavelengths, beta_pic_b_spec,
spectral_resolution=spectral_resolution)
beta_pic_c_spec = get_BT_SETTL_spectrum(temperature = beta_pic_c.temperature, log_g=4)
beta_pic_c.spectral_model = InterpolatedSpectrum(beta_pic_c_spec.wavelengths, beta_pic_c_spec,
spectral_resolution=spectral_resolution)
plot_spectrum(sky_background.spectrum, label='Sky background', alpha = 0.8)
plot_spectrum(star.spectrum, label='Star')
plot_spectrum(beta_pic_b.spectrum, label='Planet b')
plot_spectrum(beta_pic_c.spectrum, label='Planet c')
plt.yscale('log')
plt.ylabel('Received Flux at the telescope')
plt.xlabel('Wavelength [m]')
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
Instrument. Next, we define our instrument.
###Code
#------------Define instrument---------
fov = 0.5*u.arcsec
read_noise = 7
dark_current_rate = 0.05 * u.Hz
telescope = Telescope(diameter=8*u.m, field_of_view=fov, pupil_grid_size=64, focal_grid_size=32)
input_pup = hcipyPupilGenerator(telescope)
x = input_pup
x = AtmosphericTransmission(wavelengths)(input_pup)
x = EffectiveThroughput(0.1)(x)
x = telescope.aperture()(x)
x = Atmosphere(seeing=0.9*u.arcsec, tau_0=0.005*u.s, L0=40*u.m, N_layers=3)(x)
x = IdealAO(num_modes=60, mode_basis='actuators', noise_level=400*u.nm)(x)
x = telescope.propagator()(x)
#We make use of the Detector3D wrapper, which basically acts as a separate detector at each wavelength.
detector = PhysicalDetector(read_noise=read_noise, dark_current_rate=dark_current_rate
,well_depth=np.inf, gain=1, quantum_efficiency=0.8)
ifs = Detector3D(detector, wavelengths)(x)
###Output
_____no_output_____
###Markdown
And then we run the simulation.
###Code
#---------Run simulation-----------------
sim = Simulation(first_component=input_pup)
DIT = 4*60*u.s
N_DIT = 10
time_resolution = 4*60*u.s
for x in range(N_DIT):
print('Simulating exposure {0}/{1}'.format(x + 1, N_DIT))
sim.run([star, beta_pic_b, beta_pic_c], observing_time=DIT, time_resolution=time_resolution)
if x==0:
datacube = ifs.read_out(destructive=True)
else:
datacube += ifs.read_out(destructive=True)
sim.statistics()
np.save('/users/ricolandman/Documents/Research/MedRes/Simulations/sinfoni_like_beta_pic.npy', datacube)
###Output
==========
Total runtime: 23.3 s.
Time spent on overhead: -160 s.
----------
Time spent per component:
<ObservationSimulator.simulation.components_hcipy.hcipyPupilGenerator object at 0x1357f45b0>: 16.6 s
<ObservationSimulator.simulation.atmosphere.AtmosphericTransmission object at 0x1357f4130>: 17.7 s
<ObservationSimulator.simulation.atmosphere.EffectiveThroughput object at 0x140642a30>: 2.81 s
<ObservationSimulator.simulation.components_hcipy.Apodizer object at 0x1470d4d90>: 8.98 s
<ObservationSimulator.simulation.atmosphere.Atmosphere object at 0x1355affa0>: 6.78 s
<ObservationSimulator.simulation.ao.IdealAO object at 0x14654b5e0>: 17.5 s
<ObservationSimulator.simulation.components_hcipy.FraunhoferPropagator object at 0x135459310>: 105 s
<ObservationSimulator.simulation.detector.Detector3D object at 0x1354596a0>: 7.23 s
==========
###Markdown
Results. We can then read out the detectors and plot, for example, the broadband image.
###Code
if not isinstance(fov, float):
fov = fov.value
broadband_image = np.sum(datacube, axis=0)
#strehl = broadband_image.max()/diff_lim_image.max()
#print('Strehl ratio:', strehl)
fig, axes = plt.subplots(1,2,sharey=True, sharex=True, figsize=(10,4))
plt.sca(axes[0])
plt.imshow(broadband_image, cmap='inferno', extent=[-fov, fov, -fov, fov])
plt.ylabel('Y [arcsec]')
plt.xlabel('X [arcsec]')
plt.colorbar(label='Counts [e-]')
print(broadband_image)
plt.sca(axes[1])
plt.imshow(np.log10(broadband_image), cmap='inferno', extent=[-fov, fov, -fov, fov])
plt.xlabel('X [arcsec]')
plt.colorbar(label='Log10 counts [e-]')
plt.tight_layout()
plt.show()
###Output
[[4051765.86945263 4245177.41097001 4436963.58726139 ... 4736518.2504104
4604384.58496592 4310411.10247798]
[3710948.33391212 3946514.79092023 4273816.40501663 ... 4702018.61167024
4600049.86337742 4372461.89337557]
[3304864.24150782 3498828.27780148 3853578.0829894 ... 4571151.89849611
4538799.61382801 4410062.71646856]
...
[4499127.0559773 4511623.34894653 4590106.8204693 ... 3307737.77780764
3449785.53999612 3577776.91827851]
[4345484.38041244 4424144.9395436 4511329.60245116 ... 3140671.11181435
3267643.91753326 3336856.02177418]
[4238713.92046972 4343340.8975472 4363733.12628636 ... 3279144.63823972
3374286.30385627 3403542.50832658]]
###Markdown
Data Analysis. Here, we apply the molecule mapping technique from Hoeijmakers et al. 2018, where a template spectrum is cross-correlated with the datacube.
###Code
def molecule_mapping(datacube, wavelengths, N_pca, template, rv_grid, smoothing_kernel_width=30, exclude_rv=1000):
#Find the 20 brightest spaxels
reshaped_cube = datacube.reshape(datacube.shape[0],-1)
brightest_pix = np.argsort(np.sum(reshaped_cube,axis=0))[::-1][:20]
#Normalize
norm_cube = reshaped_cube/np.nansum(reshaped_cube,axis=0)
#Construct master spectrum and divide by it
brightest_spec = norm_cube[:,brightest_pix]
master_spec = np.median(brightest_spec,axis=1)
stellar_normalised = (norm_cube.T/master_spec).T
#Remove low-pass filtered version of appropriate master spectrum
data_kernel_width = smoothing_kernel_width
low_pass = np.array([gaussian_filter1d(spec, data_kernel_width) for spec in stellar_normalised.T]).T
stellar_model = (low_pass.T*master_spec).T
star_removed = norm_cube-stellar_model
#PCA
um,sm,vm =np.linalg.svd(star_removed, full_matrices=False)
s_new = sm.copy()
s_new[:N_pca] = 0
pca_sub = (um.dot(np.diag(s_new))).dot(vm)
#Molecule mapping
beta = 1-rv_grid/3e5 # Doppler factor; rv_grid is in km/s and 3e5 approximates c in km/s
shifted_wavelengths = wavelengths * beta[:, np.newaxis]
T = template.at(shifted_wavelengths.ravel()).reshape(shifted_wavelengths.shape).value
cross_corr = T.dot(pca_sub)
cross_corr -= np.nanmedian(cross_corr[np.abs(rv_grid)>exclude_rv],axis=0)
cross_corr /= np.nanstd(cross_corr[np.abs(rv_grid)>exclude_rv],axis=0)
reshaped_ccf_map = cross_corr.reshape(rv_grid.size, datacube.shape[1], datacube.shape[2])
return reshaped_ccf_map
flat_model = convolve_to_resolution(beta_pic_b_spec, spectral_resolution)
continuum = gaussian_filter1d(flat_model, 30)
flat_model = flat_model/continuum
flat_model = InterpolatedSpectrum(flux_density=flat_model, wavelengths=flat_model.wavelengths)
flat_model.initialise_for(beta_pic_b)
plt.plot(flat_model.at(wavelengths))
plt.show()
rv_grid = np.linspace(-3000,3000,500)
result= molecule_mapping(datacube, wavelengths, N_pca=5, template=flat_model,rv_grid = rv_grid)
rv_idx = np.argmin(np.abs(rv_grid-beta_pic_b.radial_velocity.to(u.km/u.s).value))
plt.imshow(result[rv_idx],extent=[-fov, fov, -fov, fov])
idx = np.argmax(result[result.shape[0]//2+2])
plt.colorbar(label='Signal-to-noise ratio')
plt.scatter(*beta_pic_b.location.to(u.arcsec), marker='x',color='red' )
plt.figure()
plt.plot(rv_grid,result.reshape(result.shape[0],-1)[:,idx])
plt.axvline(beta_pic_b.radial_velocity.to(u.km/u.s).value, ls='--', color='black', alpha=0.8)
plt.axhline(0, ls='--', color='black', alpha=0.8)
plt.ylabel("Signal-to-noise ratio")
plt.xlabel("Radial velocity [km/s]")
flat_model = convolve_to_resolution(beta_pic_c_spec, spectral_resolution)
continuum = gaussian_filter1d(flat_model, 30)
flat_model = flat_model/continuum
flat_model = InterpolatedSpectrum(flux_density=flat_model, wavelengths=flat_model.wavelengths)
flat_model.initialise_for(beta_pic_c)
rv_grid = np.linspace(-3000,3000,300)
result= molecule_mapping(datacube, wavelengths, N_pca=30, template=flat_model,rv_grid = rv_grid)
rv_idx = np.argmin(np.abs(rv_grid-beta_pic_c.radial_velocity.to(u.km/u.s).value))
plt.imshow(result[rv_idx],extent=[-fov, fov, -fov, fov])
plt.xlabel('X [arcsec]')
plt.ylabel('Y [arcsec]')
offset = 11
idx = [result.shape[1]//2, result.shape[2]//2+offset]
plt.colorbar(label='Signal-to-noise ratio')
plt.scatter(*beta_pic_c.location.to(u.arcsec),marker='x', color='red')
plt.figure()
plt.plot(rv_grid, result[:,idx[0], idx[1]])
plt.axvline(beta_pic_c.radial_velocity.to(u.km/u.s).value, ls='--', color='black', alpha=0.8)
plt.axhline(0, ls='--', color='black', alpha=0.8)
plt.ylabel("Signal-to-noise ratio")
plt.xlabel("Radial velocity [km/s]")
###Output
_____no_output_____ |
Sentiment_Model_using_PyTorch.ipynb | ###Markdown
###Code
!pip install transformers==3
import pandas as pd
import transformers
from transformers import BertModel, BertTokenizer, AdamW, get_linear_schedule_with_warmup
import torch
import torch.nn as nn
import numpy as np
import seaborn as sns
from sklearn.model_selection import train_test_split
from torch.utils.data import Dataset, DataLoader
from sklearn.utils import class_weight
from collections import defaultdict
import matplotlib.pyplot as plt
from sklearn.metrics import classification_report,confusion_matrix
df_train = pd.read_csv('train.csv', header = None)
df_train.columns = ['rating', 'review']
df_test_set = pd.read_csv('test.csv', header = None)
df_test_set.columns = ['rating', 'review']
df_train_set, df_valid_set = train_test_split(df_train, test_size = 0.2, random_state = 42)
# sns.countplot(df.rating)
class_name = ['most-negative', 'negative', 'neutral', 'positive', 'most-positive']
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
PRE_TRAINED_MODEL_NAME = 'bert-base-cased'
tokenizer = BertTokenizer.from_pretrained(PRE_TRAINED_MODEL_NAME)
#from tqdm import tqdm
#token_lens = []
#for txt in tqdm(df_train.review):
# tokens = tokenizer.encode(txt, max_length = 512)
# token_lens.append(len(tokens))
#print(max(token_lens))
# sns.distplot(token_lens)
MAX_LEN = 120
df_train
# a Dataset object loads training or test data into memory, and a DataLoader object
# fetches data from a Dataset and serves the data up in batches.
class YelpReviewDataset(Dataset):
def __init__(self, reviews, targets, tokenizer, max_len):
self.reviews = reviews
self.targets = targets
self.tokenizer = tokenizer
self.max_len = max_len
def __len__(self):
return len(self.reviews)
def __getitem__(self, item):
review = str(self.reviews[item])
target = self.targets[item]
encoding = self.tokenizer.encode_plus(
# string
review,
# Whether or not to encode the sequences with the special tokens relative to their model.
add_special_tokens = True,
# Controls the maximum length to use by one of the truncation/padding parameters.
max_length = self.max_len,
# These require two different sequences to be joined in a
# single 'input_ids' entry, which usually is performed with the help of special tokens,
# such as the classifier ([CLS]) and separator ([SEP]) tokens.
return_token_type_ids = False,
pad_to_max_length = True,
return_attention_mask = True,
# If set, will return tensors instead of list of python integers.
# 'pt': Return PyTorch torch.Tensor objects.
return_tensors = 'pt'
)
# A BatchEncoding with the fields:
return {
'review_text' : review,
'input_ids' : encoding['input_ids'].flatten(),
'attention_mask' : encoding['attention_mask'].flatten(),
'targets' : torch.tensor(target, dtype=torch.long)
}
# print(df_train_set.shape)
# print(df_valid_set.shape)
# print(df_test_set.shape)
# Checking the uniform distribution of a dataset
# class_weights = class_weight.compute_class_weight('balanced',
# np.unique(df_train.rating.values),
# df_train.rating.values)
def create_data_loader(df, tokenizer, max_len, batch_size):
reviewDataset = YelpReviewDataset(
# Convert the DataFrame (df-column in this case) to a NumPy array.
reviews = df.review.to_numpy(),
targets = df.rating.to_numpy(),
tokenizer = tokenizer,
max_len = max_len
)
# DataLoader represents a Python iterable over a dataset
return DataLoader(reviewDataset,
batch_size = batch_size,
num_workers = 4)
BATCH_SIZE = 32
train_data_loader = create_data_loader(df_train_set, tokenizer, MAX_LEN, BATCH_SIZE)
valid_data_loader = create_data_loader(df_valid_set, tokenizer, MAX_LEN, BATCH_SIZE)
test_data_loader = create_data_loader(df_test_set, tokenizer, MAX_LEN, BATCH_SIZE)
# bert_model = BertModel.from_pretrained(PRE_TRAINED_MODEL_NAME)
class SentimentClassifier(nn.Module):
def __init__(self, n_classes):
super(SentimentClassifier, self).__init__()
self.bert = BertModel.from_pretrained(PRE_TRAINED_MODEL_NAME)
self.drop = nn.Dropout(p = 0.4)
self.out1 = nn.Linear(self.bert.config.hidden_size, 128)
self.drop1 = nn.Dropout(p = 0.4)
self.relu = nn.ReLU()
self.out = nn.Linear(128, n_classes)
def forward(self, input_ids, attention_mask):
_, pooled_output = self.bert(
input_ids = input_ids,
attention_mask = attention_mask
)
output = self.drop(pooled_output)
output = self.out1(output)
output = self.relu(output)
output = self.drop1(output)
return self.out(output)
model = SentimentClassifier(len(class_name)+1) # +1 presumably so raw 1-5 ratings can be used directly as targets, leaving index 0 unused
model = model.to(device)
EPOCHS = 5
optimizer = AdamW(model.parameters(), lr = 2e-5, correct_bias = False)
total_steps = len(train_data_loader) * EPOCHS
scheduler = get_linear_schedule_with_warmup(
optimizer,
num_warmup_steps = 0,
num_training_steps = total_steps
)
loss_fn = nn.CrossEntropyLoss().to(device)
def train_epoch(model, data_loader, loss_fn, optimizer, device, scheduler, n_examples):
# Set the module in training mode
model = model.train()
losses = []
correct_predictions = 0
for data in data_loader:
input_ids = data['input_ids'].to(device)
attention_mask = data['attention_mask'].to(device)
targets = data['targets'].to(device)
# Although the recipe for forward pass needs to be defined within this function, one should call the
#Module instance afterwards instead of this since the former takes care of running the registered hooks
#while the latter silently ignores them.
outputs = model(
input_ids = input_ids,
attention_mask = attention_mask
)
_, preds = torch.max(outputs, dim=1)
loss = loss_fn(outputs, targets)
correct_predictions += torch.sum(preds == targets)
losses.append(loss.item())
loss.backward()
nn.utils.clip_grad_norm_(model.parameters(), max_norm = 1.0)
optimizer.step()
scheduler.step()
optimizer.zero_grad()
return correct_predictions.double() / n_examples, np.mean(losses)
def eval_model(model, data_loader, loss_fn, device, n_examples):
model = model.eval()
losses = []
correct_predictions = 0
with torch.no_grad():
for d in data_loader:
input_ids = d["input_ids"].to(device)
attention_mask = d["attention_mask"].to(device)
targets = d["targets"].to(device)
outputs = model(
input_ids=input_ids,
attention_mask=attention_mask
)
_, preds = torch.max(outputs, dim=1)
loss = loss_fn(outputs, targets)
correct_predictions += torch.sum(preds == targets)
losses.append(loss.item())
return correct_predictions.double() / n_examples, np.mean(losses)
history = defaultdict(list)
best_accuracy = 0
for epoch in range(EPOCHS):
print(f'Epoch {epoch + 1}/{EPOCHS}')
print('-' * 10)
train_acc, train_loss = train_epoch(
model,
train_data_loader,
loss_fn,
optimizer,
device,
scheduler,
len(df_train_set)
)
print(f'Train loss {train_loss} accuracy {train_acc}')
val_acc, val_loss = eval_model(
model,
valid_data_loader,
loss_fn,
device,
len(df_valid_set)
)
print(f'Val loss {val_loss} accuracy {val_acc}')
print()
history['train_acc'].append(train_acc)
history['train_loss'].append(train_loss)
history['val_acc'].append(val_acc)
history['val_loss'].append(val_loss)
if val_acc > best_accuracy:
torch.save(model.state_dict(), 'best_model_state.bin')
best_accuracy = val_acc
#plt.plot(history['train_acc'], label='train accuracy')
#plt.plot(history['val_acc'], label='validation accuracy')
#plt.title('Training history')
#plt.ylabel('Accuracy')
#plt.xlabel('Epoch')
#plt.legend()
#plt.ylim([0, 1]);
def get_predictions(model, data_loader):
model = model.eval()
review_texts = []
predictions = []
prediction_probs = []
real_values = []
with torch.no_grad():
for d in data_loader:
texts = d["review_text"]
input_ids = d["input_ids"].to(device)
attention_mask = d["attention_mask"].to(device)
targets = d["targets"].to(device)
outputs = model(
input_ids=input_ids,
attention_mask=attention_mask
)
_, preds = torch.max(outputs, dim=1)
review_texts.extend(texts)
predictions.extend(preds)
prediction_probs.extend(outputs)
real_values.extend(targets)
predictions = torch.stack(predictions).cpu()
prediction_probs = torch.stack(prediction_probs).cpu()
real_values = torch.stack(real_values).cpu()
return review_texts, predictions, prediction_probs, real_values
y_review_texts, y_pred, y_pred_probs, y_test = get_predictions(
model,
test_data_loader
)
print(classification_report(y_test, y_pred, target_names=class_name))
review_text = "the food was delicious but it was spicy"
encoded_review = tokenizer.encode_plus(
review_text,
max_length=MAX_LEN,
add_special_tokens=True,
return_token_type_ids=False,
pad_to_max_length=True,
return_attention_mask=True,
return_tensors='pt',
)
input_ids = encoded_review['input_ids'].to(device)
attention_mask = encoded_review['attention_mask'].to(device)
output = model(input_ids, attention_mask)
_, prediction = torch.max(output, dim=1)
print(f'Review text: {review_text}')
print(f'Sentiment : {class_name[prediction]}')
###Output
_____no_output_____ |
Mersenne Numbers and Primes.ipynb | ###Markdown
Mersenne Primes. Who was Mersenne?- Marin Mersenne (1588-1648) was a French friar and a contemporary of Fermat, Pascal, Galileo and others- There were no mathematical journals at the time, so Mersenne played the role of a clearing house, collecting, organising and distributing mathematicians' discoveries- He was a philosopher himself and also worked on expressing the primes with a formula - a simple method for producing prime numbers- $x^{2} + x + 41$ is the classic example
###Code
import sympy

[i for i in range(1, 50) if not sympy.ntheory.isprime(i * i + i + 41)]
###Output
_____no_output_____
###Markdown
The formula holds for 1 through 39, but from 40 onwards counterexamples start to appear. Mersenne numbers and primes- Mersenne came up with a formula: $M_{n} = 2^{n} - 1$- These are called Mersenne numbers, and the ones among them that are prime are called Mersenne primes- If $n$ is composite, then $M_{n}$ is composite as well- And if $n$ is prime?
###Code
primes = [sympy.prime(n) for n in range(1, 9)]
[(n, 2 ** n - 1, sympy.ntheory.isprime(2 ** n - 1)) for n in primes]
###Output
_____no_output_____
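As a quick aside, the composite case stated above can be checked directly: if $n = ab$ then $2^{a} - 1$ divides $2^{ab} - 1$, so $M_{n}$ cannot be prime. A minimal illustrative snippet (not part of the original notebook):

```python
# composite n = 15 = 3 * 5: both 2**3 - 1 = 7 and 2**5 - 1 = 31 divide 2**15 - 1
print((2**15 - 1) % (2**3 - 1), (2**15 - 1) % (2**5 - 1))  # -> 0 0
```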
###Markdown
- Seeing that the formula held for the primes up to 19 (with the exception of 11), Mersenne began investigating the primes beyond 19 ~~a friar must have had plenty of time on his hands...~~- He concluded that $M_{31}$, $M_{67}$, $M_{127}$ and $M_{257}$ are prime
###Code
[(n, 2 ** n - 1, sympy.ntheory.isprime(2 ** n - 1)) for n in [31, 67, 127, 257]]
###Output
_____no_output_____
###Markdown
- How Mersenne arrived at these numbers was never recorded- $M_{31}$ alone is a ten-digit number, which is very large ~~for the technology of the day...~~ What came next- In 1750 the great Euler proved that $M_{31}$ is indeed prime- In 1876 Edouard Lucas showed that $M_{67}$ is not prime, but could not determine which numbers it factors into- Later a man named Derrick Lehmer refined Lucas's method into a more concise form
###Code
def lucas(p):
# Lucas-Lehmer test: M_p = 2**p - 1 is prime iff U reaches 0 after p-2 squarings
U = 4
Mp = 2 ** p - 1
for _ in range(p - 2):
U = (U * U - 2) % Mp
return U == 0
[lucas(p) for p in (31, 67, 127, 257)]
###Output
_____no_output_____
###Markdown
- It works! But why does it work? ~~I don't know either~~- Afterwards $M_{61}$, $M_{89}$ and $M_{107}$ were also shown to be Mersenne primes- In the end the verdict on Mersenne's four claims was settled: $M_{31}$ (O), $M_{67}$ (X), $M_{127}$ (O), $M_{257}$ (X) ~~making so many mathematicians scramble to prove all this may have been the real big picture~~ Factoring $M_{67}$- Lucas's method established that $M_{67}$ is not prime, but which numbers it is a product of remained unknown- In 1903 the mathematician Frank Nelson Cole (1861-1926) gave a lecture at the American Mathematical Society titled 'On the Factorisation of Large Numbers', where...- Without saying a word he began computing $2^{67}$, and then carefully subtracted 1- This number is a whopping
###Code
m67 = 2**67 - 1
f'{m67:_}'
###Output
_____no_output_____
###Markdown
- Then, on another blackboard, he wrote down 193,707,721 and 761,838,257,287 and began multiplying them
###Code
193_707_721 * 761_838_257_287
###Output
_____no_output_____
###Markdown
- He finished the calculation without a word, and Cole received a standing ovation- Cole later revealed that finding these factors had cost him every Sunday for three years
###Code
sympy.ntheory.isprime(2**67 - 1)
%time
print(sympy.factorint(2**67 - 1))
###Output
Wall time: 0 ns
{193707721: 1, 761838257287: 1}
|
online-retail-k-means-Tableau.ipynb | ###Markdown
CAPSTONE PROJECT 3 - RETAIL. Importing the necessary Python packages and libraries.
###Code
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
import datetime as dt
# importing required libraries for clustering
import sklearn
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
###Output
_____no_output_____
###Markdown
Reading and Understanding Data
###Code
x={1,2,3,4,4}
x.update([4,5,6,7])
print(x)
print(bool('False'))
print(bool)
i=0
while i<3:
print(i)
i+=1
else:
print(0)
# Reading the data from excel to pandas dataframe on which analysis needs to be done.
retail = pd.read_excel('Online_Retail.xlsx')
retail.head()
###Output
_____no_output_____
###Markdown
Performing a preliminary data inspection and data cleaning.
###Code
# shape of df
retail.shape #showing no. of rows and columns in the dataset.
# df info
retail.info() #showing no. of rows with non null entries and datatype of each variable with name.
# df description
retail.describe() #This gives statistical summary of numerical variable by default.
###Output
_____no_output_____
###Markdown
The describe function has revealed a strange insight: negative values in the Quantity and UnitPrice variables, which need further investigation.
###Code
retail[retail.Quantity<0],retail[retail.Quantity>0]
###Output
_____no_output_____
###Markdown
Filtering and comparing the data for both negative and non-negative values of Quantity, and correlating it with the information provided, we can infer that a negative Quantity is associated with a cancelled order.
###Code
retail[retail.UnitPrice<0]
###Output
_____no_output_____
###Markdown
From the above filtering and description it is not clear what 'adjustment of bad debt' refers to, and its treatment needs consultation with the client or domain expertise. Checking for missing data and formulating an apt strategy to treat them.
###Code
# Calculating the Missing Values % contribution in DF
df_null = round(100*(retail.isnull().sum())/len(retail), 5)
df_null
# Droping rows having missing values
retail = retail.dropna()
retail.shape
###Output
_____no_output_____
###Markdown
Dealing with duplicate records.
###Code
retail[retail.duplicated()].shape
retail.loc[retail.duplicated(keep=False)]
retail[['StockCode','Description']][retail[['StockCode','Description']].duplicated(keep=False)]
# Dropping duplicate records.
retail.drop_duplicates(inplace= True)
retail.shape
###Output
_____no_output_____
###Markdown
Descriptive analysis on dataset.
###Code
retail.describe() #This is giving the statistical summary such as count, mean, min, max and quartiles values.
retail[retail.Quantity<0].shape # This shows that 8872 line items were returned or cancelled.
retail.CustomerID.nunique() #This shows the total no. of unique customers who are contributing to overall sales.
retail.Country.unique(),retail.Country.nunique() #This shows that there are 37 countries with their names contributing to sales.
retail.Country.value_counts() # This shows the total no. of product bought for each country for that period, with UK being the highest contributor in sales.
retail.Country.value_counts().plot(kind= 'bar')
retail.InvoiceNo.nunique() # this shows the total no. of unique transactions for the given period.
len(retail.InvoiceNo.value_counts())# this shows the total no. of unique transactions for the given period.
len(retail.Description.unique()),retail.Description.nunique() # This shows the no. of distinct products bought by the customer during the given period.
sns.boxplot(retail.UnitPrice)
retail.UnitPrice.quantile([0,.25,.50,.75,.8,.9,.99,1])#This shows the relative prices of products where price of maximum products lie below 15 sterling
#per unit, with max price being 38970.
retail.UnitPrice[retail.UnitPrice<25].plot(kind='hist')
retail.Description.mode() #Highest selling product.
retail.Description.value_counts().head(25) # This is a list of top 25 selling products.
###Output
_____no_output_____
###Markdown
Cohort analysis: as per the requirement, we will build cohorts based on the month of each customer's first invoice date and then check the retention metric. The cohort size is 1 month and we consider the whole dataset as the range under analysis.
###Code
retail.info()
#Converting Customer ID to object datatype.
retail1=retail.copy()
retail1['CustomerID']=retail1['CustomerID'].astype(str)
retail1.info()
# For this analysis we drop negative-Quantity orders; otherwise cancellations would wrongly be counted towards 'ordered more than once'.
retail1.drop(retail1[retail1.Quantity<1].index, inplace = True)
retail1[retail1.Quantity<1]
n_orders = retail1.groupby(['CustomerID'])['InvoiceNo'].nunique()
mult_orders_perc = np.sum(n_orders > 1) / retail1['CustomerID'].nunique()
print(f'{100 * mult_orders_perc:.2f}% of customers ordered more than once.')
# This shows that about 65% of all customers in the given period ordered more than once.
# We can have a look at the distribution of the number of orders per customer.
ax = sns.distplot(n_orders, kde=False, hist=True)
ax.set(title='Distribution of number of orders per customer',
xlabel='no. of orders',
ylabel='no. of customers');
###Output
_____no_output_____
###Markdown
For the analysis we keep only the relevant columns and drop duplicated rows: one order (indicated by InvoiceNo) can contain multiple items (indicated by StockCode).
###Code
retail1 = retail1[['CustomerID', 'InvoiceNo', 'InvoiceDate']].drop_duplicates()
retail1
###Output
_____no_output_____
###Markdown
Creating the cohort and order_month variables. The first one indicates the monthly cohort based on the first purchase date (calculated per customer). The latter one is the truncated month of the purchase date.
###Code
retail1['order_month'] = retail1['InvoiceDate'].dt.to_period('M')
retail1['cohort'] = retail1.groupby('CustomerID')['InvoiceDate'] \
.transform('min') \
.dt.to_period('M')
retail1.head(25)
###Output
_____no_output_____
###Markdown
Aggregating the data per cohort and order_month and count the number of unique customers in each group. Additionally, we add the period_number, which indicates the number of periods between the cohort month and the month of the purchase.
###Code
df_cohort = retail1.groupby(['cohort', 'order_month']) \
.agg(n_customers=('CustomerID', 'nunique')) \
.reset_index(drop=False)
from operator import attrgetter
df_cohort['period_number'] = (df_cohort.order_month - df_cohort.cohort).apply(attrgetter('n'))
df_cohort.head(14)
###Output
_____no_output_____
###Markdown
Now we will pivot the df_cohort table in a way that each row contains information about a given cohort and each column contains values for certain period.
###Code
cohort_pivot = df_cohort.pivot_table(index = 'cohort',
columns = 'period_number',
values = 'n_customers')
###Output
_____no_output_____
###Markdown
To obtain the retention matrix, we need to divide the values in each row by the row's first value, which is the cohort size: all customers who made their first purchase in the given month.
###Code
cohort_size = cohort_pivot.iloc[:,0]
retention_matrix = cohort_pivot.divide(cohort_size, axis = 0)
###Output
_____no_output_____
###Markdown
We plot the retention matrix as a heatmap. Additionally, we wanted to include extra information regarding the cohort size. That is why we in fact created two heatmaps, where the one indicating the cohort size uses a white-only colormap (no coloring at all).
###Code
import matplotlib.colors as mcolors
with sns.axes_style("white"):
fig, ax = plt.subplots(1, 2, figsize=(12, 8), sharey=True, gridspec_kw={'width_ratios': [1, 11]})
# retention matrix
sns.heatmap(retention_matrix,
mask=retention_matrix.isnull(),
annot=True,
fmt='.0%',
cmap='RdYlGn',
ax=ax[1])
ax[1].set_title('Monthly Cohorts: User Retention', fontsize=16)
ax[1].set(xlabel='no. of periods',
ylabel='')
# cohort size
cohort_size_df = pd.DataFrame(cohort_size).rename(columns={0: 'cohort_size'})
white_cmap = mcolors.ListedColormap(['white'])
sns.heatmap(cohort_size_df,
annot=True,
cbar=False,
fmt='g',
cmap=white_cmap,
ax=ax[0])
fig.tight_layout()
###Output
_____no_output_____
###Markdown
After going through the heatmap it can be inferred that the 2010-12 cohort had the best retention, with 27% in the 12th period and an average of around 34% throughout the year, whereas the second cohort shows a sharp drop in retention in the final period and averages around 26% over the whole period. Horizontally, retention increases gradually except for the final sharp decline. All in all, the cohorts of the succeeding months show a gradual decline in retention rate, but the average retention rate in the final period is around 10% for all cohorts except the first. Data Preparation for RFM analysis. We are going to analyse the customers based on the 3 factors below:- R (Recency): Number of days since last purchase- F (Frequency): Number of transactions- M (Monetary): Total amount of transactions (revenue contributed)
###Code
# New Attribute : Monetary
retail['Amount'] = retail['Quantity']*retail['UnitPrice']
rfm_m = retail.groupby('CustomerID')['Amount'].sum()
rfm_m = rfm_m.reset_index()
rfm_m.shape
rfm_m.head()
# New Attribute : Frequency
rfm_f = retail.groupby('CustomerID')['InvoiceNo'].count()
rfm_f = rfm_f.reset_index()
rfm_f.columns = ['CustomerID', 'Frequency']
rfm_f.head()
# Merging the two dfs
rfm = pd.merge(rfm_m, rfm_f, on='CustomerID', how='inner')
rfm.head()
# New Attribute : Recency
# Convert 'InvoiceDate' to datetime datatype.
retail['InvoiceDate'] = pd.to_datetime(retail['InvoiceDate'],format='%d-%m-%Y %H:%M')
retail.info()
# Compute the maximum date to know the last transaction date
max_date = max(retail['InvoiceDate'])
max_date
# Compute the difference between max date and transaction date
retail['Diff'] = max_date - retail['InvoiceDate']
retail
# Compute last transaction date to get the recency of customers
rfm_p = retail.groupby('CustomerID')['Diff'].min()
rfm_p = rfm_p.reset_index()
rfm_p.head()
# Extract number of days only
rfm_p['Diff'] = rfm_p['Diff'].dt.days
rfm_p.head()
# Merging tha dataframes to get the final RFM dataframe
rfm = pd.merge(rfm, rfm_p, on='CustomerID', how='inner')
rfm.columns = ['CustomerID', 'Amount', 'Frequency', 'Recency']
rfm.head()
x=[]
for i in rfm.Amount:
if i<=rfm.Amount.quantile(.25):
x.append(1)
elif i<=rfm.Amount.quantile(0.50):
x.append(2)
elif i<=rfm.Amount.quantile(0.75):
x.append(3)
elif i<=rfm.Amount.quantile(1.0):
x.append(4)
rfm["Amount_star"]=x
rfm
y=[]
for i in rfm.Frequency:
if i<=rfm.Frequency.quantile(.25):
y.append(1)
elif i<=rfm.Frequency.quantile(0.50):
y.append(2)
elif i<=rfm.Frequency.quantile(0.75):
y.append(3)
elif i<=rfm.Frequency.quantile(1.0):
y.append(4)
rfm["Frequency_star"]=y
rfm
z=[]
for i in rfm.Recency:
if i<=rfm.Recency.quantile(.25):
z.append(4)
elif i<=rfm.Recency.quantile(0.50):
z.append(3)
elif i<=rfm.Recency.quantile(0.75):
z.append(2)
elif i<=rfm.Recency.quantile(1.0):
z.append(1)
rfm['Recency_star']=z
rfm
rfm['rfm_score']=rfm.Amount_star+rfm.Frequency_star+rfm.Recency_star
rfm
rfm['rfm_score%']=round(((rfm.rfm_score/12)*100),2) #this I have created to have better clarity while analysing.
rfm
rfm['rfm_segment']=rfm.Amount_star.astype(str) + "-" + rfm.Frequency_star.astype(str) + "-" + rfm.Recency_star.astype(str)
rfm
rfm['rfm_segment1']=rfm.Amount_star.astype(str) + rfm.Frequency_star.astype(str) + rfm.Recency_star.astype(str)
rfm
rfm.rfm_segment.value_counts().head(60) # this shows all the unique combination of star ratings.
#((rfm.Amount_star== 4).sum()/retail.CustomerID.nunique())*100
###Output
_____no_output_____
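As a side note, the three quantile loops above can be written more compactly; a hedged sketch using pandas.qcut (illustrative only and not used below: qcut's bin edges can differ slightly from the manual quantile comparisons, Frequency needs a rank to break tied values, and the *_alt column names are hypothetical):

```python
# alternative star scoring with pd.qcut (sketch)
rfm['Amount_star_alt'] = pd.qcut(rfm.Amount, 4, labels=[1, 2, 3, 4]).astype(int)
rfm['Frequency_star_alt'] = pd.qcut(rfm.Frequency.rank(method='first'), 4, labels=[1, 2, 3, 4]).astype(int)
rfm['Recency_star_alt'] = pd.qcut(rfm.Recency, 4, labels=[4, 3, 2, 1]).astype(int)  # reversed: recent = better
```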
###Markdown
Inferences from RFM segmentation:-> From the above rfm_segment we can easily filter and infer that any value starting with 4 or 3 (that is, at the hundreds place) marks a high revenue generating customer, so these customers should be made to feel valued.-> Any customer with an rfm_score% of 75 or higher can be considered a high value customer even if they have not made more purchases, because they can be approached to gain insight into what offers lure them to buy more, and thus a proper strategy can be created to target those groups with offers or discounts.-> Best Customers: this group consists of those customers found in M-4, F-4 and R-4, meaning that they transacted recently, do so often and spend more than other customers. A shortened notation for this segment is 4-4-4.-> High-spending New Customers: this group consists of those customers in 4-1-1 and 4-2-1. These are customers who transacted only once, but very recently, and they spent a lot.-> Lowest-Spending Active Loyal Customers: this group consists of those customers in segments 1-1-3 and 1-1-4 (they transacted recently and do so often, but spend the least).-> Churned Best Customers: this segment consists of those customers in groups 4-1-1, 4-1-2, 4-2-1 and 4-2-2 (they transacted frequently and spent a lot, but it's been a long time since they've transacted).-> Best Customers: communications with this group should make them feel valued and appreciated. These customers likely generate a disproportionately high percentage of overall revenue, so keeping them happy should be a top priority. Further analysing their individual preferences and affinities will provide additional information.-> High-spending New Customers: it is always a good idea to carefully incubate all new customers, but because these new customers spent a lot on their first purchase, it's even more important. As with the Best Customers group, it's important to make them feel valued and appreciated, and to give them terrific incentives to continue interacting with the client.-> Lowest-Spending Active Loyal Customers: these repeat customers are active and loyal, but they are low spenders. Marketers should create campaigns for this group that make them feel valued, and incentivise them to increase their spend levels. As loyal customers, it often also pays to reward them with special offers if they spread the word about the brand to their friends, e.g. via social networks.-> Churned Best Customers: these are valuable customers who stopped transacting a long time ago. While it's often challenging to re-engage churned customers, the high value of these customers makes it worthwhile trying. As with the Best Customers group, it's important to communicate with them on the basis of their specific preferences, as known from earlier transaction data. Data Preparation for Algorithm. There are 2 types of outliers and we will treat outliers as they can skew our dataset:- Statistical- Domain specific
###Code
# Outlier Analysis of Amount Frequency and Recency
attributes = ['Amount','Frequency','Recency']
plt.rcParams['figure.figsize'] = [10,8]
sns.boxplot(data = rfm[attributes], orient="v", palette="Set2" ,whis=1.5,saturation=1, width=0.7)
plt.title("Outliers Variable Distribution", fontsize = 14, fontweight = 'bold')
plt.ylabel("Range", fontweight = 'bold')
plt.xlabel("Attributes", fontweight = 'bold')
# Removing (statistical) outliers for Amount
Q1 = rfm.Amount.quantile(0.05)
Q3 = rfm.Amount.quantile(0.95)
IQR = Q3 - Q1
rfm = rfm[(rfm.Amount >= Q1 - 1.5*IQR) & (rfm.Amount <= Q3 + 1.5*IQR)]
# Removing (statistical) outliers for Recency
Q1 = rfm.Recency.quantile(0.05)
Q3 = rfm.Recency.quantile(0.95)
IQR = Q3 - Q1
rfm = rfm[(rfm.Recency >= Q1 - 1.5*IQR) & (rfm.Recency <= Q3 + 1.5*IQR)]
# Removing (statistical) outliers for Frequency
Q1 = rfm.Frequency.quantile(0.05)
Q3 = rfm.Frequency.quantile(0.95)
IQR = Q3 - Q1
rfm = rfm[(rfm.Frequency >= Q1 - 1.5*IQR) & (rfm.Frequency <= Q3 + 1.5*IQR)]
###Output
_____no_output_____
###Markdown
Standardizing the data. It is extremely important to rescale the variables so that they have a comparable scale. There are two common ways of rescaling:1. Min-Max scaling 2. Standardisation (mean-0, sigma-1) Here, we will use Standardisation scaling.
###Code
# Rescaling the attributes
rfm_df = rfm[['Amount', 'Frequency', 'Recency']]
# Instantiate
scaler = StandardScaler()
# fit_transform
rfm_df_scaled = scaler.fit_transform(rfm_df)
rfm_df_scaled.shape
rfm_df_scaled = pd.DataFrame(rfm_df_scaled)
rfm_df_scaled.columns = ['Amount', 'Frequency', 'Recency']
rfm_df_scaled.head()
###Output
_____no_output_____
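For intuition, standardisation here just applies $z = (x - \mu)/\sigma$ column-wise; a quick sanity check of that equivalence (illustrative snippet; note that StandardScaler uses the population standard deviation, i.e. ddof=0):

```python
# manual standardisation should match the scaler output
manual = (rfm_df - rfm_df.mean()) / rfm_df.std(ddof=0)
print(np.allclose(manual.values, rfm_df_scaled.values))  # expected: True
```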
###Markdown
Building the Model K-Means Clustering K-means clustering is one of the simplest and most popular unsupervised machine learning algorithms. The algorithm works as follows:- First we initialize k points, called means, randomly.- We categorize each item to its closest mean and we update the mean's coordinates, which are the averages of the items categorized in that mean so far.- We repeat the process for a given number of iterations and at the end, we have our clusters.
###Code
# k-means with some arbitrary k
kmeans = KMeans(n_clusters=3, max_iter=50)
kmeans.fit(rfm_df_scaled)
rfm_df_scaled
kmeans.labels_
###Output
_____no_output_____
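To make the three steps above concrete, here is a minimal NumPy-only sketch of the same loop (illustrative; the analysis itself relies on scikit-learn's KMeans, and this toy version assumes no cluster ever becomes empty):

```python
def simple_kmeans(X, k, n_iter=50, seed=0):
    rng = np.random.default_rng(seed)
    # step 1: initialise k means at randomly chosen data points
    centroids = X[rng.choice(len(X), size=k, replace=False)]
    for _ in range(n_iter):
        # step 2: assign each item to its closest mean
        dists = np.linalg.norm(X[:, None, :] - centroids[None, :, :], axis=2)
        labels = dists.argmin(axis=1)
        # step 3: move each mean to the average of its assigned items
        centroids = np.array([X[labels == j].mean(axis=0) for j in range(k)])
    return labels, centroids

labels, centroids = simple_kmeans(rfm_df_scaled.values, k=3)
```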
###Markdown
Deciding the optimum number of clusters to be formed. Using Elbow Curve to get the right number of Clusters.A fundamental step for any unsupervised algorithm is to determine the optimal number of clusters into which the data may be clustered. The Elbow Method is one of the most popular methods to determine this optimal value of k.
###Code
# Elbow-curve/SSD
ssd = []
range_n_clusters = [2, 3, 4, 5, 6, 7, 8]
for num_clusters in range_n_clusters:
kmeans = KMeans(n_clusters=num_clusters, max_iter=50)
kmeans.fit(rfm_df_scaled)
ssd.append(kmeans.inertia_)
# plot the SSDs for each n_clusters
plt.plot(range_n_clusters,ssd)
###Output
_____no_output_____
###Markdown
Exporting dataframe to excel for data visualization in Tableau
###Code
import openpyxl
import xlsxwriter
error_cluster= pd.DataFrame(list(zip(range_n_clusters,ssd)), columns=["no.of clusters","Error"])
error_cluster.to_excel("error_cluster.xlsx")
###Output
_____no_output_____
###Markdown
Silhouette Analysis$$\text{silhouette score}=\frac{p-q}{\max(p,q)}$$where $p$ is the mean distance to the points in the nearest cluster that the data point is not a part of, and $q$ is the mean intra-cluster distance to all the points in its own cluster.* The value of the silhouette score lies between -1 and 1. * A score closer to 1 indicates that the data point is very similar to the other data points in its cluster, * A score closer to -1 indicates that the data point is not similar to the data points in its cluster.
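As a small worked example: a point with mean intra-cluster distance $q = 1.0$ and mean distance to the nearest other cluster $p = 3.0$ has a silhouette score of $(3.0 - 1.0)/\max(3.0, 1.0) \approx 0.67$, i.e. it sits comfortably inside its own cluster.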
###Code
# Silhouette analysis
range_n_clusters = [2, 3, 4, 5, 6, 7, 8]
for num_clusters in range_n_clusters:
# intialise kmeans
kmeans = KMeans(n_clusters=num_clusters, max_iter=50)
kmeans.fit(rfm_df_scaled)
cluster_labels = kmeans.labels_
# silhouette score
silhouette_avg = silhouette_score(rfm_df_scaled, cluster_labels)
print("For n_clusters={0}, the silhouette score is {1}".format(num_clusters, silhouette_avg))
# Final model with k=3
kmeans = KMeans(n_clusters=3, max_iter=50)
kmeans.fit(rfm_df_scaled)
kmeans.labels_
# assign the label
rfm['Cluster_Id'] = kmeans.labels_
rfm.head(60)
# Box plot to visualize Cluster Id vs Amount
sns.boxplot(x='Cluster_Id', y='Amount', data=rfm)
# Box plot to visualize Cluster Id vs Frequency
sns.boxplot(x='Cluster_Id', y='Frequency', data=rfm)
# Box plot to visualize Cluster Id vs Recency
sns.boxplot(x='Cluster_Id', y='Recency', data=rfm)
###Output
_____no_output_____
###Markdown
Final Analysis Inferences: K-Means clustering with 3 cluster ids- Customers with Cluster Id 1 are frequent, recent and high-spending buyers, and should be considered the best customer group.- Customers with Cluster Id 0 have comparatively good monetary value, frequency and recency across all transactions under observation; this customer group can be a target for conversion into the high-spending cohort.- Customers with Cluster Id 2 are not recent buyers and their frequency is also low, hence they are of least importance from a business point of view.
###Code
rfm
###Output
_____no_output_____
###Markdown
Exporting files from dataframe to excel for visualization and dashboard building in Tableau.
###Code
import openpyxl
import xlsxwriter
rfm.to_excel("rfm.xlsx")
###Output
_____no_output_____ |
examples/Practical_tutorial/ELS_practical.ipynb | ###Markdown
The ELS matching procedure. This practical is based on the concepts introduced for optimising electrical contacts in photovoltaic cells. The procedure was published in [J. Mater. Chem. C (2016)](http://pubs.rsc.org/en/content/articlehtml/2016/tc/c5tc04091d).In this practical we screen electrical contact materials for CH$_3$NH$_3$PbI$_3$. There are three main steps:* Electronic matching of band energies* Lattice matching of surface vectors * Site matching of under-coordinated surface atoms 1. Electronic matching Background. Effective charge extraction requires a low barrier to electron or hole transport across the interface. This barrier is exponential in the discontinuity of the band energies across the interface. To a first approximation the offset or discontinuity can be estimated by comparing the ionisation potentials (IPs) or electron affinities (EAs) of the two materials; this is known as [Anderson's rule](https://en.wikipedia.org/wiki/Anderson%27s_rule).Here we have collected a database of 173 measured or estimated semiconductor IPs and EAs (`CollatedData.txt`). We use it as the first step in our screening. The screening is performed by the script `scan_energies.py`. We enforce several criteria:* The IP and EA of the target material are supplied using the flags `-i` and `-e`* The IP/EA must be within a certain range of the target material; by default this is set to 0.5 eV, but it can be controlled by the flag `-w`. The window is the full width, so the maximum offset is 0.5*window* A selective contact should be a semiconductor, so we apply a criterion based on its band gap. If the gap is too large we consider that it would be an insulator. By default this is set to 4.0 eV and is controlled by the flag `-g`
###Code
%%bash
cd Electronic/
python scan_energies.py -h
###Output
_____no_output_____
###Markdown
Now let's do a proper scan* IP = 5.7 eV* EA = 4.0 eV* Window = 0.5 eV (full width, i.e. a maximum offset of 0.25 eV)* Insulating threshold = 4.0 eV
###Code
%%bash
cd Electronic/
python scan_energies.py -i 5.7 -e 4.0 -w 0.5 -g 4.0
###Output
_____no_output_____
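The screening itself boils down to a window filter on the tabulated IPs and EAs plus a band-gap check; a minimal sketch of that logic (assumptions: the tuples below use illustrative values, and the real `scan_energies.py` may parse `CollatedData.txt` differently):

```python
def screen(materials, ip_target, ea_target, window=0.5, gap_max=4.0):
    """materials: iterable of (name, IP, EA) tuples in eV."""
    hits = []
    for name, ip, ea in materials:
        gap = ip - ea                    # crude band gap estimate
        if not 0 < gap <= gap_max:       # must look like a semiconductor
            continue
        # the window is the full width, so the maximum offset is window/2
        if abs(ip - ip_target) <= window / 2 or abs(ea - ea_target) <= window / 2:
            hits.append(name)
    return hits

print(screen([('NiO', 5.4, 1.8), ('TiO2', 7.8, 4.2)], ip_target=5.7, ea_target=4.0))
```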
###Markdown
2. Lattice matching Background. For stable interfaces there should be an integer relation between the lattice constants of the two surfaces in contact, which allows for perfect matching with minimal strain. Generally a strain value of ~3% is considered acceptable; above this the interface will be incoherent.This section uses the [ASE package](https://wiki.fysik.dtu.dk/ase/) to construct the low index surfaces of the materials identified in the electronic step, as well as those of the target material, and the code `LatticeMatch.py` to identify optimal matches.First we need `.cif` files of the materials obtained from the electronic matching. These are obtained from the [Materials Project website](https://www.materialsproject.org). Most of the `.cif` files are there already, but we should add Cu$_2$O and GaN, just for practice. Lattice matching routine. The lattice matching routine involves obtaining reduced cells for each surface and looking for multiples of each side which match. The procedure is described in more detail in [our paper](http://pubs.rsc.org/en/content/articlehtml/2016/tc/c5tc04091d).The actual clever stuff of the algorithm comes from a paper by Zur and McGill in [J. Appl. Physics (1984)](http://scitation.aip.org/content/aip/journal/jap/55/2/10.1063/1.333084). The script. The work is done by a python script called `LatticeMatch.py`. As input it reads `.cif` files. It takes a number of flags: * `-a` the file containing the crystallographic information of the first material* `-b` the file containing the crystallographic information of the second material * `-s` the strain threshold above which to cut off, defaults to 0.05* `-l` the maximum number of times to expand either surface to find matching conditions, defaults to 5We will run the script in a bash loop to iterate over all interfaces of our contact materials with the (100) and (110) surfaces of pseudo-cubic CH$_3$NH$_3$PbI$_3$. Note that I have made all lattice parameters of CH$_3$NH$_3$PbI$_3$ exactly equal; this is to facilitate the removal of duplicate surfaces by the script.
###Code
%%bash
cd Lattice/
for file in *.cif; do python LatticeMatch.py -a MAPI/CH3NH3PbI3.cif -b $file -s 0.03; done
###Output
_____no_output_____
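At its core, the vector matching is a search over integer multiples of the two surface lattice vectors; a stripped-down one-dimensional sketch of the idea (illustrative only: the real Zur and McGill algorithm also reduces the cells and matches the angles between vectors):

```python
def match_1d(a, b, max_mult=5, strain_max=0.03):
    """Find integer multiples (m, n) with m*a matching n*b within the strain threshold."""
    matches = []
    for m in range(1, max_mult + 1):
        for n in range(1, max_mult + 1):
            strain = (m * a - n * b) / (n * b)
            if abs(strain) <= strain_max:
                matches.append((m, n, round(strain, 4)))
    return matches

print(match_1d(6.29, 4.19))  # e.g. 2 x 6.29 vs 3 x 4.19 -> ~0.1% strain
```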
###Markdown
3. Site matching. So far the interface matching considered only the magnitude of the lattice vectors. It would be nice to be able to include some measure of how well the dangling bonds can passivate one another. We do this by calculating the site overlap. Basically, we determine the undercoordinated surface atoms on each side and project their positions into a 2D plane. We then lay the planes over each other and slide them around until there is maximum coincidence. We calculate the overlap factor from $$ ASO = \frac{2S_C}{S_A + S_B}$$where $S_C$ is the number of overlapping sites in the interface, and $S_A$ and $S_B$ are the number of sites in each surface. The script. This section can be run in a stand-alone script called `csl.py`. It relies on a library of the 2D projections of lattice sites from different surfaces, which is called `surface_points.py`. Currently this contains a number of common material types, but sometimes it must be expanded as new materials are identified from the electronic and lattice steps.`csl.py` takes the following input parameters:* `-a` The first material to consider* `-b` The second material to consider* `-x` The first material's Miller index to consider, format: 001* `-y` The second material's Miller index to consider, format: 001* `-u` The first material's multiplicity, format: 2,2* `-v` The second material's multiplicity, format: 2,2We can run it for one example from the previous step, let's say GaN (010)x(2,5) with CH$_3$NH$_3$PbI$_3$ (110)x(1,3)
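As a worked example of the overlap factor: an interface with $S_A = 4$ and $S_B = 6$ under-coordinated surface sites, of which $S_C = 3$ coincide, gives $ASO = 2 \times 3 / (4 + 6) = 0.6$.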
###Code
%%bash
cd Site/
python csl.py -a CH3NH3PbI3 -b GaN -x 110 -y 010 -u 1,3 -v 2,5
###Output
_____no_output_____
###Markdown
All together. The lattice and site examples above give a feel for what is going on. For a proper screening procedure it would be nice to be able to run them together. That's exactly what happens with the `LatticeSite.py` script. It uses a new class `Pair` to store and pass information about the interface pairings. This includes the material names, Miller indices of matching surfaces, strains, multiplicities etc.The `LatticeSite.py` script takes the same variables as `LatticeMatch.py`. It just takes a little longer to run, so a bit of patience is required.This script outputs the standard pair information as well as the site matching factor, which is calculated as$$ \frac{100\times ASO}{1 + |\epsilon|}$$where the $ASO$ was defined above, and $\epsilon$ is the average of the $u$ and $v$ strains. The number is a measure of the mechanical stability of an interface. A perfect interface of a material with itself would have a factor of 100.Where lattices match but no information on the structure of the surface exists, it is flagged up. You can always add new surfaces as required.
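Continuing the worked example from the site-matching step: an overlap of $ASO = 0.6$ combined with a mean strain of $\epsilon = 0.02$ gives a site matching factor of $100 \times 0.6 / 1.02 \approx 58.8$.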
###Code
%%bash
cd Site/
for file in *cif; do python LatticeSite.py -a MAPI/CH3NH3PbI3.cif -b $file -s 0.03; done
###Output
_____no_output_____ |
notebooks/figure_1/figure_1.ipynb | ###Markdown
Read and process image
###Code
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable

image = plt.imread('./c0911b81ee5266fa.jpg')
image = np.sum(image, axis=2)
image = image[:np.min(image.shape),:np.min(image.shape)]
image = image.astype('float')
image = image[180:-181,180:-181 ]
image = image - image.mean()
image = image - image.min()
image = image/image.max()
image.shape
###Output
_____no_output_____
###Markdown
Show image and its FT power spectrum
###Code
# radial_mask is assumed to be defined in an earlier cell (not shown in this dump);
# it returns a boolean mask selecting pixels on a ring of the given radius.
r = np.arange(180).reshape(180,1)
radial_masks = np.apply_along_axis(radial_mask, 1, r, int(image.shape[0]/2), int(image.shape[0]/2), \
np.arange(0, image.shape[0]), np.arange(0, image.shape[0]), 1 )
radial_masks.shape
ft_2d = np.fft.fft2(image)
ft_2d = np.fft.fftshift(ft_2d)
ft_2d = np.abs(ft_2d)
ft_2d[radial_masks[170]] = 0
ft_2d[radial_masks[85]] = 0
ft_2d[radial_masks[42]] = 0
fig = plt.figure(figsize=(2*1.75, 1.7))
#subplot 1
ax1 = plt.Axes(fig, [0.03, 0.02, 0.38, 0.95])
fig.add_axes(ax1)
ax1.imshow(image, cmap='gray')
plt.xticks([])
plt.yticks([])
ax1.set_title('A', size=8)
#subplot 2
ax2 = plt.Axes(fig, [0.44, 0.0, 0.415, 0.99])
fig.add_axes(ax2)
im = ax2.imshow(np.log2(ft_2d), cmap='twilight_shifted')
plt.xticks([])
plt.yticks([])
ax2.set_title('B')
divider = make_axes_locatable(ax2)
cax = divider.new_horizontal(size="5%", pad=0.05)
fig = ax2.get_figure()
fig.add_axes(cax)
cbar = plt.colorbar(im ,cax=cax)
cbar.set_label(r'$Log_2$ magnitude', rotation=270,labelpad=10)
fig.savefig('figure_1.png', dpi=300)
###Output
/Users/rzepiela/anaconda/envs/py37_tf2/lib/python3.7/site-packages/ipykernel_launcher.py:15: RuntimeWarning: divide by zero encountered in log2
from ipykernel import kernelapp as app
|
examples/reference/elements/bokeh/Bars.ipynb | ###Markdown
Title Bars Element Dependencies Bokeh Backends Bokeh Matplotlib Plotly
###Code
import numpy as np
import holoviews as hv
hv.extension('bokeh')
###Output
_____no_output_____
###Markdown
The ``Bars`` Element uses bars to show discrete, numerical comparisons across categories. One axis of the chart shows the specific categories being compared and the other axis represents a continuous value.Bars may also be grouped or stacked by supplying a second key dimension representing sub-categories. Therefore the ``Bars`` Element expects a tabular data format with one or two key dimensions (``kdims``) and one or more value dimensions (``vdims``). See the [Tabular Datasets](../../../user_guide/08-Tabular_Datasets.ipynb) user guide for supported data formats, which include arrays, pandas dataframes and dictionaries of arrays.
###Code
data = [('one',8),('two', 10), ('three', 16), ('four', 8), ('five', 4), ('six', 1)]
bars = hv.Bars(data, hv.Dimension('Car occupants'), 'Count')
bars
###Output
_____no_output_____
###Markdown
A ``Bars`` element can be sliced and selected like any other element:
###Code
bars[['one', 'two', 'three']] + bars[['four', 'five', 'six']]
###Output
_____no_output_____
###Markdown
It is possible to define an explicit ordering for a set of Bars by explicitly declaring `Dimension.values` either in the Dimension constructor or using the `.redim.values()` approach:
###Code
occupants = hv.Dimension('Car occupants', values=['three', 'two', 'four', 'one', 'five', 'six'])
hv.Bars(data, occupants, 'Count') # or using .redim.values(**{'Car Occupants': ['three', 'two', 'four', 'one', 'five', 'six']})
###Output
_____no_output_____
###Markdown
``Bars`` support nested categorical groupings, e.g. here we will create a random sample of pets sub-divided by male and female:
###Code
N = 100
pets = ['Cat', 'Dog', 'Hamster', 'Rabbit']
genders = ['Female', 'Male']
pets_sample = np.random.choice(pets, N)
gender_sample = np.random.choice(genders, N)
bars = hv.Bars((pets_sample, gender_sample, np.ones(N)), ['Pets', 'Gender']).aggregate(function=np.sum)
bars.opts(width=500)
###Output
_____no_output_____
###Markdown
Just as before we can provide an explicit ordering by declaring the `Dimension.values`. Alternatively we can also make use of the `.sort` method; internally `Bars` will use topological sorting to ensure consistent ordering.
###Code
bars.redim.values(Pets=pets, Gender=genders) + bars.sort()
###Output
_____no_output_____
###Markdown
To drop the second level of tick labels we can set `multi_level=False`, which will indicate the groupings using a legend instead:
###Code
bars.sort() + bars.clone().opts(multi_level=False)
###Output
_____no_output_____
###Markdown
Lastly, Bars can also be stacked by setting `stacked=True`:
###Code
bars.opts(stacked=True)
###Output
_____no_output_____
###Markdown
Title Bars Element Dependencies Bokeh Backends Bokeh Matplotlib
###Code
import numpy as np
import holoviews as hv
hv.extension('bokeh')
###Output
_____no_output_____
###Markdown
The ``Bars`` Element uses bars to show discrete, numerical comparisons across categories. One axis of the chart shows the specific categories being compared and the other axis represents a continuous value.Bars may also be grouped or stacked by supplying a second key dimension representing sub-categories. Therefore the ``Bars`` Element expects a tabular data format with one or two key dimensions (``kdims``) and one or more value dimensions (``vdims``). See the [Tabular Datasets](../../../user_guide/07-Tabular_Datasets.ipynb) user guide for supported data formats, which include arrays, pandas dataframes and dictionaries of arrays.
###Code
data = [('one',8),('two', 10), ('three', 16), ('four', 8), ('five', 4), ('six', 1)]
bars = hv.Bars(data, hv.Dimension('Car occupants'), 'Count')
bars
###Output
_____no_output_____
###Markdown
You can 'slice' a ``Bars`` element by selecting categories as follows:
###Code
bars[['one', 'two', 'three']] + bars[['four', 'five', 'six']]
###Output
_____no_output_____
###Markdown
``Bars`` support nested categorical grouping as well as stacking if more than one key dimension is defined; to switch between the two set ``stacked=True/False``:
###Code
%%opts Bars.Stacked [stacked=True]
from itertools import product
np.random.seed(3)
index, groups = ['A', 'B'], ['a', 'b']
keys = product(index, groups)
bars = hv.Bars([k+(np.random.rand()*100.,) for k in keys],
['Index', 'Group'], 'Count')
bars.relabel(group='Grouped') + bars.relabel(group='Stacked')
###Output
_____no_output_____
###Markdown
Title Bars Element Dependencies Bokeh Backends Bokeh Matplotlib
###Code
import numpy as np
import holoviews as hv
hv.extension('bokeh')
###Output
_____no_output_____
###Markdown
The ``Bars`` Element uses bars to show discrete, numerical comparisons across categories. One axis of the chart shows the specific categories being compared and the other axis represents a continuous value.Bars may also be stacked by supplying a second key dimension representing sub-categories. Therefore the ``Bars`` Element expects a tabular data format with one or two key dimensions and one value dimension. See the [Tabular Datasets](../../../user_guide/07-Tabular_Datasets.ipynb) user guide for supported data formats, which include arrays, pandas dataframes and dictionaries of arrays.
###Code
data = [('one',8),('two', 10), ('three', 16), ('four', 8), ('five', 4), ('six', 1)]
bars = hv.Bars(data, kdims=[hv.Dimension('Car occupants')], vdims=['Count'])
bars
###Output
_____no_output_____
###Markdown
You can 'slice' a ``Bars`` element by selecting categories as follows:
###Code
bars[['one', 'two', 'three']] + bars[['four', 'five', 'six']]
###Output
_____no_output_____
###Markdown
``Bars`` supports stacking just like the ``Area`` element, as well as grouping by a second key dimension. To activate grouping and stacking, set the ``group_index`` or ``stack_index`` to the dimension name or dimension index:
###Code
%%opts Bars.Grouped [group_index='Group'] Bars.Stacked [stack_index='Group']
from itertools import product
np.random.seed(3)
index, groups = ['A', 'B'], ['a', 'b']
keys = product(index, groups)
bars = hv.Bars([k+(np.random.rand()*100.,) for k in keys],
kdims=['Index', 'Group'], vdims=['Count'])
bars.relabel(group='Grouped') + bars.relabel(group='Stacked')
###Output
_____no_output_____
###Markdown
**Title**: Bars Element  **Dependencies**: Bokeh  **Backends**: Bokeh, Matplotlib
###Code
import numpy as np
import holoviews as hv
hv.extension('bokeh')
###Output
_____no_output_____
###Markdown
The ``Bars`` Element uses bars to show discrete, numerical comparisons across categories. One axis of the chart shows the specific categories being compared and the other axis represents a continuous value. Bars may also be grouped or stacked by supplying a second key dimension representing sub-categories. Therefore the ``Bars`` Element expects a tabular data format with one or two key dimensions (``kdims``) and one or more value dimensions (``vdims``). See the [Tabular Datasets](../../../user_guide/08-Tabular_Datasets.ipynb) user guide for supported data formats, which include arrays, pandas dataframes and dictionaries of arrays.
###Code
data = [('one',8),('two', 10), ('three', 16), ('four', 8), ('five', 4), ('six', 1)]
bars = hv.Bars(data, hv.Dimension('Car occupants'), 'Count')
bars
###Output
_____no_output_____
###Markdown
You can 'slice' a ``Bars`` element by selecting categories as follows:
###Code
bars[['one', 'two', 'three']] + bars[['four', 'five', 'six']]
###Output
_____no_output_____
###Markdown
``Bars`` supports nested categorical grouping as well as stacking if more than one key dimension is defined; to switch between the two, set ``stacked=True/False``:
###Code
from itertools import product
np.random.seed(3)
index, groups = ['A', 'B'], ['a', 'b']
keys = product(index, groups)
bars = hv.Bars([k+(np.random.rand()*100.,) for k in keys],
['Index', 'Group'], 'Count')
stacked = bars.opts(stacked=True, clone=True)
bars.relabel(group='Grouped') + stacked.relabel(group='Stacked')
###Output
_____no_output_____
###Markdown
**Title**: Bars Element  **Dependencies**: Bokeh  **Backends**: [Bokeh](./Bars.ipynb), [Matplotlib](../matplotlib/Bars.ipynb), [Plotly](../plotly/Bars.ipynb)
###Code
import numpy as np
import holoviews as hv
hv.extension('bokeh')
###Output
_____no_output_____
###Markdown
The ``Bars`` Element uses bars to show discrete, numerical comparisons across categories. One axis of the chart shows the specific categories being compared and the other axis represents a continuous value. Bars may also be grouped or stacked by supplying a second key dimension representing sub-categories. Therefore the ``Bars`` Element expects a tabular data format with one or two key dimensions (``kdims``) and one or more value dimensions (``vdims``). See the [Tabular Datasets](../../../user_guide/08-Tabular_Datasets.ipynb) user guide for supported data formats, which include arrays, pandas dataframes and dictionaries of arrays.
###Code
data = [('one',8),('two', 10), ('three', 16), ('four', 8), ('five', 4), ('six', 1)]
bars = hv.Bars(data, hv.Dimension('Car occupants'), 'Count')
bars
###Output
_____no_output_____
###Markdown
A ``Bars`` element can be sliced and selected on like any other element:
###Code
bars[['one', 'two', 'three']] + bars[['four', 'five', 'six']]
###Output
_____no_output_____
###Markdown
It is possible to define an explicit ordering for a set of Bars by explicitly declaring `Dimension.values`, either in the Dimension constructor or using the `.redim.values()` approach:
###Code
occupants = hv.Dimension('Car occupants', values=['three', 'two', 'four', 'one', 'five', 'six'])
# or using .redim.values(**{'Car Occupants': ['three', 'two', 'four', 'one', 'five', 'six']})
hv.Bars(data, occupants, 'Count')
###Output
_____no_output_____
###Markdown
``Bars`` supports nested categorical groupings; for example, here we will create a random sample of pets sub-divided by male and female:
###Code
samples = 100
pets = ['Cat', 'Dog', 'Hamster', 'Rabbit']
genders = ['Female', 'Male']
pets_sample = np.random.choice(pets, samples)
gender_sample = np.random.choice(genders, samples)
bars = hv.Bars((pets_sample, gender_sample, np.ones(samples)), ['Pets', 'Gender']).aggregate(function=np.sum)
bars.opts(width=500)
###Output
_____no_output_____
###Markdown
Just as before we can provide an explicit ordering by declaring the `Dimension.values`. Alternatively we can make use of the `.sort` method; internally, `Bars` uses topological sorting to ensure consistent ordering.
###Code
bars.redim.values(Pets=pets, Gender=genders) + bars.sort()
###Output
_____no_output_____
###Markdown
To drop the second level of tick labels we can set `multi_level=False`, which will indicate the groupings using a legend instead:
###Code
bars.sort() + bars.clone().opts(multi_level=False)
###Output
_____no_output_____
###Markdown
Lastly, Bars can also be stacked by setting `stacked=True`:
###Code
bars.opts(stacked=True)
###Output
_____no_output_____
###Markdown
**Title**: Bars Element  **Dependencies**: Bokeh  **Backends**: Bokeh, Matplotlib
###Code
import numpy as np
import holoviews as hv
hv.extension('bokeh')
###Output
_____no_output_____
###Markdown
The ``Bars`` Element uses bars to show discrete, numerical comparisons across categories. One axis of the chart shows the specific categories being compared and the other axis represents a continuous value. Bars may also be stacked by supplying a second key dimension representing sub-categories. Therefore the ``Bars`` Element expects a tabular data format with one or two key dimensions and one value dimension. See the [Tabular Datasets](../../../user_guide/07-Tabular_Datasets.ipynb) user guide for supported data formats, which include arrays, pandas dataframes and dictionaries of arrays.
###Code
data = [('one',8),('two', 10), ('three', 16), ('four', 8), ('five', 4), ('six', 1)]
bars = hv.Bars(data, hv.Dimension('Car occupants'), 'Count')
bars
###Output
_____no_output_____
###Markdown
You can 'slice' a ``Bars`` element by selecting categories as follows:
###Code
bars[['one', 'two', 'three']] + bars[['four', 'five', 'six']]
###Output
_____no_output_____
###Markdown
``Bars`` supports stacking just like the ``Area`` element, as well as grouping by a second key dimension. To activate grouping and stacking, set the ``group_index`` or ``stack_index`` to the dimension name or dimension index:
###Code
%%opts Bars.Grouped [group_index='Group'] Bars.Stacked [stack_index='Group']
from itertools import product
np.random.seed(3)
index, groups = ['A', 'B'], ['a', 'b']
keys = product(index, groups)
bars = hv.Bars([k+(np.random.rand()*100.,) for k in keys],
['Index', 'Group'], 'Count')
bars.relabel(group='Grouped') + bars.relabel(group='Stacked')
###Output
_____no_output_____
###Markdown
**Title**: Bars Element  **Dependencies**: Bokeh  **Backends**: [Bokeh](./Bars.ipynb), [Matplotlib](../matplotlib/Bars.ipynb), [Plotly](../plotly/Bars.ipynb)
###Code
import numpy as np
import pandas as pd
import holoviews as hv
hv.extension('bokeh')
###Output
_____no_output_____
###Markdown
The ``Bars`` Element uses bars to show discrete, numerical comparisons across categories. One axis of the chart shows the specific categories being compared and the other axis represents a continuous value. Bars may also be grouped or stacked by supplying a second key dimension representing sub-categories. Therefore the ``Bars`` Element expects a tabular data format with one or two key dimensions (``kdims``) and one or more value dimensions (``vdims``). See the [Tabular Datasets](../../../user_guide/08-Tabular_Datasets.ipynb) user guide for supported data formats, which include arrays, pandas dataframes and dictionaries of arrays.
###Code
data = [('one',8),('two', 10), ('three', 16), ('four', 8), ('five', 4), ('six', 1)]
bars = hv.Bars(data, hv.Dimension('Car occupants'), 'Count')
bars
###Output
_____no_output_____
###Markdown
We can achieve the same plot using a Pandas DataFrame: ```hv.Bars(pd.DataFrame(data, columns=['Car occupants', 'Count']))``` A ``Bars`` element can be sliced and selected on like any other element:
###Code
bars[['one', 'two', 'three']] + bars[['four', 'five', 'six']]
###Output
_____no_output_____
###Markdown
It is possible to define an explicit ordering for a set of Bars by explicitly declaring `Dimension.values`, either in the Dimension constructor or using the `.redim.values()` approach:
###Code
occupants = hv.Dimension('Car occupants', values=['three', 'two', 'four', 'one', 'five', 'six'])
# or using .redim.values(**{'Car Occupants': ['three', 'two', 'four', 'one', 'five', 'six']})
hv.Bars(data, occupants, 'Count')
###Output
_____no_output_____
###Markdown
``Bars`` also supports nested categorical groupings. Next we'll use a Pandas DataFrame to construct a random sample of pets sub-divided by male and female:
###Code
samples = 100
pets = ['Cat', 'Dog', 'Hamster', 'Rabbit']
genders = ['Female', 'Male']
pets_sample = np.random.choice(pets, samples)
gender_sample = np.random.choice(genders, samples)
count = np.random.randint(1, 5, size=samples)
df = pd.DataFrame({'Pets': pets_sample, 'Gender': gender_sample, 'Count': count})
df.head(2)
bars = hv.Bars(df, kdims=['Pets', 'Gender']).aggregate(function=np.sum)
bars.opts(width=500)
###Output
_____no_output_____
###Markdown
Just as before we can provide an explicit ordering by declaring the `Dimension.values`. Alternatively we can make use of the `.sort` method; internally, `Bars` uses topological sorting to ensure consistent ordering.
###Code
bars.redim.values(Pets=pets, Gender=genders) + bars.sort()
###Output
_____no_output_____
###Markdown
To drop the second level of tick labels we can set `multi_level=False`, which will indicate the groupings using a legend instead:
###Code
bars.sort() + bars.clone().opts(multi_level=False)
###Output
_____no_output_____
###Markdown
Lastly, Bars can also be stacked by setting `stacked=True`:
###Code
bars.opts(stacked=True)
###Output
_____no_output_____ |
Anomaly Detection/Multi-Objective Generative Adversarial Active Learning/MO_GAAL_MinMaxScaler.ipynb | ###Markdown
Multi-Objective Generative Adversarial Active Learning with MinMaxScaler This code template is for anomaly detection/outlier analysis using the MO_GAAL algorithm, implemented in the pyod library, with feature scaling via MinMaxScaler.
Required Packages
###Code
!pip install plotly
!pip install pyod
import time
import warnings
import pandas as pd
import numpy as np
from scipy import stats
import seaborn as sns
import plotly.express as px
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.manifold import Isomap
from pyod.models.mo_gaal import MO_GAAL
from sklearn.preprocessing import LabelEncoder,MinMaxScaler
from sklearn.model_selection import train_test_split
warnings.filterwarnings("ignore")
###Output
_____no_output_____
###Markdown
Initialization Filepath of the CSV file
###Code
file_path= ''
###Output
_____no_output_____
###Markdown
List of features which are required for model training
###Code
features = []
###Output
_____no_output_____
###Markdown
Data Fetching Pandas is an open-source, BSD-licensed library providing high-performance, easy-to-use data manipulation and data analysis tools. We will use the pandas library to read the CSV file from its storage path, and the head function to display the first few rows.
###Code
df=pd.read_csv(file_path)
df.head()
###Output
_____no_output_____
###Markdown
Feature Selection This is the process of reducing the number of input variables when developing a predictive model. It is used both to reduce the computational cost of modelling and, in some cases, to improve the performance of the model. We will assign all the required input features to X.
###Code
X=df[features]
###Output
_____no_output_____
###Markdown
Data Preprocessing Since most machine learning models in the sklearn library cannot handle string categorical data or null values, we have to explicitly remove or replace them. The snippet below defines functions that fill any null values and encode string categorical columns as dummy (one-hot) variables.
###Code
def NullClearner(df):
    # Impute missing values: mean for numeric columns, mode for categorical columns
    if isinstance(df, pd.Series) and df.dtype in ["float64", "int64"]:
        df.fillna(df.mean(), inplace=True)
        return df
    elif isinstance(df, pd.Series):
        df.fillna(df.mode()[0], inplace=True)
        return df
    else:
        return df
def EncodeX(df):
    # One-hot encode string/categorical columns as dummy variables
    return pd.get_dummies(df)
###Output
_____no_output_____
###Markdown
Calling preprocessing functions on the feature set.
###Code
x=X.columns.to_list()
for i in x:
X[i]=NullClearner(X[i])
X=EncodeX(X)
X.head()
###Output
_____no_output_____
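As a quick illustration of what these helpers do, here is a toy frame with made-up values (hypothetical data, not part of the template):
```
toy = pd.DataFrame({'age': [20.0, None, 40.0], 'city': ['A', None, 'B']})
for col in toy.columns:
    toy[col] = NullClearner(toy[col])  # age: NaN -> 30.0 (mean); city: NaN -> 'A' (mode)
EncodeX(toy)                           # 'city' becomes dummy columns city_A, city_B
```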
###Markdown
Data Rescaling MinMaxScaler subtracts the minimum value in each feature and then divides by the range, where the range is the difference between the original maximum and the original minimum, scaling each feature to the [0, 1] interval. Here we fit a MinMaxScaler to the feature set and transform it in a single step via the fit_transform method.
###Code
X_Scaled=MinMaxScaler().fit_transform(X)
X_Scaled=pd.DataFrame(data = X_Scaled,columns = X.columns)
X_Scaled.head()
###Output
_____no_output_____
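The arithmetic behind the transform is easy to verify by hand on a single toy column (illustrative values only):
```
x = np.array([2.0, 4.0, 10.0])
print((x - x.min()) / (x.max() - x.min()))  # [0.   0.25 1.  ], matching MinMaxScaler on this column
```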
###Markdown
Data Splitting The train-test split is a procedure for evaluating the performance of an algorithm. It involves dividing a dataset into two subsets: the first is used to fit/train the model and the second is used for prediction. The aim is to estimate the performance of the model on new data.
###Code
x_train,x_test=train_test_split(X_Scaled,test_size=0.2,random_state=123)
###Output
_____no_output_____
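A quick sanity check on the resulting shapes (the exact row counts depend on your dataset; the split here is 80/20):
```
print(x_train.shape, x_test.shape)
```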
###Markdown
Model MO_GAAL directly generates informative potential outliers to assist the classifier in describing a boundary that can effectively separate outliers from normal data. Moreover, to prevent the generator from falling into the mode collapse problem, the network structure of SO-GAAL is expanded from a single generator (SO-GAAL) to multiple generators with different objectives (MO-GAAL) in order to generate a reasonable reference distribution for the whole dataset. [For more information](https://pyod.readthedocs.io/en/latest/pyod.models.html#pyod.models.mo_gaal.MO_GAAL)
###Code
model = MO_GAAL(contamination=0.001,  # expected proportion of outliers in the data
                stop_epochs=11,       # training runs for 3 * stop_epochs epochs (33 here)
                k=3)                  # number of sub-generators
model.fit(x_train)
###Output
Epoch 1 of 33
Testing for epoch 1 index 1:
Testing for epoch 1 index 2:
...
Testing for epoch 1 index 8:
Epoch 2 of 33
...
Epoch 12 of 33
Testing for epoch 12 index 1:
Testing for epoch 12 index 2:
16/16 [==============================] - 0s 2ms/step - loss: 0.6868
16/16 [==============================] - 0s 1ms/step - loss: 0.7024
16/16 [==============================] - 0s 1ms/step - loss: 0.7061
...
Epoch 32 of 33
Testing for epoch 32 index 1:
16/16 [==============================] - 0s 2ms/step - loss: 0.6692
16/16 [==============================] - 0s 2ms/step - loss: 0.7507
16/16 [==============================] - 0s 2ms/step - loss: 0.7669
Testing for epoch 32 index 2:
16/16 [==============================] - 0s 2ms/step - loss: 0.6641
16/16 [==============================] - 0s 2ms/step - loss: 0.7470
16/16 [==============================] - 0s 2ms/step - loss: 0.7635
Testing for epoch 32 index 3:
16/16 [==============================] - 0s 1ms/step - loss: 0.6641
16/16 [==============================] - 0s 2ms/step - loss: 0.7496
16/16 [==============================] - 0s 2ms/step - loss: 0.7667
Testing for epoch 32 index 4:
16/16 [==============================] - 0s 2ms/step - loss: 0.6635
16/16 [==============================] - 0s 2ms/step - loss: 0.7544
16/16 [==============================] - 0s 2ms/step - loss: 0.7727
Testing for epoch 32 index 5:
16/16 [==============================] - 0s 2ms/step - loss: 0.6643
16/16 [==============================] - 0s 1ms/step - loss: 0.7505
16/16 [==============================] - 0s 2ms/step - loss: 0.7676
Testing for epoch 32 index 6:
16/16 [==============================] - 0s 1ms/step - loss: 0.6655
16/16 [==============================] - 0s 2ms/step - loss: 0.7450
16/16 [==============================] - 0s 1ms/step - loss: 0.7607
Testing for epoch 32 index 7:
16/16 [==============================] - 0s 2ms/step - loss: 0.6615
16/16 [==============================] - 0s 1ms/step - loss: 0.7555
16/16 [==============================] - 0s 2ms/step - loss: 0.7742
Testing for epoch 32 index 8:
16/16 [==============================] - 0s 2ms/step - loss: 0.6670
16/16 [==============================] - 0s 2ms/step - loss: 0.7488
16/16 [==============================] - 0s 2ms/step - loss: 0.7650
Epoch 33 of 33
Testing for epoch 33 index 1:
16/16 [==============================] - 0s 2ms/step - loss: 0.6591
16/16 [==============================] - 0s 1ms/step - loss: 0.7608
16/16 [==============================] - 0s 1ms/step - loss: 0.7811
Testing for epoch 33 index 2:
16/16 [==============================] - 0s 1ms/step - loss: 0.6614
16/16 [==============================] - 0s 2ms/step - loss: 0.7509
16/16 [==============================] - 0s 1ms/step - loss: 0.7687
Testing for epoch 33 index 3:
16/16 [==============================] - 0s 2ms/step - loss: 0.6687
16/16 [==============================] - 0s 1ms/step - loss: 0.7495
16/16 [==============================] - 0s 2ms/step - loss: 0.7654
Testing for epoch 33 index 4:
16/16 [==============================] - 0s 2ms/step - loss: 0.6572
16/16 [==============================] - 0s 2ms/step - loss: 0.7565
16/16 [==============================] - 0s 2ms/step - loss: 0.7762
Testing for epoch 33 index 5:
16/16 [==============================] - 0s 2ms/step - loss: 0.6616
16/16 [==============================] - 0s 1ms/step - loss: 0.7621
16/16 [==============================] - 0s 2ms/step - loss: 0.7821
Testing for epoch 33 index 6:
16/16 [==============================] - 0s 2ms/step - loss: 0.6626
16/16 [==============================] - 0s 1ms/step - loss: 0.7550
16/16 [==============================] - 0s 2ms/step - loss: 0.7733
Testing for epoch 33 index 7:
16/16 [==============================] - 0s 2ms/step - loss: 0.6590
16/16 [==============================] - 0s 2ms/step - loss: 0.7566
16/16 [==============================] - 0s 1ms/step - loss: 0.7759
Testing for epoch 33 index 8:
16/16 [==============================] - 0s 2ms/step - loss: 0.6620
16/16 [==============================] - 0s 2ms/step - loss: 0.7623
16/16 [==============================] - 0s 1ms/step - loss: 0.7821
###Markdown
Anomaly Prediction
###Code
# Attach the model's predictions to a copy of the test set
result = x_test.copy(deep=True)
result['Anomaly'] = model.predict(x_test)
result.head()
###Output
_____no_output_____
###Markdown
Anomaly Visualization Bar Plot
###Code
result['Anomaly'].value_counts().plot(kind='bar',color=['green','red'])
###Output
_____no_output_____
###Markdown
Pie Chart
###Code
fig = px.pie(result, names='Anomaly', title='Anomaly rate')
fig.show()
###Output
_____no_output_____
###Markdown
AnomaliesIn this part we will apply a dimensionality reduction technique to visualize the data. This can be done using algorithms such as PCA or t-SNE.
###Code
pca = PCA(n_components=2)
pca_results = pca.fit_transform(result.drop('Anomaly',axis=1))
plt.rcParams["figure.figsize"] = (20,10)
plt.scatter(x=pca_results[:, 0], y=pca_results[:, 1], c=result['Anomaly'])
plt.show()
###Output
_____no_output_____ |
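###Markdown
As a point of comparison, the same 2-D projection can be computed with t-SNE, the other algorithm mentioned above. This is a minimal sketch (the `perplexity` value is an assumption, not tuned for this dataset), and t-SNE is considerably slower than PCA on large datasets.
###Code
from sklearn.manifold import TSNE

# Project the feature columns (everything except the predicted label) down to 2-D
tsne = TSNE(n_components=2, perplexity=30, random_state=0)
tsne_results = tsne.fit_transform(result.drop('Anomaly', axis=1))
plt.rcParams["figure.figsize"] = (20, 10)
plt.scatter(x=tsne_results[:, 0], y=tsne_results[:, 1], c=result['Anomaly'])
plt.show()
###Output
_____no_output_____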
AI_Class/000/์ ์ฒ๋ฆฌ-์ฐธ๊ณ ์.ipynb | ###Markdown
Preprocessing reference. The dataset used is the Titanic dataset, and it is stored in a variable named `df`. The code that loads the data is placed at the very bottom of this notebook.
###Code
import numpy as np # library of mathematical functions
import pandas as pd # library for working with dataframes
###Output
_____no_output_____
###Markdown
Inspecting the data: `.head(x)` shows the first x rows, and `.tail(y)` shows the last y rows of the data.
###Code
df.head()
df.tail()
###Output
_____no_output_____
###Markdown
Renaming the dataset's columns when they need to be corrected.
###Code
df.columns = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o']
df.head()
df = df.rename(columns={'a': 'survived'}) # rename a single column (assigning into df.columns.values directly is unsupported)
df.head()
###Output
_____no_output_____
###Markdown
Checking the data type of each column.
###Code
df.info()
###Output
<class 'pandas.core.frame.DataFrame'>
Int64Index: 889 entries, 0 to 890
Data columns (total 15 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 survived 889 non-null int64
1 pclass 889 non-null int64
2 sex 889 non-null object
3 age 712 non-null float64
4 sibsp 889 non-null int64
5 parch 889 non-null int64
6 fare 889 non-null float64
7 embarked 889 non-null object
8 class 889 non-null category
9 who 889 non-null object
10 adult_male 889 non-null bool
11 deck 201 non-null category
12 embark_town 889 non-null object
13 alive 889 non-null object
14 alone 889 non-null float64
dtypes: bool(1), category(2), float64(3), int64(4), object(5)
memory usage: 93.4+ KB
###Markdown
Viewing the summary statistics of the data. Only columns with numeric dtypes such as int and float are included.
###Code
df.describe()
###Output
_____no_output_____
###Markdown
Checking for missing values.
###Code
df.isna().sum()
###Output
_____no_output_____
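###Markdown
Filling missing values instead of dropping them. This is a minimal sketch assuming the numeric `age` column from the listing above: `.fillna()` replaces NaN values, here with the column median (the mean or a constant work the same way).
###Code
# Fill missing ages with the median age
df['age'] = df['age'].fillna(df['age'].median())
df.isna().sum()
###Output
_____no_output_____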
###Markdown
Checking a column's unique values: `df['column'].unique()`
###Code
df['alone'].unique()
df['embark_town'].unique()
###Output
_____no_output_____
###Markdown
Dropping rows with missing values, i.e. data shown as NaN. `axis`: 0 operates on rows, 1 on columns.
###Code
df.dropna(subset=['embark_town'], axis=0, inplace=True)
df.head()
###Output
_____no_output_____
###Markdown
Converting data types with `.astype('dtype')`.
###Code
df = df[['alone','pclass']]
df.info()
df['alone'] = df['alone'].astype('int')
df.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 891 entries, 0 to 890
Data columns (total 2 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 alone 891 non-null int64
1 pclass 891 non-null int64
dtypes: int64(2)
memory usage: 14.0 KB
###Markdown
Replacing specific values in the data: `.replace('a', 'b', inplace=True)` converts every value `a` in the data to `b`.
###Code
df['embark_town'].unique()
df['embark_town'].replace('Queenstown', 'Q', inplace=True)
df['embark_town'].unique()
###Output
_____no_output_____
###Markdown
Selecting specific columns.
###Code
new_cols = df[['pclass', 'age', 'embarked']]
new_cols.head()
###Output
_____no_output_____
###Markdown
Dropping specific columns.
###Code
df.head()
df.drop(['deck', 'embark_town'], axis=1, inplace=True)
df.head()
###Output
_____no_output_____
###Markdown
One-hot encoding. By default, machine learning algorithms do not accept string values as input, so all string values must be encoded into numeric form as a preprocessing step before training a model. Put simply, one-hot encoding adds a new feature for each unique value of a categorical feature, marking 1 only in the column corresponding to that value and 0 in the remaining columns.
###Code
# One-hot encoding
onehot_embarked = pd.get_dummies(df['embarked'], prefix='town')
# Concatenate the encoded columns back onto the dataframe
df = pd.concat([df, onehot_embarked], axis=1)
onehot_embarked.head()
df.head()
import seaborn as sns
df = sns.load_dataset('titanic') # load the Titanic dataset (placed at the bottom, as noted at the top of this notebook)
###Output
_____no_output_____ |
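###Markdown
A compact alternative to one-hot encoding is label encoding, where each category is mapped to an integer code. This is a minimal sketch using pandas category codes; note that it imposes an arbitrary ordering on the categories, so it is not always appropriate. The `embarked_code` column below is hypothetical, added only for illustration.
###Code
# Map each embarked category to an integer code (NaN becomes -1)
df['embarked_code'] = df['embarked'].astype('category').cat.codes
df[['embarked', 'embarked_code']].head()
###Output
_____no_output_____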
examples/car_simulation_to_analysis.ipynb | ###Markdown
From CMB simulations to power spectra analysisIn this tutorial, we will show how to generate CMB simulations from theoretical $C_\ell$, produce the corresponding CMB map tiles and finally analyze them using `psplay`. CMB simulationUsing the attached $C_\ell$ [file](bode_almost_wmap5_lmax_1e4_lensedCls_startAt2.dat), we will first generate simulations with `pspy` in `CAR` pixellisation.
###Code
from pspy import so_map
ncomp = 3
ra0, ra1, dec0, dec1 = -30, 30, -30, 30
res = 0.5
template = so_map.car_template(ncomp, ra0, ra1, dec0, dec1, res)
cl_file = "bode_almost_wmap5_lmax_1e4_lensedCls_startAt2.dat"
cmb = template.synfast(cl_file)
###Output
_____no_output_____
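###Markdown
As a quick sanity check of the simulation we can look at the generated map. This is a sketch that assumes the `plot` helper of `pspy` `so_map` objects with its default settings, which displays each of the I, Q, U components.
###Code
# Quick look at the simulated Stokes components (assumes so_map.plot with default settings)
cmb.plot()
###Output
_____no_output_____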
###Markdown
Then, we make 2 splits out of it, each with $5$ µK.arcmin rms in temperature and $5\times\sqrt{2}$ µK.arcmin in polarisation
###Code
import numpy as np
nsplits = 2
splits = [cmb.copy() for i in range(nsplits)]
for i in range(nsplits):
noise = so_map.white_noise(cmb, rms_uKarcmin_T=5, rms_uKarcmin_pol=np.sqrt(2)*5)
splits[i].data += noise.data
###Output
_____no_output_____
###Markdown
We finally write on disk the two corresponding `fits` files
###Code
for i in range(nsplits):
splits[i].write_map("split{}_IQU_CAR.fits".format(i))
###Output
_____no_output_____
###Markdown
Generation of tile filesWe now have a total of 6 maps (2 splits × 3 Stokes components) in `CAR` pixellisation that we want to render as tile files. A tile file corresponds to a static `PNG` file representing a part of the sky at a given zoom level. To convert the maps, we will use a set of tools from `psplay`. Conversion to tile filesYou should get two files of ~1 Gb each. We should keep these files since we will use them later for the power spectra computation. As the last step in the conversion process, we generate the different tiles
###Code
from psplay import tools
for i in range(nsplits):
tools.car2tiles(input_file="split{}_IQU_car.fits".format(i),
output_dir="tiles/split{}_IQU_car.fits".format(i))
###Output
_____no_output_____
###Markdown
At the end of the process, we get a new directory `tiles` with two sub-directories `split0_IQU_car.fits` and `split1_IQU_car.fits`. Within these two directories we have 6 directories whose names refer to the zoom level: the directory `-5` corresponds to the smallest zoom whereas the directory `0` corresponds to the most precise tiles. Using `psplay` to visualize the mapsNow that we have generated the different tiles, we can interactively see the maps with `psplay`. The configuration of `psplay` can either be done using a dictionary or more easily with a `yaml` file. Basically, the configuration file needs to know where the original `FITS` files and the tile files are located. Other options can be set but we won't go into too much detail for this tutorial. For the purpose of this tutorial we have already created the `yaml` [file](simulation_to_analysis.yml) associated with the files we have created so far. Here is a copy-paste of it
```yaml
map:
  layers:
    - cmb:
        tags:
          splits:
            values: [0, 1]
            keybindings: [j, k]
          components:
            values: [0, 1, 2]
            keybindings: [c, v]
            substitutes: ["T", "Q", "U"]
        tile: files/tiles/split{splits}_IQU_car.fits/{z}/tile_{y}_{x}_{components}.png
        name: CMB simulation - split {splits} - {components}
data:
  maps:
    - id: split0
      file: split0_IQU_car.fits
    - id: split1
      file: split1_IQU_car.fits
  theory_file: bode_almost_wmap5_lmax_1e4_lensedCls_startAt2.dat
plot:
  lmax: 2000
```
There are 3 sections related to the 3 main steps, namely the map visualization, the spectra computation and the graphical representation of the spectra. The first two sections are mandatory. The `map` section corresponds to the tile files generated so far and can be dynamically expanded given different `tags`. Here for instance, we will build all the combinations of split and component values. The tile and the name fields will be generated for each combination given the tag values. Dedicated keybindings can also be defined in order to switch between the different splits and/or components.**The trickier part of this configuration is to set the path to tiles relative to where your notebook/JupyterLab instance has been started**. We can't set an absolute path, so you have to make sure that your notebook has been initiated from the `examples` directory. Otherwise, you should change the path to tile files given that you have access to them from your JupyterLab instance. So make sure to initiate your JupyterLab session from a "top" directory.We can now create an instance of the `psplay` application and show the different maps
###Code
from psplay import App
my_app = App("simulation_to_analysis.yml")
my_app.show_map()
###Output
_____no_output_____
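###Markdown
As noted above, `App` can also be configured with a dictionary instead of a yaml file. This is a minimal sketch assuming the dictionary mirrors the yaml structure shown above; here we simply load the same file with `pyyaml` and pass the resulting dictionary (the exact call is an assumption).
###Code
import yaml

# Build the equivalent configuration dictionary from the same yaml file
with open("simulation_to_analysis.yml") as f:
    config = yaml.safe_load(f)
my_app = App(config)
###Output
_____no_output_____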
###Markdown
If we unzoom enough, we will see the $\pm$ 30° patch size. We can also switch between the different I, Q, U and split layers. There are other options like the colormap and color scale with which one can play and see how things change. Selecting sub-patches and computing the corresponding power spectraGiven the different maps, we can now select patches by clicking on the square or the disk icons located just below the +/- zoom button. For instance, if we select a rectangle and another disk whose sizes are more or less the total size of our patch, we will get two surfaces of almost 3000 square degrees. Now we can ask `psplay` to compute the power spectra of both regions. Let's initiate the plot application
###Code
my_app.show_plot()
###Output
_____no_output_____
Chapter7/ufo-algorithms-lab.ipynb | ###Markdown
UFO Sightings Algorithms LabThe goal of this notebook is to build out models to use for predicting the legitimacy of a UFO sighting using the XGBoost and Linear Learner algorithms.What we plan on accomplishing is the following:1. [Load dataset onto Notebook instance memory from S3](Step-1:-Load-the-data-from-Amazon-S3)1. [Cleaning, transforming, analyzing, and preparing the dataset](Step-2:-Cleaning,-transforming,-analyize,-and-preparing-the-dataset)1. [Create and train our model (XGBoost)](Step-3:-Creating-and-training-our-model-(XGBoost))1. [Create and train our model (Linear Learner)](Step-4:-Creating-and-training-our-model-(Linear-Learner)) First let's go ahead and import all the needed libraries.
###Code
import io
from datetime import datetime
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import boto3
import sagemaker
import sagemaker.amazon.common as smac
from sagemaker import get_execution_role
###Output
_____no_output_____
###Markdown
Step 1: Loading the data from Amazon S3Let's get the UFO sightings data that is stored in S3 and load it into memory.
###Code
role = get_execution_role()
boto3_sess = boto3.Session()
bucket = "tc-ml-cert-training"
data_key = "ufo_dataset/ufo_fullset.csv"
data_location = f"s3://{bucket}/{data_key}"
df = pd.read_csv(data_location, low_memory=False)
df.head()
###Output
_____no_output_____
###Markdown
Step 2: Cleaning, transforming, analysing, and preparing the datasetThis step is so important. It's crucial that we clean and prepare our data before we do anything else.Let's check to see if there are any missing values:
###Code
missing_values = df.isnull().values.any()
if missing_values:
display(df[df.isnull().any(axis=1)])
df["shape"].value_counts()
# Replace the missing values with the most common shape
df["shape"].fillna(df["shape"].value_counts().index[0], inplace=True)
###Output
_____no_output_____
###Markdown
Let's go ahead and start preparing our dataset by transforming some of the values into the correct data types. Here is what we are going to take care of.1. Convert the `reportedTimestamp` and `eventDate` to a datetime data types.1. Convert the `shape` and `weather` to a category data type.1. Map the `physicalEvidence` and `contact` from 'Y', 'N' to `0`, `1`.1. Convert the `researchOutcome` to a category data type (target attribute).
###Code
df["reportedTimestamp"] = pd.to_datetime(df["reportedTimestamp"])
df["eventDate"] = pd.to_datetime(df["eventDate"])
df["shape"] = df["shape"].astype("category")
df["weather"] = df["weather"].astype("category")
df["physicalEvidence"] = df["physicalEvidence"].replace({"Y": 1, "N": 0})
df["contact"] = df["contact"].replace({"Y": 1, "N": 0})
df["researchOutcome"] = df["researchOutcome"].astype("category")
df.dtypes
###Output
_____no_output_____
###Markdown
Let's visualize some of the data to see if we can find out any important information.
###Code
%matplotlib inline
sns.set_context("paper", font_scale=1.4)
m_cts = df["contact"].value_counts()
m_ctsx = m_cts.index
m_ctsy = m_cts.to_numpy()
f, ax = plt.subplots(figsize=(5, 5))
sns.barplot(x=m_ctsx, y=m_ctsy)
ax.set_title("UFO Sightings and Contact")
ax.set_xlabel("Was contact made?")
ax.set_ylabel("Number of Sightings")
ax.set_xticklabels(["No", "Yes"])
plt.xticks(rotation=45)
plt.show()
m_cts = df["physicalEvidence"].value_counts()
m_ctsx = m_cts.index
m_ctsy = m_cts.to_numpy()
f, ax = plt.subplots(figsize=(5, 5))
sns.barplot(x=m_ctsx, y=m_ctsy)
ax.set_title("UFO Sightings and Physical Evidence")
ax.set_xlabel("Was there physical evidence?")
ax.set_ylabel("Number of Sightings")
ax.set_xticklabels(["No", "Yes"])
plt.xticks(rotation=45)
plt.show()
m_cts = df["shape"].value_counts()
m_ctsx = m_cts.index
m_ctsy = m_cts.to_numpy()
f, ax = plt.subplots(figsize=(9, 5))
sns.barplot(x=m_ctsx, y=m_ctsy)
ax.set_title("UFO Sightings by Shape")
ax.set_xlabel("UFO Shape")
ax.set_ylabel("Number of Sightings")
plt.xticks(rotation=45)
plt.show()
m_cts = df["weather"].value_counts()
m_ctsx = m_cts.index
m_ctsy = m_cts.to_numpy()
f, ax = plt.subplots(figsize=(5, 5))
sns.barplot(x=m_ctsx, y=m_ctsy)
ax.set_title("UFO Sightings by Weather")
ax.set_xlabel("Weather")
ax.set_ylabel("Number of Sightings")
plt.xticks(rotation=45)
plt.show()
m_cts = df["researchOutcome"].value_counts()
m_ctsx = m_cts.index
m_ctsy = m_cts.to_numpy()
f, ax = plt.subplots(figsize=(5, 5))
sns.barplot(x=m_ctsx, y=m_ctsy)
ax.set_title("UFO Sightings and Research Outcome")
ax.set_xlabel("Research Outcome")
ax.set_ylabel("Number of Sightings")
plt.xticks(rotation=45)
plt.show()
ufo_yr = df["eventDate"].dt.year # series with the year exclusively
## Set axes ##
years_data = ufo_yr.value_counts()
years_index = years_data.index # x ticks
years_values = years_data.to_numpy()
## Create Bar Plot ##
plt.figure(figsize=(15, 8))
plt.xticks(rotation=60)
plt.title("UFO Sightings by Year")
plt.ylabel("Number of Sightings")
plt.xlabel("Year")
years_plot = sns.barplot(x=years_index[:60], y=years_values[:60])
df.corr()
###Output
_____no_output_____
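###Markdown
The raw correlation matrix above is easier to scan as a heatmap; here is a quick sketch using the seaborn styling already imported.
###Code
# Visualize the correlation matrix of the numeric attributes
f, ax = plt.subplots(figsize=(10, 8))
sns.heatmap(df.corr(), cmap='coolwarm', ax=ax)
ax.set_title('Correlation between attributes')
plt.show()
###Output
_____no_output_____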
###Markdown
Let's drop the columns that are not important. 1. We can drop `sighting` because it is always 'Y' or Yes. 1. Let's drop the `firstName` and `lastName` because they are not important in determining the `researchOutcome`.1. Let's drop the `reportedTimestamp` because when the sighting was reported isn't going to help us determine the legitimacy of the sighting.1. We would need to create some sort of buckets for the `eventDate` and `eventTime`, like seasons for example, but since the distribution of dates is pretty even, let's go ahead and drop them.
###Code
df.drop(
columns=[
"firstName",
"lastName",
"sighting",
"reportedTimestamp",
"eventDate",
"eventTime",
],
inplace=True,
)
df.head()
###Output
_____no_output_____
###Markdown
Let's apply one-hot encoding1. We need to one-hot both the `weather` attribute and the `shape` attribute. 1. We also need to transform or map the researchOutcome (target) attribute into numeric values. This is what the algorithm is expecting. We can do this by mapping unexplained, explained, and probable to 0, 1, 2.
###Code
# Let's one-hot the weather and shape attribute
df = pd.get_dummies(df, columns=["weather", "shape"])
# Let's replace the researchOutcome values with 0, 1, 2 for Unexplained, Explained, and Probable
df["researchOutcome"] = df["researchOutcome"].replace(
{"unexplained": 0, "explained": 1, "probable": 2}
)
display(df.head())
display(df.shape)
###Output
_____no_output_____
###Markdown
Let's randomize and split the data into training, validation, and testing.1. First we need to randomize the data.1. Next Let's use 80% of the dataset for our training set.1. Then use 10% for validation during training.1. Finally we will use 10% for testing our model after it is deployed.
###Code
# Let's go ahead and randomize our data.
df = df.sample(frac=1).reset_index(drop=True)
# Next, Let's split the data into a training, validation, and testing.
rand_split = np.random.rand(len(df))
train_list = rand_split < 0.8 # 80% for training
val_list = (rand_split >= 0.8) & (rand_split < 0.9) # 10% for validation
test_list = rand_split >= 0.9 # 10% for testing
# This dataset will be used to train the model.
data_train = df[train_list]
# This dataset will be used to validate the model.
data_val = df[val_list]
# This dataset will be used to test the model.
data_test = df[test_list]
###Output
_____no_output_____
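###Markdown
For reference, the same three-way split can be expressed with scikit-learn, which also supports stratifying on the target so that each split keeps the class balance. This is a sketch of an alternative, not part of the original workflow.
###Code
from sklearn.model_selection import train_test_split

# Carve off 80% for training, then split the remaining 20% in half for validation and testing
train_df, holdout_df = train_test_split(df, test_size=0.2, stratify=df['researchOutcome'], random_state=0)
val_df, test_df = train_test_split(holdout_df, test_size=0.5, stratify=holdout_df['researchOutcome'], random_state=0)
print(train_df.shape, val_df.shape, test_df.shape)
###Output
_____no_output_____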
###Markdown
Next, let's go ahead and rearrange our attributes so the first attribute is our target attribute `researchOutcome`. This is what AWS requires and the XGBoost algorithm expects. You can read all about it here in the [documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/xgboost.htmlInputOutput-XGBoost).After that we will go ahead and create those files on our Notebook instance (stored as CSV) and then upload them to S3.
###Code
# Simply moves the researchOutcome attribute to the first position before creating CSV files
pd.concat(
[data_train["researchOutcome"], data_train.drop(["researchOutcome"], axis=1)],
axis=1,
).to_csv("train.csv", index=False, header=False)
pd.concat(
[data_val["researchOutcome"], data_val.drop(["researchOutcome"], axis=1)], axis=1
).to_csv("validation.csv", index=False, header=False)
# Next we can take the files we just stored onto our Notebook instance and upload them to S3.
ml_bucket = boto3_sess.resource("s3").Bucket(bucket)
ml_bucket.Object("algorithms_lab/xgboost_train/train.csv").upload_file("train.csv")
ml_bucket.Object("algorithms_lab/xgboost_validation/validation.csv").upload_file(
"validation.csv"
)
###Output
_____no_output_____
###Markdown
Step 3: Creating and training our model (XGBoost)This is where the magic happens. We will get the container for the XGBoost algorithm hosted in ECR.
###Code
container = sagemaker.image_uris.retrieve("xgboost", boto3_sess.region_name, "1")
###Output
_____no_output_____
###Markdown
Next, because we're training with the CSV file format, we'll create inputs that our training function can use as a pointer to the files in S3, which also specify that the content type is CSV.
###Code
s3_input_train = sagemaker.inputs.TrainingInput(
s3_data=f"s3://{bucket}/algorithms_lab/xgboost_train", content_type="csv"
)
s3_input_validation = sagemaker.inputs.TrainingInput(
s3_data=f"s3://{bucket}/algorithms_lab/xgboost_validation",
content_type="csv",
)
###Output
_____no_output_____
###Markdown
Next we start building out our model by using the SageMaker Python SDK and passing in everything that is required to create an XGBoost model.First I like to always create a specific job name.Next, we'll need to specify training parameters.1. The `xgboost` algorithm container1. The IAM role to use1. Training instance type and count1. S3 location for output data/model artifact1. [XGBoost Hyperparameters](https://docs.aws.amazon.com/sagemaker/latest/dg/xgboost_hyperparameters.html)Finally, after everything is included and ready, we can call the `.fit()` function which specifies the S3 location for training and validation data.
###Code
# Create a training job name
dt_now = datetime.now().strftime("%Y%m%d%H%M%S")
job_name = f"ufo-xgboost-job-{dt_now}"
print(f"Job name: {job_name}")
# Here is where the model artifact will be stored
output_location = f"s3://{bucket}/algorithms_lab/xgboost_output"
sess = sagemaker.Session()
xgb = sagemaker.estimator.Estimator(
container,
role,
instance_count=1,
instance_type="ml.m4.xlarge",
output_path=output_location,
sagemaker_session=sess,
)
xgb.set_hyperparameters(objective="multi:softmax", num_class=3, num_round=100)
data_channels = {"train": s3_input_train, "validation": s3_input_validation}
xgb.fit(data_channels, job_name=job_name)
print(
f"Location of the trained XGBoost model: {output_location}/{job_name}/output/model.tar.gz"
)
###Output
_____no_output_____
###Markdown
After we train our model we can see the default evaluation metric in the logs. The `merror` metric is the multiclass classification error rate, calculated as (wrong cases)/(all cases). We want this to be minimized (so we want it to be super small). --- Step 4: Creating and training our model (Linear Learner)Let's evaluate the Linear Learner algorithm as well. Let's go ahead and randomize the data again and get it ready for the Linear Learner algorithm. We will also rearrange the columns so it is ready for the algorithm (it expects the first column to be the target attribute)
###Code
np.random.seed(0)
rand_split = np.random.rand(len(df))
train_list = rand_split < 0.8
val_list = (rand_split >= 0.8) & (rand_split < 0.9)
test_list = rand_split >= 0.9
# This dataset will be used to train the model.
data_train = df[train_list]
# This dataset will be used to validate the model.
data_val = df[val_list]
# This dataset will be used to test the model.
data_test = df[test_list]
# This rearranges the columns
cols = list(data_train)
cols.insert(0, cols.pop(cols.index("researchOutcome")))
data_train = data_train[cols]
cols = list(data_val)
cols.insert(0, cols.pop(cols.index("researchOutcome")))
data_val = data_val[cols]
cols = list(data_test)
cols.insert(0, cols.pop(cols.index("researchOutcome")))
data_test = data_test[cols]
# Breaks the datasets into attribute numpy.ndarray and the same for target attribute.
train_X = data_train.drop(columns="researchOutcome").values
train_y = data_train["researchOutcome"].values
val_X = data_val.drop(columns="researchOutcome").values
val_y = data_val["researchOutcome"].values
test_X = data_test.drop(columns="researchOutcome").values
test_y = data_test["researchOutcome"].values
###Output
_____no_output_____
###Markdown
Next, let's create a recordIO file for the training data and upload it to S3.
###Code
train_file = "ufo_sightings_train_recordIO_protobuf.data"
f = io.BytesIO()
smac.write_numpy_to_dense_tensor(
f, train_X.astype("float32"), train_y.astype("float32")
)
f.seek(0)
ml_bucket.Object(f"algorithms_lab/linearlearner_train/{train_file}").upload_fileobj(f)
training_recordIO_protobuf_location = (
f"s3://{bucket}/algorithms_lab/linearlearner_train/{train_file}"
)
print(
f"The Pipe mode recordIO protobuf training data: {training_recordIO_protobuf_location}"
)
###Output
_____no_output_____
###Markdown
Let's create a recordIO file for the validation data and upload it to S3
###Code
validation_file = "ufo_sightings_validation_recordIO_protobuf.data"
f = io.BytesIO()
smac.write_numpy_to_dense_tensor(f, val_X.astype("float32"), val_y.astype("float32"))
f.seek(0)
ml_bucket.Object(
f"algorithms_lab/linearlearner_validation/{validation_file}"
).upload_fileobj(f)
validate_recordIO_protobuf_location = (
f"s3://{bucket}/algorithms_lab/linearlearner_validation/{validation_file}"
)
print(
f"The Pipe mode recordIO protobuf validation data: {validate_recordIO_protobuf_location}"
)
###Output
_____no_output_____
###Markdown
---Alright we are good to go for the Linear Learner algorithm. Let's get everything we need from the ECR repository to call the Linear Learner algorithm.
###Code
container = sagemaker.image_uris.retrieve("linear-learner", boto3_sess.region_name, "1")
# Create a training job name
dt_now = datetime.now().strftime("%Y%m%d%H%M%S")
job_name = f"ufo-linear-learner-job-{dt_now}"
print(f"Job name {job_name}")
# Here is where the model-artifact will be stored
output_location = f"s3://{bucket}/algorithms_lab/linearlearner_output"
###Output
_____no_output_____
###Markdown
Next we start building out our model by using the SageMaker Python SDK and passing in everything that is required to create a Linear Learner model.First I like to always create a specific job name.Next, we'll need to specify training parameters.1. The `linear-learner` algorithm container1. The IAM role to use1. Training instance type and count1. S3 location for output data/model artifact1. [The input type (Pipe)](https://docs.aws.amazon.com/sagemaker/latest/dg/linear-learner.html)1. [Linear Learner Hyperparameters](https://docs.aws.amazon.com/sagemaker/latest/dg/ll_hyperparameters.html)Finally, after everything is included and ready, we can call the `.fit()` function which specifies the S3 location for training and validation data.
###Code
print(f"The feature_dim hyperparameter needs to be set to {data_train.shape[1] - 1}.")
sess = sagemaker.Session()
# Setup the Linear Learner algorithm from the ECR container
linear = sagemaker.estimator.Estimator(
container,
role,
instance_count=1,
instance_type="ml.c4.xlarge",
output_path=output_location,
sagemaker_session=sess,
input_mode="Pipe",
)
# Setup the hyperparameters
linear.set_hyperparameters(
feature_dim=22, # number of attributes (minus the researchOutcome attribute)
predictor_type="multiclass_classifier", # type of classification problem
num_classes=3,
) # number of classes in our researchOutcome (explained, unexplained, probable)
# Launch a training job. This method calls the CreateTrainingJob API call
data_channels = {
"train": training_recordIO_protobuf_location,
"validation": validate_recordIO_protobuf_location,
}
linear.fit(data_channels, job_name=job_name)
print(
f"Location of the trained Linear Learner model: {output_location}/{job_name}/output/model.tar.gz"
)
###Output
_____no_output_____
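###Markdown
The 10% test split created above is never used in this notebook. As an optional final step, the trained estimator can be deployed and scored on it. This is a hedged sketch: the endpoint instance type is an assumption, and predictions are parsed from the linear-learner JSON response's `predicted_label` field. Remember to delete the endpoint afterwards to avoid charges.
###Code
from sagemaker.serializers import CSVSerializer
from sagemaker.deserializers import JSONDeserializer

# Deploy the trained Linear Learner model to a real-time endpoint
predictor = linear.deploy(initial_instance_count=1,
                          instance_type='ml.m4.xlarge',
                          serializer=CSVSerializer(),
                          deserializer=JSONDeserializer())
response = predictor.predict(test_X)
predictions = np.array([int(p['predicted_label']) for p in response['predictions']])
# Multiclass classification error rate: (wrong cases)/(all cases),
# the same quantity XGBoost reported above as merror
error_rate = np.mean(predictions != test_y)
print(f'Test error rate: {error_rate:.4f}')
# Clean up the endpoint when done
predictor.delete_endpoint()
###Output
_____no_output_____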
###Markdown
UFO Sightings Algorithms LabThe goal of this notebook is to build out models to use for predicting the legitimacy of a UFO sighting using the XGBoost and Linear Learner algorithm.What we plan on accompishling is the following:1. [Load dataset onto Notebook instance memory from S3](Step-1:-Load-the-data-from-Amazon-S3)1. [Cleaning, transforming, analyize, and preparing the dataset](Step-2:-Cleaning,-transforming,-analyize,-and-preparing-the-dataset)1. [Create and train our model (XGBoost)](Step-3:-Creating-and-training-our-model-(XGBoost))1. [Create and train our model (Linear Learner)](Step-4:-Creating-and-training-our-model-(Linear-Learner)) First let's go ahead and import all the needed libraries.
###Code
import pandas as pd
import numpy as np
from datetime import datetime
import io
import sagemaker.amazon.common as smac
import boto3
from sagemaker import get_execution_role
import sagemaker
import matplotlib.pyplot as plt
import seaborn as sns
###Output
_____no_output_____
###Markdown
Step 1: Loading the data from Amazon S3Let's get the UFO sightings data that is stored in S3 and load it into memory.
###Code
role = get_execution_role()
bucket='<INSERT_YOUR_BUCKET_NAME_HERE>'
sub_folder = 'ufo_dataset'
data_key = 'ufo_fullset.csv'
data_location = 's3://{}/{}/{}'.format(bucket, sub_folder, data_key)
df = pd.read_csv(data_location, low_memory=False)
df.head()
###Output
_____no_output_____
###Markdown
Step 2: Cleaning, transforming, analyize, and preparing the datasetThis step is so important. It's crucial that we clean and prepare our data before we do anything else.
###Code
# Let's check to see if there are any missing values
missing_values = df.isnull().values.any()
if(missing_values):
display(df[df.isnull().any(axis=1)])
df['shape'].value_counts()
# Replace the missing values with the most common shape
df['shape'] = df['shape'].fillna(df['shape'].value_counts().index[0])
###Output
_____no_output_____
###Markdown
Let's go ahead and start preparing our dataset by transforming some of the values into the correct data types. Here is what we are going to take care of.1. Convert the `reportedTimestamp` and `eventDate` to a datetime data types.1. Convert the `shape` and `weather` to a category data type.1. Map the `physicalEvidence` and `contact` from 'Y', 'N' to `0`, `1`.1. Convert the `researchOutcome` to a category data type (target attribute).
###Code
df['reportedTimestamp'] = pd.to_datetime(df['reportedTimestamp'])
df['eventDate'] = pd.to_datetime(df['eventDate'])
df['shape'] = df['shape'].astype('category')
df['weather'] = df['weather'].astype('category')
df['physicalEvidence'] = df['physicalEvidence'].replace({'Y': 1, 'N': 0})
df['contact'] = df['contact'].replace({'Y': 1, 'N': 0})
df['researchOutcome'] = df['researchOutcome'].astype('category')
df.dtypes
###Output
_____no_output_____
###Markdown
Let's visualize some of the data to see if we can find out any important information.
###Code
%matplotlib inline
sns.set_context("paper", font_scale=1.4)
m_cts = (df['contact'].value_counts())
m_ctsx = m_cts.index
m_ctsy = m_cts.get_values()
f, ax = plt.subplots(figsize=(5,5))
sns.barplot(x=m_ctsx, y=m_ctsy)
ax.set_title('UFO Sightings and Contact')
ax.set_xlabel('Was contact made?')
ax.set_ylabel('Number of Sightings')
ax.set_xticklabels(['No', 'Yes'])
plt.xticks(rotation=45)
plt.show()
m_cts = (df['physicalEvidence'].value_counts())
m_ctsx = m_cts.index
m_ctsy = m_cts.get_values()
f, ax = plt.subplots(figsize=(5,5))
sns.barplot(x=m_ctsx, y=m_ctsy)
ax.set_title('UFO Sightings and Physical Evidence')
ax.set_xlabel('Was there physical evidence?')
ax.set_ylabel('Number of Sightings')
ax.set_xticklabels(['No', 'Yes'])
plt.xticks(rotation=45)
plt.show()
m_cts = (df['shape'].value_counts())
m_ctsx = m_cts.index
m_ctsy = m_cts.get_values()
f, ax = plt.subplots(figsize=(9,5))
sns.barplot(x=m_ctsx, y=m_ctsy)
ax.set_title('UFO Sightings by Shape')
ax.set_xlabel('UFO Shape')
ax.set_ylabel('Number of Sightings')
plt.xticks(rotation=45)
plt.show()
m_cts = (df['weather'].value_counts())
m_ctsx = m_cts.index
m_ctsy = m_cts.get_values()
f, ax = plt.subplots(figsize=(5,5))
sns.barplot(x=m_ctsx, y=m_ctsy)
ax.set_title('UFO Sightings by Weather')
ax.set_xlabel('Weather')
ax.set_ylabel('Number of Sightings')
plt.xticks(rotation=45)
plt.show()
m_cts = (df['researchOutcome'].value_counts())
m_ctsx = m_cts.index
m_ctsy = m_cts.get_values()
f, ax = plt.subplots(figsize=(5,5))
sns.barplot(x=m_ctsx, y=m_ctsy)
ax.set_title('UFO Sightings and Research Outcome')
ax.set_xlabel('Research Outcome')
ax.set_ylabel('Number of Sightings')
plt.xticks(rotation=45)
plt.show()
ufo_yr = df['eventDate'].dt.year # series with the year exclusively
## Set axes ##
years_data = ufo_yr.value_counts()
years_index = years_data.index # x ticks
years_values = years_data.get_values()
## Create Bar Plot ##
plt.figure(figsize=(15,8))
plt.xticks(rotation = 60)
plt.title('UFO Sightings by Year')
plt.ylabel('Number of Sightings')
plt.xlabel('Year')
years_plot = sns.barplot(x=years_index[:60],y=years_values[:60])
df.corr()
###Output
_____no_output_____
###Markdown
Let's drop the columns that are not important. 1. We can drop `sighting` becuase it is always 'Y' or Yes. 1. Let's drop the `firstName` and `lastName` becuase they are not important in determining the `researchOutcome`.1. Let's drop the `reportedTimestamp` becuase when the sighting was reporting isn't going to help us determine the legitimacy of the sighting.1. We would need to create some sort of buckets for the `eventDate` and `eventTime`, like seasons for example, but since the distribution of dates is pretty even, let's go ahead and drop them.
###Code
df.drop(columns=['firstName', 'lastName', 'sighting', 'reportedTimestamp', 'eventDate', 'eventTime'], inplace=True)
df.head()
###Output
_____no_output_____
###Markdown
Let's apply one-hot encoding1. We need to one-hot both the `weather` attribute and the `shape` attribute. 1. We also need to transform or map the researchOutcome (target) attribute into numeric values. This is what the alogrithm is expecting. We can do this by mapping unexplained, explained, and probable to 0, 1, 2.
###Code
# Let's one-hot the weather and shape attribute
df = pd.get_dummies(df, columns=['weather', 'shape'])
# Let's replace the researchOutcome values with 0, 1, 2 for Unexplained, Explained, and Probable
df['researchOutcome'] = df['researchOutcome'].replace({'unexplained': 0, 'explained': 1, 'probable': 2})
display(df.head())
display(df.shape)
###Output
_____no_output_____
###Markdown
Let's randomize and split the data into training, validation, and testing.1. First we need to randomize the data.1. Next Let's use 80% of the dataset for our training set.1. Then use 10% for validation during training.1. Finally we will use 10% for testing our model after it is deployed.
###Code
# Let's go ahead and randomize our data.
df = df.sample(frac=1).reset_index(drop=True)
# Next, Let's split the data into a training, validation, and testing.
rand_split = np.random.rand(len(df))
train_list = rand_split < 0.8 # 80% for training
val_list = (rand_split >= 0.8) & (rand_split < 0.9) # 10% for validation
test_list = rand_split >= 0.9 # 10% for testing
# This dataset will be used to train the model.
data_train = df[train_list]
# This dataset will be used to validate the model.
data_val = df[val_list]
# This dataset will be used to test the model.
data_test = df[test_list]
###Output
_____no_output_____
###Markdown
Next, let's go ahead and rearrange our attributes so the first attribute is our target attribute `researchOutcome`. This is what AWS requires and the XGBoost algorithms expects. You can read all about it here in the [documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/xgboost.htmlInputOutput-XGBoost).After that we will go ahead and create those files on our Notebook instance (stored as CSV) and then upload them to S3.
###Code
# Simply moves the researchOutcome attribute to the first position before creating CSV files
pd.concat([data_train['researchOutcome'], data_train.drop(['researchOutcome'], axis=1)], axis=1).to_csv('train.csv', index=False, header=False)
pd.concat([data_val['researchOutcome'], data_val.drop(['researchOutcome'], axis=1)], axis=1).to_csv('validation.csv', index=False, header=False)
# Next we can take the files we just stored onto our Notebook instance and upload them to S3.
boto3.Session().resource('s3').Bucket(bucket).Object('algorithms_lab/xgboost_train/train.csv').upload_file('train.csv')
boto3.Session().resource('s3').Bucket(bucket).Object('algorithms_lab/xgboost_validation/validation.csv').upload_file('validation.csv')
###Output
_____no_output_____
###Markdown
Step 3: Creating and training our model (XGBoost)This is where the magic happens. We will get the ECR container hosted in ECR for the XGBoost algorithm.
###Code
from sagemaker.amazon.amazon_estimator import get_image_uri
container = get_image_uri(boto3.Session().region_name, 'xgboost')
###Output
_____no_output_____
###Markdown
Next, because we're training with the CSV file format, we'll create inputs that our training function can use as a pointer to the files in S3, which also specify that the content type is CSV.
###Code
s3_input_train = sagemaker.s3_input(s3_data='s3://{}/algorithms_lab/xgboost_train'.format(bucket), content_type='csv')
s3_input_validation = sagemaker.s3_input(s3_data='s3://{}/algorithms_lab/xgboost_validation'.format(bucket), content_type='csv')
###Output
_____no_output_____
###Markdown
Next we start building out our model by using the SageMaker Python SDK and passing in everything that is required to create a XGBoost model.First I like to always create a specific job name.Next, we'll need to specify training parameters.1. The `xgboost` algorithm container1. The IAM role to use1. Training instance type and count1. S3 location for output data/model artifact1. [XGBoost Hyperparameters](https://docs.aws.amazon.com/sagemaker/latest/dg/xgboost_hyperparameters.html)Finally, after everything is included and ready, then we can call the `.fit()` function which specifies the S3 location for training and validation data.
###Code
# Create a training job name
job_name = 'ufo-xgboost-job-{}'.format(datetime.now().strftime("%Y%m%d%H%M%S"))
print('Here is the job name {}'.format(job_name))
# Here is where the model artifact will be stored
output_location = 's3://{}/algorithms_lab/xgboost_output'.format(bucket)
sess = sagemaker.Session()
xgb = sagemaker.estimator.Estimator(container,
role,
train_instance_count=1,
train_instance_type='ml.m4.xlarge',
output_path=output_location,
sagemaker_session=sess)
xgb.set_hyperparameters(objective='multi:softmax',
num_class=3,
num_round=100)
data_channels = {
'train': s3_input_train,
'validation': s3_input_validation
}
xgb.fit(data_channels, job_name=job_name)
print('Here is the location of the trained XGBoost model: {}/{}/output/model.tar.gz'.format(output_location, job_name))
###Output
_____no_output_____
###Markdown
After we train our model we can see the default evaluation metric in the logs. The `merror` is used in multiclass classification error rate. It is calculated as (wrong cases)/(all cases). We want this to be minimized (so we want this to be super small). --- Step 4: Creating and training our model (Linear Learner)Let's evaluate the Linear Learner algorithm as well. Let's go ahead and randomize the data again and get it ready for the Linear Leaner algorithm. We will also rearrange the columns so it is ready for the algorithm (it expects the first column to be the target attribute)
###Code
np.random.seed(0)
rand_split = np.random.rand(len(df))
train_list = rand_split < 0.8
val_list = (rand_split >= 0.8) & (rand_split < 0.9)
test_list = rand_split >= 0.9
# This dataset will be used to train the model.
data_train = df[train_list]
# This dataset will be used to validate the model.
data_val = df[val_list]
# This dataset will be used to test the model.
data_test = df[test_list]
# This rearranges the columns
cols = list(data_train)
cols.insert(0, cols.pop(cols.index('researchOutcome')))
data_train = data_train[cols]
cols = list(data_val)
cols.insert(0, cols.pop(cols.index('researchOutcome')))
data_val = data_val[cols]
cols = list(data_test)
cols.insert(0, cols.pop(cols.index('researchOutcome')))
data_test = data_test[cols]
# Breaks the datasets into attribute numpy.ndarray and the same for target attribute.
train_X = data_train.drop(columns='researchOutcome').values
train_y = data_train['researchOutcome'].values
val_X = data_val.drop(columns='researchOutcome').values
val_y = data_val['researchOutcome'].values
test_X = data_test.drop(columns='researchOutcome').values
test_y = data_test['researchOutcome'].values
###Output
_____no_output_____
###Markdown
Next, Let's create recordIO file for the training data and upload it to S3.
###Code
train_file = 'ufo_sightings_train_recordIO_protobuf.data'
f = io.BytesIO()
smac.write_numpy_to_dense_tensor(f, train_X.astype('float32'), train_y.astype('float32'))
f.seek(0)
boto3.Session().resource('s3').Bucket(bucket).Object('algorithms_lab/linearlearner_train/{}'.format(train_file)).upload_fileobj(f)
training_recordIO_protobuf_location = 's3://{}/algorithms_lab/linearlearner_train/{}'.format(bucket, train_file)
print('The Pipe mode recordIO protobuf training data: {}'.format(training_recordIO_protobuf_location))
###Output
_____no_output_____
###Markdown
Let's create recordIO file for the validation data and upload it to S3
###Code
validation_file = 'ufo_sightings_validatioin_recordIO_protobuf.data'
f = io.BytesIO()
smac.write_numpy_to_dense_tensor(f, val_X.astype('float32'), val_y.astype('float32'))
f.seek(0)
boto3.Session().resource('s3').Bucket(bucket).Object('algorithms_lab/linearlearner_validation/{}'.format(validation_file)).upload_fileobj(f)
validate_recordIO_protobuf_location = 's3://{}/algorithms_lab/linearlearner_validation/{}'.format(bucket, validation_file)
print('The Pipe mode recordIO protobuf validation data: {}'.format(validate_recordIO_protobuf_location))
###Output
_____no_output_____
###Markdown
---Alright we are good to go for the Linear Learner algorithm. Let's get everything we need from the ECR repository to call the Linear Learner algorithm.
###Code
from sagemaker.amazon.amazon_estimator import get_image_uri
import sagemaker
container = get_image_uri(boto3.Session().region_name, 'linear-learner', "1")
# Create a training job name
job_name = 'ufo-linear-learner-job-{}'.format(datetime.now().strftime("%Y%m%d%H%M%S"))
print('Here is the job name {}'.format(job_name))
# Here is where the model-artifact will be stored
output_location = 's3://{}/algorithms_lab/linearlearner_output'.format(bucket)
###Output
_____no_output_____
###Markdown
Next we start building out our model by using the SageMaker Python SDK and passing in everything that is required to create a Linear Learner model.First I like to always create a specific job name.Next, we'll need to specify training parameters.1. The `linear-learner` algorithm container1. The IAM role to use1. Training instance type and count1. S3 location for output data/model artifact1. [The input type (Pipe)](https://docs.aws.amazon.com/sagemaker/latest/dg/linear-learner.html)1. [Linear Learner Hyperparameters](https://docs.aws.amazon.com/sagemaker/latest/dg/ll_hyperparameters.html)Finally, after everything is included and ready, then we can call the `.fit()` function which specifies the S3 location for training and validation data.
###Code
print('The feature_dim hyperparameter needs to be set to {}.'.format(data_train.shape[1] - 1))
sess = sagemaker.Session()
# Setup the LinearLeaner algorithm from the ECR container
linear = sagemaker.estimator.Estimator(container,
role,
train_instance_count=1,
train_instance_type='ml.c4.xlarge',
output_path=output_location,
sagemaker_session=sess,
input_mode='Pipe')
# Setup the hyperparameters
linear.set_hyperparameters(feature_dim=22, # number of attributes (minus the researchOutcome attribute)
predictor_type='multiclass_classifier', # type of classification problem
num_classes=3) # number of classes in out researchOutcome (explained, unexplained, probable)
# Launch a training job. This method calls the CreateTrainingJob API call
data_channels = {
'train': training_recordIO_protobuf_location,
'validation': validate_recordIO_protobuf_location
}
linear.fit(data_channels, job_name=job_name)
print('Here is the location of the trained Linear Learner model: {}/{}/output/model.tar.gz'.format(output_location, job_name))
###Output
_____no_output_____
###Markdown
UFO Sightings Algorithms LabThe goal of this notebook is to build out models to use for predicting the legitimacy of a UFO sighting using the XGBoost and Linear Learner algorithm.What we plan on accompishling is the following:1. [Load dataset onto Notebook instance memory from S3](Step-1:-Load-the-data-from-Amazon-S3)1. [Cleaning, transforming, analyize, and preparing the dataset](Step-2:-Cleaning,-transforming,-analyize,-and-preparing-the-dataset)1. [Create and train our model (XGBoost)](Step-3:-Creating-and-training-our-model-(XGBoost))1. [Create and train our model (Linear Learner)](Step-4:-Creating-and-training-our-model-(Linear-Learner)) First let's go ahead and import all the needed libraries.
###Code
import pandas as pd
import numpy as np
from datetime import datetime
import io
import sagemaker.amazon.common as smac
import boto3
from sagemaker import get_execution_role
import sagemaker
import matplotlib.pyplot as plt
import seaborn as sns
###Output
_____no_output_____
###Markdown
Step 1: Loading the data from Amazon S3Let's get the UFO sightings data that is stored in S3 and load it into memory.
###Code
role = get_execution_role()
bucket='<INSERT_YOUR_BUCKET_NAME_HERE>'
sub_folder = 'ufo_dataset'
data_key = 'ufo_fullset.csv'
data_location = 's3://{}/{}/{}'.format(bucket, sub_folder, data_key)
df = pd.read_csv(data_location, low_memory=False)
df.head()
###Output
_____no_output_____
###Markdown
Step 2: Cleaning, transforming, analyize, and preparing the datasetThis step is so important. It's crucial that we clean and prepare our data before we do anything else.
###Code
# Let's check to see if there are any missing values
missing_values = df.isnull().values.any()
if(missing_values):
display(df[df.isnull().any(axis=1)])
df['shape'].value_counts()
# Replace the missing values with the most common shape
df['shape'] = df['shape'].fillna(df['shape'].value_counts().index[0])
###Output
_____no_output_____
###Markdown
Let's go ahead and start preparing our dataset by transforming some of the values into the correct data types. Here is what we are going to take care of.1. Convert the `reportedTimestamp` and `eventDate` to a datetime data types.1. Convert the `shape` and `weather` to a category data type.1. Map the `physicalEvidence` and `contact` from 'Y', 'N' to `0`, `1`.1. Convert the `researchOutcome` to a category data type (target attribute).
###Code
df['reportedTimestamp'] = pd.to_datetime(df['reportedTimestamp'])
df['eventDate'] = pd.to_datetime(df['eventDate'])
df['shape'] = df['shape'].astype('category')
df['weather'] = df['weather'].astype('category')
df['physicalEvidence'] = df['physicalEvidence'].replace({'Y': 1, 'N': 0})
df['contact'] = df['contact'].replace({'Y': 1, 'N': 0})
df['researchOutcome'] = df['researchOutcome'].astype('category')
df.dtypes
###Output
_____no_output_____
###Markdown
Let's visualize some of the data to see if we can find out any important information.
###Code
%matplotlib inline
sns.set_context("paper", font_scale=1.4)
m_cts = (df['contact'].value_counts())
m_ctsx = m_cts.index
m_ctsy = m_cts.to_numpy()
f, ax = plt.subplots(figsize=(5,5))
sns.barplot(x=m_ctsx, y=m_ctsy)
ax.set_title('UFO Sightings and Contact')
ax.set_xlabel('Was contact made?')
ax.set_ylabel('Number of Sightings')
ax.set_xticklabels(['No', 'Yes'])
plt.xticks(rotation=45)
plt.show()
m_cts = (df['physicalEvidence'].value_counts())
m_ctsx = m_cts.index
m_ctsy = m_cts.to_numpy()
f, ax = plt.subplots(figsize=(5,5))
sns.barplot(x=m_ctsx, y=m_ctsy)
ax.set_title('UFO Sightings and Physical Evidence')
ax.set_xlabel('Was there physical evidence?')
ax.set_ylabel('Number of Sightings')
ax.set_xticklabels(['No', 'Yes'])
plt.xticks(rotation=45)
plt.show()
m_cts = (df['shape'].value_counts())
m_ctsx = m_cts.index
m_ctsy = m_cts.to_numpy()
f, ax = plt.subplots(figsize=(9,5))
sns.barplot(x=m_ctsx, y=m_ctsy)
ax.set_title('UFO Sightings by Shape')
ax.set_xlabel('UFO Shape')
ax.set_ylabel('Number of Sightings')
plt.xticks(rotation=45)
plt.show()
m_cts = (df['weather'].value_counts())
m_ctsx = m_cts.index
m_ctsy = m_cts.to_numpy()
f, ax = plt.subplots(figsize=(5,5))
sns.barplot(x=m_ctsx, y=m_ctsy)
ax.set_title('UFO Sightings by Weather')
ax.set_xlabel('Weather')
ax.set_ylabel('Number of Sightings')
plt.xticks(rotation=45)
plt.show()
m_cts = (df['researchOutcome'].value_counts())
m_ctsx = m_cts.index
m_ctsy = m_cts.to_numpy()
f, ax = plt.subplots(figsize=(5,5))
sns.barplot(x=m_ctsx, y=m_ctsy)
ax.set_title('UFO Sightings and Research Outcome')
ax.set_xlabel('Research Outcome')
ax.set_ylabel('Number of Sightings')
plt.xticks(rotation=45)
plt.show()
ufo_yr = df['eventDate'].dt.year # series with the year exclusively
## Set axes ##
years_data = ufo_yr.value_counts()
years_index = years_data.index # x ticks
years_values = years_data.to_numpy()
## Create Bar Plot ##
plt.figure(figsize=(15,8))
plt.xticks(rotation = 60)
plt.title('UFO Sightings by Year')
plt.ylabel('Number of Sightings')
plt.xlabel('Year')
years_plot = sns.barplot(x=years_index[:60],y=years_values[:60])
df.corr()
###Output
_____no_output_____
###Markdown
Let's drop the columns that are not important. 1. We can drop `sighting` because it is always 'Y' or Yes. 1. Let's drop the `firstName` and `lastName` because they are not important in determining the `researchOutcome`.1. Let's drop the `reportedTimestamp` because when the sighting was reported isn't going to help us determine the legitimacy of the sighting.1. We would need to create some sort of buckets for the `eventDate` and `eventTime`, like seasons for example, but since the distribution of dates is pretty even, let's go ahead and drop them.
###Code
df.drop(columns=['firstName', 'lastName', 'sighting', 'reportedTimestamp', 'eventDate', 'eventTime'], inplace=True)
df.head()
###Output
_____no_output_____
###Markdown
Let's apply one-hot encoding1. We need to one-hot both the `weather` attribute and the `shape` attribute. 1. We also need to transform or map the researchOutcome (target) attribute into numeric values. This is what the algorithm is expecting. We can do this by mapping unexplained, explained, and probable to 0, 1, 2.
###Code
# Let's one-hot the weather and shape attribute
df = pd.get_dummies(df, columns=['weather', 'shape'])
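# (pd.get_dummies expands each categorical column into one 0/1 indicator
# column per category value, e.g. weather_* and shape_* columns)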
# Let's replace the researchOutcome values with 0, 1, 2 for Unexplained, Explained, and Probable
df['researchOutcome'] = df['researchOutcome'].replace({'unexplained': 0, 'explained': 1, 'probable': 2})
display(df.head())
display(df.shape)
###Output
_____no_output_____
###Markdown
Let's randomize and split the data into training, validation, and testing.1. First we need to randomize the data.1. Next, let's use 80% of the dataset for our training set.1. Then use 10% for validation during training.1. Finally, we will use 10% for testing our model after it is deployed.
###Code
# Let's go ahead and randomize our data.
df = df.sample(frac=1).reset_index(drop=True)
# Next, let's split the data into training, validation, and testing sets.
rand_split = np.random.rand(len(df))
train_list = rand_split < 0.8 # 80% for training
val_list = (rand_split >= 0.8) & (rand_split < 0.9) # 10% for validation
test_list = rand_split >= 0.9 # 10% for testing
# This dataset will be used to train the model.
data_train = df[train_list]
# This dataset will be used to validate the model.
data_val = df[val_list]
# This dataset will be used to test the model.
data_test = df[test_list]
###Output
_____no_output_____
###Markdown
Next, let's go ahead and rearrange our attributes so the first attribute is our target attribute `researchOutcome`. This is what AWS requires and what the XGBoost algorithm expects. You can read all about it in the [documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/xgboost.html#InputOutput-XGBoost).After that we will go ahead and create those files on our Notebook instance (stored as CSV) and then upload them to S3.
###Code
# Simply moves the researchOutcome attribute to the first position before creating CSV files
pd.concat([data_train['researchOutcome'], data_train.drop(['researchOutcome'], axis=1)], axis=1).to_csv('train.csv', index=False, header=False)
pd.concat([data_val['researchOutcome'], data_val.drop(['researchOutcome'], axis=1)], axis=1).to_csv('validation.csv', index=False, header=False)
# Next we can take the files we just stored onto our Notebook instance and upload them to S3.
boto3.Session().resource('s3').Bucket(bucket).Object('algorithms_lab/xgboost_train/train.csv').upload_file('train.csv')
boto3.Session().resource('s3').Bucket(bucket).Object('algorithms_lab/xgboost_validation/validation.csv').upload_file('validation.csv')
###Output
_____no_output_____
###Markdown
Step 3: Creating and training our model (XGBoost)This is where the magic happens. We will get the container image hosted in ECR for the XGBoost algorithm.
###Code
from sagemaker import image_uris
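# retrieve() resolves the regional ECR image URI for the built-in XGBoost
# algorithm (pinned here to algorithm version '1')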
container = image_uris.retrieve('xgboost', boto3.Session().region_name, '1')
###Output
_____no_output_____
###Markdown
Next, because we're training with the CSV file format, we'll create inputs that our training function can use as a pointer to the files in S3, which also specify that the content type is CSV.
###Code
s3_input_train = sagemaker.inputs.TrainingInput(s3_data='s3://{}/algorithms_lab/xgboost_train'.format(bucket), content_type='csv')
s3_input_validation = sagemaker.inputs.TrainingInput(s3_data='s3://{}/algorithms_lab/xgboost_validation'.format(bucket), content_type='csv')
###Output
_____no_output_____
###Markdown
Next we start building out our model by using the SageMaker Python SDK and passing in everything that is required to create an XGBoost model.First, I like to always create a specific job name.Next, we'll need to specify training parameters.1. The `xgboost` algorithm container1. The IAM role to use1. Training instance type and count1. S3 location for output data/model artifact1. [XGBoost Hyperparameters](https://docs.aws.amazon.com/sagemaker/latest/dg/xgboost_hyperparameters.html)Finally, after everything is included and ready, we can call the `.fit()` function, which specifies the S3 location for training and validation data.
###Code
# Create a training job name
job_name = 'ufo-xgboost-job-{}'.format(datetime.now().strftime("%Y%m%d%H%M%S"))
print('Here is the job name {}'.format(job_name))
# Here is where the model artifact will be stored
output_location = 's3://{}/algorithms_lab/xgboost_output'.format(bucket)
sess = sagemaker.Session()
xgb = sagemaker.estimator.Estimator(container,
role,
instance_count=1,
instance_type='ml.m4.xlarge',
output_path=output_location,
sagemaker_session=sess)
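# Hyperparameters: 'multi:softmax' makes XGBoost output the predicted class
# directly, num_class matches our 3 researchOutcome categories, and num_round
# sets the number of boosting rounds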
xgb.set_hyperparameters(objective='multi:softmax',
num_class=3,
num_round=100)
data_channels = {
'train': s3_input_train,
'validation': s3_input_validation
}
xgb.fit(data_channels, job_name=job_name)
print('Here is the location of the trained XGBoost model: {}/{}/output/model.tar.gz'.format(output_location, job_name))
###Output
_____no_output_____
###Markdown
After we train our model we can see the default evaluation metric in the logs. The `merror` is the multiclass classification error rate. It is calculated as (wrong cases)/(all cases). We want this to be minimized (so we want this to be super small). --- Step 4: Creating and training our model (Linear Learner)Let's evaluate the Linear Learner algorithm as well. Let's go ahead and randomize the data again and get it ready for the Linear Learner algorithm. We will also rearrange the columns so it is ready for the algorithm (it expects the first column to be the target attribute).
###Code
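# Aside -- a hypothetical illustration, not part of the lab: merror is simply
# (wrong cases) / (all cases). With made-up labels and predictions:
example_merror = np.mean(np.array([0, 1, 2, 2]) != np.array([0, 2, 2, 2]))
print('Example merror: {}'.format(example_merror))  # prints 0.25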
np.random.seed(0)
rand_split = np.random.rand(len(df))
train_list = rand_split < 0.8
val_list = (rand_split >= 0.8) & (rand_split < 0.9)
test_list = rand_split >= 0.9
# This dataset will be used to train the model.
data_train = df[train_list]
# This dataset will be used to validate the model.
data_val = df[val_list]
# This dataset will be used to test the model.
data_test = df[test_list]
# This rearranges the columns
cols = list(data_train)
cols.insert(0, cols.pop(cols.index('researchOutcome')))
data_train = data_train[cols]
cols = list(data_val)
cols.insert(0, cols.pop(cols.index('researchOutcome')))
data_val = data_val[cols]
cols = list(data_test)
cols.insert(0, cols.pop(cols.index('researchOutcome')))
data_test = data_test[cols]
# Breaks the datasets into attribute numpy.ndarray and the same for target attribute.
train_X = data_train.drop(columns='researchOutcome').values
train_y = data_train['researchOutcome'].values
val_X = data_val.drop(columns='researchOutcome').values
val_y = data_val['researchOutcome'].values
test_X = data_test.drop(columns='researchOutcome').values
test_y = data_test['researchOutcome'].values
###Output
_____no_output_____
###Markdown
Next, let's create a recordIO file for the training data and upload it to S3.
###Code
train_file = 'ufo_sightings_train_recordIO_protobuf.data'
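# RecordIO-wrapped protobuf is the packed binary format SageMaker's built-in
# algorithms can stream in Pipe mode; writing it into an in-memory BytesIO
# buffer lets us upload straight to S3 without a local temp file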
f = io.BytesIO()
smac.write_numpy_to_dense_tensor(f, train_X.astype('float32'), train_y.astype('float32'))
f.seek(0)
boto3.Session().resource('s3').Bucket(bucket).Object('algorithms_lab/linearlearner_train/{}'.format(train_file)).upload_fileobj(f)
training_recordIO_protobuf_location = 's3://{}/algorithms_lab/linearlearner_train/{}'.format(bucket, train_file)
print('The Pipe mode recordIO protobuf training data: {}'.format(training_recordIO_protobuf_location))
###Output
_____no_output_____
###Markdown
Let's create a recordIO file for the validation data and upload it to S3.
###Code
validation_file = 'ufo_sightings_validation_recordIO_protobuf.data'
f = io.BytesIO()
smac.write_numpy_to_dense_tensor(f, val_X.astype('float32'), val_y.astype('float32'))
f.seek(0)
boto3.Session().resource('s3').Bucket(bucket).Object('algorithms_lab/linearlearner_validation/{}'.format(validation_file)).upload_fileobj(f)
validate_recordIO_protobuf_location = 's3://{}/algorithms_lab/linearlearner_validation/{}'.format(bucket, validation_file)
print('The Pipe mode recordIO protobuf validation data: {}'.format(validate_recordIO_protobuf_location))
###Output
_____no_output_____
###Markdown
---Alright, we are good to go for the Linear Learner algorithm. Let's get everything we need from the ECR repository to call it.
###Code
from sagemaker import image_uris
container = image_uris.retrieve('linear-learner', boto3.Session().region_name, '1')
# Create a training job name
job_name = 'ufo-linear-learner-job-{}'.format(datetime.now().strftime("%Y%m%d%H%M%S"))
print('Here is the job name {}'.format(job_name))
# Here is where the model-artifact will be stored
output_location = 's3://{}/algorithms_lab/linearlearner_output'.format(bucket)
###Output
_____no_output_____
###Markdown
Next we start building out our model by using the SageMaker Python SDK and passing in everything that is required to create a Linear Learner model.First, I like to always create a specific job name.Next, we'll need to specify training parameters.1. The `linear-learner` algorithm container1. The IAM role to use1. Training instance type and count1. S3 location for output data/model artifact1. [The input type (Pipe)](https://docs.aws.amazon.com/sagemaker/latest/dg/linear-learner.html)1. [Linear Learner Hyperparameters](https://docs.aws.amazon.com/sagemaker/latest/dg/ll_hyperparameters.html)Finally, after everything is included and ready, we can call the `.fit()` function, which specifies the S3 location for training and validation data.
###Code
print('The feature_dim hyperparameter needs to be set to {}.'.format(data_train.shape[1] - 1))
sess = sagemaker.Session()
# Set up the LinearLearner algorithm from the ECR container
linear = sagemaker.estimator.Estimator(container,
role,
instance_count=1,
instance_type='ml.c4.xlarge',
output_path=output_location,
sagemaker_session=sess,
input_mode='Pipe')
# Setup the hyperparameters
linear.set_hyperparameters(feature_dim=22, # number of attributes (minus researchOutcome); should match the value printed above
                           predictor_type='multiclass_classifier', # type of classification problem
                           num_classes=3) # number of classes in our researchOutcome (unexplained, explained, probable)
# Launch a training job. This method calls the CreateTrainingJob API call
data_channels = {
'train': training_recordIO_protobuf_location,
'validation': validate_recordIO_protobuf_location
}
linear.fit(data_channels, job_name=job_name)
print('Here is the location of the trained Linear Learner model: {}/{}/output/model.tar.gz'.format(output_location, job_name))
###Output
_____no_output_____ |
sierra_leone.ipynb | ###Markdown
SEIR model for the 2014 outbreak in Sierra Leone.Numbers are taken from [this article](https://doi.org/10.1371/currents.outbreaks.91afb5e0f279e7f29e7056095255b288).
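For reference, a SEIR model of this general shape (presumably what `run_model` implements; this is inferred from the parameter names below, not stated in the source) is $$\frac{dS}{dt} = -\beta(t)\frac{SI}{N}, \qquad \frac{dE}{dt} = \beta(t)\frac{SI}{N} - \sigma E, \qquad \frac{dI}{dt} = \sigma E - \gamma I,$$ where $1/\sigma$ is the incubation period, $1/\gamma$ is the infectious period, and the transmission rate decays once control measures take effect: $\beta(t) = \beta_0 e^{-\kappa (t - \tau)}$.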
###Code
%matplotlib notebook
import dateutil.parser
from matplotlib import pyplot as plt
import pandas as pd
from model import run_model
from plotting import (
plot_model_and_raw_data,
plot_model_evolution,
plot_model_evolution_item,
)
plt.style.use('seaborn-notebook')
raw = pd.read_csv('data/ebola_sierra_leone.csv')
raw['Date'] = [dateutil.parser.parse(i) for i in raw['Date'].values]
time_zero = dateutil.parser.parse('23 Apr 2014')
parameters = {
'infectiousness': 5.61,
'incubation': 5.3,
'r0': 2.53,
'fatality': 0.48,
'kappa': 0.0097,
'tau': 0,
'time_zero': time_zero,
}
parameters['beta_0'] = parameters['r0'] / parameters['infectiousness']
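# beta_0 converts R0 into the initial transmission rate:
# R0 = beta_0 * (infectious period), so beta_0 = R0 / infectiousness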
parameters
population = 7.6 * 10**6
initial_values = [population, 0, 1, 0, 0, 1]
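# compartment ordering is defined by run_model; presumably [S, E, I, R, D, C],
# i.e. one initial infectious case in a fully susceptible population
# (an assumption -- the ordering is not documented here)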
time_span = [0, 350]
result, _, time_date, model = run_model(time_span, parameters, initial_values)
plot_model_and_raw_data(
time_date,
model,
raw,
max_model_date=dateutil.parser.parse('01 Sep 2014')
)
plot_model_evolution(time_date, model)
plot_model_evolution_item(time_date, model, 'infected')
###Output
_____no_output_____ |
notebooks/.ipynb_checkpoints/similar-duplicate-images-in-aptos-data-checkpoint.ipynb | ###Markdown
Calculating the Hash, Shape, Mode, Length and Ratio of each image
###Code
from PIL import Image   # Image.open is used below
import imagehash        # perceptual hashing (phash)
def getImageMetaData(file_path):
    """Return the (width, height) size, color mode, and perceptual hash of an image."""
    with Image.open(file_path) as img:
        img_hash = imagehash.phash(img)
        return img.size, img.mode, img_hash
def get_train_input():
train_input = df.copy()
m = train_input.path.apply(lambda x: getImageMetaData(x))
train_input["Hash"] = [str(i[2]) for i in m]
train_input["Shape"] = [i[0] for i in m]
train_input["Mode"] = [str(i[1]) for i in m]
train_input["Length"] = train_input["Shape"].apply(lambda x: x[0]*x[1])
train_input["Ratio"] = train_input["Shape"].apply(lambda x: x[0]/x[1])
img_counts = train_input.path.value_counts().to_dict()
train_input["Id_Count"] = train_input.path.apply(lambda x: img_counts[x])
return train_input
train_input = get_train_input()
train_input1 = train_input[['Hash']].copy() # Getting the Hash column (copy avoids SettingWithCopyWarning)
train_input1['New'] = 1 # Creating a dummy column of 1s to count rows
train_input2 = train_input1.groupby('Hash').count().reset_index() # Grouping by Hash to aggregate at Hash level
train_input2 = train_input2[train_input2['New']>1] # Keeping only hashes that occur multiple times
train_input2.shape # Checking the shape
train_input2 = train_input2.sort_values('Hash') # Sorting the data by Hash
train_input2.head(5) # Checking the top 5 records
train_input[train_input['Hash']=='808d0f3513f33f2d'] # Filtering a single Hash from the master data
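# Hypothetical extension (not in the original notebook): imagehash objects
# support subtraction, which returns the Hamming distance between two hashes,
# so small nonzero distances can flag near-duplicates, not just exact matches.
h1 = imagehash.hex_to_hash('808d0f3513f33f2d')
h2 = imagehash.hex_to_hash('808d0f3513f33f2d')
print(h1 - h2)  # 0 differing bits -> identical perceptual hashes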
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from IPython import display
import time
%matplotlib inline
PATH = "../input/train_images/1632c4311fc9.png"
image = mpimg.imread(PATH) # read one image of the matching-hash pair
plt.imshow(image);
PATH = "../input/train_images/a75bab2463d4.png"
image = mpimg.imread(PATH) # read the other image with the same hash
plt.imshow(image);
###Output
_____no_output_____ |
labs/4-12/4-12_Morality_Sentiment_Analysis.ipynb | ###Markdown
[LEGALST-190] Lab 4/12: Morality and Sentiment Analysis This lab will cover morality and sentiment analysis using the *Moral Foundations Theory* with dictionary-based analysis, connecting to topic modeling and classification ideas from previous labs. Table of Contents[The Data](#section data)[Goal and Question](#section goal)1 - [Text Pre-processing](#section 1)2 - [Polarity](#section 2)3 - [Moral Foundations Theory](#section 3)4 - [Non-negative matrix factorization](#section 4)**Dependencies:**
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
import json
from sklearn.decomposition import NMF, LatentDirichletAllocation
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
import nltk
from nltk.stem.snowball import SnowballStemmer
import seaborn as sns
!pip install textblob
from textblob import TextBlob
###Output
_____no_output_____
###Markdown
---- The DataFor this lab, we'll use the Old Bailey dataset, something you all should be familiar with now. The size of the dataset is also rather large so we will compare two year-long periods, one from before 1827 and one after. Read the question to better understand why we look at 1827. Goal and QuestionThe goal of today's lab is to explore sentiment analysis with three different approaches โ [polarity scoring](section 2), [topic-specific dictionary methods](section 3), and [topic modeling](section 4).We'll look at sentiment in the context of the following question: **Did the way judges, prosecutors, and witnesses talk about moral culpability change after the Bloody Code was mostly repealed in 1827 (at the leading edge of a wave of legal reform in England)?***Note: this is a question that could encompass an entire research project. Today's lab uses a very small subset of data due to datahub memory limitations, and skips over many of the steps needed for truly robust conclusions. *Something to think about: What are some things you would need to consider before answering this question?---- Section 1: Text Pre-processing Before we startThis dataset we are about to look at is incredibly large, so to avoid crashing our datahub kernel, we only consider two years: 1822 and 1832. These two years were chosen as periods that were equally far from 1827 (when the Bloody Code was mostly repealed), while not being so far from each other that we'd expect to see major language usage change due only to time. ---- Getting startedLet's get working with the data.
###Code
# contains Old Bailey trial data from 1822 and 1832
old_bailey = pd.read_csv('data/obc_1822_1832.csv', index_col='trial_id')
# select only the columns we need for this lab
old_bailey = old_bailey.loc[:, ['year', 'transcript']]
old_bailey.head()
###Output
_____no_output_____
###Markdown
Awesome! We now have data we can work with. Before we start anything, we must clean the text!Just to review, we want to process our text by:1) Lowercasing the words2) Cleaning up punctuation3) Splitting into individual words4) Stemming the word tokensFor the sake of time (and to get to the good stuff), we've provided the pre-processing code below. This is a big data set, so the code will take up to a minute to run.
###Code
# pre-process the data
lower_cased = old_bailey['transcript'].str.lower()
punct_re = r'[^\w\s]'
lower_no_punc = lower_cased.str.replace(punct_re, ' ')
tokens = lower_no_punc.str.split()
old_bailey['tokens'] = tokens
stemmer = SnowballStemmer('english')
stem_lists = []
for token_list in old_bailey['tokens']:
stem_lists.append([stemmer.stem(wd) for wd in token_list])
old_bailey['stemmed_tokens'] = stem_lists
old_bailey.head()
###Output
_____no_output_____
###Markdown
---- Section 2: Polarity One way to measure the tone of a text is to look at the text **polarity**: a measure of how positive or negative it is perceived to be. For example, a sentence like "I love Berkeley!" would be considered positive, while a sentence like "Stanford is terrible!" would be negative. And, because polarity is represented as a scale, some words have stronger positive or negative sentiment than others: "I like data science" is positive, but not as positive as "I love data science."We will use the [TextBlob](https://textblob.readthedocs.io/en/dev/quickstart.html#sentiment-analysis) tools to analyze the sentiment of Old Bailey. TextBlob provides access to many common text-processing operations, and includes a lexicon and rule-based sentiment analysis tool.A TextBlob is created around a string of text:
###Code
# creates a sentiment analyzer
blob = TextBlob("This is a super exciting, totally awesome test sentence.")
blob
###Output
_____no_output_____
###Markdown
We can access the sentiment by using `.sentiment`.
###Code
blob.sentiment
###Output
_____no_output_____
###Markdown
`sentiment` returns two values: the **polarity** and the **subjectivity**. The polarity ranges between -1 and 1 where -1 is a very negative text and 1 is a very positive text. Subjectivity ranges between 0 and 1 where 0 is a very objective text and 1 is a very subjective text (i.e. one that can be interpreted many different ways). You can get the polarity by using `.polarity`.
###Code
blob.sentiment.polarity
###Output
_____no_output_____
###Markdown
Polarity is calculated fairly simply: TextBlob accesses a dictionary of words that have been assigned polarity and subjectivity scores, looks up each word in the given text, and averages over the sentence. It also employs a few rules, such as changing the polarity of a word that comes after a negation.
###Code
happy = TextBlob('Happy')
print(happy.sentiment.polarity)         # 0.8 in TextBlob's lexicon
negation = TextBlob('Not')
print(negation.sentiment.polarity)      # 0.0 -- 'not' has no polarity of its own
negated_happy = TextBlob('Not happy')
print(negated_happy.sentiment.polarity) # -0.4: negation multiplies polarity by -0.5
###Output
_____no_output_____
###Markdown
**QUESTION:** Try calculating the polarity scores of a few of your own sentences in the cell below.
###Code
# test the polarity scoring for different sentences
my_blob = ...
...
###Output
_____no_output_____
###Markdown
Next, we want to get the average polarity for each transcript. **EXERCISE:** define a function that will take in a string of text and return the polarity of that text.
###Code
def get_polarity(text):
"""Return the polarity of TEXT"""
...
return ...
###Output
_____no_output_____
###Markdown
**EXERCISE**: Using `.apply` and your `get_polarity` function, get the polarity of every transcript in the Old Bailey data.
###Code
polarities = ...
# add the polarities as a column
old_bailey['polarity'] = polarities
old_bailey.head()
###Output
_____no_output_____
###Markdown
**QUESTION:** - What was the most negative transcript/transcripts?- What was the most positive transcript/transcripts?
###Code
# find the transcript with the highest polarity
most_pos = ...
most_pos
# find the transcript with the lowest polarity
most_neg = ...
most_neg
###Output
_____no_output_____
###Markdown
**EXERCISE:** Let's take a look at violin plots of these two datasets to better compare how the average polarity is distributed for each of the two years, before and after 1827.To show both years at once, it's easiest to use the Seaborn (abbreviated as `sns`) visualization library's `violinplot` function. `y` is set to the name of the variable (a string) whose distributions we want to see. `x` is set to the name of the variable (also a string) that we want to compare distributions for. `data` is set to the dataframe (not a string) with all the values.
###Code
# uncomment the next line and fill in the code to create the violin plots
#sns.violinplot(x=..., y=..., data=...)
###Output
_____no_output_____
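###Markdown
A possible call (a sketch assuming the `year` and `polarity` columns built above, and that seaborn was imported as `sns` earlier in the lab):
###Code
# distribution of transcript polarity for each year
sns.violinplot(x='year', y='polarity', data=old_bailey)
###Output
_____no_output_____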
###Markdown
**QUESTION:** What does this plot show us? What are some advantages to using polarity as a way to measure moral tone? What are some issues with this approach? Consider also how these answers might change for a different data set. *Write your answer here.* ---- Section 3: Moral Foundations Theory Another approach is to create specialized dictionaries containing specific words of interest to try to analyze sentiment from a particular angle (i.e. use a **dictionary method**). One set of researchers did just that from the perspective of [Moral Foundations Theory](http://moralfoundations.org/). We will now use their dictionary to see if we can understand more about the moral tone of the Old Bailey transcripts than we could with general polarity. You should be doing something like this for your homework. We will be using a provided moral foundations dictionary.
###Code
with open('data/haidt_dict.json') as json_data:
mft_dict = json.load(json_data)
###Output
_____no_output_____
###Markdown
Moral Foundations Theory posits that there are five (with an occasional sixth) innate, universal psychological foundations of morality, and that those foundations shape human cultures and institutions (including legal ones). The keys of the dictionary correspond to the five foundations.
###Code
#look at the keys of the dictionary provided
keys = mft_dict.keys()
list(keys)
###Output
_____no_output_____
###Markdown
And the values of the dictionary are lists of words associated with each foundation.
###Code
mft_dict[list(keys)[0]] #one example of the values provided for the first key
###Output
_____no_output_____
###Markdown
Calculating Percentages In this approach, we'll use the frequency of Moral Foundations-related words as a measure of how the transcripts talk about morality and see if there's a difference between pre- and post-1827 trends. As a first step, we need to know the total number of words in each transcript. **EXERCISE:** Add a column to `old_bailey` with the number of words corresponding to each transcript.
###Code
# create a new column called 'total_words'
old_bailey['total_words'] = ...
old_bailey.head()
###Output
_____no_output_____
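###Markdown
One possible solution (here we count the stemmed tokens, which is consistent with the matching loop below; counting raw tokens would also be defensible):
###Code
# number of (stemmed) words in each transcript
old_bailey['total_words'] = old_bailey['stemmed_tokens'].apply(len)
old_bailey.head()
###Output
_____no_output_____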
###Markdown
Next, we need to calculate the number of matches to entries in our dictionary for each foundation for each transcript. Run the next cell to add a new column to `old_bailey` for each foundation, showing the number of word matches. This cell will also likely take some time to run (no more than a minute). Note that by now, you have the skills to write all the code in the next cell; we're just giving it to you because it's long and fiddly, and writing nested for-loops is not the focus of this lab. Make sure you know what it does before you move on, though.
###Code
# Will take a bit of time to run due to the large size.
# do the following code for each foundation
for foundation in mft_dict.keys():
# create a new, empty column
num_match_words = np.zeros(len(old_bailey))
stems = mft_dict[foundation]
# do the following code for each foundation word
for stem in stems:
# find related word matches
        wd_count = np.array([sum([wd == stem for wd in transcript]) for transcript in old_bailey['stemmed_tokens']])
# add the number of matches to the total
num_match_words += wd_count
# create a new column for each foundation with the number of related words per transcript
old_bailey[foundation] = num_match_words
old_bailey.head()
###Output
_____no_output_____
###Markdown
**EXERCISE:** The columns for each foundation currently contain the number of words related to that foundation for each of the trials. Calculate the *percentage* of foundation words per trial by dividing the number of matched words by the number of total words and multiplying by 100.
###Code
# do this for each foundation column
for foundation in mft_dict.keys():
old_bailey[foundation] = ...
old_bailey.head()
###Output
_____no_output_____
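###Markdown
One way to fill in the loop above:
###Code
# convert each foundation's match count to a percentage of total words
for foundation in mft_dict.keys():
    old_bailey[foundation] = old_bailey[foundation] / old_bailey['total_words'] * 100
old_bailey.head()
###Output
_____no_output_____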
###Markdown
Let's compare the average percentage of foundation words per transcript for the two years, 1822 and 1832.**EXERCISE**: Create a dataframe that only has columns for the five foundations plus the year. Then, use the pandas dataframe method `groupby` to group rows by the year, and call the `mean` function on the `groupby` output to get the averages for each foundation.
###Code
# the names of the columns we want to keep
mft_columns = ['authority/subversion', 'care/harm', 'fairness/cheating', 'loyalty/betrayal',
'sanctity/degradation', 'year']
# create a data frame with only the above columns included
mft_df = ...
# groups the rows of mft_df by year, then take the mean
foundation_avgs = ...
foundation_avgs
###Output
_____no_output_____
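###Markdown
A possible solution using the column list defined above:
###Code
# keep only the foundation columns plus the year, then average by year
mft_df = old_bailey[mft_columns]
foundation_avgs = mft_df.groupby('year').mean()
foundation_avgs
###Output
_____no_output_____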
###Markdown
Next, create a bar graph. The simplest way is to call `.plot.barh()` on your dataframe of the averages. Also try calling `.transpose()` on your averages dataframe, then making a bar graph of that. The transpose function flips the rows and columns and can make it easier to compare the percentages.
###Code
# create a bar graph
...
###Output
_____no_output_____
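###Markdown
For example (transposing first so each foundation gets its own group of bars):
###Code
# horizontal bar chart of average foundation-word percentages per year
foundation_avgs.transpose().plot.barh()
###Output
_____no_output_____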
###Markdown
**QUESTION:** What do you see from the bar graphs you created? Why would this be a good approach to answering the question of how talk about morality changed between these two periods? What are some limitations of this approach (Hint: look at the values on the graphs you calculated, and remember: these are *percentages*, not proportions)? *Write your answer here.* ---- Section 4: Non-negative matrix factorization In this section, you'll use topic modeling algorithms, something you touched on in the 4/10 lab earlier this week, to help look for patterns related to tone.On Tuesday, you explored Latent Dirichlet Allocation (LDA) in gensim to look for topics in a corpus. Non-negative matrix factorization (NMF), not included in gensim, is another such way to look for topics in unstructured text data. The two methods differ in what kinds of math they use 'under the hood': LDA relies on probabilistic graphical modeling, while NMF uses linear algebra. We want to generate the topics found for 1822 and 1832 trials, look for topics related to tone or morality, and see if there's a difference between the two.Run the cell below to make two collections containing the trial transcripts for each year.
###Code
# trial transcripts for 1822
transcripts_1822 = old_bailey[old_bailey['year'] == 1822]['transcript']
# trial transcripts for 1832
transcripts_1832 = old_bailey[old_bailey['year'] == 1832]['transcript']
###Output
_____no_output_____
###Markdown
We'll start by looking at 1822. The following cell creates the tf-idf vectorizer, fits the text data, and assigns the list of feature names (i.e. the words in the vocabulary) to `tfidf_feature_names_1822`.Check out the [documentation for TfidfVectorizer](http://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.TfidfVectorizer.html) if you need a refresher on what it does.
###Code
# create the vectorizer
tfidf_vectorizer_1822 = TfidfVectorizer(max_df=0.95, min_df=2, max_features=1000, stop_words='english')
# fit the data
tfidf_1822 = tfidf_vectorizer_1822.fit_transform(transcripts_1822)
# get the feature names
tfidf_feature_names_1822 = tfidf_vectorizer_1822.get_feature_names()
###Output
_____no_output_____
###Markdown
**EXERCISE:** Create the TfidfVectorizer, fit_transform the data, and get the feature names for 1832.
###Code
# create the vectorizer
tfidf_vectorizer_1832 = ...
# fit the data
tfidf_1832 = ...
# get the feature names
tfidf_feature_names_1832 = ...
###Output
_____no_output_____
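###Markdown
A possible solution mirroring the 1822 cell above (note that `get_feature_names` was renamed `get_feature_names_out` in newer scikit-learn releases):
###Code
# create the vectorizer, fit the 1832 transcripts, and grab the vocabulary
tfidf_vectorizer_1832 = TfidfVectorizer(max_df=0.95, min_df=2, max_features=1000, stop_words='english')
tfidf_1832 = tfidf_vectorizer_1832.fit_transform(transcripts_1832)
tfidf_feature_names_1832 = tfidf_vectorizer_1832.get_feature_names()
###Output
_____no_output_____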
###Markdown
As mentioned previously, these algorithms are not able to automatically determine the number of topics, so this value must be set when running the algorithm. Initialising NMF with 'nndsvd' rather than random initialisation improves the time it takes for NMF to converge.`random_state` gives the seed for the random number generator to use: this lets us reproduce our results in the future.
###Code
num_topics = 20
# Run NMF for 1822
nmf_1822 = NMF(n_components=num_topics, random_state=1, init='nndsvd').fit(tfidf_1822)
###Output
_____no_output_____
###Markdown
**EXERCISE:** Run NMF using `num_topics` for the number of components on the data from 1832.
###Code
# Run NMF for 1832
nmf_1832 = ...
###Output
_____no_output_____
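###Markdown
A possible solution mirroring the 1822 call above:
###Code
# Run NMF for 1832 with the same settings as for 1822
nmf_1832 = NMF(n_components=num_topics, random_state=1, init='nndsvd').fit(tfidf_1832)
###Output
_____no_output_____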
###Markdown
We've provided you with the function to display the topics found by each NMF model.
###Code
def display_topics(model, feature_names, num_top_words):
"""Displays NUM_TOP_WORDS topics for MODEL """
for topic_idx, topic in enumerate(model.components_):
print("Topic %d:" % (topic_idx))
print(" ".join([feature_names[i]
for i in topic.argsort()[:-num_top_words - 1:-1]]))
# the number of words to display per topic
num_top_words = 10
# display the topics for 1822
display_topics(nmf_1822, tfidf_feature_names_1822, num_top_words)
# display the topics for 1832
display_topics(nmf_1832, tfidf_feature_names_1832, num_top_words)
###Output
_____no_output_____ |
docs/notebooks/Starters.ipynb | ###Markdown
For Starters The basic idea of a starter is:> - **pick a destination** in the _JupyterLab File Browser_> - **click a button** in the _JupyterLab Launcher_> - **see useful files** A slightly more accurate version is:> - configure via> [traitlets](https://jupyter-notebook.readthedocs.io/en/stable/config.html)> - advertise to JupyterLab via the [REST API](./REST%20API.ipynb)> - display in the _JupyterLab Launcher_> - **click a button** in the _JupyterLab Launcher_> - or immediately start with a [Starter Tree URL](#Starter-Tree-URL)> - zero or more (but usually one) times:> - gather more information from the user via> [react-jsonschema-form](https://react-jsonschema-form.readthedocs.io)> - perform further processing> - copy files via the> [Contents API](https://jupyter-notebook.readthedocs.io/en/stable/extending/contents.html)> - **see useful files** in the _JupyterLab File Browser_> - run _JupyterLab Commands_ to do other things to JupyterLab Which of these steps a particular starter performs depends primarily on its type. Types of Starters Copy> `"type": "copy"`>> `"src": ""`The simplest starter, `copy`, just... copies. It can copy a single file, or a directory of files (and subdirectories). The `src` attribute tells the starter where to get the files.
###Code
%%dot
digraph g { compound=true layout=dot rankdir=TB
node[shape=none fontname="sans-serif"]
graph[fontname="sans-serif" fontcolor="grey" color="none" fillcolor="#eeeeee" style=filled]
label="a notional execution of a copy starter"
subgraph cluster_files { label="Your Files"
files
}
subgraph cluster_server { label="Notebook Server"
get[label="/starters" fontname=monospace]
post[label="/starters/{:name}/{:path}" fontname=monospace]
contents
}
subgraph cluster_lab { label="JupyterLab"
launcher
}
get -> launcher[label=โ ]
launcher -> post[label=โก]
post -> contents[label=โข]
contents -> files[label=โฃ]
files -> contents[label=โค]
contents -> post[label=โฅ]
post -> launcher[label=โฆ]
launcher -> launcher[label=โง]
}
###Output
_____no_output_____
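###Markdown
As an illustration of the `type` and `src` attributes described above (not from the original docs), a copy starter might be registered via traitlets in `jupyter_notebook_config.py`; the `extra_starters` trait name and keys here follow the jupyter-starters documentation, so verify them against your installed version:
###Code
# hypothetical configuration sketch; adjust names and paths for your setup
c.StarterManager.extra_starters = {
    "example-copy": {
        "type": "copy",                       # the starter type described above
        "label": "Example Files",             # text shown in the JupyterLab Launcher
        "description": "Copy a directory of example files",
        "src": "examples",                    # file or directory to copy
    }
}
###Output
_____no_output_____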
###Markdown
`copy`, like all the starters, makes use of the [Contents API](https://jupyter-notebook.readthedocs.io/en/stable/extending/contents.html) directly. Existing files will _not_ be overwritten. Python> `"type": "python"`>> `"callable": ""`A Python Starter is a function. This type has the fewest limitations, as it has full access to the `StarterManager` (and by extension, its `parent`, the `NotebookApp`).This powers both the [Cookiecutter](#Cookiecutter) and the [Notebook](#Notebook) starters, with the latter directly using the notebook server's _Kernel Manager_ to start short-lifespan kernels.
###Code
%%dot
digraph g { compound=true layout=dot rankdir=TB
node[shape=none fontname="sans-serif"]
graph[fontname="sans-serif" fontcolor="grey" color="none" fillcolor="#eeeeee" style=filled]
label="a notional execution of a python starter"
subgraph cluster_files { label="Your Files"
files
}
subgraph cluster_server { label="Notebook Server"
get[label="/starters" fontname=monospace]
post[label="/starters/{:name}/{:path}" fontname=monospace]
contents
callable
}
subgraph cluster_lab { label="JupyterLab"
launcher
}
get -> launcher[label=โ ]
launcher -> post[label=โก]
post -> callable[label=โข]
callable -> contents[label=โฃ]
contents -> files[label=โค]
files -> contents[label=โฅ]
contents -> callable[label=โฆ]
callable -> post[label=โง]
post -> launcher[label=โจ]
launcher -> launcher[label=โฉ]
}
###Output
_____no_output_____
###Markdown
Notebook> `"type": "notebook"`A notebook can be a starter. Each starter run gets its own, private kernel which canpersist between interactions with the user. Communication with the server manager ishandled through manipulating a copy of the notebook, specfically the notebook metadata.The advantages of this approach over the Python starter is:- works with **any installed kernel**- **state is maintained** between successive re-executions- `jupyterlab-starters` provides **authoring support** for editing and validating the starter
###Code
%%dot
digraph g { compound=true layout=dot rankdir=TB title="woooo"
node[shape=none fontname="sans-serif"]
graph[fontname="sans-serif" fontcolor="grey" color="none" fillcolor="#eeeeee" style=filled]
label="a notional execution of a notebook starter"
subgraph cluster_files { label="Your Files"
files
}
subgraph cluster_server { label="Notebook Server"
get[label="/starters" fontname=monospace]
post[label="/starters/cookiecutter/{:path}" fontname=monospace]
contents
kernel
tmpdir
}
subgraph cluster_lab { label="JupyterLab"
launcher
form1[label="initial form"]
form2[label="dynamic form"]
}
get -> launcher[label=โ ]
launcher -> form1[label=โก]
form1 -> post[label=โข]
post -> tmpdir[label=โฃ]
tmpdir -> post[label=โค]
tmpdir -> kernel[label=โฅ]
kernel -> tmpdir[label=โฆ]
post -> form2[label=โง]
form2 -> post[label=โจ]
post -> tmpdir[label=โฉ]
tmpdir -> kernel[label=โช]
kernel -> tmpdir[label=โซ]
tmpdir -> contents[label=โฌ]
contents -> files[label=โญ]
files -> contents[label=โฎ]
contents -> post[label=โฏ]
post -> launcher[label=โฐ]
launcher -> launcher[label=โฒ]
}
###Output
_____no_output_____
###Markdown
Built-ins Cookiecutter The cookiecutter starter will be available if `cookiecutter` is [installed](./Users.ipynb#Cookiecutter) in the same Python environment as the `notebook` server.> Additionally, if available, `importlib_metadata` will be used to list the (previously)> curated list of community-contributed cookiecutters. It is now recommended to search> for them directly on GitHub by> [topic](https://github.com/topics/cookiecutter-template) or> [advanced search](https://github.com/search?utf8=%E2%9C%93&q=path%3A%2F+filename%3Acookiecutter.json).One of the original motivations for _Jupyter Starters_ was a way to provide a convenient, consistent, web-based experience for the [cookiecutter](https://cookiecutter.rtfd.io) ecosystem. Briefly, a cookiecutter is:> - a repository, zip archive, or directory that contains> - `cookiecutter.json`> - a (potentially nested) directory that uses> [Jinja2](https://jinja.palletsprojects.com) to describe file names and contents What they may lack in dynamism, they make up for in consistency and robustness.
###Code
%%dot
digraph g { compound=true layout=dot rankdir=TB title="woooo"
node[shape=none fontname="sans-serif"]
graph[fontname="sans-serif" fontcolor="grey" color="none" fillcolor="#eeeeee" style=filled]
label="a notional execution of the cookiecutter starter"
subgraph cluster_files { label="Your Files"
files
}
subgraph cluster_server { label="Notebook Server"
get[label="/starters" fontname=monospace]
post[label="/starters/cookiecutter/{:path}" fontname=monospace]
contents
cookiecutter
}
subgraph cluster_lab { label="JupyterLab"
launcher
form1[label="template form"]
form2[label="cookiecutter form"]
}
get -> launcher[label=โ ]
launcher -> form1[label=โก]
form1 -> post[label=โข]
post -> cookiecutter[label=โฃ]
cookiecutter -> git[label=โค]
git -> cookiecutter[label=โฅ]
cookiecutter -> post[label=โง]
post -> form2[label=โจ]
form2 -> post[label=โฉ]
post -> cookiecutter[label=โช]
cookiecutter -> contents[label=โซ]
contents -> files[label=โฌ]
files -> contents[label=โญ]
contents -> post[label=โฎ]
post -> launcher[label=โฏ]
launcher -> launcher[label=โฐ]
}
###Output
_____no_output_____ |
notebooks/Tutorial_09_Xarray_Compare_two_gridded_datasets.ipynb | ###Markdown
SST in Hurricane Irene Authors* [Dr Chelle Gentemann](mailto:[email protected]) - Earth and Space Research, USA* [Dr Marisol Garcia-Reyes](mailto:[email protected]) - Farallon Institute, USA * Podaacpy file search added by [Lewis John McGibbney](mailto:[email protected]) - JPL, NASA, USA ------------------- Import python packages* You are going to want numpy, pandas, matplotlib.pyplot, podaacpy, and xarray* This cell also imports a parser so that a login file can be read to use podaacpy
###Code
import warnings
warnings.simplefilter('ignore') # filter some warning messages
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import xarray as xr
import cartopy.crs as ccrs
#This is for reading in and parsing the login file credentials
from pathlib import Path
import configparser
from lxml import objectify
#The podaacpy api
import podaac.podaac as podaac
from podaac import drive as podaacdrive
import podaac.podaac_utils as putil
# then create an instance of the Podaac class
p = podaac.Podaac()
#read in the login credentials and pass them to podaac drive
with open('./podaac.ini', 'r') as f:
config = configparser.ConfigParser()
config.read_file(f)
d = podaacdrive.Drive(None,
config['drive']['urs_username'],
config['drive']['urs_password'])
###Output
_____no_output_____
###Markdown
Analysis of SSTs during Hurricane Irene Irene was a massive storm, with tropical-storm-force winds extending outward 300 miles (485 km). The storm was also slow moving as it traversed the Mid-Atlantic. Irene claimed at least 48 lives and caused over 7 billion U.S. dollars in damages in the U.S. and 3.1 billion U.S. dollars of damage in the Caribbean (source: https://www.ncdc.noaa.gov/sotc/tropical-cyclones/201113).For this tutorial we will use podaacpy and a virtually aggregated dataset to search for SST and Chl-a during Hurricane Irene and look at the change in upper ocean heat content and chlorophyll-a. https://www.livescience.com/30759-how-a-hurricane-impacts-the-ocean.html Read in storm data from a THREDDS server- Note: the THREDDS server has disappeared, so the url is left in the code but commented out and replaced with a local copy of the data.
###Code
#url = 'http://mrtee.europa.renci.org:8080/thredds/dodsC/DataLayers/IBTrACS.NA.v04r00.nc?name[0:1:2211],time[0:1:2211][0:1:359],lat[0:1:2211][0:1:359],lon[0:1:2211][0:1:359]'
url = './../data/IBTrACS.NA.v04r00.nc'
ds_storm=xr.open_dataset(url)
irene = ds_storm.isel(storm=2092).isel(date_time=slice(0,78))
plt.scatter(irene.lon,irene.lat,c=irene.time.dt.dayofyear)
print('storm start and end:', irene.time[0].data,irene.time[-1].data)
###Output
_____no_output_____
###Markdown
For plotting the data, try using cartopy: `ax = plt.axes(projection=ccrs.Orthographic(-70, 30))`. You will need to add `transform=ccrs.PlateCarree()` to your plotting routine: `ax.scatter(irene.lon,irene.lat,c=irene.time.dt.dayofyear,transform=ccrs.PlateCarree())` `ax.set_extent([-82, -50, 10, 60], crs=ccrs.PlateCarree())` `ax.coastlines('50m')` `ax.stock_img()`
###Code
# try plotting here with land mask
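# one way to do it, following the cartopy snippet in the markdown above
# (an illustrative sketch; tweak the extent to taste)
ax = plt.axes(projection=ccrs.Orthographic(-70, 30))
ax.scatter(irene.lon, irene.lat, c=irene.time.dt.dayofyear, transform=ccrs.PlateCarree())
ax.set_extent([-82, -50, 10, 60], crs=ccrs.PlateCarree())
ax.coastlines('50m')
ax.stock_img()
plt.show()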
start_time = '2011-08-20T00:00:00Z'
end_time = '2011-09-05T23:59:59Z'
start_time2 = '2011-08-15'
end_time2 = '2011-09-25'
minlat,maxlat = 15,45
minlon,maxlon = -100,-40
#dataset_id = 'PODAAC-GHCMC-4FM03' #CMC SST looked up on podaac website, on dataset page this is the persistant id
#dataset_id = 'PODAAC-GHCMC-4FM02' #CMC SST looked up on podaac website, on dataset page this is the persistant id
#dataset_id = 'PODAAC-GHGMR-4FJ04' #MUR SST
#dataset_id = 'PODAAC-GHGDM-4FD02' #DMI SST
#dataset_id = 'PODAAC-GHGPB-4FO02' #ospo sst
dataset_id = 'PODAAC-GHCMC-4FM02' #CMC SST
gresult = p.granule_search(dataset_id=dataset_id,
start_time=start_time,
end_time=end_time,
items_per_page='100')
urls = putil.PodaacUtils.mine_opendap_urls_from_granule_search(gresult)
urls_sst = [w[:-5] for w in urls]  # remove '.html' from urls
urls_sst = [w.replace('-tools.jpl.nasa.gov/drive/files/', '-opendap.jpl.nasa.gov/opendap/') for w in urls_sst]
print('num files:',len(urls_sst))
ds_sst = xr.open_dataset(urls_sst[0])
subset_sst = ds_sst.sel(lat=slice(minlat,maxlat)
,lon=slice(minlon,maxlon))
print('opening:', urls_sst[0],subset_sst)
#subset_sst.analysed_sst.plot()
fig, axes = plt.subplots(ncols=2,figsize=[12,4])
subset_sst.analysed_sst[0,:,:].plot(ax=axes[0])
axes[0].scatter(irene.lon[0:78],irene.lat[0:78],c=irene.time.dt.dayofyear[0:78],cmap='seismic')
subset_sst.mask[0,:,:].plot(ax=axes[1])
axes[0].scatter(irene.lon[0:78],irene.lat[0:78],c=irene.time.dt.dayofyear[0:78],cmap='seismic')
###Output
_____no_output_____
###Markdown
Mask out land values using `.where`: `subset_sst_masked = subset_sst.where(subset_sst.mask==1)` `subset_sst_masked.analysed_sst[0,:,:].plot()`
###Code
# plot masked data here
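# a possible completion, using the .where() snippet from the markdown above
subset_sst_masked = subset_sst.where(subset_sst.mask == 1)
subset_sst_masked.analysed_sst[0, :, :].plot()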
###Output
_____no_output_____
###Markdown
Compare time series of the cold wake after the hurricane as measured by MUR and OSTIA SSTs When you open a multi-file dataset, xarray uses dask for lazy loading. * Lazy loading: it mostly just loads the metadata. You can do data searching, selecting, and subsetting without actually loading the data. * Here we have loaded in 14 days of data from a very high resolution global SST dataset. Before we actually load the data, we are going to want to do some subsetting so that it will fit into our memory.* Notice below when you print out the dataset details that they are all stored as dask.array types.
###Code
ds_sst = xr.open_mfdataset(urls_sst,coords='minimal')
ds_sst = ds_sst.where(ds_sst.mask==1)
#subset data
subset_sst = ds_sst.sel(lat=slice(minlat,maxlat),
lon=slice(minlon,maxlon))
###Output
_____no_output_____
###Markdown
Check the size of the data
###Code
print('GB of data:', subset_sst.nbytes/1e9)
#load the data
subset_sst.load()
###Output
_____no_output_____
###Markdown
Load chlorophyll-a data from a virtually aggregated dataset at COASTWATCH
###Code
url = 'https://coastwatch.pfeg.noaa.gov/erddap/griddap/pmlEsaCCI31OceanColorDaily'
ds_chl = xr.open_dataset(url).rename({'latitude':'lat','longitude':'lon'})
ds_chl_subset = ds_chl.sel(time=slice(start_time2,end_time2),
lat=slice(45,15),
lon=slice(-100,-40))
chl = ds_chl_subset.chlor_a.sortby('lat')
###Output
_____no_output_____
###Markdown
* Create a 5-day resampled dataset since the chl-a data is missing when clouds are present
###Code
chl_5dy = chl.resample(time='5D').mean('time')
###Output
_____no_output_____
###Markdown
* Interpolate onto daily maps
###Code
chl_1dy = chl_5dy.resample(time='1D').interpolate('linear')
###Output
_____no_output_____
###Markdown
Look at the Chlorophyll-a data* Create a subplot* plot a 5-day average of the data* plot IRENE's track on the image* plot the difference poststorm - prestorm Chl-a* add a grid to the data for georeference
###Code
fig, axes = plt.subplots(ncols=2,figsize=[12,4])
chl_5dy[0,:,:].plot(vmin=0,vmax=.5,ax=axes[0])
axes[0].scatter(irene.lon,irene.lat,
c=irene.time.dt.dayofyear,
cmap='seismic')
(chl_5dy[4,:,:]-chl_5dy[0,:,:]).plot(vmin=-0.2,vmax=.2,ax=axes[1],cmap='seismic')
axes[1].scatter(irene.lon,irene.lat,
c=irene.time.dt.dayofyear,
cmap='jet')
axes[1].grid()
###Output
_____no_output_____
###Markdown
Plot a timeseries of Chl-a at -80,32 near the coast* The Chl-a changes from 0.4 to 1.0 * It takes almost a month before the Chl-a returns to normal
###Code
chl_ts = chl_1dy.sel(lat=32.0,method='nearest').sel(lon=-80.0,method='nearest')
chl_ts.plot()
###Output
_____no_output_____
###Markdown
Regridding to look at both SST and Chl-a* First interpolate in time* Next interpolate in space* use the SST mask to mask the Chl-a data
###Code
#first interpolate onto same time sampling
subset_chl_interp_time = chl_1dy.interp(time=subset_sst.time,
method='linear')
#now interpolate onto same spatial grid
subset_chl_interp = subset_chl_interp_time.interp(lat=subset_sst.lat,
lon=subset_sst.lon,
method='nearest')
#now mask the data
subset_chl_masked = subset_chl_interp.where(subset_sst.mask==1)
###Output
_____no_output_____
###Markdown
Plot both the change in SST and the change in Chl-a* There is a large region with substantial cooling, a 'cold-wake'* The Chl-a increase is only near the coast
###Code
fig, axes = plt.subplots(ncols=2,figsize=[12,4])
dif = (subset_sst.analysed_sst[10,:,:]-subset_sst.analysed_sst[0,:,:])
dif.plot(vmin=-1,vmax=1,ax=axes[0],cmap='seismic')
dif2 = (subset_chl_masked[10,:,:]-subset_chl_masked[0,:,:])
dif2.plot(vmin=-1,vmax=1,ax=axes[1],cmap='seismic')
###Output
_____no_output_____
###Markdown
Make the figure easier to interpret by adding land
###Code
f = plt.figure(figsize=(12,4))
dif = (subset_sst.analysed_sst[10,:,:]-subset_sst.analysed_sst[0,:,:])
ax1 = plt.subplot(121, projection=ccrs.Orthographic(-70, 30))
dif.plot(vmin=-1,vmax=1,ax=ax1,cmap='seismic',transform=ccrs.PlateCarree())
ax1.set_extent([-82, -50, 15, 45], crs=ccrs.PlateCarree())
ax1.coastlines('50m')
ax1.stock_img()
dif2 = (subset_chl_masked[11,:,:]-subset_chl_masked[0,:,:])
ax2 = plt.subplot(122, projection=ccrs.Orthographic(-70, 30))
(dif*0).plot(vmin=-1,vmax=1,ax=ax2,cmap='seismic',transform=ccrs.PlateCarree(),add_colorbar=False)
dif2.plot(vmin=-1,vmax=1,ax=ax2,cmap='seismic',transform=ccrs.PlateCarree())
ax2.set_extent([-82, -50, 15, 45], crs=ccrs.PlateCarree())
ax2.coastlines('50m')
ax2.stock_img()
###Output
_____no_output_____ |
chap7/chapter_7_examples.ipynb | ###Markdown
`def func(pos1, pos2, ..., keywd1, keywd2, ..., *args, **kwargs):` โข Changes to string, variable, and tuple arguments of a function within the function do not affect their values in the calling program.โข Changes to values of elements in list and array arguments of a function within the function are reflected in the values of the same list and array elements in the calling function.The point is that simple numerics, strings and tuples are immutable while lists and arrays are mutable. Because immutable objects canโt be changed, changing them within a function creates new objects with the same name inside of the function, but the old immutable objects that were used as arguments in the function call remain unchanged in the calling program. On the other hand, if elements of mutable objects like those in lists or arrays are changed, then those elements that are changed inside the function are also changed in the calling program.
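###Markdown
A small added demonstration of the point above: rebinding an immutable argument leaves the caller's value alone, while mutating an element of a list (or array) is visible to the caller.
###Code
def modify(x, lst):
    x = x + 1    # rebinds the local name only; the caller's int is untouched
    lst[0] = 99  # mutates the shared list in place; the caller sees this
a = 1
b = [1, 2, 3]
modify(a, b)
print(a)  # 1 -> immutable argument unchanged
print(b)  # [99, 2, 3] -> mutable argument changed
###Output
_____no_output_____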
###Code
import numpy as np
a = np.random.random(10)
a.size
a.dtype
a
a.sum()
a.mean()
a.var()
a.sort()
a
a.clip(0.3, 0.8)
a
def f(a, b):
return 3 * a + b**2
f(2, 3)
g = lambda a, b: 3 * a + b**2
g(2, 3)
###Output
_____no_output_____ |
analyze_results10.ipynb | ###Markdown
###Code
import os
import pandas as pd
url = 'https://raw.githubusercontent.com/m-rafiul-islam/driver-behavior-model/data/parameter_estimation_combined_data_version10.csv'
data = pd.read_csv(url)
data.dropna(inplace=True)
data.describe()
data_ODE = data[data['alpha']==1].reset_index()
data_FDE = data[data['alpha']!=1].reset_index()
data_FDE.describe()
data_ODE.describe()
data_FDE
data_FDE['alpha'].hist(bins=100)
import seaborn as sns
import matplotlib.pyplot as plt
sns.histplot(data_FDE['alpha'], bins = 100,kde = True)
sns.histplot(data_FDE['delta'], bins = 50,kde = True)
sns.histplot(data_ODE['delta'], bins = 50,kde = True)
fig, ax = plt.subplots()
ax = (data_ODE['objective']/data_FDE['objective']).plot()
import numpy as np
# fig, ax = plt.subplots()
# ax.plot(range(len(data_ODE['objective'])),np.array(data_ODE['objective']),'blue')
# ax2= plt.twinx()
# ax2.plot(range(len(data_FDE['objective'])),np.array(data_FDE['objective']),'green')
# plt.figure()
# plt.plot(range(len(data_ODE['objective'])),np.array(data_ODE['objective']),'blue')
# plt.plot(range(len(data_FDE['objective'])),np.array(data_FDE['objective']),'green')
###Output
_____no_output_____ |
quantum gravity/convwave/src/jupyter/get_snr_distributions_from_training.ipynb | ###Markdown
Read in the training data
###Code
# imports needed by this notebook (added so the cell runs standalone)
import os
import h5py
import numpy as np
from tabulate import tabulate
from IPython.display import display, HTML
# Path to the directory where all data is stored
data_path = '../data'
snrs = dict()
for event in ['GW150914', 'GW151226', 'GW170104']:
snrs[event] = dict()
print('Starting on event: {}'.format(event))
for dist in ['0100_0300', '0250_0500', '0400_0800', '0700_1200']:
print('--Reading in distances: {}'.format(dist), end=' ')
snrs[event][dist] = dict()
# Read in the HDF file
with h5py.File(os.path.join(data_path,
'training',
'timeseries',
'training_{}_{}_8k.h5'.format(event, dist)), 'r') as file:
snrs[event][dist]['H1'] = np.array(file['snrs_H1'])
print('({}, '.format(len(snrs[event][dist]['H1'])), end='')
snrs[event][dist]['L1'] = np.array(file['snrs_L1'])
print('{})'.format(len(snrs[event][dist]['L1'])))
for event in ['GW150914', 'GW151226', 'GW170104']:
display(HTML('<h3>{}</h3>'.format(event)))
rows = []
for dist in ['0100_0300', '0250_0500', '0400_0800', '0700_1200']:
median_H1 = '{:.1E}'.format(np.nanmedian(snrs[event][dist]['H1'].flatten()))
median_L1 = '{:.1E}'.format(np.nanmedian(snrs[event][dist]['L1'].flatten()))
min_H1 = '{:.1E}'.format(np.nanmin(snrs[event][dist]['H1'].flatten()))
min_L1 = '{:.1E}'.format(np.nanmin(snrs[event][dist]['L1'].flatten()))
max_H1 = '{:.1E}'.format(np.nanmax(snrs[event][dist]['H1'].flatten()))
max_L1 = '{:.1E}'.format(np.nanmax(snrs[event][dist]['L1'].flatten()))
rows.append([dist, median_H1, min_H1, max_H1, median_L1, min_L1, max_L1])
display(HTML(tabulate(rows, tablefmt='html',
headers=['Distances', 'Median H1', 'Minimum H1', 'Maximum H1', 'Median L1', 'Minimum L1', 'Maximum L1'])))
print(tabulate(rows, tablefmt='latex',
headers=['Distances', 'Median H1', 'Minimum H1', 'Maximum H1', 'Median L1', 'Minimum L1', 'Maximum L1']))
1.7e-4
###Output
_____no_output_____ |
Data_Visualization/Multivariate_Exploration_of_data/Adapted_Plot_Practice.ipynb | ###Markdown
In this workspace, you will work with the fuel economy dataset from the previous lesson on bivariate plots.
###Code
# imports needed for this workspace (added so the notebook runs standalone)
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sb
fuel_econ = pd.read_csv('./data/fuel_econ.csv')
fuel_econ.head()
###Output
_____no_output_____
###Markdown
**Task 1**: Plot the city ('city') vs. highway ('highway') fuel efficiencies (both in mpg) for each vehicle class ('VClass'). Don't forget that vehicle class is an ordinal variable with levels {Minicompact Cars, Subcompact Cars, Compact Cars, Midsize Cars, Large Cars}.
###Code
g = sb.FacetGrid(data = fuel_econ, col = 'VClass', size = 3, col_wrap = 3)
g.map(plt.scatter, 'city', 'highway', alpha = 1/4);
# run this cell to check your work against ours
adaptedplot_solution_1()
###Output
Due to overplotting, I've taken a faceting approach to this task. There don't seem to be any obvious differences in the main cluster across vehicle classes, except that the minicompact and large sedans' arcs are thinner than the other classes due to lower counts. The faceted plots clearly show that most of the high-efficiency cars are in the mid-size and compact car classes.
###Markdown
**Task 2**: Plot the relationship between engine size ('displ', in meters), vehicle class, and fuel type ('fuelType'). For the lattermost feature, focus only on Premium Gasoline and Regular Gasoline cars. What kind of relationships can you spot in this plot?
###Code
sb.boxplot(data = fuel_econ, x = 'VClass', y = 'displ', hue = 'fuelType')
plt.legend(loc = 6, bbox_to_anchor = (1, 0))
plt.xticks(rotation = 90);
# run this cell to check your work against ours
adaptedplot_solution_2()
###Output
I went with a clustered box plot on this task since there were too many levels to make a clustered violin plot accessible. The plot shows that in each vehicle class, engine sizes were larger for premium-fuel cars than regular-fuel cars. Engine size generally increased with vehicle class within each fuel type, but the trend was noisy for the smallest vehicle classes.
|
jupyter_script/hard_triplet_lstm_classification.ipynb | ###Markdown
Baseline of 3-convolutional-layer and LSTM classification- **network** - embedding: ```conv(5x5)->maxpool(2x2)->conv(5x5)->maxpool(2x2)->conv(3x3)->bi-lstm(32 hidden units)``` - classifier: ```linear(64, 10)``` - loss function: ```CrossEntropyLoss```
###Code
import sys
sys.path.append('..')
%load_ext autoreload
%autoreload 2
from experiment import hard_triplet_baseline
kwargs = {
'device': '2',
'lr': 1e-3,
'n_epochs': 100,
'n_classes': 10,
'n_samples': 12,
'margin': 0.3,
'log_interval': 50
}
kwargs
hard_triplet_baseline.hard_triplet_baseline_exp(**kwargs)
###Output
10080it [00:00, 307924.02it/s]
10080it [00:00, 205046.73it/s]
10080it [00:00, 488594.66it/s] |
7. Python - Functions.ipynb | ###Markdown
Setup First, let's make sure this notebook works well in both python 2 and 3, import a few common modules, ensure MatplotLib plots figures inline and prepare a function to save the figures:
###Code
# To support both python 2 and python 3
from __future__ import division, print_function, unicode_literals
# Common imports
import numpy as np
import os
import tensorflow as tf  # needed only by reset_graph() below
# to make this notebook's output stable across runs
def reset_graph(seed=42):
tf.reset_default_graph()
tf.set_random_seed(seed)
np.random.seed(seed)
###Output
_____no_output_____
###Markdown
Basics of Python
###Code
#Functions are a convenient way to divide your code into useful blocks, allowing us to order our code,
#make it more readable, reuse it and save some time.
#Also functions are a key way to define interfaces so programmers can share their code.
def my_function():
print("x")
def myfunction():
print("y")
#default argument usage for "efgh" variable
def my_function(abcd, efgh=101):
print("abcd = %d, efgh = %d" %(abcd, efgh))
print("Hello there!")
my_function(5)
myfunction()
my_function(3.9, 7)
#function arguments
def my_function_with_args(username, greeting, greeting2):
print("Hello, %s , From My Function!, I wish you %s. Today %s"%(username, greeting, greeting2))
my_function_with_args("Vivek", "How are you", "is a good day")
#def sum_two_numbers(a, b):
# return a + b
# Define our 3 functions
def my_function():
print("Hello From My Function!")
def my_function_with_args(username, greeting):
print("Hello, %s , From My Function!, I wish you %s"%(username, greeting))
# print(a simple greeting)
my_function()
#prints - "Hello, John Doe, From My Function!, I wish you a great year!"
my_function_with_args("John Doe", "a great year!")
def sum_two_numbers(a, b):
return a + b
# after this line x will hold the value 3!
x = sum_two_numbers(1,2)
print(x)
#Exercise
"""
In this exercise you'll use an existing function, and while adding your own to create a fully functional program.
Add a function named list_benefits() that returns the following list of strings:
"More organized code", "More readable code", "Easier code reuse", "Allowing programmers to share and connect code together"
Add a function named build_sentence(info) which receives a single argument containing a string and returns
a sentence starting with the given string and ending with the string " is a benefit of functions!"
Run and see all the functions work together!
"""
# Modify this function to return a list of strings as defined above
def list_benefits():
pass
# Modify this function to concatenate to each benefit - " is a benefit of functions!"
def build_sentence(benefit):
pass
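# One possible solution (kept commented out so you can attempt it yourself first):
# def list_benefits():
#     return ["More organized code", "More readable code", "Easier code reuse",
#             "Allowing programmers to share and connect code together"]
# def build_sentence(benefit):
#     return benefit + " is a benefit of functions!"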
def name_the_benefits_of_functions():
list_of_benefits = list_benefits()
for benefit in list_of_benefits:
print(build_sentence(benefit))
name_the_benefits_of_functions()
###Output
_____no_output_____ |
_acquire_data/BeautifulSoup_xedotcom.ipynb | ###Markdown
BeautifulSoup Exchange rates from XE.com
###Code
url = 'https://www.xe.com/currencyconverter/convert/?Amount=1&From=USD&To=EUR'
import pandas as pd
import requests
from bs4 import BeautifulSoup
def result_page(url, keywords=''):
    # fetch the page and return a parsed tree, or None if the request failed
    response = requests.get(url + keywords)
    if not response.status_code == 200:
        return None
    return BeautifulSoup(response.content, 'lxml')
def get_data(url, keywords='', selector=''):
    # scrape (currency, rate) pairs from the rate table on the page
    rate_list = []
    try:
        soup = result_page(url, keywords)
        rates = soup.find_all('td', class_='rateCell')
        for rate in rates:
            rate_ = rate.get_text()
            try:
                # the link's 'rel' attribute encodes the currency pair, e.g. 'USD/EUR'
                currency = rate.find('a').get('rel')[0][:7]
                rate_list.append((currency, rate_))
            except:
                currency = ''
        return rate_list
    except:
        return None
xe_data = get_data(url)
pd.DataFrame(xe_data, columns=['currency', 'rate'])
###Output
_____no_output_____ |
site/en-snapshot/swift/tutorials/python_interoperability.ipynb | ###Markdown
Copyright 2018 The TensorFlow Authors. [Licensed under the Apache License, Version 2.0](scrollTo=ByZjmtFgB_Y5).
###Code
// #@title Licensed under the Apache License, Version 2.0 (the "License"); { display-mode: "form" }
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
###Output
_____no_output_____
###Markdown
Python interoperability Swift For TensorFlow supports Python interoperability.You can import Python modules from Swift, call Python functions, and convert values between Swift and Python.
###Code
import PythonKit
print(Python.version)
###Output
_____no_output_____
###Markdown
Setting the Python version By default, when you `import Python`, Swift searches system library paths for the newest version of Python installed. To use a specific Python installation, set the `PYTHON_LIBRARY` environment variable to the `libpython` shared library provided by the installation. For example: `export PYTHON_LIBRARY="~/anaconda3/lib/libpython3.7m.so"`The exact filename will differ across Python environments and platforms. Alternatively, you can set the `PYTHON_VERSION` environment variable, which instructs Swift to search system library paths for a matching Python version. Note that `PYTHON_LIBRARY` takes precedence over `PYTHON_VERSION`.In code, you can also call the `PythonLibrary.useVersion` function, which is equivalent to setting `PYTHON_VERSION`.
###Code
// PythonLibrary.useVersion(2)
// PythonLibrary.useVersion(3, 7)
###Output
_____no_output_____
###Markdown
__Note: you should run `PythonLibrary.useVersion` right after `import Python`, before calling any Python code. It cannot be used to dynamically switch Python versions.__ Set `PYTHON_LOADER_LOGGING=1` to see [debug output for Python library loading](https://github.com/apple/swift/pull/20674#discussion_r235207008). Basics In Swift, `PythonObject` represents an object from Python.All Python APIs use and return `PythonObject` instances.Basic types in Swift (like numbers and arrays) are convertible to `PythonObject`. In some cases (for literals and functions taking `PythonConvertible` arguments), conversion happens implicitly. To explicitly cast a Swift value to `PythonObject`, use the `PythonObject` initializer.`PythonObject` defines many standard operations, including numeric operations, indexing, and iteration.
###Code
// Convert standard Swift types to Python.
let pythonInt: PythonObject = 1
let pythonFloat: PythonObject = 3.0
let pythonString: PythonObject = "Hello Python!"
let pythonRange: PythonObject = PythonObject(5..<10)
let pythonArray: PythonObject = [1, 2, 3, 4]
let pythonDict: PythonObject = ["foo": [0], "bar": [1, 2, 3]]
// Perform standard operations on Python objects.
print(pythonInt + pythonFloat)
print(pythonString[0..<6])
print(pythonRange)
print(pythonArray[2])
print(pythonDict["bar"])
// Convert Python objects back to Swift.
let int = Int(pythonInt)!
let float = Float(pythonFloat)!
let string = String(pythonString)!
let range = Range<Int>(pythonRange)!
let array: [Int] = Array(pythonArray)!
let dict: [String: [Int]] = Dictionary(pythonDict)!
// Perform standard operations.
// Outputs are the same as Python!
print(Float(int) + float)
print(string.prefix(6))
print(range)
print(array[2])
print(dict["bar"]!)
###Output
_____no_output_____
###Markdown
`PythonObject` defines conformances to many standard Swift protocols:* `Equatable`* `Comparable`* `Hashable`* `SignedNumeric`* `Strideable`* `MutableCollection`* All of the `ExpressibleBy_Literal` protocolsNote that these conformances are not type-safe: crashes will occur if you attempt to use protocol functionality from an incompatible `PythonObject` instance.
###Code
let one: PythonObject = 1
print(one == one)
print(one < one)
print(one + one)
let array: PythonObject = [1, 2, 3]
for (i, x) in array.enumerated() {
print(i, x)
}
###Output
_____no_output_____
###Markdown
To convert tuples from Python to Swift, you must statically know the arity of the tuple.Call one of the following instance methods:- `PythonObject.tuple2`- `PythonObject.tuple3`- `PythonObject.tuple4`
###Code
let pythonTuple = Python.tuple([1, 2, 3])
print(pythonTuple, Python.len(pythonTuple))
// Convert to Swift.
let tuple = pythonTuple.tuple3
print(tuple)
###Output
_____no_output_____
###Markdown
Python builtinsAccess Python builtins via the global `Python` interface.
###Code
// `Python.builtins` is a dictionary of all Python builtins.
_ = Python.builtins
// Try some Python builtins.
print(Python.type(1))
print(Python.len([1, 2, 3]))
print(Python.sum([1, 2, 3]))
###Output
_____no_output_____
###Markdown
Importing Python modulesUse `Python.import` to import a Python module. It works like the `import` keyword in `Python`.
###Code
let np = Python.import("numpy")
print(np)
let ones = np.ones([2, 3])
print(ones)
###Output
_____no_output_____
###Markdown
Use the throwing function `Python.attemptImport` to perform safe importing.
###Code
let maybeModule = try? Python.attemptImport("nonexistent_module")
print(maybeModule)
###Output
_____no_output_____
###Markdown
Conversion with `numpy.ndarray`The following Swift types can be converted to and from `numpy.ndarray`:- `Array`- `ShapedArray`- `Tensor`Conversion succeeds only if the `dtype` of the `numpy.ndarray` is compatible with the `Element` or `Scalar` generic parameter type.For `Array`, conversion from `numpy` succeeds only if the `numpy.ndarray` is 1-D.
###Code
import TensorFlow
let numpyArray = np.ones([4], dtype: np.float32)
print("Swift type:", type(of: numpyArray))
print("Python type:", Python.type(numpyArray))
print(numpyArray.shape)
// Examples of converting `numpy.ndarray` to Swift types.
let array: [Float] = Array(numpy: numpyArray)!
let shapedArray = ShapedArray<Float>(numpy: numpyArray)!
let tensor = Tensor<Float>(numpy: numpyArray)!
// Examples of converting Swift types to `numpy.ndarray`.
print(array.makeNumpyArray())
print(shapedArray.makeNumpyArray())
print(tensor.makeNumpyArray())
// Examples with different dtypes.
let doubleArray: [Double] = Array(numpy: np.ones([3], dtype: np.float64))!
let intTensor = Tensor<Int32>(numpy: np.ones([2, 3], dtype: np.int32))!
###Output
_____no_output_____
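###Markdown
A hedged sketch of the failure modes described above: the conversion initializers are failable and return `nil` when the `dtype` or the rank does not match, so optional binding can guard against bad input.
###Code
// dtype mismatch: a Float32 ndarray does not convert to [Double].
let wrongDtype: [Double]? = Array(numpy: np.ones([4], dtype: np.float32))
print(wrongDtype == nil)
// Rank mismatch: a 2-D ndarray does not convert to a 1-D Swift Array.
let wrongRank: [Float]? = Array(numpy: np.ones([2, 2], dtype: np.float32))
print(wrongRank == nil)
###Output
_____no_output_____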
###Markdown
Displaying images

You can display images in-line using `matplotlib`, just like in Python notebooks.
###Code
// This cell is here to display plots inside a Jupyter Notebook.
// Do not copy it into another environment.
%include "EnableIPythonDisplay.swift"
IPythonDisplay.shell.enable_matplotlib("inline")
let np = Python.import("numpy")
let plt = Python.import("matplotlib.pyplot")
let time = np.arange(0, 10, 0.01)
let amplitude = np.exp(-0.1 * time)
let position = amplitude * np.sin(3 * time)
plt.figure(figsize: [15, 10])
plt.plot(time, position)
plt.plot(time, amplitude)
plt.plot(time, -amplitude)
plt.xlabel("Time (s)")
plt.ylabel("Position (m)")
plt.title("Oscillations")
plt.show()
###Output
_____no_output_____ |
site/ko/lattice/tutorials/shape_constraints_for_ethics.ipynb | ###Markdown
***Copyright 2020 The TensorFlow Authors.***
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
Shape Constraints for Ethics with Tensorflow Lattice

View on TensorFlow.org Run in Google Colab View source on GitHub Download notebook

Overview

This tutorial demonstrates how the TensorFlow Lattice (TFL) library can be used to train models that behave *responsibly* and do not violate certain *ethical* or *fair* assumptions. In particular, we will focus on using monotonicity constraints to avoid *unfair penalization* of certain attributes. This tutorial includes demonstrations of the experiments from the paper [*Deontological Ethics By Monotonicity Shape Constraints*](https://arxiv.org/abs/2001.11990) by Serena Wang and Maya Gupta, published at [AISTATS 2020](https://www.aistats.org/).

We will use TFL canned estimators on public datasets, but note that everything in this tutorial can also be done with models constructed from TFL Keras layers.

Before proceeding, make sure all required packages are installed in your runtime (as imported in the code cells below).

Setup

Installing the TF Lattice package:
###Code
#@test {"skip": true}
!pip install tensorflow-lattice seaborn
###Output
_____no_output_____
###Markdown
Importing required packages:
###Code
import tensorflow as tf
import logging
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import seaborn as sns
from sklearn.model_selection import train_test_split
import sys
import tensorflow_lattice as tfl
logging.disable(sys.maxsize)
###Output
_____no_output_____
###Markdown
Default values used in this tutorial:
###Code
# List of learning rate hyperparameters to try.
# For a longer list of reasonable hyperparameters, try [0.001, 0.01, 0.1].
LEARNING_RATES = [0.01]
# Default number of training epochs and batch sizes.
NUM_EPOCHS = 1000
BATCH_SIZE = 1000
# Directory containing dataset files.
DATA_DIR = 'https://raw.githubusercontent.com/serenalwang/shape_constraints_for_ethics/master'
###Output
_____no_output_____
###Markdown
Case study 1: Law school admissions

In the first part of this tutorial, we will consider a case study using the Law School Admissions dataset from the Law School Admissions Council (LSAC). We will train a classifier to predict whether or not a student will pass the bar using two features: the student's LSAT score and undergraduate GPA.

Suppose the classifier's score were used to guide law school admissions or scholarship decisions. According to merit-based social norms, we would expect students with a higher GPA and a higher LSAT score to receive a higher score from the classifier. However, we will observe that it is easy for models to violate these intuitive norms, and they sometimes penalize people for having a higher GPA or LSAT score.

To address this *unfair penalization* problem, we can impose monotonicity constraints so that the model never penalizes a higher GPA or a higher LSAT score, all else equal. In this tutorial, we will show how to impose these monotonicity constraints using TFL.

Load law school data
###Code
# Load data file.
law_file_name = 'lsac.csv'
law_file_path = os.path.join(DATA_DIR, law_file_name)
raw_law_df = pd.read_csv(law_file_path, delimiter=',')
###Output
_____no_output_____
###Markdown
Preprocess the dataset:
###Code
# Define label column name.
LAW_LABEL = 'pass_bar'
def preprocess_law_data(input_df):
  # Drop rows where the label or features of interest are missing.
output_df = input_df[~input_df[LAW_LABEL].isna() & ~input_df['ugpa'].isna() &
(input_df['ugpa'] > 0) & ~input_df['lsat'].isna()]
return output_df
law_df = preprocess_law_data(raw_law_df)
###Output
_____no_output_____
###Markdown
Split data into train/validation/test sets
###Code
def split_dataset(input_df, random_state=888):
"""Splits an input dataset into train, val, and test sets."""
train_df, test_val_df = train_test_split(
input_df, test_size=0.3, random_state=random_state)
val_df, test_df = train_test_split(
test_val_df, test_size=0.66, random_state=random_state)
return train_df, val_df, test_df
law_train_df, law_val_df, law_test_df = split_dataset(law_df)
###Output
_____no_output_____
###Markdown
Visualize the data distribution

First we visualize the distribution of the data. We will plot the GPA and LSAT scores for all students that passed the bar and for all students that did not.
###Code
def plot_dataset_contour(input_df, title):
plt.rcParams['font.family'] = ['serif']
g = sns.jointplot(
x='ugpa',
y='lsat',
data=input_df,
kind='kde',
xlim=[1.4, 4],
ylim=[0, 50])
g.plot_joint(plt.scatter, c='b', s=10, linewidth=1, marker='+')
g.ax_joint.collections[0].set_alpha(0)
g.set_axis_labels('Undergraduate GPA', 'LSAT score', fontsize=14)
g.fig.suptitle(title, fontsize=14)
  # Adjust the plot so that the title fits.
plt.subplots_adjust(top=0.9)
plt.show()
law_df_pos = law_df[law_df[LAW_LABEL] == 1]
plot_dataset_contour(
law_df_pos, title='Distribution of students that passed the bar')
law_df_neg = law_df[law_df[LAW_LABEL] == 0]
plot_dataset_contour(
law_df_neg, title='Distribution of students that failed the bar')
###Output
_____no_output_____
###Markdown
Train calibrated linear models to predict bar passage

Next, we will train a *calibrated linear model* from TFL to predict whether or not a student will pass the bar. The two input features are the LSAT score and undergraduate GPA, and the training label is whether the student passed the bar.

We will first train a calibrated linear model without any constraints. Then we will train a calibrated linear model with monotonicity constraints and observe the difference in the model output and accuracy.

Helper functions for training a TFL calibrated linear estimator

These functions will be used for this law school case study as well as for the credit default case study below.
###Code
def train_tfl_estimator(train_df, monotonicity, learning_rate, num_epochs,
batch_size, get_input_fn,
get_feature_columns_and_configs):
"""Trains a TFL calibrated linear estimator.
Args:
train_df: pandas dataframe containing training data.
monotonicity: if 0, then no monotonicity constraints. If 1, then all
features are constrained to be monotonically increasing.
learning_rate: learning rate of Adam optimizer for gradient descent.
num_epochs: number of training epochs.
batch_size: batch size for each epoch. None means the batch size is the full
dataset size.
get_input_fn: function that returns the input_fn for a TF estimator.
get_feature_columns_and_configs: function that returns TFL feature columns
and configs.
Returns:
estimator: a trained TFL calibrated linear estimator.
"""
feature_columns, feature_configs = get_feature_columns_and_configs(
monotonicity)
model_config = tfl.configs.CalibratedLinearConfig(
feature_configs=feature_configs, use_bias=False)
estimator = tfl.estimators.CannedClassifier(
feature_columns=feature_columns,
model_config=model_config,
feature_analysis_input_fn=get_input_fn(input_df=train_df, num_epochs=1),
optimizer=tf.keras.optimizers.Adam(learning_rate))
estimator.train(
input_fn=get_input_fn(
input_df=train_df, num_epochs=num_epochs, batch_size=batch_size))
return estimator
def optimize_learning_rates(
train_df,
val_df,
test_df,
monotonicity,
learning_rates,
num_epochs,
batch_size,
get_input_fn,
get_feature_columns_and_configs,
):
"""Optimizes learning rates for TFL estimators.
Args:
train_df: pandas dataframe containing training data.
val_df: pandas dataframe containing validation data.
test_df: pandas dataframe containing test data.
monotonicity: if 0, then no monotonicity constraints. If 1, then all
features are constrained to be monotonically increasing.
learning_rates: list of learning rates to try.
num_epochs: number of training epochs.
batch_size: batch size for each epoch. None means the batch size is the full
dataset size.
get_input_fn: function that returns the input_fn for a TF estimator.
get_feature_columns_and_configs: function that returns TFL feature columns
and configs.
Returns:
A single TFL estimator that achieved the best validation accuracy.
"""
estimators = []
train_accuracies = []
val_accuracies = []
test_accuracies = []
for lr in learning_rates:
estimator = train_tfl_estimator(
train_df=train_df,
monotonicity=monotonicity,
learning_rate=lr,
num_epochs=num_epochs,
batch_size=batch_size,
get_input_fn=get_input_fn,
get_feature_columns_and_configs=get_feature_columns_and_configs)
estimators.append(estimator)
train_acc = estimator.evaluate(
input_fn=get_input_fn(train_df, num_epochs=1))['accuracy']
val_acc = estimator.evaluate(
input_fn=get_input_fn(val_df, num_epochs=1))['accuracy']
test_acc = estimator.evaluate(
input_fn=get_input_fn(test_df, num_epochs=1))['accuracy']
print('accuracies for learning rate %f: train: %f, val: %f, test: %f' %
(lr, train_acc, val_acc, test_acc))
train_accuracies.append(train_acc)
val_accuracies.append(val_acc)
test_accuracies.append(test_acc)
max_index = val_accuracies.index(max(val_accuracies))
return estimators[max_index]
###Output
_____no_output_____
###Markdown
Helper functions for configuring law school dataset features

These helper functions are specific to the law school case study.
###Code
def get_input_fn_law(input_df, num_epochs, batch_size=None):
"""Gets TF input_fn for law school models."""
return tf.compat.v1.estimator.inputs.pandas_input_fn(
x=input_df[['ugpa', 'lsat']],
y=input_df['pass_bar'],
num_epochs=num_epochs,
batch_size=batch_size or len(input_df),
shuffle=False)
def get_feature_columns_and_configs_law(monotonicity):
"""Gets TFL feature configs for law school models."""
feature_columns = [
tf.feature_column.numeric_column('ugpa'),
tf.feature_column.numeric_column('lsat'),
]
feature_configs = [
tfl.configs.FeatureConfig(
name='ugpa',
lattice_size=2,
pwl_calibration_num_keypoints=20,
monotonicity=monotonicity,
pwl_calibration_always_monotonic=False),
tfl.configs.FeatureConfig(
name='lsat',
lattice_size=2,
pwl_calibration_num_keypoints=20,
monotonicity=monotonicity,
pwl_calibration_always_monotonic=False),
]
return feature_columns, feature_configs
###Output
_____no_output_____
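###Markdown
The overview noted that everything in this tutorial can also be done with models built from TFL Keras layers. As a hedged illustration (not part of the original tutorial), the monotonic piecewise-linear calibration of `lsat` configured above could be expressed with a Keras layer roughly as follows; the keypoint range is an assumption chosen to cover the observed LSAT scores.
###Code
# Sketch: monotonic PWL calibration of LSAT scores as a TFL Keras layer.
lsat_calibrator = tfl.layers.PWLCalibration(
    # 20 keypoints over an assumed LSAT range, mirroring the estimator config.
    input_keypoints=np.linspace(0.0, 50.0, num=20),
    output_min=0.0,
    output_max=1.0,
    # 'increasing' means a higher LSAT score can never lower the output.
    monotonicity='increasing')
###Output
_____no_output_____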
###Markdown
Helper functions for visualizing trained model outputs
###Code
def get_predicted_probabilities(estimator, input_df, get_input_fn):
predictions = estimator.predict(
input_fn=get_input_fn(input_df=input_df, num_epochs=1))
return [prediction['probabilities'][1] for prediction in predictions]
def plot_model_contour(estimator, input_df, num_keypoints=20):
x = np.linspace(min(input_df['ugpa']), max(input_df['ugpa']), num_keypoints)
y = np.linspace(min(input_df['lsat']), max(input_df['lsat']), num_keypoints)
x_grid, y_grid = np.meshgrid(x, y)
positions = np.vstack([x_grid.ravel(), y_grid.ravel()])
plot_df = pd.DataFrame(positions.T, columns=['ugpa', 'lsat'])
plot_df[LAW_LABEL] = np.ones(len(plot_df))
predictions = get_predicted_probabilities(
estimator=estimator, input_df=plot_df, get_input_fn=get_input_fn_law)
grid_predictions = np.reshape(predictions, x_grid.shape)
plt.rcParams['font.family'] = ['serif']
plt.contour(
x_grid,
y_grid,
grid_predictions,
colors=('k',),
levels=np.linspace(0, 1, 11))
plt.contourf(
x_grid,
y_grid,
grid_predictions,
cmap=plt.cm.bone,
      levels=np.linspace(0, 1, 11))
plt.xticks(fontsize=20)
plt.yticks(fontsize=20)
cbar = plt.colorbar()
cbar.ax.set_ylabel('Model score', fontsize=20)
cbar.ax.tick_params(labelsize=20)
plt.xlabel('Undergraduate GPA', fontsize=20)
plt.ylabel('LSAT score', fontsize=20)
###Output
_____no_output_____
###Markdown
Train an unconstrained (non-monotonic) calibrated linear model
###Code
nomon_linear_estimator = optimize_learning_rates(
train_df=law_train_df,
val_df=law_val_df,
test_df=law_test_df,
monotonicity=0,
learning_rates=LEARNING_RATES,
batch_size=BATCH_SIZE,
num_epochs=NUM_EPOCHS,
get_input_fn=get_input_fn_law,
get_feature_columns_and_configs=get_feature_columns_and_configs_law)
plot_model_contour(nomon_linear_estimator, input_df=law_df)
###Output
_____no_output_____
###Markdown
Train a monotonic calibrated linear model
###Code
mon_linear_estimator = optimize_learning_rates(
train_df=law_train_df,
val_df=law_val_df,
test_df=law_test_df,
monotonicity=1,
learning_rates=LEARNING_RATES,
batch_size=BATCH_SIZE,
num_epochs=NUM_EPOCHS,
get_input_fn=get_input_fn_law,
get_feature_columns_and_configs=get_feature_columns_and_configs_law)
plot_model_contour(mon_linear_estimator, input_df=law_df)
###Output
_____no_output_____
###Markdown
Train other unconstrained models

We demonstrated that TFL calibrated linear models can be trained to be monotonic in both LSAT score and GPA without a large sacrifice in accuracy.

But how does the calibrated linear model compare to other types of models, such as deep neural networks (DNNs) or gradient boosted trees (GBTs)? Do DNNs and GBTs appear to produce reasonably fair outputs? To answer this question, we will next train an unconstrained DNN and GBT. In fact, we will observe that both the DNN and the GBT easily violate monotonicity in LSAT score and undergraduate GPA.

Train an unconstrained Deep Neural Network (DNN) model

The architecture was previously optimized to achieve high validation accuracy.
###Code
feature_names = ['ugpa', 'lsat']
dnn_estimator = tf.estimator.DNNClassifier(
feature_columns=[
tf.feature_column.numeric_column(feature) for feature in feature_names
],
hidden_units=[100, 100],
optimizer=tf.keras.optimizers.Adam(learning_rate=0.008),
activation_fn=tf.nn.relu)
dnn_estimator.train(
input_fn=get_input_fn_law(
law_train_df, batch_size=BATCH_SIZE, num_epochs=NUM_EPOCHS))
dnn_train_acc = dnn_estimator.evaluate(
input_fn=get_input_fn_law(law_train_df, num_epochs=1))['accuracy']
dnn_val_acc = dnn_estimator.evaluate(
input_fn=get_input_fn_law(law_val_df, num_epochs=1))['accuracy']
dnn_test_acc = dnn_estimator.evaluate(
input_fn=get_input_fn_law(law_test_df, num_epochs=1))['accuracy']
print('accuracies for DNN: train: %f, val: %f, test: %f' %
(dnn_train_acc, dnn_val_acc, dnn_test_acc))
plot_model_contour(dnn_estimator, input_df=law_df)
###Output
_____no_output_____
###Markdown
Train an unconstrained Gradient Boosted Trees (GBT) model

The tree structure was previously optimized to achieve high validation accuracy.
###Code
tree_estimator = tf.estimator.BoostedTreesClassifier(
feature_columns=[
tf.feature_column.numeric_column(feature) for feature in feature_names
],
n_batches_per_layer=2,
n_trees=20,
max_depth=4)
tree_estimator.train(
input_fn=get_input_fn_law(
law_train_df, num_epochs=NUM_EPOCHS, batch_size=BATCH_SIZE))
tree_train_acc = tree_estimator.evaluate(
input_fn=get_input_fn_law(law_train_df, num_epochs=1))['accuracy']
tree_val_acc = tree_estimator.evaluate(
input_fn=get_input_fn_law(law_val_df, num_epochs=1))['accuracy']
tree_test_acc = tree_estimator.evaluate(
input_fn=get_input_fn_law(law_test_df, num_epochs=1))['accuracy']
print('accuracies for GBT: train: %f, val: %f, test: %f' %
(tree_train_acc, tree_val_acc, tree_test_acc))
plot_model_contour(tree_estimator, input_df=law_df)
###Output
_____no_output_____
###Markdown
Case study 2: Credit default

The second case study we consider in this tutorial is predicting an individual's credit default probability. We will use the Default of Credit Card Clients dataset from the UCI repository. This data was collected from 30,000 Taiwanese credit card users and contains a binary label indicating whether or not a user defaulted on a payment within a time window. Features include marital status, gender, education, and how long a user was behind on payment of their existing bills in each month from April to September 2005.

As with the first case study, we again illustrate using monotonicity constraints to avoid *unfair penalization*: if the model were used to determine a user's credit score, it could feel unfair to many people if they were penalized for paying their bills sooner, all else equal. Thus, we apply a monotonicity constraint that keeps the model from penalizing early payments.

Load credit default data
###Code
# Load data file.
credit_file_name = 'credit_default.csv'
credit_file_path = os.path.join(DATA_DIR, credit_file_name)
credit_df = pd.read_csv(credit_file_path, delimiter=',')
# Define label column name.
CREDIT_LABEL = 'default'
###Output
_____no_output_____
###Markdown
Split data into train/validation/test sets
###Code
credit_train_df, credit_val_df, credit_test_df = split_dataset(credit_df)
###Output
_____no_output_____
###Markdown
Visualize the data distribution

First we visualize the distribution of the data. We will plot the mean and standard error of the observed default rate for people with different marital statuses and repayment statuses. The repayment status represents the number of months a person is behind on paying back their loan (as of April 2005).
###Code
def get_agg_data(df, x_col, y_col, bins=11):
xbins = pd.cut(df[x_col], bins=bins)
data = df[[x_col, y_col]].groupby(xbins).agg(['mean', 'sem'])
return data
def plot_2d_means_credit(input_df, x_col, y_col, x_label, y_label):
plt.rcParams['font.family'] = ['serif']
_, ax = plt.subplots(nrows=1, ncols=1)
plt.setp(ax.spines.values(), color='black', linewidth=1)
ax.tick_params(
direction='in', length=6, width=1, top=False, right=False, labelsize=18)
df_single = get_agg_data(input_df[input_df['MARRIAGE'] == 1], x_col, y_col)
df_married = get_agg_data(input_df[input_df['MARRIAGE'] == 2], x_col, y_col)
ax.errorbar(
df_single[(x_col, 'mean')],
df_single[(y_col, 'mean')],
xerr=df_single[(x_col, 'sem')],
yerr=df_single[(y_col, 'sem')],
color='orange',
marker='s',
capsize=3,
capthick=1,
label='Single',
markersize=10,
linestyle='')
ax.errorbar(
df_married[(x_col, 'mean')],
df_married[(y_col, 'mean')],
xerr=df_married[(x_col, 'sem')],
yerr=df_married[(y_col, 'sem')],
color='b',
marker='^',
capsize=3,
capthick=1,
label='Married',
markersize=10,
linestyle='')
leg = ax.legend(loc='upper left', fontsize=18, frameon=True, numpoints=1)
ax.set_xlabel(x_label, fontsize=18)
ax.set_ylabel(y_label, fontsize=18)
ax.set_ylim(0, 1.1)
ax.set_xlim(-2, 8.5)
ax.patch.set_facecolor('white')
leg.get_frame().set_edgecolor('black')
leg.get_frame().set_facecolor('white')
leg.get_frame().set_linewidth(1)
plt.show()
plot_2d_means_credit(credit_train_df, 'PAY_0', 'default',
'Repayment Status (April)', 'Observed default rate')
###Output
_____no_output_____
###Markdown
Train calibrated linear models to predict the credit default rate

Next, we will train a *calibrated linear model* from TFL to predict whether or not a person will default on a loan. The two input features are the person's marital status and how many months the person was behind on paying back their loans in April (the repayment status). The training label is whether or not the person defaulted on a loan.

We will first train a calibrated linear model without any constraints. Then we will train a calibrated linear model with monotonicity constraints and observe the difference in the model output and accuracy.

Helper functions for configuring credit default dataset features

These helper functions are specific to the credit default case study.
###Code
def get_input_fn_credit(input_df, num_epochs, batch_size=None):
"""Gets TF input_fn for credit default models."""
return tf.compat.v1.estimator.inputs.pandas_input_fn(
x=input_df[['MARRIAGE', 'PAY_0']],
y=input_df['default'],
num_epochs=num_epochs,
batch_size=batch_size or len(input_df),
shuffle=False)
def get_feature_columns_and_configs_credit(monotonicity):
"""Gets TFL feature configs for credit default models."""
feature_columns = [
tf.feature_column.numeric_column('MARRIAGE'),
tf.feature_column.numeric_column('PAY_0'),
]
feature_configs = [
tfl.configs.FeatureConfig(
name='MARRIAGE',
lattice_size=2,
pwl_calibration_num_keypoints=3,
monotonicity=monotonicity,
pwl_calibration_always_monotonic=False),
tfl.configs.FeatureConfig(
name='PAY_0',
lattice_size=2,
pwl_calibration_num_keypoints=10,
monotonicity=monotonicity,
pwl_calibration_always_monotonic=False),
]
return feature_columns, feature_configs
###Output
_____no_output_____
###Markdown
Helper functions for visualizing trained model outputs
###Code
def plot_predictions_credit(input_df,
estimator,
x_col,
x_label='Repayment Status (April)',
y_label='Predicted default probability'):
predictions = get_predicted_probabilities(
estimator=estimator, input_df=input_df, get_input_fn=get_input_fn_credit)
new_df = input_df.copy()
new_df.loc[:, 'predictions'] = predictions
plot_2d_means_credit(new_df, x_col, 'predictions', x_label, y_label)
###Output
_____no_output_____
###Markdown
Train an unconstrained (non-monotonic) calibrated linear model
###Code
nomon_linear_estimator = optimize_learning_rates(
train_df=credit_train_df,
val_df=credit_val_df,
test_df=credit_test_df,
monotonicity=0,
learning_rates=LEARNING_RATES,
batch_size=BATCH_SIZE,
num_epochs=NUM_EPOCHS,
get_input_fn=get_input_fn_credit,
get_feature_columns_and_configs=get_feature_columns_and_configs_credit)
plot_predictions_credit(credit_train_df, nomon_linear_estimator, 'PAY_0')
###Output
_____no_output_____
###Markdown
Train a monotonic calibrated linear model
###Code
mon_linear_estimator = optimize_learning_rates(
train_df=credit_train_df,
val_df=credit_val_df,
test_df=credit_test_df,
monotonicity=1,
learning_rates=LEARNING_RATES,
batch_size=BATCH_SIZE,
num_epochs=NUM_EPOCHS,
get_input_fn=get_input_fn_credit,
get_feature_columns_and_configs=get_feature_columns_and_configs_credit)
plot_predictions_credit(credit_train_df, mon_linear_estimator, 'PAY_0')
###Output
_____no_output_____ |
notebooks/Top 15 violations by Total Tickets Analysis.ipynb | ###Markdown
Get the top 15 violations by total tickets
###Code
# `dmv_df` is assumed to have been loaded earlier in the notebook.
# Total tickets per violation code, then keep the 15 most-ticketed codes.
vc_df = dmv_df.groupby(['violation_code']).counter.sum().reset_index('violation_code')
counter_codes_15 = vc_df.sort_values(by='counter', ascending=False)[:15].violation_code
top_codes = dmv_df[dmv_df.violation_code.isin(counter_codes_15)]
top_violation_by_state = top_codes.groupby(['violation_description', 'rp_plate_state']).counter.sum()
top_violation_by_state.unstack().plot.barh()
top_violation_by_state_revenue = top_codes.groupby(['violation_description', 'rp_plate_state']).fine.sum()
ax = top_violation_by_state_revenue.unstack().plot.barh(legend=True)
ax.xaxis.set_major_formatter(plt.FormatStrFormatter('%.0f'))
plt.draw()
###Output
_____no_output_____ |
notebooks/Glob-RRL.ipynb | ###Markdown
From GC-Orbits-Monte-Carlo.ipynb:
###Code
df = gd.FardalStreamDF()
names = ['NGC 6809', 'NGC 288', 'NGC 5897', 'NGC 6362', 'IC 4499',
'NGC 7099', 'Pal 13', 'NGC 6144', 'NGC 6101', 'NGC 1904']
for name in names:
row = tbl[tbl['Name_1'] == name]
this_w0 = w0[tbl['Name_1'] == name][0]
# Run mock stream models from samples from the orbit error distribution:
mass = row['mass'][0]*u.Msun
prog_pot = gp.PlummerPotential(m=mass,
b=row['r_hm'][0] / 1.3 * u.pc,
units=galactic)
gen = gd.MockStreamGenerator(df, mw_pot,
progenitor_potential=prog_pot)
streams = []
for i in tqdm(range(32)):
stream, _ = gen.run(this_w0[i], mass, release_every=8,
dt=-1*u.Myr, n_steps=2000)
streams.append(stream)
# Now transform to sky coordinates, centered on the cluster:
fr = coord.SkyOffsetFrame(origin=coord.ICRS(ra=row['RA'][0]*u.deg,
dec=row['DEC'][0]*u.deg))
all_sky = []
for stream in streams:
stream_c = stream.to_coord_frame(fr, galactocentric_frame=gc_frame)
all_sky.append(np.stack((stream_c.lon.wrap_at(180*u.deg).degree,
stream_c.lat.degree)).T)
all_sky = np.vstack(all_sky)
all_sky = all_sky[np.sqrt(all_sky[:, 0]**2 + all_sky[:, 1]**2) > 0.25]
# Predict where we expect to see tidal debris:
kde = KernelDensity(bandwidth=0.2)
_ = kde.fit(all_sky)
grid = np.arange(-10, 10+1e-3, 0.25)
xgrid, ygrid = np.meshgrid(grid, grid)
X_grid = np.stack((xgrid.ravel(), ygrid.ravel())).T
H = np.exp(kde.score_samples(X_grid))
H = H.reshape(grid.size, grid.size)
# --- plot
fig, ax = plt.subplots(1, 1, figsize=(6.2, 6))
ax.pcolormesh(xgrid, ygrid, H)
ax.set_xlabel(r'$\phi_1$')
ax.set_ylabel(r'$\phi_2$')
ax.set_title('Predicted stream ({})'.format(row['Name_1'][0]),
fontsize=16)
for axis in [ax.xaxis, ax.yaxis]:
axis.set_ticks(np.arange(-10, 10+1e-3, 5))
fig.set_facecolor('w')
fig.tight_layout()
fig.savefig('../plots/predicted-debris-{}.png'.format(row['Name_1'][0].replace(' ', '_')),
dpi=250)
###Output
_____no_output_____
###Markdown
---
###Code
# row = tbl[tbl['Name_1'] == 'NGC 5897']
row = tbl[tbl['Name_1'] == 'Pal 13']
# Now transform to sky coordinates, centered on the cluster:
fr = coord.SkyOffsetFrame(origin=coord.ICRS(ra=row['RA'][0]*u.deg,
dec=row['DEC'][0]*u.deg))
rrl_c = rrl.get_skycoord(distance=rrl.D_kpc*u.kpc)
rrl_c_fr = rrl_c.transform_to(fr)
sky_mask = (np.abs(rrl_c_fr.lon) < 10*u.deg) & (np.abs(rrl_c_fr.lat) < 10*u.deg)
pm_mask = np.sqrt((rrl_c.pm_ra_cosdec.value - (row['PMRA'][0]))**2 +
(rrl_c.pm_dec.value - row['PMDEC'][0])**2) < 1.5
dist_mask = (np.abs(rrl_c.distance - row['dist'][0]*u.kpc) < 5*u.kpc)
# dist_mask = (np.abs(rrl_c.distance - 10*u.kpc) < 2*u.kpc)
mask = sky_mask & dist_mask & pm_mask
fig, axes = plt.subplots(1, 2, figsize=(10, 5))
ax = axes[0]
ax.plot(rrl_c_fr.lon.degree[mask],
rrl_c_fr.lat.degree[mask],
ls='', marker='o', mew=0, ms=2., alpha=0.5)
ax.set_xlim(-10, 10)
ax.set_ylim(-10, 10)
ax = axes[1]
ax.plot(rrl_c.pm_ra_cosdec.value[mask],
rrl_c.pm_dec.value[mask],
ls='', marker='o', mew=0, ms=2., alpha=0.5)
# ax.scatter(row['PMRA'], row['PMDEC'])
###Output
_____no_output_____ |
modeling/AppMdl/profile_functions.ipynb | ###Markdown
AppMdl

* A complex App with 16 functions.
* There are 2 branches, 2 parallels, 2 cycles, and 2 self-loops in App16.
###Code
import os
import logging
from io import BytesIO
import time
import zipfile
import numpy as np
import boto3
from datetime import datetime, timezone
from time import gmtime, strftime
import json
import pandas as pd
import matplotlib.pyplot as plt
import pickle
import math
client = boto3.client('lambda')
function_prefix='AppMdl'
function_count = 16
# The difference between UTC and local timezone
timezone_offset = 0
###Output
_____no_output_____
###Markdown
Create Functions of AppMdl

Function Name List
###Code
function_name_list = [function_prefix+'_f'+str(i) for i in range(1, function_count+1)]
print(function_name_list)
###Output
['AppMdl_f1', 'AppMdl_f2', 'AppMdl_f3', 'AppMdl_f4', 'AppMdl_f5', 'AppMdl_f6', 'AppMdl_f7', 'AppMdl_f8', 'AppMdl_f9', 'AppMdl_f10', 'AppMdl_f11', 'AppMdl_f12', 'AppMdl_f13', 'AppMdl_f14', 'AppMdl_f15', 'AppMdl_f16']
###Markdown
Send Requests to Create Lambda Functions
###Code
function_creation_response = []
for function in function_name_list:
response = client.create_function(
FunctionName=function,
Runtime='python3.7',
Role='arn:aws:iam::499537426559:role/ServerlessAppPerfOpt',
Handler='lambda_function.lambda_handler',
Code={
'ZipFile': b"PK\x03\x04\x14\x00\x00\x00\x00\x00\xf3s;P\x84\xf0r\x96Z\x00\x00\x00Z\x00\x00\x00\x12\x00\x00\x00lambda_function.pydef lambda_handler(event, context):\n pass\n return {\n 'statusCode': 200\n }\nPK\x03\x04\x14\x00\x00\x00\x00\x00\x05q;P\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x13\x00\x00\x00.ipynb_checkpoints/PK\x01\x02\x14\x03\x14\x00\x00\x00\x00\x00\xf3s;P\x84\xf0r\x96Z\x00\x00\x00Z\x00\x00\x00\x12\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xb4\x81\x00\x00\x00\x00lambda_function.pyPK\x01\x02\x14\x03\x14\x00\x00\x00\x00\x00\x05q;P\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x13\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\xfdA\x8a\x00\x00\x00.ipynb_checkpoints/PK\x05\x06\x00\x00\x00\x00\x02\x00\x02\x00\x81\x00\x00\x00\xbb\x00\x00\x00\x00\x00"
},
Description='Analytical Model Evaluation {}'.format(function),
Timeout=60,
MemorySize=128
)
function_creation_response.append(response)
time.sleep(0.1)
print([item['StateReasonCode'] for item in function_creation_response])
###Output
_____no_output_____
###Markdown
Update all Functions in AppMdl Update Function Code
###Code
functions=[]
for file in os.listdir('functions'):
path=os.path.abspath(os.path.join(os.path.dirname('__file__'), 'functions/'+file))
if not file.startswith('.') and os.path.isdir(path):
functions.append(file)
for function_folder in functions:
buf = BytesIO()
with zipfile.ZipFile(buf, 'w') as z:
for file in os.listdir('functions/'+function_folder):
            src = os.path.abspath(os.path.join(os.path.dirname('__file__'), 'functions/{}/{}'.format(function_folder, file)))
            z.write(src, os.path.basename(src))
buf.seek(0)
pkg = buf.read()
client.update_function_code(FunctionName='{}_{}'.format(function_prefix, function_folder),ZipFile=pkg)
###Output
_____no_output_____
###Markdown
Update Function Memory Configuration* Available Memory Configurations: 128, 192, 256, 320, 384, 448, 512, 576, 640, 704, 768, 832, 896, 960, 1024, 1088, 1152, 1216, 1280, 1344, 1408, 1472, 1536, 1600, 1664, 1728, 1792, 1856, 1920, 1984, 2048, 2112, 2176, 2240, 2304, 2368, 2432, 2496, 2560, 2624, 2688, 2752, 2816, 2880, 2944, 3008
###Code
mem_config_list={
'f1':1536,
'f2':1792,
'f3':576,
'f4':2240,
'f5':896,
'f6':1728,
'f7':128,
'f8':128,
'f9':256,
'f10':320,
'f11':1920,
'f12':1984,
'f13':1088,
'f14':640,
'f15':896,
'f16':1088
}
for function in mem_config_list.keys():
client.update_function_configuration(FunctionName='{}_{}'.format(function_prefix, function), MemorySize=mem_config_list[function])
###Output
_____no_output_____
###Markdown
Profile Functions Test Run
###Code
np.random.seed(256)
# build the JSON payload by hand (numpy ints and arrays are not directly json-serializable)
payload_str="{"+ "\"para1\":{}, \"para2\":{}, \"para4\":{}, \"para6\":{}".format(
np.random.randint(1, 101),
list(np.random.randint(1, 101, 20)),
np.random.randint(1, 101),
list(np.random.randint(1, 101, 20))
) +"}"
client.invoke(FunctionName='{}_{}'.format(function_prefix, 'f1'), InvocationType='Event', Payload=payload_str)
###Output
_____no_output_____
###Markdown
Configure Logging
###Code
logging.basicConfig(filename='invoke.log', encoding='utf-8', format='%(asctime)s.%(msecs)03d %(message)s', datefmt='%Y-%m-%d %H:%M:%S', level=logging.INFO)
###Output
_____no_output_____
###Markdown
Run
###Code
# drive load: each round invokes every function once, ~3 s apart
for i in range(10000):
time.sleep(3)
for name in function_name_list:
payload_str="{"+ "\"para1\":{}, \"para2\":{}, \"para4\":{}, \"para6\":{}".format(
np.random.randint(1, 101),
list(np.random.randint(1, 101, 20)),
np.random.randint(1, 101),
list(np.random.randint(1, 101, 20))
) +"}"
response = client.invoke(FunctionName=name, InvocationType='Event', Payload=payload_str)
RequestId = response.get('ResponseMetadata', {}).get('RequestId')
StatusCode = response.get('StatusCode', 'ERR')
logging.info(f'{i+1} {StatusCode} {name} {RequestId}')
time.sleep(0.1)
###Output
_____no_output_____
###Markdown
Get the start time and the end time
###Code
# the first and last timestamped lines of invoke.log bound the profiling window
profile_function_start_time = ' '.join(os.popen('head -1 invoke.log').read().split(' ')[:2])
profile_function_end_time = ' '.join(os.popen('tail -1 invoke.log').read().split(' ')[:2])
profile_function_start_time = datetime.strptime(profile_function_start_time, '%Y-%m-%d %H:%M:%S.%f')
profile_function_end_time = datetime.strptime(profile_function_end_time, '%Y-%m-%d %H:%M:%S.%f')
profile_function_start_time
profile_function_end_time
# convert to epoch seconds and pad the window by 10 s on each side
profile_function_start_time = int(datetime.timestamp(profile_function_start_time)) - 10
profile_function_end_time = int(datetime.timestamp(profile_function_end_time)) + 10
###Output
_____no_output_____
###Markdown
CloudWatch Logs
###Code
logclient = boto3.client('logs')
###Output
_____no_output_____
###Markdown
Query AppMdl Lambda Function Logs Functions for parsing Logs
###Code
def lambda_report_log_to_dict(log):
    """Parse one CloudWatch Insights 'REPORT' result row into a flat dict."""
    res={}
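    # each result row is [@timestamp, @message]; @message holds tab-separated
    # "key: value" fields, e.g. "REPORT RequestId: <uuid>\tDuration: 12.3 ms\t..."
    # (example values hypothetical)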
lis=[item.split(': ') for item in log[1]['value'].split('\t')]
res['RequestId']=lis[0][1]
res['Duration']=float(lis[1][1].split(' ')[0])
res['Billed_Duration']=int(lis[2][1].split(' ')[0])
res['Memory_Size']=int(lis[3][1].split(' ')[0])
res['Max_Memory_Used']=int(lis[4][1].split(' ')[0])
res['UTC_Timestamp'] = time.mktime(datetime.strptime(log[0]['value'], "%Y-%m-%d %H:%M:%S.%f").timetuple()) +timezone_offset*3600
return res
###Output
_____no_output_____
###Markdown
Prepare Logs
###Code
query_lambda = []
for function in function_name_list:
query_lambda.append(logclient.start_query(
logGroupName='/aws/lambda/{}'.format(function),
queryString="fields @timestamp, @message| filter @message like 'REPORT'| sort @timestamp asc",
startTime=profile_function_start_time,
endTime=profile_function_end_time,
limit=10000
))
time.sleep(4)
time.sleep(10)
###Output
_____no_output_____
###Markdown
Retrieve Logs
###Code
query_lambda_results = []
for q in query_lambda:
query_lambda_results.append(logclient.get_query_results(
queryId=q['queryId']
))
time.sleep(4)
with open('query_lambda_results_performance_profile.pickle', 'wb') as f:
f.write(pickle.dumps(query_lambda_results))
AppMdl_lambda_logs_dict = {'f'+str(i):None for i in range(1, function_count+1)}
for i in range(1, function_count+1):
AppMdl_lambda_logs_dict['f'+str(i)] = [lambda_report_log_to_dict(item) for item in query_lambda_results[i-1]['results']]
for item in AppMdl_lambda_logs_dict['f'+str(i)]:
item['Function']='f'+str(i)
len(AppMdl_lambda_logs_dict['f1'])
###Output
_____no_output_____
###Markdown
Convert Logs into DataFrame and Save as CSV
###Code
# DataFrame.append was removed in pandas 2.0; build the frame with pd.concat instead
AppMdl_lambda_logs = pd.concat(
    [pd.DataFrame(AppMdl_lambda_logs_dict['f'+str(i)]) for i in range(1, function_count+1)],
    ignore_index=True)
AppMdl_lambda_logs=AppMdl_lambda_logs[['Function', 'Memory_Size', 'Max_Memory_Used', 'Duration', 'Billed_Duration', 'UTC_Timestamp', 'RequestId']]
AppMdl_lambda_logs.to_csv('AppMdl_lambda_logs_performance_profile.csv',index=False)
AppMdl_lambda_logs = pd.read_csv('AppMdl_lambda_logs_performance_profile.csv', low_memory=False)
AppMdl_lambda_logs.columns = ['Function', 'Memory_Size', 'Max_Memory_Used', 'Duration', 'Billed_Duration', 'UTCTimestamp', 'RequestId']
AppMdl_lambda_logs.head()
for i in range(1, function_count+1):
print(f"f{i}", AppMdl_lambda_logs.query(f"Function == 'f{i}'").shape[0], AppMdl_lambda_logs.query(f"Function == 'f{i}'")['Duration'].mean())
# pmms: price per MB-millisecond (equivalent to $0.0000166667 per GB-second);
# ppi: per-invocation request price. `mem` is passed in below as MB * 1e6,
# which scales the result to USD per 1M invocations.
def calculate_cost(rt: float, mem: float, pmms: float = 1.627607421875e-11, ppi: float = 0.0000002) -> float:
    return math.ceil(rt) * mem * pmms + ppi
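# sanity check: a 500 ms run at 1536 MB should cost ~12.5 USD per 1M invocations
assert abs(calculate_cost(500.0, 1536 * 1000000) - 12.5) < 0.01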
# Tukey-style whiskers: 1.5*IQR fences clipped to the observed data range
def adjacent_values(vals, q1, q3):
upper_adjacent_value = q3 + (q3 - q1) * 1.5
upper_adjacent_value = np.clip(upper_adjacent_value, q3, vals[-1])
lower_adjacent_value = q1 - (q3 - q1) * 1.5
lower_adjacent_value = np.clip(lower_adjacent_value, vals[0], q1)
return lower_adjacent_value, upper_adjacent_value
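# quick check on a toy sorted sample: with q1=3 and q3=7 the fences (-3, 13)
# clip to the data range, giving whiskers at (1, 9)
print(adjacent_values(np.arange(1, 10), 3, 7))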
# Slice a steady-state window of invocations [500:9501] per function and derive
# the corresponding cost; keep the f1_duration / f1_cost ... names that the
# plotting cell below references dynamically
for i in range(1, function_count+1):
    name = f'f{i}'
    durations = AppMdl_lambda_logs.query(f"Function == '{name}'")['Duration'].to_list()[500:9501]
    globals()[f'{name}_duration'] = durations
    globals()[f'{name}_cost'] = [calculate_cost(d, mem_config_list[name] * 1000000) for d in durations]
fig, ((ax_f1, ax_f2, ax_f3, ax_f4, ax_f5, ax_f6, ax_f7, ax_f8), (ax_f9, ax_f10, ax_f11, ax_f12, ax_f13, ax_f14, ax_f15, ax_f16)) = plt.subplots(nrows=2, ncols=8, figsize=(12, 3.2))
duration_y_lim = {
"f1": (483, 880, 28),
"f2": (194, 224, 2),
"f3": (498, 584, 6),
"f4": (700, 1200, 35),
"f5": (190, 230, 2.5),
"f6": (490, 550, 4),
"f7": (260, 360, 6),
"f8": (160, 260, 6),
"f9": (112, 192, 5),
"f10": (320, 420, 6),
"f11": (640, 1140, 30),
"f12": (600, 1200, 40),
"f13": (350, 450, 6),
"f14": (200, 300, 6),
"f15": (240, 320, 5),
"f16": (130, 330, 12)
}
cost_y_lim = {
"f1": (12.2, 21.8, 0.6),
"f2": (5.6, 6.6, 0.2),
"f3": (4.6, 5.6, 0.2),
"f4": (24, 44, 1.5),
"f5": (2.6, 3.6, 0.2),
"f6": (13.4, 15.6, 0.2),
"f7": (0.5, 0.9, 0.05),
"f8": (0.25, 0.7, 0.05),
"f9": (0.3, 0.8, 0.05),
"f10": (1.6, 2.4, 0.1),
"f11": (24, 40, 1.5),
"f12": (20, 42, 1.5),
"f13": (6.2, 7.4, 0.1),
"f14": (2.1, 3.1, 0.1),
"f15": (3.5, 4.6, 0.1),
"f16": (2.4, 5.8, 0.2)
}
# build each panel via an f-string + exec so the dynamically named
# f{i}_duration / f{i}_cost variables can be referenced per function
for i in range(1, 17):
string = f"""
# plot f{i}
vp{i} = ax_f{i}.violinplot(f{i}_duration, [1.5], widths=1.5,
showmeans=False, showmedians=False, showextrema=False)
percentile10, quartile25, medians, quartile75, percentile90 = np.percentile(f{i}_duration, [10, 25, 50, 75, 90])
mean = np.mean(f{i}_duration)
duration_y1, duration_y2, duration_step = duration_y_lim["f{i}"]
whiskers_min, whiskers_max = adjacent_values(np.sort(f{i}_duration), quartile25, quartile75)
ax_f{i}.scatter([1.5], medians, marker='o', color='white', s=30, zorder=3)
ax_f{i}.scatter([1.5], mean, marker='x', color='white', s=30, zorder=3)
ax_f{i}.scatter([1.5], percentile10, marker='+', color='white', s=30, zorder=3)
ax_f{i}.scatter([1.5], percentile90, marker='+', color='white', s=30, zorder=3)
ax_f{i}.vlines([1.5], quartile25, quartile75, color='#666666', linestyle='-', lw=6, alpha=0.5)
ax_f{i}.vlines([1.5], whiskers_min, whiskers_max, color='#666666', linestyle='-', lw=1, alpha=0.5)
ax_f{i}.set(xlim=(0, 5), xticks=np.arange(0, 5),
ylim=(duration_y1, duration_y2), yticks=np.arange(duration_y1, duration_y2, duration_step))
ax_f{i}.set_title('f{i}', fontsize=7)
ax_f{i}_cost = ax_f{i}.twinx()
vp{i}_twin = ax_f{i}_cost.violinplot(
f{i}_cost, [3.5], showmeans=False, showmedians=False,
showextrema=False, widths=1.5)
percentile10, quartile25, medians, quartile75, percentile90 = np.percentile(f{i}_cost, [10, 25, 50, 75, 90])
mean = np.mean(f{i}_cost)
cost_y1, cost_y2, cost_step = cost_y_lim["f{i}"]
whiskers_min, whiskers_max = adjacent_values(np.sort(f{i}_cost), quartile25, quartile75)
ax_f{i}_cost.scatter([3.5], medians, marker='o', color='white', s=30, zorder=3)
ax_f{i}_cost.scatter([3.5], mean, marker='x', color='white', s=30, zorder=3)
ax_f{i}_cost.scatter([3.5], percentile10, marker='+', color='white', s=30, zorder=3)
ax_f{i}_cost.scatter([3.5], percentile90, marker='+', color='white', s=30, zorder=3)
ax_f{i}_cost.vlines([3.5], quartile25, quartile75, color='#666666', linestyle='-', lw=6, alpha=0.5)
ax_f{i}_cost.vlines([3.5], whiskers_min, whiskers_max, color='#666666', linestyle='-', lw=1, alpha=0.5)
ax_f{i}_cost.set(xlim=(0, 5), xticks=np.arange(0, 5),
ylim=(cost_y1, cost_y2), yticks=np.arange(cost_y1, cost_y2, cost_step))
for pc in vp{i}["bodies"]:
pc.set_facecolor('#BBD5E8')
pc.set_edgecolor('grey')
pc.set_alpha(1)
for pc in vp{i}_twin['bodies']:
pc.set_facecolor('#FFB570')
pc.set_edgecolor('grey')
pc.set_alpha(1)
ax_f{i}.tick_params(axis='both', which='major', labelsize=6)
ax_f{i}_cost.tick_params(axis='both', which='major', labelsize=6)
ax_f{i}.set_xticklabels([])
ax_f{i}_cost.set_xticklabels([])
ax_f{i}.tick_params(direction='in', bottom=False)
ax_f{i}_cost.tick_params(direction='in', bottom=False)
"""
exec(string)
# ax_f1.set_ylabel('Duration in milliseconds')
# ax_f16_cost.set_ylabel('Cost per 1M invocations in USD')
plt.tight_layout()
plt.show()
fig.savefig("16_function_pp.pdf")
###Output
_____no_output_____ |
site/public/courses/DS-2.3/Lessons/map_reduce.ipynb | ###Markdown
Map-Reduce- Let's watch this video: https://www.youtube.com/watch?v=cKlnR-CB3tk&t=137s
###Code
f = lambda x: x * x
# def f(x):
# return x * x
print(f(5))
###Output
25
###Markdown
Two ways to apply a function to the elements of a list
###Code
nums = [1, 2, 3, 4, 5, 6]
nums_squared = list(map(lambda x: x * x, nums))
print(nums_squared)
nums_squared = [x * x for x in nums]
print(nums_squared)
words = ['Deer', 'Bear', 'River', 'Car', 'Car', 'River', 'Deer', 'Car', 'Bear']
mapping = map(lambda x : (x, 1), words)
for i in mapping:
print(i)
# a map object is an iterator: the first pass exhausted it, so this loop prints nothing
for i in mapping:
    print(i)
mapping = map(lambda x : {x: 1}, words)
for i in mapping:
print(i)
###Output
{'Deer': 1}
{'Bear': 1}
{'River': 1}
{'Car': 1}
{'Car': 1}
{'River': 1}
{'Deer': 1}
{'Car': 1}
{'Bear': 1}
###Markdown
A map object behaves like a generator in that it can only be consumed once, unless we materialize it with list(map(...))
###Code
n = list(map(lambda char: dict([[char, 1]]), 'testing yeah it works'))
n
###Output
_____no_output_____
###Markdown
Reduce
###Code
from functools import reduce
print(reduce(lambda x, y: x & y, [{1, 2, 3}, {2, 3, 4}, {3, 4, 5}]))
print(reduce(lambda x, y: x + y, [1, 2, 4]))
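# reduce folds left to right: {1,2,3} & {2,3,4} -> {2,3}, then {2,3} & {3,4,5} -> {3};
# similarly 1 + 2 -> 3, then 3 + 4 -> 7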
###Output
7
###Markdown
Count how many times each word appears in a list
###Code
from collections import Counter
words = ['Deer', 'Bear', 'River', 'Car', 'Car', 'River', 'Deer', 'Car', 'Bear']
mapping = map(lambda x : {x: 1}, words)
def fn(x, y):
return dict(Counter(x) + Counter(y))
reduce(fn, mapping)
reduce(fn, [{'a': 1}, {'a': 1}, {'b': 1}])
Counter({'a': 1}) + Counter({'a': 1})
print(fn({'a': 1}, {'a': 1}))
print(fn({'a': 1}, {'b': 1}))
###Output
{'a': 1, 'b': 1}
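###Markdown
 Putting it together: the whole word count collapses into a single map-reduce pipeline (a sketch reusing `words`, `fn`, and `reduce` from above)
###Code
word_counts = reduce(fn, map(lambda w: {w: 1}, words))
print(word_counts)
###Output
_____no_output_____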
###Markdown
*arg
###Code
def intersection(*arg):
result = set(arg[0])
for i in range(1,len(arg)):
result = result & set(arg[i])
return list(result)
print(intersection(['a', 'b'], ['a', 'c'], ['a', 'b']))
###Output
['a']
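###Markdown
 The same intersection can also be written with `reduce`, tying it back to this lesson's theme (a sketch using the `reduce` imported above)
###Code
def intersection_reduce(*args):
    return list(reduce(lambda x, y: set(x) & set(y), args))
print(intersection_reduce(['a', 'b'], ['a', 'c'], ['a', 'b']))
###Output
_____no_output_____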
|
examples/trending_prediction.ipynb | ###Markdown
Trending Prediction Notebook Import the packages
###Code
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from scipy import interpolate
###Output
_____no_output_____
###Markdown
Read the original data file
###Code
df = pd.read_csv("../data/US_youtube_trending_data.csv")
###Output
_____no_output_____
###Markdown
Preview the data after cleaning
###Code
df_cleaned = df.drop(columns=['channelId', 'channelTitle', 'tags', 'thumbnail_link', 'comments_disabled', 'ratings_disabled', 'description'], axis=1)
df_cleaned = df_cleaned[df_cleaned['view_count'] > 0]
df_cleaned["trending_date"]= pd.to_datetime(df_cleaned.trending_date)
df_cleaned["publishedAt"]= pd.to_datetime(df_cleaned.publishedAt)
df_cleaned = df_cleaned.groupby(['video_id']).filter(lambda x: len(x) > 4)
df_grouped = df_cleaned.drop(columns=['publishedAt', 'likes', 'dislikes', 'comment_count'])
df_grouped
###Output
_____no_output_____
###Markdown
Get a dictionary containing dataset for training
###Code
video_ids = df_cleaned.video_id.unique()
df_grouped_by = df_cleaned.groupby(['video_id'])
video_dict = {}
for i in video_ids:
video_dict[i] = df_grouped_by.get_group(i)
###Output
_____no_output_____
###Markdown
Get a dictionary containing dataset for inference
###Code
start_date = df_grouped['trending_date'].max() + pd.DateOffset(-4)
end_date = df_grouped['trending_date'].max()
current_trending = df_grouped[df_grouped['trending_date'].between(start_date, end_date)]
current_trending = current_trending.groupby(['video_id']).filter(lambda x: len(x) > 3)
current_ids = current_trending.video_id.unique()
current_trending_group = current_trending.groupby(['video_id'])
current_dict = {}
for i in current_ids:
current_dict[i] = current_trending_group.get_group(i).reset_index(drop=True)
list(current_dict.items())[:5]
###Output
_____no_output_____
###Markdown
Run inference on the data
###Code
for i in current_ids:
category = int(current_dict[i].categoryId.unique())
title = str(current_dict[i].title.unique()[0])
trending_date = current_dict[i].trending_date.max() + pd.DateOffset(1)
last_view_count = current_dict[i].view_count.max()
x_points = list(current_dict[i].index)
y_points = list(current_dict[i].view_count)
    # fit a cubic spline to the view-count history and extrapolate one step ahead
    tck = interpolate.splrep(x_points, y_points)
    view_count_pred = int(interpolate.splev(max(x_points) + 1, tck))
    # view counts cannot decrease; if the spline bends down, mirror the step upward instead
    if view_count_pred < last_view_count:
        view_count_pred = last_view_count + last_view_count - view_count_pred
    # append the predicted row to this video's frame
    current_dict[i].loc[len(current_dict[i].index)] = [i, title, category, trending_date, view_count_pred]
list(current_dict.items())[:5]
###Output
_____no_output_____
###Markdown
Plot one of the predicted trending
###Code
df_plot = current_dict['pXDx6DjNLDU']
title_plot = str(df_plot.title[0])
trending_date_plot = df_plot['trending_date']
view_count_plot = df_plot['view_count']
plt.plot(trending_date_plot, view_count_plot)
plt.title(title_plot)
plt.show()
###Output
_____no_output_____
###Markdown
Show the data with predicted values
###Code
# DataFrame.append was removed in pandas 2.0; concatenate the per-video frames instead
df_output = pd.concat([current_dict[i] for i in current_ids], ignore_index=True)
df_output
###Output
_____no_output_____ |
examples/gallery/demos/matplotlib/scatter_economic.ipynb | ###Markdown
Most examples work across multiple plotting backends, this example is also available for:* [Bokeh - scatter_economic](../bokeh/scatter_economic.ipynb)
###Code
import pandas as pd
import holoviews as hv
from holoviews import dim, opts
hv.extension('matplotlib')
hv.output(dpi=100)
###Output
_____no_output_____
###Markdown
Declaring data
###Code
macro_df = pd.read_csv('http://assets.holoviews.org/macro.csv', '\t')
key_dimensions = [('year', 'Year'), ('country', 'Country')]
value_dimensions = [('unem', 'Unemployment'), ('capmob', 'Capital Mobility'),
('gdp', 'GDP Growth'), ('trade', 'Trade')]
macro = hv.Table(macro_df, key_dimensions, value_dimensions)
###Output
_____no_output_____
###Markdown
Plot
###Code
gdp_unem_scatter = macro.to.scatter('Year', ['GDP Growth', 'Unemployment'])
gdp_unem_scatter.overlay('Country').opts(
opts.Scatter(color=hv.Cycle('tab20'), edgecolors='k', show_grid=True,
aspect=2, fig_size=250, s=dim('Unemployment')*20,
show_frame=False),
opts.NdOverlay(legend_position='right'))
###Output
_____no_output_____ |
Emotion-detection-main/Emotion_Detection.ipynb | ###Markdown
Building our Model To train on the data, we fine-tune a pre-trained MobileNet (transfer learning)
###Code
# working with a pre-trained MobileNet base; its layers are frozen below for transfer learning
base_model = MobileNet( input_shape=(224,224,3), include_top= False )
for layer in base_model.layers:
layer.trainable = False
x = Flatten()(base_model.output)
x = Dense(units=7 , activation='softmax' )(x)
# creating our model.
model = Model(base_model.input, x)
model.compile(optimizer='adam', loss= categorical_crossentropy , metrics=['accuracy'] )
###Output
_____no_output_____
###Markdown
Preparing our data using data generator
###Code
train_datagen = ImageDataGenerator(
zoom_range = 0.2,
shear_range = 0.2,
horizontal_flip=True,
rescale = 1./255
)
train_data = train_datagen.flow_from_directory(directory= "/content/train",
target_size=(224,224),
batch_size=32,
)
train_data.class_indices
val_datagen = ImageDataGenerator(rescale = 1./255 )
val_data = val_datagen.flow_from_directory(directory= "/content/test",
target_size=(224,224),
batch_size=32,
)
###Output
Found 7178 images belonging to 7 classes.
###Markdown
visualizing the data that is fed to train data gen
###Code
# to visualize the images in the traing data denerator
t_img , label = train_data.next()
#-----------------------------------------------------------------------------
# function that, when called, will plot the images
def plotImages(img_arr, label):
"""
input :- images array
output :- plots the images
"""
count = 0
for im, l in zip(img_arr,label) :
plt.imshow(im)
plt.title(im.shape)
        plt.axis('off')  # note: "plt.axis = False" would shadow the function rather than hide the axes
plt.show()
count += 1
if count == 10:
break
#-----------------------------------------------------------------------------
# function call to plot the images
plotImages(t_img, label)
###Output
_____no_output_____
###Markdown
having early stopping and model check point
###Code
## having early stopping and model check point
from keras.callbacks import ModelCheckpoint, EarlyStopping
# early stopping
es = EarlyStopping(monitor='val_accuracy', min_delta= 0.01 , patience= 5, verbose= 1, mode='auto')
# model check point
mc = ModelCheckpoint(filepath="best_model.h5", monitor= 'val_accuracy', verbose= 1, save_best_only= True, mode = 'auto')
# puting call back in a list
call_back = [es, mc]
# note: fit_generator is deprecated in recent Keras; model.fit accepts generators directly
hist = model.fit_generator(train_data,
                           steps_per_epoch= 10,
                           epochs= 30,
                           validation_data= val_data,
                           validation_steps= 8,
                           callbacks=call_back)
# Loading the best fit model
from keras.models import load_model
model = load_model("/content/best_model.h5")
h = hist.history
h.keys()
plt.plot(h['accuracy'])
plt.plot(h['val_accuracy'] , c = "red")
plt.title("acc vs v-acc")
plt.show()
plt.plot(h['loss'])
plt.plot(h['val_loss'] , c = "red")
plt.title("loss vs v-loss")
plt.show()
# invert class_indices so a predicted index maps back to its emotion label
op = dict(zip( train_data.class_indices.values(), train_data.class_indices.keys()))
# path for the image to see if it predics correct class
path = "/content/test/angry/PrivateTest_1054527.jpg"
img = load_img(path, target_size=(224,224) )
i = img_to_array(img)/255
input_arr = np.array([i])
input_arr.shape
pred = np.argmax(model.predict(input_arr))
print(f" the image is of {op[pred]}")
# to display the image
plt.imshow(input_arr[0])
plt.title("input image")
plt.show()
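# a small reusable wrapper for the steps above (a sketch; reuses `model`, `op`,
# `load_img`, and `img_to_array` from this notebook)
def predict_emotion(img_path):
    arr = img_to_array(load_img(img_path, target_size=(224, 224))) / 255
    return op[np.argmax(model.predict(np.array([arr])))]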
###Output
the image is of neutral
|
jiuqu/AutoArrangementDashboard.ipynb | ###Markdown
Why was the room deleted?
###Code
room_id = 7122459
sql = "select begin_at, schedule_id, room_type_id, lesson_id from schedules s join rooms r on s.id = r.schedule_id \
where r.id = 7122459"
with conn.cursor() as cur:
cur.execute(sql)
room = cur.fetchone()
print("ROOM INFO:",room)
slot = room[0].strftime('%Y-%m-%d %H:%M:00')
print(slot)
df_chapter, df_room_type = get_free_teacher_capacity_dataframe(slot)
#
course_conn = get_course_connection()
sql = "select id, chapter_id from lessons where published = 1 and deleted_at is null"
lessons = pd.read_sql(sql, course_conn, index_col='id')
chapter_id = lessons.loc[room[3], 'chapter_id']
## available teachers
print('Available teachers:')
t1 = df_chapter.loc[df_chapter.loc[:, chapter_id] > 0].index
t2 = df_room_type.loc[df_room_type.loc[:, room[2]] > 0].index
print(t1.intersection(t2))
###Output
ROOM INFO: (datetime.datetime(2019, 6, 3, 19, 20), 2964373, 2, 1674)
2019-06-03 19:20:00
Available teachers:
Int64Index([ 4293, 4630, 6316, 100190, 100644, 100840, 100947, 100953,
101859, 101902, 102405, 102488, 102674, 103213, 103430, 103504,
105686, 106956, 110949, 111052, 111873, 112139, 112148, 112523,
112818, 112873, 117047, 119725, 128249],
dtype='int64', name='teacher_id')
###Markdown
Why does this L have no class any more? The idle teachers
###Code
sql = "select r.id, schedule_id, room_type_id, course_id, lesson_id, student_count, teacher_id \
from schedules s join rooms r on s.id = r.schedule_id and r.status = 0 and klass_id is null \
and r.is_internal = 0 and ((teacher_id > 0 and student_count =0) or (teacher_id = 0 and student_count > 0)) \
where begin_at = %r " % slot
data = pd.read_sql(sql, conn)
rooms = data.loc[data['teacher_id'] == 0]
###Output
_____no_output_____
###Markdown
Get each teacher's chapters and room types
###Code
teacher_conn = get_teacher_connection()
tlist = str(tuple([i for i in data['teacher_id'].values if i > 0]))
sql = "select teacher_id, chapter_id, status from teacher_chapter_tables where teacher_id in {} and status > 0".format(tlist)
with teacher_conn.cursor() as cur:
cur.execute(sql)
chapter_records = cur.fetchall()
chapters = pd.DataFrame(columns = ('a',), dtype='int8')
for row in chapter_records:
chapters.loc[row[0], row[1]] = int(row[2])
chapters.drop('a', axis=1,inplace=True)
sql = "select teacher_id, room_type, status from teacher_room_types where teacher_id in {} and status > 0".format(tlist)
with teacher_conn.cursor() as cur:
cur.execute(sql)
room_type_records = cur.fetchall()
roomtypes = pd.DataFrame(columns = ('a',), dtype='int8')
for row in room_type_records:
roomtypes.loc[row[0],row[1]] = int(row[2])
roomtypes.drop('a', axis = 1, inplace=True)
###Output
_____no_output_____
###Markdown
Map lesson data to chapters
###Code
course_conn = get_course_connection()
sql = "select id, chapter_id from lessons where published = 1 and deleted_at is null"
lessons = pd.read_sql(sql, course_conn, index_col='id')
###Output
_____no_output_____
###Markdown
Room matching
###Code
def match(room_chapter, room_type):
    # check the four (room-type status, chapter status) combinations, status 2 before status 1
    rt = set(roomtypes.loc[roomtypes[room_type] == 2].index.tolist())
c = set(chapters.loc[chapters[room_chapter] == 2].index.tolist())
print(rt&c)
rt = set(roomtypes.loc[roomtypes[room_type] == 2].index.tolist())
c = set(chapters.loc[chapters[room_chapter] == 1].index.tolist())
print(rt&c)
rt = set(roomtypes.loc[roomtypes[room_type] == 1].index.tolist())
c = set(chapters.loc[chapters[room_chapter] == 2].index.tolist())
print(rt&c)
rt = set(roomtypes.loc[roomtypes[room_type] == 1].index.tolist())
c = set(chapters.loc[chapters[room_chapter] == 1].index.tolist())
print(rt&c)
for index, room in rooms.iterrows():
chapter = lessons['chapter_id'][room['lesson_id']]
print('-' *32)
print("room[{}]:".format(room['id']))
match(chapter, room['room_type_id'])
###Output
--------------------------------
room[7031682]:
set()
{4610, 104963, 102405, 526, 101395, 3604, 102419, 4630, 5654, 101912, 106521, 111643, 101405, 200224, 105508, 113188, 128036, 120871, 150052, 1066, 107563, 112683, 101933, 122926, 105524, 100919, 110135, 115767, 174136, 166972, 110653, 103998, 109631, 1089, 6211, 100420, 105541, 112195, 134211, 162377, 101965, 3155, 105046, 165979, 136285, 138846, 171618, 105575, 103016, 107111, 112235, 110189, 4718, 106606, 112750, 128621, 3191, 155256, 163450, 4219, 123003, 5755, 6782, 175233, 131722, 4747, 3213, 112269, 110740, 109205, 113301, 4759, 129177, 1690, 6810, 169113, 104603, 106153, 170666, 105645, 112814, 124077, 130734, 113329, 3764, 103093, 105141, 126646, 127669, 136372, 108730, 115898, 238262, 105663, 711, 116935, 108235, 135375, 125137, 107221, 1238, 1239, 111829, 100061, 110815, 132833, 141537, 106214, 109286, 170729, 127722, 2284, 103660, 102126, 121075, 104182, 109304, 168185, 109313, 128770, 112899, 205573, 119049, 107276, 111884, 102162, 111385, 171289, 237338, 179488, 111917, 120621, 105775, 108335, 5937, 118577, 4915, 112948, 116532, 114506, 107862, 2392, 101210, 3932, 105821, 100190, 108380, 110942, 7009, 6498, 130914, 108901, 113510, 108903, 360, 2408, 107372, 110961, 103285, 113533, 102272, 102785, 110978, 131971, 106888, 113033, 104331, 167307, 110990, 128400, 113041, 112530, 109460, 112024, 106906, 103838, 105374, 106913, 109987, 105893, 1447, 2472, 129959, 107946, 170919, 119725, 132526, 5555, 126389, 103862, 139190, 107962, 3003, 109499, 4541, 107967, 104384, 3525, 141766, 108487, 5578, 104911, 170959, 144338, 979, 110552, 101340, 109533, 101859, 106981, 108518, 999, 111079, 111081, 4586, 104943, 1521, 4083, 107507, 106486, 107511, 132602, 103931, 109053, 102399}
set()
{124943, 2064, 102417, 110607, 147493, 102444, 50, 100413, 6206, 6210, 104515, 108612, 110660, 4168, 106586, 100442, 104543, 106598, 112759, 123006, 2181, 106637, 106638, 112787, 4244, 143507, 112802, 165, 110760, 116904, 6314, 6316, 108722, 112818, 106677, 108728, 2242, 4293, 104648, 108752, 102616, 116958, 100575, 2278, 112873, 2282, 110827, 176370, 102643, 6391, 102655, 100618, 112906, 271, 102674, 4376, 106780, 102691, 112933, 114987, 117047, 4409, 104761, 121153, 106821, 110923, 6478, 112976, 121170, 151892, 102742, 115034, 106844, 121180, 104804, 100709, 106856, 113005, 108915, 104821, 104830, 6528, 102784, 110986, 104848, 121234, 106899, 119190, 111007, 102816, 113059, 106917, 100780, 2477, 111020, 113083, 108989, 100800, 102848, 106946, 119233, 108999, 106956, 2509, 111052, 109010, 4563, 111058, 109013, 109015, 129496, 4572, 104925, 123357, 100836, 111077, 100840, 113130, 503, 2558, 113162, 102934, 115225, 119325, 542, 2593, 6694, 109103, 115248, 105017, 105022, 102977, 585, 109130, 2639, 593, 100947, 109144, 100953, 115291, 115292, 100961, 4709, 113253, 109159, 113258, 113268, 631, 105087, 111237, 103047, 2696, 117384, 103053, 109199, 105105, 156313, 111259, 109214, 105129, 148139, 4782, 101042, 105143, 109244, 103118, 105168, 113364, 4821, 103128, 101081, 105176, 107232, 2785, 105189, 107240, 101113, 109305, 4862, 4863, 6913, 105218, 103173, 105228, 103189, 2839, 103197, 4898, 101156, 103211, 103213, 156461, 105269, 101178, 115530, 107339, 103244, 2893, 109387, 101199, 109390, 103252, 107348, 127833, 4956, 111455, 107362, 125805, 101231, 4976, 107377, 101237, 2942, 4990, 111494, 105354, 107403, 101259, 103316, 103320, 134044, 121759, 115619, 123819, 953, 107454, 109510, 113607, 113617, 103378, 170961, 3036, 117725, 3040, 1000, 101359, 103417, 107523, 103430, 105478, 103441, 105490, 111636, 103450, 101406, 107554, 5164, 5172, 105530, 1086, 111678, 101440, 103496, 109643, 103504, 5205, 109664, 109666, 3177, 111724, 109678, 109682, 103539, 111734, 119926, 3196, 111740, 111744, 109705, 111753, 119948, 105626, 122012, 109729, 3238, 122023, 105640, 124076, 103597, 1204, 119996, 105668, 103621, 105673, 142541, 105686, 120022, 120025, 122076, 109789, 120032, 120034, 1258, 107762, 128249, 1274, 111873, 3332, 1289, 124169, 3344, 124180, 5401, 107804, 1310, 103712, 122147, 3366, 101673, 109870, 130363, 122176, 103752, 109902, 111966, 109922, 111993, 5512, 112009, 120202, 3467, 5522, 109971, 109986, 1446, 109994, 112042, 5552, 103864, 112059, 122325, 103903, 101856, 112095, 105954, 108002, 103913, 112110, 112116, 112123, 105981, 103946, 112139, 101902, 167442, 108051, 112148, 5655, 153115, 112158, 112164, 110117, 106022, 5672, 110120, 126505, 106030, 108080, 110130, 106045, 108093, 104000, 110144, 104006, 3655, 126542, 110159, 110182, 106097, 110207, 138885, 128654, 108187, 102052, 112294, 104113, 102072, 130746, 104126, 110287, 159442, 1747, 104161, 1771, 1772, 110321, 112370, 112371, 3829, 100091, 104201, 112393, 104203, 124681, 106253, 100117, 100119, 106273, 114467, 102190, 3887, 1840, 104240, 110384, 112435, 112438, 5943, 112441, 100160, 100169, 112459, 110413, 1871, 114526, 122728, 112499, 110458, 112506, 6020, 100235, 6028, 102283, 3982, 112523, 147340, 6036, 100247, 108440, 112542, 102303, 108447, 1956, 106415, 100276, 4022, 106422, 116664, 6074, 4030, 108488, 4041, 104394, 1995, 110536, 110538, 102360, 106458, 104413, 112606, 112608, 106465, 112609, 116706, 2021, 118762, 112620, 106481, 6130, 102389, 112637, 110591}
--------------------------------
room[7031776]:
set()
{4610, 104963, 102405, 526, 101395, 3604, 102419, 4630, 5654, 101912, 106521, 111643, 101405, 200224, 105508, 113188, 128036, 120871, 150052, 1066, 107563, 112683, 101933, 122926, 105524, 100919, 110135, 115767, 174136, 166972, 110653, 103998, 109631, 1089, 6211, 100420, 105541, 112195, 134211, 162377, 101965, 3155, 105046, 165979, 136285, 138846, 171618, 105575, 103016, 107111, 112235, 110189, 4718, 106606, 112750, 128621, 3191, 155256, 163450, 4219, 123003, 5755, 6782, 175233, 131722, 4747, 3213, 112269, 110740, 109205, 113301, 4759, 129177, 1690, 6810, 169113, 104603, 106153, 170666, 105645, 112814, 124077, 130734, 113329, 3764, 103093, 105141, 126646, 127669, 136372, 108730, 115898, 238262, 105663, 711, 116935, 108235, 135375, 125137, 107221, 1238, 1239, 111829, 100061, 110815, 132833, 141537, 106214, 109286, 170729, 127722, 2284, 103660, 102126, 121075, 104182, 109304, 168185, 109313, 128770, 112899, 205573, 119049, 107276, 111884, 102162, 111385, 171289, 237338, 179488, 111917, 120621, 105775, 108335, 5937, 118577, 4915, 112948, 116532, 114506, 107862, 2392, 101210, 3932, 105821, 100190, 108380, 110942, 7009, 6498, 130914, 108901, 113510, 108903, 360, 2408, 107372, 110961, 103285, 113533, 102272, 102785, 110978, 131971, 106888, 113033, 104331, 167307, 110990, 128400, 113041, 112530, 109460, 112024, 106906, 103838, 105374, 106913, 109987, 105893, 1447, 2472, 129959, 107946, 170919, 119725, 132526, 5555, 126389, 103862, 139190, 107962, 3003, 109499, 4541, 107967, 104384, 3525, 141766, 108487, 5578, 104911, 170959, 144338, 979, 110552, 101340, 109533, 101859, 106981, 108518, 999, 111079, 111081, 4586, 104943, 1521, 4083, 107507, 106486, 107511, 132602, 103931, 109053, 102399}
set()
{124943, 2064, 102417, 110607, 147493, 102444, 50, 100413, 6206, 6210, 104515, 108612, 110660, 4168, 106586, 100442, 104543, 106598, 112759, 123006, 2181, 106637, 106638, 112787, 4244, 143507, 112802, 165, 110760, 116904, 6314, 6316, 108722, 112818, 106677, 108728, 2242, 4293, 104648, 108752, 102616, 116958, 100575, 2278, 112873, 2282, 110827, 176370, 102643, 6391, 102655, 100618, 112906, 271, 102674, 4376, 106780, 102691, 112933, 114987, 117047, 4409, 104761, 121153, 106821, 110923, 6478, 112976, 121170, 151892, 102742, 115034, 106844, 121180, 104804, 100709, 106856, 113005, 108915, 104821, 104830, 6528, 102784, 110986, 104848, 121234, 106899, 119190, 111007, 102816, 113059, 106917, 100780, 2477, 111020, 113083, 108989, 100800, 102848, 106946, 119233, 108999, 106956, 2509, 111052, 109010, 4563, 111058, 109013, 109015, 129496, 4572, 104925, 123357, 100836, 111077, 100840, 113130, 503, 2558, 113162, 102934, 115225, 119325, 542, 2593, 6694, 109103, 115248, 105017, 105022, 102977, 585, 109130, 2639, 593, 100947, 109144, 100953, 115291, 115292, 100961, 4709, 113253, 109159, 113258, 113268, 631, 105087, 111237, 103047, 2696, 117384, 103053, 109199, 105105, 156313, 111259, 109214, 105129, 148139, 4782, 101042, 105143, 109244, 103118, 105168, 113364, 4821, 103128, 101081, 105176, 107232, 2785, 105189, 107240, 101113, 109305, 4862, 4863, 6913, 105218, 103173, 105228, 103189, 2839, 103197, 4898, 101156, 103211, 103213, 156461, 105269, 101178, 115530, 107339, 103244, 2893, 109387, 101199, 109390, 103252, 107348, 127833, 4956, 111455, 107362, 125805, 101231, 4976, 107377, 101237, 2942, 4990, 111494, 105354, 107403, 101259, 103316, 103320, 134044, 121759, 115619, 123819, 953, 107454, 109510, 113607, 113617, 103378, 170961, 3036, 117725, 3040, 1000, 101359, 103417, 107523, 103430, 105478, 103441, 105490, 111636, 103450, 101406, 107554, 5164, 5172, 105530, 1086, 111678, 101440, 103496, 109643, 103504, 5205, 109664, 109666, 3177, 111724, 109678, 109682, 103539, 111734, 119926, 3196, 111740, 111744, 109705, 111753, 119948, 105626, 122012, 109729, 3238, 122023, 105640, 124076, 103597, 1204, 119996, 105668, 103621, 105673, 142541, 105686, 120022, 120025, 122076, 109789, 120032, 120034, 1258, 107762, 128249, 1274, 111873, 3332, 1289, 124169, 3344, 124180, 5401, 107804, 1310, 103712, 122147, 3366, 101673, 109870, 130363, 122176, 103752, 109902, 111966, 109922, 111993, 5512, 112009, 120202, 3467, 5522, 109971, 109986, 1446, 109994, 112042, 5552, 103864, 112059, 122325, 103903, 101856, 112095, 105954, 108002, 103913, 112110, 112116, 112123, 105981, 103946, 112139, 101902, 167442, 108051, 112148, 5655, 153115, 112158, 112164, 110117, 106022, 5672, 110120, 126505, 106030, 108080, 110130, 106045, 108093, 104000, 110144, 104006, 3655, 126542, 110159, 110182, 106097, 110207, 138885, 128654, 108187, 102052, 112294, 104113, 102072, 130746, 104126, 110287, 159442, 1747, 104161, 1771, 1772, 110321, 112370, 112371, 3829, 100091, 104201, 112393, 104203, 124681, 106253, 100117, 100119, 106273, 114467, 102190, 3887, 1840, 104240, 110384, 112435, 112438, 5943, 112441, 100160, 100169, 112459, 110413, 1871, 114526, 122728, 112499, 110458, 112506, 6020, 100235, 6028, 102283, 3982, 112523, 147340, 6036, 100247, 108440, 112542, 102303, 108447, 1956, 106415, 100276, 4022, 106422, 116664, 6074, 4030, 108488, 4041, 104394, 1995, 110536, 110538, 102360, 106458, 104413, 112606, 112608, 106465, 112609, 116706, 2021, 118762, 112620, 106481, 6130, 102389, 112637, 110591}
###Markdown
Schedules that need extra capacity
###Code
sql = "select schedule_id, room_type_id, course_id, lesson_id \
from schedules s join rooms r on s.id = r.schedule_id and r.status = 0 and klass_id is null \
and r.is_internal = 0 where begin_at = %r group by schedule_id having sum(max_student_count) = \
sum(student_count)" % slot
df = pd.read_sql(sql, conn)
for index, row in df.iterrows():
chapter = lessons['chapter_id'][row['lesson_id']]
print('-' *32)
print("Schedule[{}]:".format(row['schedule_id']))
match(chapter, row['room_type_id'])
###Output
--------------------------------
Schedule[2907662]:
{111873, 110949, 112873, 112139, 111052, 112523, 112818, 112148, 117047, 128249}
set()
set()
set()
|
doc/gallery/horizontal_stacked_bar_chart.ipynb | ###Markdown
Horizontal Stacked Bar ChartThis is an example of a horizontal stacked bar chart using data which contains crop yields over different regions and different years in the 1930s.
###Code
import altair as alt
from vega_datasets import data
source = data.barley()
alt.Chart(source).mark_bar().encode(
x='sum(yield)',
y='variety',
color='site'
)
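# stacking is Vega-Lite's default for bar marks when the aggregated quantitative
# channel (x here) is paired with a color grouping; y='variety' keeps the bars horizontal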
###Output
_____no_output_____ |
module1-join-and-reshape-data/Eyve_Geo_LS_DS7_121_Join_and_Reshape_Data_Assignment.ipynb | ###Markdown
_Lambda School Data Science_ Join and Reshape datasetsObjectives- concatenate data with pandas- merge data with pandas- understand tidy data formatting- melt and pivot data with pandasLinks- [Pandas Cheat Sheet](https://github.com/pandas-dev/pandas/blob/master/doc/cheatsheet/Pandas_Cheat_Sheet.pdf)- [Tidy Data](https://en.wikipedia.org/wiki/Tidy_data) - Combine Data Sets: Standard Joins - Tidy Data - Reshaping Data- Python Data Science Handbook - [Chapter 3.6](https://jakevdp.github.io/PythonDataScienceHandbook/03.06-concat-and-append.html), Combining Datasets: Concat and Append - [Chapter 3.7](https://jakevdp.github.io/PythonDataScienceHandbook/03.07-merge-and-join.html), Combining Datasets: Merge and Join - [Chapter 3.8](https://jakevdp.github.io/PythonDataScienceHandbook/03.08-aggregation-and-grouping.html), Aggregation and Grouping - [Chapter 3.9](https://jakevdp.github.io/PythonDataScienceHandbook/03.09-pivot-tables.html), Pivot Tables Reference- Pandas Documentation: [Reshaping and Pivot Tables](https://pandas.pydata.org/pandas-docs/stable/reshaping.html)- Modern Pandas, Part 5: [Tidy Data](https://tomaugspurger.github.io/modern-5-tidy.html)
###Code
!wget https://s3.amazonaws.com/instacart-datasets/instacart_online_grocery_shopping_2017_05_01.tar.gz
!tar --gunzip --extract --verbose --file=instacart_online_grocery_shopping_2017_05_01.tar.gz
%cd instacart_2017_05_01
!ls -lh *.csv
###Output
-rw-r--r-- 1 502 staff 2.6K May 2 2017 aisles.csv
-rw-r--r-- 1 502 staff 270 May 2 2017 departments.csv
-rw-r--r-- 1 502 staff 551M May 2 2017 order_products__prior.csv
-rw-r--r-- 1 502 staff 24M May 2 2017 order_products__train.csv
-rw-r--r-- 1 502 staff 104M May 2 2017 orders.csv
-rw-r--r-- 1 502 staff 2.1M May 2 2017 products.csv
###Markdown
Assignment Join Data PracticeThese are the top 10 most frequently ordered products. How many times was each ordered? 1. Banana2. Bag of Organic Bananas3. Organic Strawberries4. Organic Baby Spinach 5. Organic Hass Avocado6. Organic Avocado7. Large Lemon 8. Strawberries9. Limes 10. Organic Whole MilkFirst, write down which columns you need and which dataframes have them.Next, merge these into a single dataframe.Then, use pandas functions from the previous lesson to get the counts of the top 10 most frequently ordered products. Columns Needed/Location: - product ID/products.csv- product name/products.csv- product id with order/order products.
###Code
import pandas as pd
pd.options.display.max_rows=150
#read in and join up the orders information
df1=pd.read_csv('order_products__prior.csv')
df2=pd.read_csv('order_products__train.csv')
ordersDF=pd.concat([df1,df2])
ordersDF.head()
#read in product name information
products=pd.read_csv('products.csv')
products.head()
#create a dataframe of only the top ten items
topTen=ordersDF['product_id'].value_counts().head(10).index.tolist()
topTenDF=products[products['product_id'].isin(topTen)]
#remove any order lines that don't have a product in the top ten (make it go faster)
ordersSubset=ordersDF[ordersDF['product_id'].isin(topTen)]
topTenDF['product_id'].value_counts()
ordersSubset.head()
topTenDF['product_id']
columns=['product_id','product_name']
merged=pd.merge(ordersSubset['product_id'], topTenDF[columns], how='inner', on='product_id')
merged.head()
topTenRate=merged['product_name'].value_counts()
#this is a list of the top ordered items and how often they were ordered
topTenRate
###Output
_____no_output_____
###Markdown
Reshape Data Section- Replicate the lesson code- Complete the code cells we skipped near the beginning of the notebook- Table 2 --> Tidy- Tidy --> Table 2- Load seaborn's `flights` dataset by running the cell below. Then create a pivot table showing the number of passengers by month and year. Use year for the index and month for the columns. You've done it right if you get 112 passengers for January 1949 and 432 passengers for December 1960.
###Code
import numpy as np
#set up table 2
table1 = pd.DataFrame(
[[np.nan, 2],
[16, 11],
[3, 1]],
index=['John Smith', 'Jane Doe', 'Mary Johnson'],
columns=['treatmenta', 'treatmentb'])
table2 = table1.T
table2
treat=table2.index.to_list()
cols=table2.columns.to_list()
cols
table2=table2.reset_index()
table2
tidy=table2.melt(id_vars='index', value_vars=cols)
tidy
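# Tidy --> Table 2 (a sketch): pivot back using pandas' default melt column
# names 'variable' and 'value' produced above
table2_again = tidy.pivot(index='index', columns='variable', values='value')
table2_again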
import seaborn as sns
flights = sns.load_dataset('flights')
##### YOUR CODE HERE #####
flights.pivot_table(index='year', columns='month', values='passengers')
###Output
_____no_output_____
###Markdown
Join Data Stretch ChallengeThe [Instacart blog post](https://tech.instacart.com/3-million-instacart-orders-open-sourced-d40d29ead6f2) has a visualization of "**Popular products** purchased earliest in the day (green) and latest in the day (red)." The post says,> "We can also see the time of day that users purchase specific products.> Healthier snacks and staples tend to be purchased earlier in the day, whereas ice cream (especially Half Baked and The Tonight Dough) are far more popular when customers are ordering in the evening.> **In fact, of the top 25 latest ordered products, the first 24 are ice cream! The last one, of course, is a frozen pizza.**"Your challenge is to reproduce the list of the top 25 latest ordered popular products.We'll define "popular products" as products with more than 2,900 orders.
###Code
##### YOUR CODE HERE #####
#create a list of the most common items purchased
values=ordersDF['product_id'].value_counts()
orderRate=values[values>29000]
orderRate=orderRate.reset_index()
orderRate=orderRate.rename(columns={'index':'product_id','product_id':'purchase_rate'})
orderRate.head()
#add the names of the products back in
orderRate=pd.merge(orderRate, products[columns])
orderRate
#this data has the times of orders
orders=pd.read_csv('orders.csv')
orders.head()
#combine into one dataframe that has all the info we need in it
orderCols=['order_id','order_hour_of_day']
idCols=['order_id','product_id']
timeOrderDF=pd.merge(orders[orderCols], ordersDF[idCols])
timeOrderDF
final=pd.merge(timeOrderDF, orderRate[columns])
final.columns.to_list()
ct=pd.crosstab(final['product_name'],final['order_hour_of_day'], normalize='index')
ct
ct.T.plot(legend=False);
###Output
_____no_output_____
###Markdown
Reshape Data Stretch Challenge_Try whatever sounds most interesting to you!_- Replicate more of Instacart's visualization showing "Hour of Day Ordered" vs "Percent of Orders by Product"- Replicate parts of the other visualization from [Instacart's blog post](https://tech.instacart.com/3-million-instacart-orders-open-sourced-d40d29ead6f2), showing "Number of Purchases" vs "Percent Reorder Purchases"- Get the most recent order for each user in Instacart's dataset. This is a useful baseline when [predicting a user's next order](https://www.kaggle.com/c/instacart-market-basket-analysis)- Replicate parts of the blog post linked at the top of this notebook: [Modern Pandas, Part 5: Tidy Data](https://tomaugspurger.github.io/modern-5-tidy.html)
###Code
##### YOUR CODE HERE #####
###Output
_____no_output_____ |
vanderplas/PythonDataScienceHandbook-master/notebooks/04.11-Settings-and-Stylesheets.ipynb | ###Markdown
*This notebook contains an excerpt from the [Python Data Science Handbook](http://shop.oreilly.com/product/0636920034919.do) by Jake VanderPlas; the content is available [on GitHub](https://github.com/jakevdp/PythonDataScienceHandbook).**The text is released under the [CC-BY-NC-ND license](https://creativecommons.org/licenses/by-nc-nd/3.0/us/legalcode), and code is released under the [MIT license](https://opensource.org/licenses/MIT). If you find this content useful, please consider supporting the work by [buying the book](http://shop.oreilly.com/product/0636920034919.do)!* Customizing Matplotlib: Configurations and Stylesheets Matplotlib's default plot settings are often the subject of complaint among its users.While much is slated to change in the 2.0 Matplotlib release in late 2016, the ability to customize default settings helps bring the package inline with your own aesthetic preferences.Here we'll walk through some of Matplotlib's runtime configuration (rc) options, and take a look at the newer *stylesheets* feature, which contains some nice sets of default configurations. Plot Customization by HandThrough this chapter, we've seen how it is possible to tweak individual plot settings to end up with something that looks a little bit nicer than the default.It's possible to do these customizations for each individual plot.For example, here is a fairly drab default histogram:
###Code
import matplotlib.pyplot as plt
plt.style.use('classic')
import numpy as np
%matplotlib inline
x = np.random.randn(1000)
plt.hist(x);
###Output
_____no_output_____
###Markdown
We can adjust this by hand to make it a much more visually pleasing plot:
###Code
# use a gray background ('axisbg' was renamed to 'facecolor' in Matplotlib 2.0)
ax = plt.axes(facecolor='#E6E6E6')
ax.set_axisbelow(True)
# draw solid white grid lines
plt.grid(color='w', linestyle='solid')
# hide axis spines
for spine in ax.spines.values():
spine.set_visible(False)
# hide top and right ticks
ax.xaxis.tick_bottom()
ax.yaxis.tick_left()
# lighten ticks and labels
ax.tick_params(colors='gray', direction='out')
for tick in ax.get_xticklabels():
tick.set_color('gray')
for tick in ax.get_yticklabels():
tick.set_color('gray')
# control face and edge color of histogram
ax.hist(x, edgecolor='#E6E6E6', color='#EE6666');
###Output
_____no_output_____
###Markdown
This looks better, and you may recognize the look as inspired by the look of the R language's ggplot visualization package.But this took a whole lot of effort!We definitely do not want to have to do all that tweaking each time we create a plot.Fortunately, there is a way to adjust these defaults once in a way that will work for all plots. Changing the Defaults: ``rcParams``Each time Matplotlib loads, it defines a runtime configuration (rc) containing the default styles for every plot element you create.This configuration can be adjusted at any time using the ``plt.rc`` convenience routine.Let's see what it looks like to modify the rc parameters so that our default plot will look similar to what we did before.We'll start by saving a copy of the current ``rcParams`` dictionary, so we can easily reset these changes in the current session:
###Code
IPython_default = plt.rcParams.copy()
###Output
_____no_output_____
###Markdown
Now we can use the ``plt.rc`` function to change some of these settings:
###Code
from matplotlib import cycler
colors = cycler('color',
['#EE6666', '#3388BB', '#9988DD',
'#EECC55', '#88BB44', '#FFBBBB'])
plt.rc('axes', facecolor='#E6E6E6', edgecolor='none',
axisbelow=True, grid=True, prop_cycle=colors)
plt.rc('grid', color='w', linestyle='solid')
plt.rc('xtick', direction='out', color='gray')
plt.rc('ytick', direction='out', color='gray')
plt.rc('patch', edgecolor='#E6E6E6')
plt.rc('lines', linewidth=2)
###Output
_____no_output_____
###Markdown
With these settings defined, we can now create a plot and see our settings in action:
###Code
plt.hist(x);
###Output
_____no_output_____
###Markdown
Let's see what simple line plots look like with these rc parameters:
###Code
for i in range(4):
plt.plot(np.random.rand(10))
###Output
_____no_output_____
###Markdown
I find this much more aesthetically pleasing than the default styling.If you disagree with my aesthetic sense, the good news is that you can adjust the rc parameters to suit your own tastes!These settings can be saved in a *.matplotlibrc* file, which you can read about in the [Matplotlib documentation](http://Matplotlib.org/users/customizing.html).That said, I prefer to customize Matplotlib using its stylesheets instead. StylesheetsThe version 1.4 release of Matplotlib in August 2014 added a very convenient ``style`` module, which includes a number of new default stylesheets, as well as the ability to create and package your own styles. These stylesheets are formatted similarly to the *.matplotlibrc* files mentioned earlier, but must be named with a *.mplstyle* extension.Even if you don't create your own style, the stylesheets included by default are extremely useful.The available styles are listed in ``plt.style.available``; here I'll list only the first five for brevity:
###Code
plt.style.available[:5]
###Output
_____no_output_____
###Markdown
The basic way to switch to a stylesheet is to call``` pythonplt.style.use('stylename')```But keep in mind that this will change the style for the rest of the session!Alternatively, you can use the style context manager, which sets a style temporarily:``` pythonwith plt.style.context('stylename'): make_a_plot()``` Let's create a function that will make two basic types of plot:
###Code
def hist_and_lines():
np.random.seed(0)
fig, ax = plt.subplots(1, 2, figsize=(11, 4))
ax[0].hist(np.random.randn(1000))
for i in range(3):
ax[1].plot(np.random.rand(10))
ax[1].legend(['a', 'b', 'c'], loc='lower left')
###Output
_____no_output_____
###Markdown
We'll use this to explore how these plots look using the various built-in styles. Default styleThe default style is what we've been seeing so far throughout the book; we'll start with that. First, let's reset our runtime configuration to the notebook default:
###Code
# reset rcParams
plt.rcParams.update(IPython_default);
###Output
_____no_output_____
###Markdown
Now let's see how it looks:
###Code
hist_and_lines()
###Output
_____no_output_____
###Markdown
FiveThirtyEight styleThe ``fivethirtyeight`` style mimics the graphics found on the popular [FiveThirtyEight website](https://fivethirtyeight.com). As you can see here, it is typified by bold colors, thick lines, and transparent axes:
###Code
with plt.style.context('fivethirtyeight'):
hist_and_lines()
###Output
_____no_output_____
###Markdown
ggplotThe ``ggplot`` package in the R language is a very popular visualization tool. Matplotlib's ``ggplot`` style mimics the default styles from that package:
###Code
with plt.style.context('ggplot'):
hist_and_lines()
###Output
_____no_output_____
###Markdown
*Bayesian Methods for Hackers* styleThere is a very nice short online book called [*Probabilistic Programming and Bayesian Methods for Hackers*](http://camdavidsonpilon.github.io/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers/); it features figures created with Matplotlib, and uses a nice set of rc parameters to create a consistent and visually-appealing style throughout the book. This style is reproduced in the ``bmh`` stylesheet:
###Code
with plt.style.context('bmh'):
hist_and_lines()
###Output
_____no_output_____
###Markdown
Dark backgroundFor figures used within presentations, it is often useful to have a dark rather than light background. The ``dark_background`` style provides this:
###Code
with plt.style.context('dark_background'):
hist_and_lines()
###Output
_____no_output_____
###Markdown
GrayscaleSometimes you might find yourself preparing figures for a print publication that does not accept color figures. For this, the ``grayscale`` style, shown here, can be very useful:
###Code
with plt.style.context('grayscale'):
hist_and_lines()
###Output
_____no_output_____
###Markdown
Seaborn styleMatplotlib also has stylesheets inspired by the Seaborn library (discussed more fully in [Visualization With Seaborn](04.14-Visualization-With-Seaborn.ipynb)). As we will see, these styles are loaded automatically when Seaborn is imported into a notebook. I've found these settings to be very nice, and tend to use them as defaults in my own data exploration.
###Code
import seaborn
hist_and_lines()
###Output
_____no_output_____
ipynb/US-Montana.ipynb | ###Markdown
United States: Montana* Homepage of project: https://oscovida.github.io* Plots are explained at http://oscovida.github.io/plots.html* [Execute this Jupyter Notebook using myBinder](https://mybinder.org/v2/gh/oscovida/binder/master?filepath=ipynb/US-Montana.ipynb)
###Code
import datetime
import time
start = datetime.datetime.now()
print(f"Notebook executed on: {start.strftime('%d/%m/%Y %H:%M:%S%Z')} {time.tzname[time.daylight]}")
%config InlineBackend.figure_formats = ['svg']
from oscovida import *
overview(country="US", region="Montana", weeks=5);
overview(country="US", region="Montana");
compare_plot(country="US", region="Montana");
# load the data
cases, deaths = get_country_data("US", "Montana")
# get population of the region for future normalisation:
inhabitants = population(country="US", region="Montana")
print(f'Population of country="US", region="Montana": {inhabitants} people')
# compose into one table
table = compose_dataframe_summary(cases, deaths)
# show tables with up to 1000 rows
pd.set_option("max_rows", 1000)
# display the table
table
###Output
_____no_output_____
###Markdown
Explore the data in your web browser- If you want to execute this notebook, [click here to use myBinder](https://mybinder.org/v2/gh/oscovida/binder/master?filepath=ipynb/US-Montana.ipynb)- and wait (~1 to 2 minutes)- Then press SHIFT+RETURN to advance code cell to code cell- See http://jupyter.org for more details on how to use Jupyter Notebook Acknowledgements:- Johns Hopkins University provides data for countries- Robert Koch Institute provides data for within Germany- Atlo Team for gathering and providing data from Hungary (https://atlo.team/koronamonitor/)- Open source and scientific computing community for the data tools- Github for hosting repository and html files- Project Jupyter for the Notebook and binder service- The H2020 project Photon and Neutron Open Science Cloud ([PaNOSC](https://www.panosc.eu/))--------------------
###Code
print(f"Download of data from Johns Hopkins university: cases at {fetch_cases_last_execution()} and "
f"deaths at {fetch_deaths_last_execution()}.")
# to force a fresh download of data, run "clear_cache()"
print(f"Notebook execution took: {datetime.datetime.now()-start}")
###Output
_____no_output_____
end_to_end/nlp_mlops_company_sentiment/02_nlp_company_earnings_analysis_pipeline.ipynb | ###Markdown
Understanding Trends in Company Valuation with NLP - Part 2: NLP Company Earnings Analysis Pipeline Introduction Orchestrating company earnings trend analysis, using SEC filings, news sentiment with the Hugging Face transformers, and Amazon SageMaker PipelinesIn this notebook, we demonstrate how to summarize and derive sentiments out of Securities and Exchange Commission reports filed by a publicly traded organization. We will derive the overall market sentiments about the said organization through financial news articles within the same financial period to present a fair view of the organization vs. market sentiments and outlook about the company's overall valuation and performance. In addition to this we will also identify the most popular keywords and entities within the news articles about that organization. In order to achieve the above we will be using multiple SageMaker Hugging Face based NLP transformers for the downstream NLP tasks of Summarization (e.g., of the news and SEC MDNA sections) and Sentiment Analysis (of the resulting summaries). --- Using SageMaker Pipelines Amazon SageMaker Pipelines is the first purpose-built, easy-to-use continuous integration and continuous delivery (CI/CD) service for machine learning (ML). With SageMaker Pipelines, you can create, automate, and manage end-to-end ML workflows at scale. Orchestrating workflows across each step of the machine learning process (e.g. exploring and preparing data, experimenting with different algorithms and parameters, training and tuning models, and deploying models to production) can take months of coding. Since it is purpose-built for machine learning, SageMaker Pipelines helps you automate different steps of the ML workflow, including data loading, data transformation, training and tuning, and deployment. With SageMaker Pipelines, you can build dozens of ML models a week, manage massive volumes of data, thousands of training experiments, and hundreds of different model versions. You can share and re-use workflows to recreate or optimize models, helping you scale ML throughout your organization. --- Understanding trends in company valuation (or similar) with NLP**Natural language processing (NLP)** is a subfield of linguistics, computer science, and artificial intelligence concerned with the interactions between computers and human language, in particular how to program computers to process and analyze large amounts of natural language data. The goal is a computer capable of "understanding" the contents of documents, including the contextual nuances of the language within them. The technology can then accurately extract information and insights contained in the documents as well as categorize and organize the documents themselves. (Source: [Wikipedia](https://en.wikipedia.org/wiki/Natural_language_processing)) We are going to demonstrate how to summarize and derive sentiments out of Securities and Exchange Commission reports filed by a publicly traded organization. We are also going to derive the overall market sentiments about the said organization through financial news articles within the same financial period to present a fair view of the organization vs. market sentiments and outlook about the company's overall valuation and performance. 
In addition to this we will also identify the most popular keywords and entities within the news articles about that organization. In order to achieve the above we will be using multiple SageMaker Hugging Face based NLP transformers with summarization and sentiment analysis downstream tasks.* Summarization of financial text from SEC reports and news articles will be done via the [Pegasus for Financial Summarization model](https://huggingface.co/human-centered-summarization/financial-summarization-pegasus) based on the paper [Towards Human-Centered Summarization: A Case Study on Financial News](https://aclanthology.org/2021.hcinlp-1.4/). * Sentiment analysis on summarized SEC financial report and news articles will be done via a pre-trained NLP model for analyzing the sentiment of financial text, called [FinBERT](https://huggingface.co/ProsusAI/finbert). Paper: [FinBERT: Financial Sentiment Analysis with Pre-trained Language Models](https://arxiv.org/abs/1908.10063)--- SEC DatasetThe starting point for a vast amount of financial NLP is text in SEC filings. The SEC requires companies to report different types of information related to various events involving companies. The full list of SEC forms is here: https://www.sec.gov/forms. SEC filings are widely used by financial services companies as a source of information about companies in order to make trading, lending, investment, and risk management decisions. Because these filings are required by regulation, they are of high quality and veracity. They contain forward-looking information that helps with forecasts and are written with a view to the future, required by regulation. In addition, in recent times, the value of historical time-series data has degraded, since economies have been structurally transformed by trade wars, pandemics, and political upheavals. Therefore, text as a source of forward-looking information has been increasing in relevance. Obtain the dataset using the SageMaker JumpStart Industry Python SDKDownloading SEC filings is done from the SEC's Electronic Data Gathering, Analysis, and Retrieval (EDGAR) website, which provides open data access. EDGAR is the primary system under the U.S. Securities and Exchange Commission (SEC) for companies and others submitting documents under the Securities Act of 1933, the Securities Exchange Act of 1934, the Trust Indenture Act of 1939, and the Investment Company Act of 1940. EDGAR contains millions of company and individual filings. The system processes about 3,000 filings per day, serves up 3,000 terabytes of data to the public annually, and accommodates 40,000 new filers per year on average. There are several ways to download the data, and some open source packages available to extract the text from these filings. However, these require extensive programming and are not always easy to use. We provide a simple one-API call that will create a dataset in a few lines of code, for any period of time and for numerous tickers. We have wrapped the extraction functionality into a SageMaker processing container and provide this notebook to enable users to download a dataset of filings with metadata such as dates and parsed plain text that can then be used for machine learning using other SageMaker tools. This is included in the [SageMaker JumpStart Industry](https://aws.amazon.com/blogs/machine-learning/use-pre-trained-financial-language-models-for-transfer-learning-in-amazon-sagemaker-jumpstart/) library for financial language models. 
Users only need to specify a date range and a list of ticker symbols, and the library will take care of the rest. As of now, the solution supports extracting a popular subset of SEC forms in plain text (excluding tables): 10-K, 10-Q, 8-K, 497, 497K, S-3ASR, and N-1A. For each of these, we provide examples throughout this notebook and a brief description of each form. For the 10-K and 10-Q forms, filed every year or quarter, we also extract the Management Discussion and Analysis (MDNA) section, which is the primary forward-looking section in the filing. This is the section that has been most widely used in financial text analysis. Therefore, we provide this section automatically in a separate column of the dataframe alongside the full text of the filing. The extracted dataframe is written to S3 storage and to the local notebook instance. --- News articles related to the stock symbol -- datasetWe will use the MIT Licensed [NewsCatcher API](https://docs.newscatcherapi.com/) to grab the top 4-5 articles about the specific organization using filters; other sources such as social media feeds and RSS feeds can also be used. The first step in the pipeline is to fetch the SEC report from the EDGAR database using the [SageMaker JumpStart Industry](https://aws.amazon.com/blogs/machine-learning/use-pre-trained-financial-language-models-for-transfer-learning-in-amazon-sagemaker-jumpstart/) library for financial language models. This library provides easy-to-use functionality to obtain either one or multiple SEC reports for one or more ticker symbols or CIKs. The ticker or CIK number will be passed to the SageMaker Pipeline using the Pipeline parameter `inference_ticker_cik`. For demo purposes of this Pipeline we will focus on a single Ticker/CIK number at a time and the MDNA section of the 10-K form. The first processing step will extract the MDNA from the 10-K form for a company and will also gather a few news articles related to the company from the NewsCatcher API. This data will ultimately be used for summarization and then finally sentiment analysis. --- MLOps for NLP using SageMaker PipelinesWe will set up the following SageMaker Pipeline. The pipeline has two flows depending on the value of the `model_register_deploy` Pipeline parameter. If the value is set to `Y` we want the pipeline to register the models and deploy the latest versions of the models from the model registry to the SageMaker endpoints. If the value is set to `N` then we simply want to run inferences using the FinBert and the Pegasus models using the Ticker symbol (or CIK number) that is passed to the pipeline using the `inference_ticker_cik` Pipeline parameter. Note: You must execute the 01_script-processor-custom-container.ipynb notebook before you can set up the SageMaker Pipeline. That notebook creates a custom Docker image and registers it in Amazon Elastic Container Registry (Amazon ECR) for the pipeline to use. The image contains all of the required dependencies. --- Set Up SageMaker Project Install and import packages
###Code
# Install updated version of SageMaker
# !pip install -q sagemaker==2.49
!pip install sagemaker --upgrade
!pip install transformers
!pip install typing
!pip install sentencepiece
!pip install fiscalyear
#Install SageMaker Jumpstart Industry
!pip install smjsindustry
###Output
_____no_output_____
###Markdown
NOTE: After installing updated versions of SageMaker and the other packages above, save the notebook and then restart your kernel.
###Code
import boto3
import botocore
import pandas as pd
import sagemaker
print(f'SageMaker version: {sagemaker.__version__}')
from sagemaker.huggingface import HuggingFace
from sagemaker.huggingface import HuggingFaceModel
from sagemaker.workflow.pipeline import Pipeline
from sagemaker.workflow.steps import CreateModelStep
from sagemaker.workflow.step_collections import RegisterModel
from sagemaker.workflow.steps import ProcessingStep
from sagemaker.workflow.steps import TransformStep
from sagemaker.workflow.properties import PropertyFile
from sagemaker.workflow.parameters import (ParameterInteger, ParameterString)
from sagemaker.sklearn.processing import ScriptProcessor
from sagemaker.lambda_helper import Lambda
from sagemaker.workflow.lambda_step import (
LambdaStep,
LambdaOutput,
LambdaOutputTypeEnum,
)
###Output
_____no_output_____
###Markdown
Define parameters that you'll use throughout the notebook
###Code
s3 = boto3.resource("s3")
region = boto3.Session().region_name
sagemaker_session = sagemaker.Session()
role = sagemaker.get_execution_role()
sagemaker_role = role
default_bucket = sagemaker_session.default_bucket()
prefix = 'nlp-e2e-mlops'
s3_client = boto3.client('s3', region_name=region)
sagemaker_boto_client = boto3.client("sagemaker", region_name=region)
#deploy_model_instance_type = "ml.m4.8xlarge"
deploy_model_instance_type = "ml.m4.xlarge"
inference_instances=["ml.t2.medium", "ml.m5.xlarge", "ml.m5.2xlarge", "ml.m5.4xlarge", "ml.m5.12xlarge"]
transform_instances=["ml.m5.xlarge"]
PROCESSING_INSTANCE="ml.m4.4xlarge"
ticker='AMZN'
%store -r
print(f's3://{default_bucket}/{prefix}/code/model_deploy.py')
print(f'SageMaker Role: {role}')
###Output
_____no_output_____
###Markdown
Define parameters to parametrize Pipeline ExecutionUsing SageMaker Pipelines, we can define the steps to be included in a pipeline and then use parameters to modify the pipeline's behavior at execution time, without having to modify the pipeline definition itself. We'll provide some default parameter values that can be overridden on pipeline execution.
###Code
#Define some default parameters:
#specify default number of instances for processing step
processing_instance_count = ParameterInteger(
name="ProcessingInstanceCount",
default_value=1
)
#specify default instance type for processing step
processing_instance_type = ParameterString(
name="ProcessingInstanceType",
default_value=PROCESSING_INSTANCE
)
#specify location of inference data for data processing step
inference_input_data = ParameterString(
name="InferenceData",
default_value=f's3://{default_bucket}/{prefix}/nlp-pipeline/inf-data',
)
#Specify the Ticker CIK for the pipeline
inference_ticker_cik = ParameterString(
name="InferenceTickerCik",
default_value=ticker,
)
#specify default method for model approval
model_approval_status = ParameterString(
name="ModelApprovalStatus",
default_value="PendingManualApproval"
)
#specify if new model needs to be registered and deployed
model_register_deploy = ParameterString(
name="ModelRegisterDeploy",
default_value="Y"
)
%store
# These are the stored variables; the container image was built in the
# previous notebook 01_script-processor-custom-container.ipynb
%pylab inline
%store -r
###Output
_____no_output_____
###Markdown
--- Preparing SEC dataset Before we dive right into setting up the pipeline, let's take a look at how the SageMaker JumpStart Industry SDK for financial language models helps obtain the dataset from SEC filings and what features are available for us to use. **Note:** The code cells in this section are completely optional and for information purposes only; we will use the SageMaker JumpStart Industry SDK directly in the pipeline. Let's install the required dependencies first. Install the SageMaker JumpStart Industry SDKThe functionality is delivered through a client-side SDK. The first step requires pip installing a Python package that interacts with a SageMaker processing container. The retrieval, parsing, transforming, and scoring of text is a complex process and uses different algorithms and packages. In order to make this seamless and stable for the user, the functionality is packaged into a SageMaker container. This lifts the load of installation and maintenance of the workflow, reducing the user effort down to a pip install followed by a single API call.
###Code
!pip install smjsindustry==1.0.0
###Output
_____no_output_____
###Markdown
As an example, we will pull the 10-K/10-Q filings for the AMZN ticker from EDGAR and write the data as CSV to S3. Below is the single block of code that contains the API call. The options are all self-explanatory.
###Code
# from smfinance import SECDataSetConfig, DataLoader
from smjsindustry.finance import DataLoader
from smjsindustry.finance.processor_config import EDGARDataSetConfig
###Output
_____no_output_____
###Markdown
The extracted reports will be saved to an S3 bucket for us to review. This code will also be used in the Pipeline to fetch the report for the Ticker or CIK number passed to the SageMaker Pipeline. Executing the following code cell will run a processing job which will fetch the SEC reports from the EDGAR database. Obtain SEC data using the SageMaker JumpStart Industry SDK
###Code
%%time
dataset_config = EDGARDataSetConfig(
tickers_or_ciks=['amzn','goog', '27904', 'FB'], # list of stock tickers or CIKs
form_types=['10-K', '10-Q'], # list of SEC form types
filing_date_start='2019-01-01', # starting filing date
filing_date_end='2020-12-31', # ending filing date
email_as_user_agent='[email protected]') # user agent email
data_loader = DataLoader(
role=sagemaker.get_execution_role(), # loading job execution role
instance_count=1, # instances number, limit varies with instance type
instance_type='ml.c5.2xlarge', # instance type
volume_size_in_gb=30, # size in GB of the EBS volume to use
volume_kms_key=None, # KMS key for the processing volume
output_kms_key=None, # KMS key ID for processing job outputs
max_runtime_in_seconds=None, # timeout in seconds. Default is 24 hours.
sagemaker_session=sagemaker.Session(), # session object
tags=None) # a list of key-value pairs
data_loader.load(
dataset_config,
's3://{}/{}'.format(default_bucket, 'sample-sec-data'), # output s3 prefix (both bucket and folder names are required)
'dataset_10k_10q.csv', # output file name
wait=True,
logs=True)
###Output
_____no_output_____
###Markdown
OutputThe output of the `data_loader` processing job is a `CSV` file. We see the filings for different quarters. The filing date comes within a month of the end date of the reporting period. Both these dates are collected and displayed in the dataframe. The column `text` contains the full text of the report, but the tables are not extracted. The values in the tables in the filings are balance-sheet and income-statement data (numeric/tabular) and are easily available elsewhere as they are reported in numeric databases. The last column of the dataframe, named `mdna`, comprises the Management Discussion & Analysis section, which is the primary forward-looking section in the filing. This is the section that has been most widely used in financial text analysis. Therefore, we will use the `mdna` text to derive the sentiment of the overall filing in this example.
###Code
print (f"{default_bucket}/{prefix}/")
s3_client.download_file(default_bucket, '{}/{}'.format(f'sample-sec-data', f'dataset_10k_10q.csv'), f'./data/dataset_10k_10q.csv')
data_frame_10k_10q = pd.read_csv(f'./data/dataset_10k_10q.csv')
data_frame_10k_10q
###Output
_____no_output_____
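###Code
# Quick, illustrative sanity check on the extracted dataset: the `text` and
# `mdna` columns described above should be present (other column names may
# vary with the SDK version)
print(data_frame_10k_10q.columns.tolist())
print(data_frame_10k_10q['mdna'].str.len().describe())
###Output
_____no_output_____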
###Markdown
--- Set Up Your MLOps NLP Pipeline with SageMaker Pipelines Step 1: Data pre-processing - extract SEC data and news about the company Define a processing step to prepare SEC data for inference We will define a processing step to extract 10-K and 10-Q forms for a specific organization using either the company [Stock Ticker](https://www.investopedia.com/ask/answers/12/what-is-a-stock-ticker.asp) symbol or the [CIK (Central Index Key)](https://www.sec.gov/edgar/searchedgar/cik.htm) used to look up reports in SEC's EDGAR system. You can find the company stock ticker symbol to CIK number mapping [here](https://www.sec.gov/include/ticker.txt). This step will also collect news article snippets related to the company using the NewsCatcher API. **Important**: It is recommended to use CIKs as the input. The tickers will be internally converted to CIKs according to the [mapping file](https://www.sec.gov/include/ticker.txt). One ticker may map to multiple CIKs, but we only support the latest ticker to CIK mapping. Please provide the old CIKs in the input when you want historical filings. Also note that even though the client-side SDK allows you to download multiple SEC reports for multiple CIKs at a time, we will set up our data preprocessing step to grab exactly one SEC report for one CIK (company/organization).
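As an aside, the ticker-to-CIK mapping file is plain text, so it is easy to check which CIK a ticker resolves to before running the pipeline. A minimal sketch, assuming the `requests` library is available (the User-Agent email below reuses the illustrative address from the `EDGARDataSetConfig` example earlier):

``` python
import requests

# each line of the mapping file is "<ticker>\t<cik>"
resp = requests.get('https://www.sec.gov/include/ticker.txt',
                    headers={'User-Agent': '[email protected]'})  # SEC asks clients to identify themselves
ticker_to_cik = dict(line.split('\t') for line in resp.text.strip().split('\n'))
print(ticker_to_cik['amzn'])  # CIK for Amazon, as a string
```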
###Code
'''
We used store magic in the previous notebook 01_script-processor-custom-container.ipynb
to store the URI of the container image built in the region of choice
'''
CONTAINER_IMAGE_URI
loader_instance_type = "ml.c5.2xlarge"
create_dataset_processor = ScriptProcessor(command=['python3'],
image_uri=CONTAINER_IMAGE_URI,
role=role,
instance_count=processing_instance_count,
instance_type=processing_instance_type)
###Output
_____no_output_____
###Markdown
Create a processing step to process the SEC data for inference:
###Code
create_dataset_script_uri = f's3://{default_bucket}/{prefix}/code/data-processing.py'
s3_client.upload_file(Filename='./scripts/data-processing.py', Bucket=default_bucket, Key=f'{prefix}/code/data-processing.py')
create_dataset_step = ProcessingStep(
name='HFSECFinBertCreateDataset',
processor=create_dataset_processor,
outputs=[sagemaker.processing.ProcessingOutput(output_name='report_data',
source='/opt/ml/processing/output/10k10q',
destination=f'{inference_input_data}/10k10q'),
sagemaker.processing.ProcessingOutput(output_name='article_data',
source='/opt/ml/processing/output/articles',
destination=f'{inference_input_data}/articles')],
job_arguments=["--ticker-cik", inference_ticker_cik,
"--instance-type", loader_instance_type,
"--region", region,
"--bucket", default_bucket,
"--prefix", prefix,
"--role", role],
code=create_dataset_script_uri)
###Output
_____no_output_____
###Markdown
Step 2: Create models for summarization and sentiment analysis
###Code
sentiment_model_name="HFSECFinbertModel"
summarization_model_name="HFSECPegasusModel"
###Output
_____no_output_____
###Markdown
Create the FinBERT model for Sentiment Analysis
###Code
# Download pre-trained model using HuggingFaceModel class
from sagemaker.huggingface import HuggingFaceModel
hub = {
'HF_MODEL_ID':'ProsusAI/finbert',
'HF_TASK':'text-classification'
}
# create Hugging Face Model Class (documentation here: https://sagemaker.readthedocs.io/en/stable/frameworks/huggingface/sagemaker.huggingface.html#hugging-face-model)
sentiment_huggingface_model = HuggingFaceModel(
name=sentiment_model_name,
transformers_version='4.6.1',
pytorch_version='1.7.1',
py_version='py36',
env=hub,
role=role,
sagemaker_session=sagemaker_session,
)
inputs = sagemaker.inputs.CreateModelInput(
instance_type="ml.m4.xlarge"
)
create_sentiment_model_step = CreateModelStep(
name="HFSECFinBertCreateModel",
model=sentiment_huggingface_model,
inputs=inputs,
# depends_on=['HFSECFinBertCreateDataset']
)
###Output
_____no_output_____
###Markdown
Create the Pegasus summarization model
###Code
hub = {
'HF_MODEL_ID':'human-centered-summarization/financial-summarization-pegasus',
'HF_TASK':'summarization'
}
# create Hugging Face Model Class (documentation here: https://sagemaker.readthedocs.io/en/stable/frameworks/huggingface/sagemaker.huggingface.html#hugging-face-model)
summary_huggingface_model = HuggingFaceModel(
name=summarization_model_name,
transformers_version='4.6.1',
pytorch_version='1.7.1',
py_version='py36',
env=hub,
role=role,
sagemaker_session=sagemaker_session,
)
create_summary_model_step = CreateModelStep(
name="HFSECPegasusCreateModel",
model=summary_huggingface_model,
inputs=inputs,
# depends_on=['HFSECFinBertCreateDataset']
)
###Output
_____no_output_____
###Markdown
Step 3: Register modelUse the `RegisterModel` step to register both Hugging Face models for deployment through the SageMaker Model Registry.
###Code
sentiment_model_package_group_name = "HuggingFaceSECSentimentModelPackageGroup"
summary_model_package_group_name = "HuggingFaceSECSummaryModelPackageGroup"
model_approval_status = "Approved"
register_sentiment_model_step = RegisterModel(
name="HFSECFinBertRegisterModel",
model = sentiment_huggingface_model,
content_types=["application/json"],
response_types=["application/json"],
inference_instances=["ml.t2.medium", "ml.m4.4xlarge"],
transform_instances=["ml.m4.4xlarge"],
model_package_group_name = sentiment_model_package_group_name,
approval_status = model_approval_status,
depends_on=['HFSECFinBertCreateModel']
)
register_summary_model_step = RegisterModel(
name="HFSECPegasusRegisterModel",
model = summary_huggingface_model,
content_types=["application/json"],
response_types=["application/json"],
inference_instances=["ml.t2.medium", "ml.m4.4xlarge"],
transform_instances=["ml.m4.4xlarge"],
model_package_group_name = summary_model_package_group_name,
approval_status = model_approval_status,
depends_on=['HFSECPegasusCreateModel']
)
###Output
_____no_output_____
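###Markdown
After a pipeline execution that takes the register/deploy branch, the registered packages can be inspected in the model registry. A quick, illustrative check with boto3 (not part of the pipeline itself):

``` python
# list the most recent packages in the sentiment model package group
resp = sagemaker_boto_client.list_model_packages(
    ModelPackageGroupName=sentiment_model_package_group_name,
    SortBy="CreationTime",
    SortOrder="Descending",
    MaxResults=5,
)
for package in resp["ModelPackageSummaryList"]:
    print(package["ModelPackageArn"], package["ModelApprovalStatus"])
```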
###Markdown
Step 4: Deploy model We deploy the FinBert and Pegasus models from the model registry. **NOTE:** The models in the model registry are the pre-trained versions from the Hugging Face Model Hub. Each deployment step will attempt to deploy a SageMaker endpoint with its model and will write a property file upon successful completion. The Pipeline will make use of these property files to decide whether to execute the subsequent summarization and sentiment analysis inference steps.
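The property file is simply a small JSON document that the deployment script writes to its processing output directory; the `JsonGet` conditions defined below read the `model_created` key from it. As a sketch of the mechanism (the contents of `model_deploy_v2.py` are not reproduced in this notebook, so treat the exact key and value as assumptions consistent with the conditions below):

``` python
import json

# inside the deployment script, after the endpoint is confirmed InService;
# the path and file name match the ProcessingOutput/PropertyFile definitions below
with open('/opt/ml/processing/output/success.json', 'w') as f:
    json.dump({'model_created': 'Y'}, f)
```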
###Code
deploy_model_instance_type = "ml.m4.4xlarge"
deploy_model_instance_count = "1"
sentiment_endpoint_name = "HFSECFinBertModel-endpoint"
summarization_endpoint_name = "HFSECPegasusModel-endpoint"
%store -r
print (f"using ecr container in {CONTAINER_IMAGE_URI}")
s3_client.upload_file(Filename='./scripts/model_deploy_v2.py', Bucket=default_bucket, Key=f'{prefix}/code/model_deploy_v2.py')
deploy_model_script_uri = f's3://{default_bucket}/{prefix}/code/model_deploy_v2.py'
deploy_model_processor = ScriptProcessor(command=['python3'],
image_uri=CONTAINER_IMAGE_URI,
role=role,
instance_count=processing_instance_count,
instance_type=processing_instance_type)
sentiment_deploy_response = PropertyFile(
name="SentimentPropertyFile",
output_name="sentiment_deploy_response",
path="success.json" # the property file generated by the script
)
sentiment_deploy_step = ProcessingStep(
name='HFSECFinBertDeployModel',
processor=deploy_model_processor,
outputs=[sagemaker.processing.ProcessingOutput(output_name='sentiment_deploy_response',
source='/opt/ml/processing/output',
destination=f's3://{default_bucket}/{prefix}/nlp-pipeline/sentimentResponse')],
job_arguments=[
"--initial-instance-count", deploy_model_instance_count,
"--endpoint-instance-type", deploy_model_instance_type,
"--endpoint-name", sentiment_endpoint_name,
"--model-package-group-name", sentiment_model_package_group_name,
"--role", role,
"--region", region,
],
property_files=[sentiment_deploy_response],
code=deploy_model_script_uri,
depends_on=['HFSECFinBertRegisterModel'])
summary_deploy_response = PropertyFile(
name="SummaryPropertyFile",
output_name="summary_deploy_response",
path="success.json" # the property file generated by the script
)
summary_deploy_step = ProcessingStep(
name='HFSECPegasusDeployModel',
processor=deploy_model_processor,
outputs=[sagemaker.processing.ProcessingOutput(output_name='summary_deploy_response',
source='/opt/ml/processing/output',
destination=f's3://{default_bucket}/{prefix}/nlp-pipeline/summaryResponse')],
job_arguments=[
"--initial-instance-count", deploy_model_instance_count,
"--endpoint-instance-type", deploy_model_instance_type,
"--endpoint-name", summarization_endpoint_name,
"--model-package-group-name", summary_model_package_group_name,
"--role", role,
"--region", region,
],
property_files=[summary_deploy_response],
code=deploy_model_script_uri,
depends_on=['HFSECPegasusRegisterModel'])
###Output
_____no_output_____
###Markdown
Create pipeline conditions to check if the Endpoint deployments were successfulWe will define conditions that check whether the model deployments were successful, based on the property files generated by the deployment steps of both the FinBert and Pegasus models. If both conditions evaluate to `True`, then we will run our subsequent inference steps for summarization and sentiment analysis.
###Code
from sagemaker.workflow.conditions import ConditionEquals
from sagemaker.workflow.condition_step import ( ConditionStep )
from sagemaker.workflow.functions import JsonGet
sentiment_condition_eq = ConditionEquals(
left=JsonGet( #the left value of the evaluation expression
step_name="HFSECFinBertDeployModel", #the step from which the property file will be grabbed
property_file=sentiment_deploy_response, #the property file instance that was created earlier in Step 4
json_path="model_created" #the JSON path of the property within the property file success.json
),
right="Y" #the right value of the evaluation expression, i.e. the AUC threshold
)
summary_condition_eq = ConditionEquals(
left=JsonGet( #the left value of the evaluation expression
step_name="HFSECPegasusDeployModel", #the step from which the property file will be grabbed
property_file=summary_deploy_response, #the property file instance that was created earlier in Step 4
json_path="model_created" #the JSON path of the property within the property file success.json
),
right="Y" #the right value of the evaluation expression, i.e. the AUC threshold
)
summarize_processor = ScriptProcessor(command=['python3'],
image_uri=CONTAINER_IMAGE_URI,
role=role,
instance_count=processing_instance_count,
                                      instance_type=processing_instance_type)

# Define and upload the summarization script before it is referenced below.
# (Step 5 re-uploads the same script, which is harmless.)
summarize_script_uri = f's3://{default_bucket}/{prefix}/code/summarize.py'
s3_client.upload_file(Filename='./scripts/summarize.py', Bucket=default_bucket, Key=f'{prefix}/code/summarize.py')
summarize_step_2 = ProcessingStep(
name='HFSECPegasusSummarizer_2',
processor=summarize_processor,
inputs=[sagemaker.processing.ProcessingInput(input_name='summary_data',
source=f'{inference_input_data}/10k10q',
destination='/opt/ml/processing/input')],
outputs=[sagemaker.processing.ProcessingOutput(output_name='summarized_data',
source='/opt/ml/processing/output',
destination=f'{inference_input_data}/10k10q/summary')],
job_arguments=["--ticker-cik", inference_ticker_cik,
"--region", region,
"--endpoint-name", summarization_endpoint_name],
code=summarize_script_uri)
deploy_condition_step = ConditionStep(
name="HFSECFinBertDeployConditionCheck",
conditions=[sentiment_condition_eq,summary_condition_eq], #the equal to conditions defined above
if_steps=[summarize_step_2], #if the condition evaluates to true then run the summarization step
else_steps=[], #there are no else steps so we will keep it empty
depends_on=['HFSECFinBertDeployModel','HFSECPegasusDeployModel'] #dependencies on both Finbert and Pegasus Deployment steps
)
###Output
_____no_output_____
###Markdown
Step 5: Summarize SEC report step This step makes use of the Pegasus summarization model endpoint to summarize the MDNA text from the SEC report. Because the MDNA text is usually large, we derive a short summary of the overall text so that the overall sentiment can be determined.
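Inside `summarize.py`, the script invokes the deployed endpoint. A minimal sketch of a single invocation (the script's internals are not reproduced here, so the exact call is an assumption; the input text and generation parameters are illustrative):

``` python
from sagemaker.huggingface import HuggingFacePredictor

predictor = HuggingFacePredictor(endpoint_name=summarization_endpoint_name)
response = predictor.predict({
    "inputs": "Operating income increased to $8.9 billion in the fourth quarter ...",  # illustrative MDNA snippet
    "parameters": {"max_length": 32},  # illustrative generation parameter
})
print(response[0]["summary_text"])
```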
###Code
summarize_processor = ScriptProcessor(command=['python3'],
image_uri=CONTAINER_IMAGE_URI,
role=role,
instance_count=processing_instance_count,
instance_type=processing_instance_type)
summarize_script_uri = f's3://{default_bucket}/{prefix}/code/summarize.py'
s3_client.upload_file(Filename='./scripts/summarize.py', Bucket=default_bucket, Key=f'{prefix}/code/summarize.py')
summarize_step_1 = ProcessingStep(
name='HFSECPegasusSummarizer_1',
processor=summarize_processor,
inputs=[sagemaker.processing.ProcessingInput(input_name='summary_data',
source=f'{inference_input_data}/10k10q',
destination='/opt/ml/processing/input')],
outputs=[sagemaker.processing.ProcessingOutput(output_name='summarized_data',
source='/opt/ml/processing/output',
destination=f'{inference_input_data}/10k10q/summary')],
job_arguments=["--ticker-cik", inference_ticker_cik,
"--region", region,
"--endpoint-name", summarization_endpoint_name],
code=summarize_script_uri)
summarize_step_2 = ProcessingStep(
name='HFSECPegasusSummarizer_2',
processor=summarize_processor,
inputs=[sagemaker.processing.ProcessingInput(input_name='summary_data',
source=f'{inference_input_data}/10k10q',
destination='/opt/ml/processing/input')],
outputs=[sagemaker.processing.ProcessingOutput(output_name='summarized_data',
source='/opt/ml/processing/output',
destination=f'{inference_input_data}/10k10q/summary')],
job_arguments=["--ticker-cik", inference_ticker_cik,
"--region", region,
"--endpoint-name", summarization_endpoint_name],
code=summarize_script_uri)
###Output
_____no_output_____
###Markdown
Step 6: Sentiment inference step - SEC summary and news articles This step uses the MDNA summary (determined by the previous step) and the news articles to find out the sentiment of the company's financials and what market trends are indicating. This helps us understand the company's financial outlook and current position without leaning solely on the company's forward-looking statements, and brings objective market opinions into the picture.
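A minimal sketch of a single sentiment call against the FinBERT endpoint (again, the internals of `sentiment.py` are not reproduced here, so the exact invocation is an assumption; the input text is illustrative):

``` python
from sagemaker.huggingface import HuggingFacePredictor

sentiment_predictor = HuggingFacePredictor(endpoint_name=sentiment_endpoint_name)
result = sentiment_predictor.predict({
    "inputs": "Net sales increased 15% year over year, driven by strong demand."
})
print(result)  # e.g. [{'label': 'positive', 'score': 0.95}] -- illustrative output
```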
###Code
sentiment_processor = ScriptProcessor(command=['python3'],
image_uri=CONTAINER_IMAGE_URI,
role=role,
instance_count=processing_instance_count,
instance_type=processing_instance_type)
sentiment_script_uri = f's3://{default_bucket}/{prefix}/code/sentiment.py'
s3_client.upload_file(Filename='./scripts/sentiment.py', Bucket=default_bucket, Key=f'{prefix}/code/sentiment.py')
sentiment_step_1 = ProcessingStep(
name='HFSECFinBertSentiment_1',
    processor=sentiment_processor,
inputs=[sagemaker.processing.ProcessingInput(input_name='sec_summary',
source=f'{inference_input_data}/10k10q/summary',
destination='/opt/ml/processing/input/10k10q'),
sagemaker.processing.ProcessingInput(input_name='articles',
source=f'{inference_input_data}/articles',
destination='/opt/ml/processing/input/articles')],
outputs=[sagemaker.processing.ProcessingOutput(output_name='sentiment_data',
source='/opt/ml/processing/output',
destination=f'{inference_input_data}/sentiment')],
job_arguments=["--ticker-cik", inference_ticker_cik,
"--region", region,
"--endpoint-name", sentiment_endpoint_name],
code=sentiment_script_uri,
depends_on=["HFSECPegasusSummarizer_1"])
sentiment_step_2 = ProcessingStep(
name='HFSECFinBertSentiment_2',
    processor=sentiment_processor,
inputs=[sagemaker.processing.ProcessingInput(input_name='sec_summary',
source=f'{inference_input_data}/10k10q/summary',
destination='/opt/ml/processing/input/10k10q'),
sagemaker.processing.ProcessingInput(input_name='articles',
source=f'{inference_input_data}/articles',
destination='/opt/ml/processing/input/articles')],
outputs=[sagemaker.processing.ProcessingOutput(output_name='sentiment_data',
source='/opt/ml/processing/output',
destination=f'{inference_input_data}/sentiment')],
job_arguments=["--ticker-cik", inference_ticker_cik,
"--region", region,
"--endpoint-name", sentiment_endpoint_name],
code=sentiment_script_uri,
depends_on=["HFSECPegasusSummarizer_2"])
###Output
_____no_output_____
###Markdown
Condition Step As explained earlier, this is a top-level condition step. Based on the value of the pipeline parameter `model_register_deploy`, this step determines whether we want to register and deploy a new version of the models and then run inference, or simply run inference using the existing endpoints.
###Code
from sagemaker.workflow.conditions import ConditionEquals
from sagemaker.workflow.condition_step import ( ConditionStep )
condition_eq = ConditionEquals(
left=model_register_deploy,
right="Y"
)
#Define the condition step
condition_step = ConditionStep(
name="HFSECFinBertConditionCheck",
    conditions=[condition_eq], #true when model_register_deploy is "Y"
if_steps=[
create_sentiment_model_step,
register_sentiment_model_step,
sentiment_deploy_step,
create_summary_model_step,
register_summary_model_step,
summary_deploy_step
], # if the condition evaluates to true then create model, register, and deploy
else_steps=[summarize_step_1],
depends_on=['HFSECFinBertCreateDataset']
)
###Output
_____no_output_____
###Markdown
Combine Pipeline steps and run
###Code
pipeline_name = 'FinbertSECDeploymentPipeline'
pipeline = Pipeline(
name=pipeline_name,
parameters=[
processing_instance_type,
processing_instance_count,
model_register_deploy,
inference_ticker_cik,
inference_input_data],
steps=[
create_dataset_step,
condition_step,
deploy_condition_step,
sentiment_step_1,
sentiment_step_2
],
)
pipeline.upsert(role_arn=role)
%%time
start_response = pipeline.start()
start_response.wait(delay=60, max_attempts=200)
start_response.describe()
###Output
_____no_output_____
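###Markdown
To re-run the pipeline for a different company without registering and deploying the models again, the parameters defined earlier can be overridden at execution time. A minimal sketch (the ticker value is illustrative):

``` python
# inference-only run against the endpoints deployed by the first execution
start_response = pipeline.start(
    parameters={
        "ModelRegisterDeploy": "N",    # skip the register/deploy branch
        "InferenceTickerCik": "GOOG",  # illustrative ticker
    }
)
start_response.wait(delay=60, max_attempts=200)
```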
###Markdown
The following image shows a successful execution of the NLP end-to-end pipeline. --- View Evaluation ResultsOnce the pipeline execution completes, we can download the sentiment results from S3 and view them.
###Code
s3_client.download_file(default_bucket, f'{prefix}/nlp-pipeline/inf-data/sentiment/{ticker}_sentiment_result.csv', f'./data/{ticker}_sentiment_result.csv')
sentiment_df = pd.read_csv(f'./data/{ticker}_sentiment_result.csv')
sentiment_df
###Output
_____no_output_____
###Markdown
--- Clean up Delete the SageMaker Pipeline and the SageMaker Endpoints created by the pipeline.
###Code
def clean_up_resources():
pipeline.delete()
sagemaker_boto_client.delete_endpoint(EndpointName=sentiment_endpoint_name)
sagemaker_boto_client.delete_endpoint(EndpointName=summarization_endpoint_name)
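
# Uncomment to actually delete the pipeline and both endpoints when finished:
# clean_up_resources()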
###Output
_____no_output_____
###Markdown
Understanding Trends in Company Valuation with NLP - Part 2: NLP Company Earnings Analysis Pipeline Introduction Orchestrating company earnings trend analysis, using SEC filings, news sentiment with the Hugging Face transformers, and Amazon SageMaker PipelinesIn this notebook, we demonstrate how to summarize and derive sentiments out of Security and Exchange Commission reports filed by a publicly traded organization. We will derive the overall market sentiments about the said organization through financial news articles within the same financial period to present a fair view of the organization vs. market sentiments and outlook about the company's overall valuation and performance. In addition to this we will also identify the most popular keywords and entities within the news articles about that organization.In order to achieve the above we will be using multiple SageMaker Hugging Face based NLP transformers for the downstream NLP tasks of Summarization (e.g., of the news and SEC MDNA sections) and Sentiment Analysis (of the resulting summaries). --- Using SageMaker Pipelines Amazon SageMaker Pipelines is the first purpose-built, easy-to-use continuous integration and continuous delivery (CI/CD) service for machine learning (ML). With SageMaker Pipelines, you can create, automate, and manage end-to-end ML workflows at scale. Orchestrating workflows across each step of the machine learning process (e.g. exploring and preparing data, experimenting with different algorithms and parameters, training and tuning models, and deploying models to production) can take months of coding.Since it is purpose-built for machine learning, SageMaker Pipelines helps you automate different steps of the ML workflow, including data loading, data transformation, training and tuning, and deployment. With SageMaker Pipelines, you can build dozens of ML models a week, manage massive volumes of data, thousands of training experiments, and hundreds of different model versions. You can share and re-use workflows to recreate or optimize models, helping you scale ML throughout your organization. --- Understanding trends in company valuation (or similar) with NLP**Natural language processing (NLP)** is a subfield of linguistics, computer science, and artificial intelligence concerned with the interactions between computers and human language, in particular how to program computers to process and analyze large amounts of natural language data. The goal is a computer capable of "understanding" the contents of documents, including the contextual nuances of the language within them. The technology can then accurately extract information and insights contained in the documents as well as categorize and organize the documents themselves. (Source: [Wikipedia](https://en.wikipedia.org/wiki/Natural_language_processing))We are going to demonstrate how to summarize and derive sentiments out of Security and Exchange Commission reports filed by a publicly traded organization. We are also going to derive the overall market sentiments about the said organization through financial news articles within the same financial period to present a fair view of the organization vs. market sentiments and outlook about the company's overall valuation and performance. 
In addition to this we will also identify the most popular keywords and entities within the news articles about that organization.In order to achieve the above we will be using multiple SageMaker Hugging Face based NLP transformers with summarization and sentiment analysis downstream tasks.* Summarization of financial text from SEC reports and news articles will be done via [Pegasus for Financial Summarization model](https://huggingface.co/human-centered-summarization/financial-summarization-pegasus) based on the paper [Towards Human-Centered Summarization: A Case Study on Financial News](https://aclanthology.org/2021.hcinlp-1.4/). * Sentiment analysis on summarized SEC financial report and news articles will be done via pre-trained NLP model to analyze sentiment of financial text called [FinBERT](https://huggingface.co/ProsusAI/finbert). Paper: [ FinBERT: Financial Sentiment Analysis with Pre-trained Language Models](https://arxiv.org/abs/1908.10063)--- SEC DatasetThe starting point for a vast amount of financial NLP is text in SEC filings. The SEC requires companies to report different types of information related to various events involving companies. The full list of SEC forms is here: https://www.sec.gov/forms.SEC filings are widely used by financial services companies as a source of information about companies in order to make trading, lending, investment, and risk management decisions. Because these filings are required by regulation, they are of high quality and veracity. They contain forward-looking information that helps with forecasts and are written with a view to the future, required by regulation. In addition, in recent times, the value of historical time-series data has degraded, since economies have been structurally transformed by trade wars, pandemics, and political upheavals. Therefore, text as a source of forward-looking information has been increasing in relevance. Obtain the dataset using the SageMaker JumpStart Industry Python SDKDownloading SEC filings is done from the SEC's Electronic Data Gathering, Analysis, and Retrieval (EDGAR) website, which provides open data access. EDGAR is the primary system under the U.S. Securities And Exchange Commission (SEC) for companies and others submitting documents under the Securities Act of 1933, the Securities Exchange Act of 1934, the Trust Indenture Act of 1939, and the Investment Company Act of 1940. EDGAR contains millions of company and individual filings. The system processes about 3,000 filings per day, serves up 3,000 terabytes of data to the public annually, and accommodates 40,000 new filers per year on average.There are several ways to download the data, and some open source packages available to extract the text from these filings. However, these require extensive programming and are not always easy-to-use. We provide a simple one-API call that will create a dataset in a few lines of code, for any period of time and for numerous tickers.We have wrapped the extraction functionality into a SageMaker processing container and provide this notebook to enable users to download a dataset of filings with metadata such as dates and parsed plain text that can then be used for machine learning using other SageMaker tools. This is included in the [SageMaker Industry Jumpstart Industry](https://aws.amazon.com/blogs/machine-learning/use-pre-trained-financial-language-models-for-transfer-learning-in-amazon-sagemaker-jumpstart/) library for financial language models. 
Users only need to specify a date range and a list of ticker symbols, and the library will take care of the rest.As of now, the solution supports extracting a popular subset of SEC forms in plain text (excluding tables): 10-K, 10-Q, 8-K, 497, 497K, S-3ASR, and N-1A. For each of these, we provide examples throughout this notebook and a brief description of each form. For the 10-K and 10-Q forms, filed every year or quarter, we also extract the Management Discussion and Analysis (MDNA) section, which is the primary forward-looking section in the filing. This is the section that has been most widely used in financial text analysis. Therefore, we provide this section automatically in a separate column of the dataframe alongside the full text of the filing.The extracted dataframe is written to S3 storage and to the local notebook instance. --- News articles related to the stock symbol -- datasetWe will use the MIT Licensed [NewsCatcher API](https://docs.newscatcherapi.com/) to grab top 4-5 articles about the specific organization using filters, however other sources such as Social media feeds, RSS Feeds can also be used. The first step in the pipeline is to fetch the SEC report from the EDGAR database using the [SageMaker Industry Jumpstart Industry](https://aws.amazon.com/blogs/machine-learning/use-pre-trained-financial-language-models-for-transfer-learning-in-amazon-sagemaker-jumpstart/) library for Financial language models. This library provides us an easy to use functionality to obtain either one or multiple SEC reports for one or more Ticker symbols or CIKs. The ticker or CIK number will be passed to the SageMaker Pipeline using Pipeline parameter `inference_ticker_cik`. For demo purposes of this Pipeline we will focus on a single Ticker/CIK number at a time and the MDNA section of the 10-K form. The first processing will extract the MDNA from the 10-K form for a company and will also gather few news articles related to the company from the NewsCatcher API. This data will ultimately be used for summarization and then finally sentiment analysis. --- MLOps for NLP using SageMaker PipelinesWe will set up the following SageMaker Pipeline. The Pipleline has two flows depending on what the value for `model_register_deploy` Pipeline parameter is set to. If the value is set to `Y` we want the pipeline to register the model and deploy the latest version of the model from the model registry to the SageMaker endpoint. If the value is set to `N` then we simply want to run inferences using the FinBert and the Pegasus models using the Ticker symbol (or CIK number) that is passed to the pipeline using the `inference_ticker_cik` Pipeline parameter. Note: You must execute the script-processor-custom-container.ipynb notebook before you can set up the SageMaker Pipeline. This notebook creates a custom Docker image and registers it in Amazon Elastic Container Registry (Amazon ECR) for the pipeline to use. The image contains of all the dependencies required. --- Set Up SageMaker Project Install and import packages
###Code
# Install updated version of SageMaker
# !pip install -q sagemaker==2.49
!pip install sagemaker --upgrade
!pip install transformers
!pip install typing
!pip install sentencepiece
!pip install fiscalyear
#Install SageMaker Jumpstart Industry
!pip install smjsindustry
###Output
_____no_output_____
###Markdown
NOTE: After installing an updated version of SageMaker and PyTorch, save the notebook and then restart your kernel.
###Code
import boto3
import botocore
import pandas as pd
import sagemaker
print(f'SageMaker version: {sagemaker.__version__}')
from sagemaker.huggingface import HuggingFace
from sagemaker.huggingface import HuggingFaceModel
from sagemaker.workflow.pipeline import Pipeline
from sagemaker.workflow.steps import CreateModelStep
from sagemaker.workflow.step_collections import RegisterModel
from sagemaker.workflow.steps import ProcessingStep
from sagemaker.workflow.steps import TransformStep
from sagemaker.workflow.properties import PropertyFile
from sagemaker.workflow.parameters import (ParameterInteger, ParameterString)
from sagemaker.sklearn.processing import ScriptProcessor
from sagemaker.lambda_helper import Lambda
from sagemaker.workflow.lambda_step import (
LambdaStep,
LambdaOutput,
LambdaOutputTypeEnum,
)
###Output
_____no_output_____
###Markdown
Define parameters that you'll use throughout the notebook
###Code
s3 = boto3.resource("s3")
region = boto3.Session().region_name
sagemaker_session = sagemaker.Session()
role = sagemaker.get_execution_role()
sagemaker_role = role
default_bucket = sagemaker_session.default_bucket()
prefix = 'nlp-e2e-mlops'
s3_client = boto3.client('s3', region_name=region)
sagemaker_boto_client = boto3.client("sagemaker", region_name=region)
#deploy_model_instance_type = "ml.m4.8xlarge"
deploy_model_instance_type = "ml.m4.xlarge"
inference_instances=["ml.t2.medium", "ml.m5.xlarge", "ml.m5.2xlarge", "ml.m5.4xlarge", "ml.m5.12xlarge"]
transform_instances=["ml.m5.xlarge"]
PROCESSING_INSTANCE="ml.m4.4xlarge"
ticker='AMZN'
%store -r
print(f's3://{default_bucket}/{prefix}/code/model_deploy.py')
print(f'SageMaker Role: {role}')
###Output
_____no_output_____
###Markdown
Define parameters to parametrize Pipeline Execution Using SageMaker Pipelines, we can define the steps to be included in a pipeline and then use parameters to modify the pipeline at execution time, without having to change the pipeline definition. We'll provide some default parameter values that can be overridden on pipeline execution.
###Code
#Define some default parameters:
#specify default number of instances for processing step
processing_instance_count = ParameterInteger(
name="ProcessingInstanceCount",
default_value=1
)
#specify default instance type for processing step
processing_instance_type = ParameterString(
name="ProcessingInstanceType",
default_value=PROCESSING_INSTANCE
)
#specify location of inference data for data processing step
inference_input_data = ParameterString(
name="InferenceData",
default_value=f's3://{default_bucket}/{prefix}/nlp-pipeline/inf-data',
)
#Specify the Ticker CIK for the pipeline
inference_ticker_cik = ParameterString(
name="InferenceTickerCik",
default_value=ticker,
)
#specify default method for model approval
model_approval_status = ParameterString(
name="ModelApprovalStatus",
default_value="PendingManualApproval"
)
#specify if new model needs to be registered and deployed
model_register_deploy = ParameterString(
name="ModelRegisterDeploy",
default_value="Y"
)
%store
# These are the stored variables, the container is created in the
# previous notebook 01_script-processor-custom-container.ipynb
%pylab inline
%store -r
###Output
_____no_output_____
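###Markdown
As a quick illustration of how these parameters are used: once the `pipeline` object is assembled at the end of this notebook, any of the defaults above can be overridden for a single execution by passing a `parameters` dictionary to `pipeline.start`. A minimal sketch (the parameter names are the ones defined above; the ticker value is just an example):
###Code
# Sketch: override pipeline parameters for one execution.
# `pipeline` is the Pipeline object assembled at the end of this notebook,
# so uncomment and run this only after the pipeline has been upserted.
# execution = pipeline.start(
#     parameters={
#         "InferenceTickerCik": "GOOG",  # run inference for a different ticker
#         "ModelRegisterDeploy": "N",    # skip register/deploy, reuse existing endpoints
#     }
# )
###Output
_____no_output_____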
###Markdown
--- Preparing SEC dataset Before we dive right into setting up the pipeline, let's take a look at how the SageMaker JumpStart Industry SDK for financial language models helps obtain the dataset from SEC forms and what features are available for us to use. **Note:** The code cells in this section are completely optional and for information purposes only; we will use the SageMaker JumpStart Industry SDK directly in the pipeline. Let's install the required dependencies first. Install the SageMaker JumpStart Industry SDK The functionality is delivered through a client-side SDK. The first step requires pip installing a Python package that interacts with a SageMaker processing container. The retrieval, parsing, transforming, and scoring of text is a complex process and uses different algorithms and packages. To make this seamless and stable for the user, the functionality is packaged into a SageMaker container. This lifts the load of installing and maintaining the workflow, reducing the user effort to a pip install followed by a single API call.
###Code
!pip install smjsindustry==1.0.0
###Output
_____no_output_____
###Markdown
As an example, we will pull 10-K/10-Q filings for a few tickers/CIKs (including AMZN) from EDGAR and write the data as CSV to S3. Below is the single block of code that contains the API call; the options are all self-explanatory.
###Code
# from smfinance import SECDataSetConfig, DataLoader
from smjsindustry.finance import DataLoader
from smjsindustry.finance.processor_config import EDGARDataSetConfig
###Output
_____no_output_____
###Markdown
The extracted reports will be saved to an S3 bucket for us to review. This code will also be used in the Pipeline to fetch the report for the Ticker or CIK number passed to the SageMaker Pipeline. Executing the following code cell will run a processing job which will fetch the SEC reports from the EDGAR database. Obtain SEC data using the SageMaker JumpStart Industry SDK
###Code
%%time
dataset_config = EDGARDataSetConfig(
tickers_or_ciks=['amzn','goog', '27904', 'FB'], # list of stock tickers or CIKs
form_types=['10-K', '10-Q'], # list of SEC form types
filing_date_start='2019-01-01', # starting filing date
filing_date_end='2020-12-31', # ending filing date
email_as_user_agent='[email protected]') # user agent email
data_loader = DataLoader(
role=sagemaker.get_execution_role(), # loading job execution role
instance_count=1, # instances number, limit varies with instance type
instance_type='ml.c5.2xlarge', # instance type
volume_size_in_gb=30, # size in GB of the EBS volume to use
volume_kms_key=None, # KMS key for the processing volume
output_kms_key=None, # KMS key ID for processing job outputs
max_runtime_in_seconds=None, # timeout in seconds. Default is 24 hours.
sagemaker_session=sagemaker.Session(), # session object
tags=None) # a list of key-value pairs
data_loader.load(
dataset_config,
's3://{}/{}'.format(default_bucket, 'sample-sec-data'), # output s3 prefix (both bucket and folder names are required)
'dataset_10k_10q.csv', # output file name
wait=True,
logs=True)
###Output
_____no_output_____
###Markdown
Output The output of the `data_loader` processing job is a `CSV` file. We see the filings for different quarters. The filing date comes within a month of the end date of the reporting period; both dates are collected and displayed in the dataframe. The column `text` contains the full text of the report, but the tables are not extracted. The values in the tables in the filings are balance-sheet and income-statement data (numeric/tabular) and are easily available elsewhere, as they are reported in numeric databases. The last column of the dataframe, named `mdna`, comprises the Management Discussion & Analysis section, which is the primary forward-looking section in the filing and the one most widely used in financial text analysis. We will therefore use the `mdna` text to derive the sentiment of the overall filing in this example.
###Code
!mkdir -p data
print(f"{default_bucket}/{prefix}/")
s3_client.download_file(default_bucket, 'sample-sec-data/dataset_10k_10q.csv', './data/dataset_10k_10q.csv')
data_frame_10k_10q = pd.read_csv(f'./data/dataset_10k_10q.csv')
data_frame_10k_10q
###Output
_____no_output_____
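###Markdown
Since the `text` and `mdna` columns can be very long, a quick way to sanity-check the extraction is to look at their lengths rather than printing them. A small sketch (the two column names are the ones described above):
###Code
# Sanity-check the extracted sections by length instead of printing the full text.
print(data_frame_10k_10q['text'].str.len().describe())
print(data_frame_10k_10q['mdna'].str.len().describe())
###Output
_____no_output_____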
###Markdown
--- Set Up Your MLOps NLP Pipeline with SageMaker Pipelines Step 1: Data pre-processing - extract SEC data and news about the company Define a processing step to prepare SEC data for inference We will define a processing step to extract 10-K and 10-Q forms for a specific organization, using either the company [Stock Ticker](https://www.investopedia.com/ask/answers/12/what-is-a-stock-ticker.asp) symbol or the [CIK (Central Index Key)](https://www.sec.gov/edgar/searchedgar/cik.htm) used to look up reports in SEC's EDGAR system. You can find the company stock ticker symbol to CIK number mapping [here](https://www.sec.gov/include/ticker.txt). This step will also collect news article snippets related to the company using the NewsCatcher API. **Important**: It is recommended to use CIKs as the input. Tickers will be internally converted to CIKs according to the [mapping file](https://www.sec.gov/include/ticker.txt). One ticker may map to multiple CIKs, but only the latest ticker-to-CIK mapping is supported, so provide the old CIKs in the input when you want historical filings. Also note that even though the client-side SDK allows you to download multiple SEC reports for multiple CIKs at a time, we will set up our data preprocessing step to grab exactly one SEC report for one CIK (company/organization).
###Code
# We used store magic in the previous notebook (01_script-processor-custom-container.ipynb)
# to instantiate the container in the region of choice.
CONTAINER_IMAGE_URI
loader_instance_type = "ml.c5.2xlarge"
create_dataset_processor = ScriptProcessor(command=['python3'],
image_uri=CONTAINER_IMAGE_URI,
role=role,
instance_count=processing_instance_count,
instance_type=processing_instance_type)
###Output
_____no_output_____
###Markdown
Create a processing step to process the SEC data for inference:
###Code
create_dataset_script_uri = f's3://{default_bucket}/{prefix}/code/data-processing.py'
s3_client.upload_file(Filename='./scripts/data-processing.py', Bucket=default_bucket, Key=f'{prefix}/code/data-processing.py')
create_dataset_step = ProcessingStep(
name='HFSECFinBertCreateDataset',
processor=create_dataset_processor,
outputs=[sagemaker.processing.ProcessingOutput(output_name='report_data',
source='/opt/ml/processing/output/10k10q',
destination=f'{inference_input_data}/10k10q'),
sagemaker.processing.ProcessingOutput(output_name='article_data',
source='/opt/ml/processing/output/articles',
destination=f'{inference_input_data}/articles')],
job_arguments=["--ticker-cik", inference_ticker_cik,
"--instance-type", loader_instance_type,
"--region", region,
"--bucket", default_bucket,
"--prefix", prefix,
"--role", role],
code=create_dataset_script_uri)
###Output
_____no_output_____
###Markdown
Step 2: Create models for summarization and sentiment analysis
###Code
sentiment_model_name="HFSECFinbertModel"
summarization_model_name="HFSECPegasusModel"
###Output
_____no_output_____
###Markdown
Create the `finBert` model for Sentiment Analysis
###Code
# Reference the pre-trained model from the Hugging Face Hub using the HuggingFaceModel class
from sagemaker.huggingface import HuggingFaceModel
hub = {
'HF_MODEL_ID':'ProsusAI/finbert',
'HF_TASK':'text-classification'
}
# create Hugging Face Model Class (documentation here: https://sagemaker.readthedocs.io/en/stable/frameworks/huggingface/sagemaker.huggingface.html#hugging-face-model)
sentiment_huggingface_model = HuggingFaceModel(
name=sentiment_model_name,
transformers_version='4.6.1',
pytorch_version='1.7.1',
py_version='py36',
env=hub,
role=role,
sagemaker_session=sagemaker_session,
)
inputs = sagemaker.inputs.CreateModelInput(
instance_type="ml.m4.xlarge"
)
create_sentiment_model_step = CreateModelStep(
name="HFSECFinBertCreateModel",
model=sentiment_huggingface_model,
inputs=inputs,
# depends_on=['HFSECFinBertCreateDataset']
)
###Output
_____no_output_____
###Markdown
Create the Pegasus summarization model
###Code
hub = {
'HF_MODEL_ID':'human-centered-summarization/financial-summarization-pegasus',
'HF_TASK':'summarization'
}
# create Hugging Face Model Class (documentation here: https://sagemaker.readthedocs.io/en/stable/frameworks/huggingface/sagemaker.huggingface.html#hugging-face-model)
summary_huggingface_model = HuggingFaceModel(
name=summarization_model_name,
transformers_version='4.6.1',
pytorch_version='1.7.1',
py_version='py36',
env=hub,
role=role,
sagemaker_session=sagemaker_session,
)
create_summary_model_step = CreateModelStep(
name="HFSECPegasusCreateModel",
model=summary_huggingface_model,
inputs=inputs,
# depends_on=['HFSECFinBertCreateDataset']
)
###Output
_____no_output_____
###Markdown
Step 3: Register model Register the Hugging Face models for deployment using `RegisterModel` steps; deployment itself is handled in the next step as a custom processing step.
###Code
sentiment_model_package_group_name = "HuggingFaceSECSentimentModelPackageGroup"
summary_model_package_group_name = "HuggingFaceSECSummaryModelPackageGroup"
# NOTE: this fixed value overrides the ModelApprovalStatus pipeline parameter defined earlier
model_approval_status = "Approved"
register_sentiment_model_step = RegisterModel(
name="HFSECFinBertRegisterModel",
model = sentiment_huggingface_model,
content_types=["application/json"],
response_types=["application/json"],
inference_instances=["ml.t2.medium", "ml.m4.4xlarge"],
transform_instances=["ml.m4.4xlarge"],
model_package_group_name = sentiment_model_package_group_name,
approval_status = model_approval_status,
depends_on=['HFSECFinBertCreateModel']
)
register_summary_model_step = RegisterModel(
name="HFSECPegasusRegisterModel",
model = summary_huggingface_model,
content_types=["application/json"],
response_types=["application/json"],
inference_instances=["ml.t2.medium", "ml.m4.4xlarge"],
transform_instances=["ml.m4.4xlarge"],
model_package_group_name = summary_model_package_group_name,
approval_status = model_approval_status,
depends_on=['HFSECPegasusCreateModel']
)
###Output
_____no_output_____
###Markdown
Step 4: Deploy model We deploy the FinBert and Pegasus models from the model registry. **NOTE:** The models in the model registry are the pre-trained versions from the Hugging Face Model Hub. Each deployment step will attempt to deploy a SageMaker endpoint with its model and will write a property file upon successful completion. The pipeline uses these property files to decide whether to execute the subsequent summarization and sentiment-analysis inference steps.
###Code
deploy_model_instance_type = "ml.m4.4xlarge"
deploy_model_instance_count = "1"
sentiment_endpoint_name = "HFSECFinBertModel-endpoint"
summarization_endpoint_name = "HFSECPegasusModel-endpoint"
%store -r
print (f"using ecr container in {CONTAINER_IMAGE_URI}")
s3_client.upload_file(Filename='./scripts/model_deploy_v2.py', Bucket=default_bucket, Key=f'{prefix}/code/model_deploy_v2.py')
deploy_model_script_uri = f's3://{default_bucket}/{prefix}/code/model_deploy_v2.py'
deploy_model_processor = ScriptProcessor(command=['python3'],
image_uri=CONTAINER_IMAGE_URI,
role=role,
instance_count=processing_instance_count,
instance_type=processing_instance_type)
sentiment_deploy_response = PropertyFile(
name="SentimentPropertyFile",
output_name="sentiment_deploy_response",
path="success.json" # the property file generated by the script
)
sentiment_deploy_step = ProcessingStep(
name='HFSECFinBertDeployModel',
processor=deploy_model_processor,
outputs=[sagemaker.processing.ProcessingOutput(output_name='sentiment_deploy_response',
source='/opt/ml/processing/output',
destination=f's3://{default_bucket}/{prefix}/nlp-pipeline/sentimentResponse')],
job_arguments=[
"--initial-instance-count", deploy_model_instance_count,
"--endpoint-instance-type", deploy_model_instance_type,
"--endpoint-name", sentiment_endpoint_name,
"--model-package-group-name", sentiment_model_package_group_name,
"--role", role,
"--region", region,
],
property_files=[sentiment_deploy_response],
code=deploy_model_script_uri,
depends_on=['HFSECFinBertRegisterModel'])
summary_deploy_response = PropertyFile(
name="SummaryPropertyFile",
output_name="summary_deploy_response",
path="success.json" # the property file generated by the script
)
summary_deploy_step = ProcessingStep(
name='HFSECPegasusDeployModel',
processor=deploy_model_processor,
outputs=[sagemaker.processing.ProcessingOutput(output_name='summary_deploy_response',
source='/opt/ml/processing/output',
destination=f's3://{default_bucket}/{prefix}/nlp-pipeline/summaryResponse')],
job_arguments=[
"--initial-instance-count", deploy_model_instance_count,
"--endpoint-instance-type", deploy_model_instance_type,
"--endpoint-name", summarization_endpoint_name,
"--model-package-group-name", summary_model_package_group_name,
"--role", role,
"--region", region,
],
property_files=[summary_deploy_response],
code=deploy_model_script_uri,
depends_on=['HFSECPegasusRegisterModel'])
###Output
_____no_output_____
###Markdown
Create pipeline conditions to check if the endpoint deployments were successful We will define a condition that checks whether model deployment succeeded, based on the property files generated by the deployment steps of both the FinBert and Pegasus models. If both conditions evaluate to `True`, we run our subsequent inferences for summarization and sentiment analysis.
###Code
from sagemaker.workflow.conditions import ConditionEquals
from sagemaker.workflow.condition_step import ConditionStep
from sagemaker.workflow.functions import JsonGet
summarize_script_uri = f's3://{default_bucket}/{prefix}/code/summarize.py'
sentiment_condition_eq = ConditionEquals(
left=JsonGet( #the left value of the evaluation expression
step_name="HFSECFinBertDeployModel", #the step from which the property file will be grabbed
property_file=sentiment_deploy_response, #the property file instance that was created earlier in Step 4
json_path="model_created" #the JSON path of the property within the property file success.json
),
    right="Y" #the right value of the evaluation expression
)
summary_condition_eq = ConditionEquals(
left=JsonGet( #the left value of the evaluation expression
step_name="HFSECPegasusDeployModel", #the step from which the property file will be grabbed
property_file=summary_deploy_response, #the property file instance that was created earlier in Step 4
json_path="model_created" #the JSON path of the property within the property file success.json
),
    right="Y" #the right value of the evaluation expression
)
summarize_processor = ScriptProcessor(command=['python3'],
image_uri=CONTAINER_IMAGE_URI,
role=role,
instance_count=processing_instance_count,
instance_type=processing_instance_type)
# summarize_step_2 is defined here because the deployment condition step below
# references it; Step 5 re-creates the same step, unchanged, alongside summarize_step_1.
summarize_step_2 = ProcessingStep(
name='HFSECPegasusSummarizer_2',
processor=summarize_processor,
inputs=[sagemaker.processing.ProcessingInput(input_name='summary_data',
source=f'{inference_input_data}/10k10q',
destination='/opt/ml/processing/input')],
outputs=[sagemaker.processing.ProcessingOutput(output_name='summarized_data',
source='/opt/ml/processing/output',
destination=f'{inference_input_data}/10k10q/summary')],
job_arguments=["--ticker-cik", inference_ticker_cik,
"--region", region,
"--endpoint-name", summarization_endpoint_name],
code=summarize_script_uri)
deploy_condition_step = ConditionStep(
name="HFSECFinBertDeployConditionCheck",
conditions=[sentiment_condition_eq,summary_condition_eq], #the equal to conditions defined above
if_steps=[summarize_step_2], #if the condition evaluates to true then run the summarization step
else_steps=[], #there are no else steps so we will keep it empty
depends_on=['HFSECFinBertDeployModel','HFSECPegasusDeployModel'] #dependencies on both Finbert and Pegasus Deployment steps
)
###Output
_____no_output_____
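###Markdown
For reference, the `JsonGet` lookups above assume each deployment script writes a small JSON property file named `success.json` to its output directory. Based on the `json_path` and comparison value used above, a successful run would end with something like the following inside `model_deploy_v2.py` (a sketch; the actual logic lives in that script):
###Code
# Sketch of the final step of a successful deployment inside model_deploy_v2.py:
# import json
# with open('/opt/ml/processing/output/success.json', 'w') as f:
#     json.dump({"model_created": "Y"}, f)  # read back by JsonGet via json_path="model_created"
###Output
_____no_output_____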
###Markdown
Step 5: Summarize SEC report step This step makes use of the Pegasus summarizer model endpoint to summarize the MDNA text from the SEC report. Because the MDNA text is usually long, we derive a short summary of the overall text so we can determine the overall sentiment.
###Code
summarize_processor = ScriptProcessor(command=['python3'],
image_uri=CONTAINER_IMAGE_URI,
role=role,
instance_count=processing_instance_count,
instance_type=processing_instance_type)
s3_client.upload_file(Filename='./scripts/summarize.py', Bucket=default_bucket, Key=f'{prefix}/code/summarize.py')
summarize_step_1 = ProcessingStep(
name='HFSECPegasusSummarizer_1',
processor=summarize_processor,
inputs=[sagemaker.processing.ProcessingInput(input_name='summary_data',
source=f'{inference_input_data}/10k10q',
destination='/opt/ml/processing/input')],
outputs=[sagemaker.processing.ProcessingOutput(output_name='summarized_data',
source='/opt/ml/processing/output',
destination=f'{inference_input_data}/10k10q/summary')],
job_arguments=["--ticker-cik", inference_ticker_cik,
"--region", region,
"--endpoint-name", summarization_endpoint_name],
code=summarize_script_uri)
# Note: this re-creates summarize_step_2 exactly as defined earlier for the
# deployment condition branch, so Step 5 shows both summarizer steps together.
summarize_step_2 = ProcessingStep(
name='HFSECPegasusSummarizer_2',
processor=summarize_processor,
inputs=[sagemaker.processing.ProcessingInput(input_name='summary_data',
source=f'{inference_input_data}/10k10q',
destination='/opt/ml/processing/input')],
outputs=[sagemaker.processing.ProcessingOutput(output_name='summarized_data',
source='/opt/ml/processing/output',
destination=f'{inference_input_data}/10k10q/summary')],
job_arguments=["--ticker-cik", inference_ticker_cik,
"--region", region,
"--endpoint-name", summarization_endpoint_name],
code=summarize_script_uri)
###Output
_____no_output_____
###Markdown
Step 6: Sentiment inference step - SEC summary and news articles This step uses the MDNA summary (produced by the previous step) and the news articles to determine the sentiment around the company's financials and what the market trends indicate. This helps us understand the company's overall financial outlook and current position without leaning solely on the company's forward-looking statements, bringing objective market opinions into the picture.
###Code
sentiment_processor = ScriptProcessor(command=['python3'],
image_uri=CONTAINER_IMAGE_URI,
role=role,
instance_count=processing_instance_count,
instance_type=processing_instance_type)
sentiment_script_uri = f's3://{default_bucket}/{prefix}/code/sentiment.py'
s3_client.upload_file(Filename='./scripts/sentiment.py', Bucket=default_bucket, Key=f'{prefix}/code/sentiment.py')
sentiment_step_1 = ProcessingStep(
name='HFSECFinBertSentiment_1',
    processor=sentiment_processor,
inputs=[sagemaker.processing.ProcessingInput(input_name='sec_summary',
source=f'{inference_input_data}/10k10q/summary',
destination='/opt/ml/processing/input/10k10q'),
sagemaker.processing.ProcessingInput(input_name='articles',
source=f'{inference_input_data}/articles',
destination='/opt/ml/processing/input/articles')],
outputs=[sagemaker.processing.ProcessingOutput(output_name='sentiment_data',
source='/opt/ml/processing/output',
destination=f'{inference_input_data}/sentiment')],
job_arguments=["--ticker-cik", inference_ticker_cik,
"--region", region,
"--endpoint-name", sentiment_endpoint_name],
code=sentiment_script_uri,
depends_on=["HFSECPegasusSummarizer_1"])
sentiment_step_2 = ProcessingStep(
name='HFSECFinBertSentiment_2',
    processor=sentiment_processor,
inputs=[sagemaker.processing.ProcessingInput(input_name='sec_summary',
source=f'{inference_input_data}/10k10q/summary',
destination='/opt/ml/processing/input/10k10q'),
sagemaker.processing.ProcessingInput(input_name='articles',
source=f'{inference_input_data}/articles',
destination='/opt/ml/processing/input/articles')],
outputs=[sagemaker.processing.ProcessingOutput(output_name='sentiment_data',
source='/opt/ml/processing/output',
destination=f'{inference_input_data}/sentiment')],
job_arguments=["--ticker-cik", inference_ticker_cik,
"--region", region,
"--endpoint-name", sentiment_endpoint_name],
code=sentiment_script_uri,
depends_on=["HFSECPegasusSummarizer_2"])
###Output
_____no_output_____
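###Markdown
Outside the pipeline, the two endpoints can also be queried directly, which is handy for debugging. A minimal sketch using the SageMaker SDK's `HuggingFacePredictor` with the standard Hugging Face inference payload (assumes the endpoints were already created by a previous pipeline execution; the input strings are just examples):
###Code
from sagemaker.huggingface import HuggingFacePredictor

# Uncomment once the endpoints exist.
# sentiment_predictor = HuggingFacePredictor(endpoint_name=sentiment_endpoint_name)
# print(sentiment_predictor.predict({"inputs": "Revenue grew 20% year over year."}))
# summary_predictor = HuggingFacePredictor(endpoint_name=summarization_endpoint_name)
# print(summary_predictor.predict({"inputs": "Some long MDNA text to summarize ..."}))
###Output
_____no_output_____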
###Markdown
Condition Step As explained earlier, this is a top-level condition step. Based on the value of the pipeline parameter `model_register_deploy`, it determines whether we register and deploy a new version of the models and then run inference, or simply run inference using the existing endpoints.
###Code
from sagemaker.workflow.conditions import ConditionEquals
from sagemaker.workflow.condition_step import ConditionStep
condition_eq = ConditionEquals(
left=model_register_deploy,
right="Y"
)
#Define the condition step
condition_step = ConditionStep(
name="HFSECFinBertConditionCheck",
conditions=[condition_eq], #the parameter is Y
if_steps=[
create_sentiment_model_step,
register_sentiment_model_step,
sentiment_deploy_step,
create_summary_model_step,
register_summary_model_step,
summary_deploy_step
], # if the condition evaluates to true then create model, register, and deploy
else_steps=[summarize_step_1],
depends_on=['HFSECFinBertCreateDataset']
)
###Output
_____no_output_____
###Markdown
Combine Pipeline steps and run
###Code
pipeline_name = 'FinbertSECDeploymentPipeline'
pipeline = Pipeline(
name=pipeline_name,
parameters=[
processing_instance_type,
processing_instance_count,
model_register_deploy,
inference_ticker_cik,
inference_input_data],
steps=[
create_dataset_step,
condition_step,
deploy_condition_step,
sentiment_step_1,
sentiment_step_2
],
)
pipeline.upsert(role_arn=role)
%%time
start_response = pipeline.start()
start_response.wait(delay=60, max_attempts=200)
start_response.describe()
###Output
_____no_output_____
###Markdown
The following image shows a successful execution of the NLP end-to-end Pipeline. --- View Evaluation Results Once the pipeline execution completes, we can download the evaluation data from S3 and view it.
###Code
s3_client.download_file(default_bucket, f'{prefix}/nlp-pipeline/inf-data/sentiment/{ticker}_sentiment_result.csv', f'./data/{ticker}_sentiment_result.csv')
sentiment_df = pd.read_csv(f'./data/{ticker}_sentiment_result.csv')
sentiment_df
###Output
_____no_output_____
###Markdown
--- Clean up Delete the SageMaker Pipeline and the SageMaker Endpoints created by the pipeline.
###Code
def clean_up_resources():
pipeline.delete()
sagemaker_boto_client.delete_endpoint(EndpointName=sentiment_endpoint_name)
sagemaker_boto_client.delete_endpoint(EndpointName=summarization_endpoint_name)
###Output
_____no_output_____ |
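###Markdown
The function above only defines the clean-up; invoke it once you are completely done with the pipeline and its endpoints:
###Code
# Uncomment and run when you no longer need the pipeline or the endpoints.
# clean_up_resources()
###Output
_____no_output_____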
VizuGraphe/Algos_Graphes_en_NSI/algos_graphes_en_nsi.ipynb | ###Markdown
This notebook requires the [graphviz](https://graphviz.org/download/) software as well as the graphviz module for Python (`pip install graphviz`). Installing graphviz on Windows (method 1) - Go to the dedicated Graphviz page: [Downloads](https://graphviz.org/download/). - Normally the link to the stable-version installer (sometimes broken during repository updates) leads you to this page: - (https://www2.graphviz.org/Packages/stable/windows/). You will then have to follow the links to reach - (https://www2.graphviz.org/Packages/stable/windows/10/cmake/Release/) - During installation, make sure to request that the Windows PATH be updated. - Once the software is installed, you must install the module that lets Python communicate with the Graphviz software: - `pip install graphviz` Installing graphviz on Windows (method 2) - If the Graphviz [Downloads](https://graphviz.org/download/) page leads to the GitHub repository (this sometimes happens when Graphviz updates its download servers), prefer the .msi installer of version 2.38, which is still available here: **[MSI installer for Windows](https://graphviz.gitlab.io/_pages/Download/Download_windows.html)** - Once the software is installed, add graphviz to the Windows Path manually (so that Python can find it), since 2.38 does not offer to do so during installation: - Settings > System Properties > Environment Variables > System variables > Path > Edit > New - copy the path to the graphviz installation (probably "C:\Program Files (x86)\Graphviz2.38\bin") - Then simply install the module that lets Python communicate with the Graphviz software: - `pip install graphviz`
###Code
from random import random
from xile import Pile, File
from generateurs import maillage, soleil, etoiles_reliees
from vizu_graphe import VizuGraphe #requires graphviz (pip install graphviz)
###Output
_____no_output_____
###Markdown
Graph traversals & shortest paths and cycle detection The technical details are given below in the code. The key takeaway: while the recursive traversal algorithm and iterative traversal algorithm 1 adapt easily to cycle detection, the same is not true of iterative traversal 2. In summary, and with the implementation caveats scattered through this notebook: ``` breadth depth cycles shortest path recursive N Y Y N iterative 1 Y Y Y (B/D) N iterative 2 Y Y N Y (B)``` **Remark 1:** Here we are detecting cycles on **undirected** graphs. **Remark 2:** Cycle detection on a directed graph cannot be based on depth-first or breadth-first traversal. Other algorithms are required (for example: http://perso.ens-lyon.fr/eric.thierry/Graphes2007/adrien-panhaleux.pdf)
###Code
##############################
# explorer = "explore the successors"
# visiter = "do whatever needs doing with the vertex" (here: record its visit date)
# marquer = "prevent the vertex from being added to the xile again later"
# here we mark and visit at the same time
######################### recursive algorithm ############################
# For the recursive version, the variant where we test whether the vertex is
# marked at the top of the function (before #*) rather than before the
# recursive call (#**) is restrictive for cycle detection.
#
# Indeed, for cycles, when a successor is already marked at #**, we need to know
# whether it is the spurious cycle "A -> B -> A". So (for cycle detection)
# we will use a dfs function modified as follows: dfs(sommet, predecesseur)
#
# By putting the test at #**, we have access to both
# the predecessor and the successor --> easy to test for "A -> B -> A"
#
# With the variant testing at #*, we would not have access to predecessor and
# successor together, which would force us to use the visit to store the
# predecessor's predecessor; that would be possible, but less elegant
def parcours_recursif(graphe, depart):
'''
    - graphe: adjacency list or adjacency matrix
    - depart: starting vertex
    The visit consists of recording the visit date of the vertex.
    Returns a dictionary {vertex: visit date, for every vertex reached}
'''
dates = { 'en_cours':0, 'historique':dict() }
def visiter(sommet):
dates['historique'][sommet] = dates['en_cours']
dates['en_cours'] = dates['en_cours'] + 1
def dfs(sommet):
dejavu[sommet] = True #*
visiter(sommet)
for successeur in graphe[sommet]:
if successeur not in dejavu.keys(): #**
dfs(successeur)
dejavu = dict()
dfs(depart)
return dates['historique']
####################### iterative algorithms ############################
# There are two versions.
# 1:
# - a vertex has just been discovered from one (or more) predecessor(s)
# - if not marked, we add it to the xile (possibly several times, from different predecessors)
# - later we extract it,
# - we mark/visit it (if not already marked)
# - we explore its successors
#
# 2:
# - a vertex has just been discovered from a predecessor
# - if not marked, we mark/visit it
# - if not marked, we add it to the xile (only once, therefore)
# - later we extract it
# - we explore its successors
#
# In 2, a vertex can be put in the xile only once,
# since it is marked before being inserted.
#
# In 1 this is no longer true: a vertex can be put in the xile several times.
# This happens if:
# - a first predecessor put S in the xile
# - the traversal continued without extracting S
# - a second predecessor put S back in the xile
# - (since S has not been extracted, we cannot be in the U-turn case)
#
# 1 and 2 give the same visit dates in breadth-first,
# but in depth-first, 2 sweeps wider, that is,
# less deep.
def parcours_1(graphe, depart, larg_ou_prof):
'''
    - graphe: adjacency list or adjacency matrix
    - depart: starting vertex
    - larg_ou_prof: 'prof' for depth-first, 'larg' for breadth-first.
    The visit consists of recording the visit date of the vertex.
    Returns a dictionary {vertex: visit date, for every vertex reached}
'''
dates = { 'en_cours':0, 'historique':dict() }
def visiter(sommet):
dates['historique'][sommet] = dates['en_cours']
dates['en_cours'] = dates['en_cours'] + 1
a_explorer = Pile() if larg_ou_prof == 'prof' else File()
a_explorer.ajouter(depart) #<sss
deja_vu = dict()
while not a_explorer.est_vide():
sommet = a_explorer.extraire()
        if sommet not in deja_vu.keys():          #<---------- # not redundant even with the if below:
            deja_vu[sommet] = True                             # a vertex can be added to the xile
            visiter(sommet)                                    # several times before being extracted
            for successeur in graphe[sommet]:
                if successeur not in deja_vu.keys():  #<-------- # necessary if we want to extend
                    a_explorer.ajouter(successeur)               # this to cycle detection
return dates['historique']
def parcours_2(graphe, depart, larg_ou_prof):
'''
    - graphe: adjacency list or adjacency matrix
    - depart: starting vertex
    - larg_ou_prof: 'prof' for depth-first, 'larg' for breadth-first.
    The visit consists of recording the visit date of the vertex.
    Returns a dictionary {vertex: visit date, for every vertex reached}
'''
dates = { 'en_cours':0, 'historique':dict() }
def visiter(sommet):
dates['historique'][sommet] = dates['en_cours']
dates['en_cours'] = dates['en_cours'] + 1
a_explorer = Pile() if larg_ou_prof == 'prof' else File()
deja_vu = dict()
deja_vu[depart] = True #<sss
visiter(depart) #<sss
a_explorer.ajouter(depart)
while not a_explorer.est_vide():
sommet = a_explorer.extraire()
for successeur in graphe[sommet]:
if successeur not in deja_vu.keys():
deja_vu[successeur] = True
visiter(successeur)
a_explorer.ajouter(successeur)
return dates['historique']
###Output
_____no_output_____
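###Markdown
For reference, the traversals above only rely on three operations of the `xile` module's `Pile` (stack) and `File` (queue) classes: `ajouter`, `extraire` and `est_vide`. A minimal sketch of equivalent classes, assuming that interface (the real implementations ship with the notebook's `xile` module):
###Code
from collections import deque

class PileSketch:
    """Minimal LIFO stack with the xile interface (sketch, not the shipped class)."""
    def __init__(self):
        self._items = []
    def ajouter(self, x):            # push
        self._items.append(x)
    def extraire(self):              # pop the most recently added item
        return self._items.pop()
    def est_vide(self):
        return len(self._items) == 0

class FileSketch:
    """Minimal FIFO queue with the xile interface (sketch, not the shipped class)."""
    def __init__(self):
        self._items = deque()
    def ajouter(self, x):            # enqueue
        self._items.append(x)
    def extraire(self):              # dequeue the oldest item
        return self._items.popleft()
    def est_vide(self):
        return len(self._items) == 0
###Output
_____no_output_____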
###Markdown
Once we have the visit dates from the depth-first traversal, we can easily add this information to the drawn graph thanks to the `etiquettes_secondaires` attribute. We can then "convert" this dictionary of traversal dates into a dictionary of `(H, S, V)` colors, with `H`, `S` and `V` of type `float` between 0 and 1, and ask for the graph to be drawn with these colors.
###Code
from vizu_graphe import VizuGraphe
ma_liste_adjacence = maillage(11, 11, 0.65)
dates = parcours_1(ma_liste_adjacence, 'CC', 'larg')
date_max = max(dates.values())
dates_en_couleur = dict()
for key, val in dates.items():
dates_en_couleur[key] = (val/date_max, 0.6, 1)
mon_vizualisateur = VizuGraphe('liste', ma_liste_adjacence,
etiquettes_secondaires = dates,
couleurs = dates_en_couleur)
###Output
_____no_output_____
###Markdown
For a star graph:
###Code
ma_liste_adjacence = etoiles_reliees('M')
dates = parcours_1(ma_liste_adjacence, 0, 'larg')
date_max = max(dates.values())
dates_en_couleur = dict()
for key, val in dates.items():
dates_en_couleur[key] = (val/date_max, 0.6, 1)
mon_vizualisateur = VizuGraphe('liste', ma_liste_adjacence,
etiquettes_secondaires = dates,
couleurs = dates_en_couleur,
moteur = 'circo')
###Output
_____no_output_____
###Markdown
For a sun graph:
###Code
ma_liste_adjacence = soleil(10, 5)
dates = parcours_1(ma_liste_adjacence, 0, 'prof')
date_max = max(dates.values())
dates_en_couleur = dict()
for key, val in dates.items():
dates_en_couleur[key] = (val/date_max, 0.6, 1)
mon_vizualisateur = VizuGraphe('liste', ma_liste_adjacence,
etiquettes_secondaires = dates,
couleurs = dates_en_couleur)
###Output
_____no_output_____
###Markdown
Applications Shortest paths We build on the breadth-first traversal. The idea is to modify the visit so that, instead of a date computed as: - date of the last visited vertex + 1, we compute: - date of the predecessor in the traversal + 1. One must realize that the second-to-last visited vertex is in general not the predecessor (during the traversal) of the vertex currently being visited (because the xile acts as a buffer). A quick look at the algorithms shows that traversal 2 is better suited than traversal 1 here, since when the mark/visit call happens we have access to both the vertex and its predecessor: by passing both to the visit function, we can get there with a marginal change to the `dates` dictionary, which we rename `distances` for the occasion. Conversely, traversal 1 does not give this access during the visit, since in that algorithm, once again, what is written as `successeur` at the end of the loop is NOT the `sommet` at the start of the next loop iteration (again because of the xile's buffering role).
###Code
def parcours_2_bis(graphe, depart, larg_ou_prof):
'''
    - graphe: adjacency list or adjacency matrix
    - depart: starting vertex
    - larg_ou_prof: 'prof' for depth-first, 'larg' for breadth-first.
    The visit consists of recording the distance from the start.
    Returns a dictionary {vertex: distance from the start, for every vertex reached}
'''
distances = dict()
def visiter(successeur, sommet): #<sss
distances[successeur] = distances[sommet] + 1
a_explorer = Pile() if larg_ou_prof == 'prof' else File()
deja_vu = dict()
deja_vu[depart] = True
distances[depart] = 0 #<sss
a_explorer.ajouter(depart)
while not a_explorer.est_vide():
sommet = a_explorer.extraire()
for successeur in graphe[sommet]:
if successeur not in deja_vu.keys():
deja_vu[successeur] = True
visiter(successeur, sommet) #<sss
a_explorer.ajouter(successeur)
return distances
ma_liste_adjacence = maillage(11, 11, 0.55)    # use 'CC', for example, as the start vertex
#ma_liste_adjacence = soleil(10, 5)            # use 0, for example, as the start vertex
#ma_liste_adjacence = etoiles_reliees('M')     # use 0, for example, as the start vertex
distances = parcours_2_bis(ma_liste_adjacence, 'CC', 'larg')
distance_max = max(distances.values())
distances_en_couleur = dict()
for key, val in distances.items():
distances_en_couleur[key] = (val/distance_max, 0.6, 1)
mon_vizualisateur = VizuGraphe('liste', ma_liste_adjacence,
etiquettes_secondaires = distances,
couleurs = distances_en_couleur)
###Output
_____no_output_____
###Markdown
Shortest path from a vertex back to the start By modifying the code above very slightly, we can manage this. During the visit, we can directly store the predecessor instead of the distance. Then, by "walking up" the predecessors, we can reconstruct the path that led from the start to a given vertex. We therefore have two modifications to make: - slightly modify the visit code so that it yields the dictionary of predecessors - add a post-processing function able to reconstruct a path `depart --x--x--x--x--> sommet` from the dictionary of `sommet:predecesseur` pairs (or `successeur:sommet`, depending on the point of view) obtained during the traversal. **Warning:** the key really is the successor, which has a unique predecessor during the traversal (whereas uniqueness is lost in the other direction). **Question:** if a vertex is mutable (e.g. a game board), do we get side effects?
###Code
def parcours_2_ter(graphe, depart, larg_ou_prof):
'''
    - graphe: adjacency list or adjacency matrix
    - depart: starting vertex
    - larg_ou_prof: 'prof' for depth-first, 'larg' for breadth-first.
    The visit consists of recording the predecessor of the vertex in the breadth-first traversal.
    Returns a dictionary {vertex: predecessor, for every vertex reached}
'''
parents = dict()
def visiter(successeur, sommet): #<sss
parents[successeur] = sommet
a_explorer = Pile() if larg_ou_prof == 'prof' else File()
deja_vu = dict()
deja_vu[depart] = True
parents[depart] = None #<sss
a_explorer.ajouter(depart)
while not a_explorer.est_vide():
sommet = a_explorer.extraire()
for successeur in graphe[sommet]:
if successeur not in deja_vu.keys():
deja_vu[successeur] = True
visiter(successeur, sommet) #<sss
a_explorer.ajouter(successeur)
return parents
# we return the path as a dictionary with the distances from the start
# so that we can display it afterwards
# we could do something lighter, but it is a good opportunity to use a stack
def donner_chemin(depart, sommet, parents):
if sommet not in parents.keys():
return dict()
chemin_inverse = Pile()
chemin_inverse.ajouter(sommet)
longueur = 0
while parents[sommet] != None:
sommet = parents[sommet]
chemin_inverse.ajouter(sommet)
longueur = longueur + 1
longueur_chemin = longueur
chemin = dict()
while longueur>=0:
sommet = chemin_inverse.extraire()
chemin[sommet] = longueur_chemin - longueur
longueur = longueur - 1
return chemin
ma_liste_adjacence = maillage(11, 11, 0.55)    # use 'CC', for example, as the start vertex
#ma_liste_adjacence = soleil(10, 5)            # use 0, for example, as the start vertex
#ma_liste_adjacence = etoiles_reliees('M')     # use 0, for example, as the start vertex
parents = parcours_2_ter(ma_liste_adjacence, 'CC', 'larg')
chemin = donner_chemin('CC', 'JJ', parents)
chemin_en_couleur = dict()
for key in chemin.keys():
chemin_en_couleur[key] = (0.7, 0.3, 1)
mon_vizualisateur = VizuGraphe('liste', ma_liste_adjacence,
etiquettes_secondaires = chemin,
couleurs = chemin_en_couleur)
###Output
_____no_output_____
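###Markdown
As the comment in `donner_chemin` points out, a lighter reconstruction is possible without the stack: walk back through the `parents` dictionary, then reverse. A sketch under the same assumptions (`parents` maps each vertex to its predecessor, with `None` marking the start; it returns the path as a plain list rather than a distance dictionary):
###Code
def donner_chemin_leger(depart, sommet, parents):
    # depart is kept only for signature parity with donner_chemin
    if sommet not in parents:
        return []
    chemin = [sommet]
    while parents[chemin[-1]] is not None:   # walk back up to the start vertex
        chemin.append(parents[chemin[-1]])
    chemin.reverse()                         # now runs from depart to sommet
    return chemin

# Example (same graph as above):
# donner_chemin_leger('CC', 'JJ', parents)
###Output
_____no_output_____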
###Markdown
Cycle detection (undirected graphs only - limited to one connected component) The fundamental idea is that, if done carefully (that is, managing to simply rule out the "A --> B --> A" case), exploring an already-marked vertex means we have reached a vertex via two different paths starting from the departure vertex. There is therefore necessarily a cycle. Looking at the details below, we see that, contrary to the shortest-path case, this time the recursive and iterative-1 algorithms are the best suited (while iterative 2 would require inelegant adaptations).
###Code
#############################################################################
## Recursive cycle detection
#############################################################################
def parcours_recursif_bis(graphe, depart):
'''
    - graphe: adjacency list or adjacency matrix
    - depart: starting vertex
    The visit consists of recording the visit date of the vertex.
    Returns:
    - a boolean indicating whether the graph has a cycle
    - a dictionary {vertex: visit date, for every vertex traversed}
'''
dates = { 'en_cours':0, 'historique':dict() }
def visiter(sommet):
dates['historique'][sommet] = dates['en_cours']
dates['en_cours'] = dates['en_cours'] + 1
def dfs(sommet, predecesseur):
dejavu[sommet] = True
visiter(sommet)
for successeur in graphe[sommet]:
            if successeur in dejavu.keys():
                if predecesseur != successeur:  #<sss  # not a U-turn --> we have a cycle
                    return True
            else:
                if dfs(successeur, sommet):     #<sss  # cycle found higher up the call stack
                    return True                        # --> we bail out
return False
dejavu = dict()
return dfs(depart, None), dates['historique']
##############################################################################
## Cycle detection on traversal 1
##############################################################################
#
# The "sommetA --> sommetB --> sommetA" cycle - possible in the recursive version -
# is ruled out here, since when a successor (B) is put in the xile, its parent (A)
# in the traversal is already marked and therefore cannot be added again as a
# successor of a successor of itself.
#
#------------------------------------
# returns True => a cycle exists
#------------------------------------
# We return True if a vertex comes out of the xile and has already been marked.
# This happens when a vertex passes through the xile twice.
#
# That is only possible if the xile contains the same vertex twice *at a given
# moment*. Indeed, once extracted/marked a first time, a vertex can no longer
# be reintroduced into the xile (hence the importance of the if at the bottom
# of the algorithm). So the second copy was reintroduced into the xile BEFORE
# the first copy came out.
#
# This situation occurs only if:
# - a first predecessor put S in the xile
# - the traversal continued without extracting S
# - a second predecessor put S back in the xile
# - (since S has not been extracted, we cannot be in the U-turn case)
# So only if S has at least two distinct predecessors,
# which means there is a cycle (since the graph is undirected)
#
# Remark: without the extra if, we could visit a vertex S, visit one of its
# successors, and revisit S (the "A -> B -> A" case) => would trigger a spurious
# cycle detection
#--------------------------------------
# A cycle exists => returns True
#--------------------------------------
# Since all the vertices of the cycle will eventually be visited
# (unless the function returns True before that),
# let Sb be the last vertex of the cycle to be marked.
# *Just before* Sb comes out of the xile to be marked, the two vertices adjacent
# to Sb in the cycle (call them Sa and Sc) have already been marked
# (since Sb is, by definition, the last one to be marked).
# Sa and Sc have therefore each pushed a copy of Sb into the xile.
# Just before coming out of the xile to be marked, Sb is thus present in the
# xile in two copies
# ---> returns True
def parcours_1_bis(graphe, depart, larg_ou_prof):
'''
    - graphe: adjacency list or adjacency matrix
    - depart: starting vertex
    - larg_ou_prof: 'prof' for depth-first, 'larg' for breadth-first.
    The visit consists of recording the visit date of the vertex.
    Returns:
    - a boolean indicating whether the graph has a cycle
    - a dictionary {vertex: visit date, for every vertex traversed}
'''
dates = { 'en_cours':0, 'historique':dict() }
def visiter(sommet):
dates['historique'][sommet] = dates['en_cours']
dates['en_cours'] = dates['en_cours'] + 1
a_explorer = Pile() if larg_ou_prof == 'prof' else File()
a_explorer.ajouter(depart)
deja_vu = dict()
while not a_explorer.est_vide():
sommet = a_explorer.extraire()
if sommet not in deja_vu.keys():
deja_vu[sommet] = True
visiter(sommet)
for successeur in graphe[sommet]:
if successeur not in deja_vu.keys():
a_explorer.ajouter(successeur)
else:
return True, dates['historique'] #<sss
return False, dates['historique'] #<sss
################################
# It is instructive to look at what happens with traversal 1 in this configuration,
# so as to compare with method 2 (which will be caught out by it)
#
#       *--*--*
#      /
# >>--*--x
#      \
#       O--*--*
#
# if we traverse from left to right,
# going upward first, then when we reach
# O there will be no "x -> O -> x" cycle detection
######################################################################
## Cycle detection on traversal 2: does NOT work,
## unless the visit is implemented with memorization of the predecessor
######################################################################
#
# The xile breaks the predecessor --> next link,
# a filiation link we may need in order to detect "A -> B -> A" cycles.
# In the recursive version this link is easy to get, since it is the
# caller ---> callee link.
# So in the recursive version, detection of the "sommetA --> sommetB --> sommetA"
# cycle is easily ruled out.
#
# Detection of the "A -> B -> A" cycle is also excluded in method 1, since
# if a successor (B) is put in the xile, its parent (A) in the traversal
# is already marked and therefore cannot be added to the xile by (B).
# (If (A) is in the xile a second time, it is as the successor of some
# vertex (C). Hence there is a cycle.)
def parcours_2_quattro(graphe, depart, larg_ou_prof):
def visiter(sommet):
pass
a_explorer = Pile() if larg_ou_prof == 'prof' else File()
deja_vu = dict()
deja_vu[depart] = True #<sss
visiter(depart) #<sss
a_explorer.ajouter(depart)
while not a_explorer.est_vide():
sommet = a_explorer.extraire()
for successeur in graphe[sommet]:
if successeur not in deja_vu.keys():
deja_vu[successeur] = True
visiter(successeur)
a_explorer.ajouter(successeur)
else: #<-----------------<####
return True #
return False #
#
#
##########################################
# How do we know we are not in the "A -> B -> A" cycle case?
# keeping the last visited vertex in memory is not
# enough, since we sometimes jump backwards,
# so the last visited vertex is not the predecessor
# (in the traversal) of the vertex we are on
#       *--*--D
#      /
# >>--*--x
#      \
#       O--*--*
# on this example, if we traverse from left to right,
# going upward first, what will happen when we reach
# O? How do we avoid detecting the "x -> O -> x" cycle,
# given that when we reach O, it is D that was visited last (not x)?
# This difficulty should push us to favor traversal 1
# for cycle detection.
from vizu_graphe import VizuGraphe
ma_liste_adjacence = maillage(3, 3, 0.45)
existe_cycle, cycle = parcours_1_bis(ma_liste_adjacence, 'BC', 'larg')
print(existe_cycle)
mon_vizualisateur = VizuGraphe('liste', ma_liste_adjacence, etiquettes_secondaires = cycle)
from vizu_graphe import VizuGraphe
ma_liste_adjacence = maillage(5, 5, 0.45)
existe_cycle, cycle = parcours_recursif_bis(ma_liste_adjacence, 'BC')
cycle_en_couleur = dict()
for key in cycle.keys():
cycle_en_couleur[key] = (0.8, 0.6, 1)
print(existe_cycle)
mon_vizualisateur = VizuGraphe('liste', ma_liste_adjacence, etiquettes_secondaires = cycle,
couleurs = cycle_en_couleur)
###Output
_____no_output_____ |
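###Markdown
As a quick sanity check of the recursive detector, a small hand-built tree (hence acyclic, once each edge is listed in both directions) should come back without a cycle:
###Code
# A small undirected tree: no cycle should be reported.
arbre = {'A': ['B', 'C'],
         'B': ['A', 'D'],
         'C': ['A'],
         'D': ['B']}
existe_cycle, dates = parcours_recursif_bis(arbre, 'A')
print(existe_cycle)  # expected: False
###Output
_____no_output_____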
0_Aleutian_Enumeration_Example.ipynb | ###Markdown
This is to demonstrate how to use the `s1-enumerator` to get a full time series of GUNWs. We are basically going to take each month in the acceptable date range, increment by a month, and make sure the temporal window is large enough to ensure connectivity across data gaps.
###Code
%load_ext autoreload
%autoreload 2
from s1_enumerator import get_aoi_dataframe, distill_all_pairs, enumerate_ifgs, get_s1_coverage_tiles, enumerate_ifgs_from_stack, get_s1_stack_by_dataframe
import concurrent
from s1_enumerator import duplicate_gunw_found
from tqdm import tqdm
from shapely.geometry import Point
import datetime
import pandas as pd
import geopandas as gpd
import matplotlib.pyplot as plt
from dateutil.relativedelta import relativedelta
import networkx as nx
import boto3
df_aoi = gpd.read_file('aois/Aleutians_pathNumber124.geojson')
aoi = df_aoi.geometry.unary_union
aoi
df_aoi
###Output
_____no_output_____
###Markdown
Currently, there is a lot of data in each of the rows above. We really only need the AOI `geometry` and the `path_number`. Parameters This is what the operator is going to have to change; some comments are provided below.
###Code
today = datetime.datetime.now()
# Earliest year for reference frames
START_YEAR = 2016
# Latest year for reference frames
END_YEAR = today.year
# Adjust depending on seasonality
# For annual IFGs, select a single months of interest and you will get what you want.
MONTHS_OF_INTEREST = [6, 7, 8, 9, 10]
###Output
_____no_output_____
###Markdown
Below, we select the boundary for minimum reference dates.
###Code
min_reference_dates = [datetime.datetime(year, month, 1)
for year in range(START_YEAR, END_YEAR + 1)
for month in MONTHS_OF_INTEREST
if datetime.datetime(year, month, 1) < today]
min_reference_dates = list(reversed(min_reference_dates))
min_reference_dates[:3]
###Output
_____no_output_____
###Markdown
The maximum secondary dates for enumeration are going to be a month after the previous `min_reference_date`. What we are going to do is: *select 3 SLCs before and after the 1st of the month*. When we reach the earliest month of interest in a given year, we select secondaries occurring before the latest-occurring month of interest in the previous calendar year (at the end of that month). Here the number of neighbors (i.e. 3) is arbitrary and can be changed below. We found this parameter useful for making sure the graph was connected, so don't change it unless you are careful. In the setup above and below, we are thus picking three dates occurring before one item of `max_secondary_dates` and three dates after one item of `min_reference_dates`.
###Code
max_secondary_dates = [ref_date + relativedelta(months=1) for ref_date in min_reference_dates]
max_secondary_dates = max_secondary_dates[1:] + min_reference_dates[-1:]
max_secondary_dates[:3]
for ref, sec in zip(min_reference_dates[:6], max_secondary_dates):
print('############################')
print(ref.date(), ' to ', sec.date())
print('or: days backward', (ref - sec).days)
path_numbers = df_aoi.path_number.unique().tolist()
path_numbers
###Output
_____no_output_____
###Markdown
Generate tiles used for coverage This function just does a geometric query for each tile, using a single pass starting from the given date.
###Code
df_coverage_tiles = get_s1_coverage_tiles(aoi,
# the date is used to get coverage tiles for extracting stack.
# Recent data has reliable coverage.
start_date=datetime.datetime(2021, 1, 1))
## Uncomment if you want to inspect the dataframe
# df_coverage_tiles.to_file('coverage_tiles.geojson', driver='GeoJSON', index=False)
fig, ax = plt.subplots()
df_coverage_tiles.plot(ax=ax, alpha=.5, color='green', label='Frames intersecting tile')
df_aoi.exterior.plot(color='black', ax=ax, label='AOI')
plt.legend()
###Output
_____no_output_____
###Markdown
For the Aleutians above, note there is:1. Above the coverage was 12 days + 1 after January 1 2021. It will used to generate an entire collection of frames below.2. the track we are interested doesn't "jiggle" above the AOI because of the restriction to 12 days + 1 (see below when we make the stack, for more frames "jiggling").3. the other tracks have some more "jiggle" or "seams" partially because of how the frames fell during this limited time period.For larger frames, piecemealing the queries ensures that 1000 product limit that ASF imposes will no prevent us from collecting all the data we need over the AOI. Generate a stackUsing all the tiles that are needed to cover the AOI we make a geometric query based on the frame. We now include only the path we are interested in.
###Code
df_stack = get_s1_stack_by_dataframe(df_coverage_tiles,
path_numbers=path_numbers)
f'We have {df_stack.shape[0]} frames in our stack'
fig, ax = plt.subplots()
df_stack.plot(ax=ax, alpha=.5, color='green', label='Frames intersecting tile')
df_aoi.exterior.plot(color='black', ax=ax, label='AOI')
plt.legend()
###Output
_____no_output_____
###Markdown
Note that we now see the frames cover the entire AOI, as expected. Next, we filter the stack by month to ensure we only keep the SLCs we need.
###Code
df_stack_month = df_stack[df_stack.start_date.dt.month.isin(MONTHS_OF_INTEREST)]
df_stack_month.shape
ifg_pairs = []
for min_ref_date, max_sec_date in zip(tqdm(min_reference_dates), (max_secondary_dates)):
# Mostly this is 0 unless we get to our earliest month of interest in the calendar year
min_days_backward = (min_ref_date - max_sec_date).days
# because of s1 availability/gaps, we just make this huge so that we can look up slcs in the stack
    # occurring in previous years - the gaps can cause temporal jumps that you have to be mindful of.
    # That's why we use 3 neighbors.
temporal_window_days=364 * 3
temp = enumerate_ifgs_from_stack(df_stack_month,
aoi,
min_ref_date,
# options are 'tile' and 'path'
# 'path' processes multiple references simultaneously
enumeration_type='tile',
min_days_backward=min_days_backward,
num_neighbors_ref=3,
num_neighbors_sec=3,
temporal_window_days=temporal_window_days,
)
ifg_pairs += temp
f'The number of GUNWs (likely lots of duplicates) is {len(ifg_pairs)}'
###Output
_____no_output_____
###Markdown
Get Dataframe
###Code
df_pairs = distill_all_pairs(ifg_pairs)
df_pairs.head()
f"# of GUNWs: {df_pairs.shape[0]}"
###Output
_____no_output_____
###Markdown
Deduplication Pt. 1 A `GUNW` is uniquely determined by its reference and secondary IDs. We concatenate these sorted lists and generate a (lossy) hash to deduplicate products we may have introduced during the enumeration above.
###Code
import hashlib
import json
def get_gunw_hash_id(reference_ids: list, secondary_ids: list) -> str:
all_ids = json.dumps([' '.join(sorted(reference_ids)),
' '.join(sorted(secondary_ids))
]).encode('utf8')
hash_id = hashlib.md5(all_ids).hexdigest()
return hash_id
def hasher(row):
return get_gunw_hash_id(row['reference'], row['secondary'])
df_pairs['hash_id'] = df_pairs.apply(hasher, axis=1)
df_pairs.head()
f"# of duplicated entries: {df_pairs.duplicated(subset=['hash_id']).sum()}"
df_pairs = df_pairs.drop_duplicates(subset=['hash_id']).reset_index(drop=True)
f"# of UNIQUE GUNWs: {df_pairs.shape[0]}"
###Output
_____no_output_____
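###Markdown
Because the IDs are sorted before hashing, the hash is invariant to the ordering of the SLC lists. A quick check with made-up granule names (placeholders, not real SLC ids):
###Code
# The order of the input SLC ids does not matter: they are sorted before hashing.
h1 = get_gunw_hash_id(['SLC_B', 'SLC_A'], ['SLC_C'])
h2 = get_gunw_hash_id(['SLC_A', 'SLC_B'], ['SLC_C'])
assert h1 == h2
print(h1)
###Output
_____no_output_____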
###Markdown
Viewing GUNW pairs
###Code
# start index
M = 0
# number of pairs to view
N = 5
for J in range(M, M + N):
pair = ifg_pairs[J]
fig, axs = plt.subplots(1, 2, sharey=True, sharex=True)
df_ref_plot = pair['reference']
df_sec_plot = pair['secondary']
df_ref_plot.plot(column='start_date_str',
legend=True,
ax=axs[0], alpha=.15)
df_aoi.exterior.plot(ax=axs[0], alpha=.5, color='black')
axs[0].set_title('Reference')
df_sec_plot.plot(column='start_date_str',
legend=True,
ax=axs[1], alpha=.15)
df_aoi.exterior.plot(ax=axs[1], alpha=.5, color='black')
axs[0].set_title(f'Reference {J}')
axs[1].set_title('Secondary')
###Output
_____no_output_____
###Markdown
Update types for Graphical Analysis We want to do some basic visualization to check that we traverse time correctly. We do some simple, standard pandas manipulation.
###Code
df_pairs['reference_date'] = pd.to_datetime(df_pairs['reference_date'])
df_pairs['secondary_date'] = pd.to_datetime(df_pairs['secondary_date'])
df_pairs.head()
###Output
_____no_output_____
###Markdown
Visualize a Date Graph from Time Series We can put this into a networkx directed graph and use some simple network functions to check connectivity. We are going to use just dates for nodes, though you could use `(ref_date, hash_id)` for nodes and then inspect connected components. That is for another notebook.
###Code
list(zip(df_pairs.reference_date, df_pairs.secondary_date))[:15]
unique_dates = df_pairs.reference_date.tolist() + df_pairs.secondary_date.tolist()
unique_dates = sorted(list(set(unique_dates)))
unique_dates[:4]
date2node = {date: k for (k, date) in enumerate(unique_dates)}
node2date = {k: date for (date, k) in date2node.items()}
G = nx.DiGraph()
edges = [(date2node[ref_date], date2node[sec_date])
for (ref_date, sec_date) in zip(df_pairs.reference_date, df_pairs.secondary_date)]
G.add_edges_from(edges)
nx.draw(G)
###Output
_____no_output_____
###Markdown
This checks that there is a path through the graph connecting the last date back to the first one.
###Code
nx.has_path(G,
target=date2node[unique_dates[0]],
source=date2node[unique_dates[-1]])
###Output
_____no_output_____
###Markdown
The y-axis is created purely for display, so it doesn't really indicate anything except flow by month.
###Code
fig, ax = plt.subplots(figsize=(15, 5))
increment = [date.month + date.day for date in unique_dates]
# source: https://stackoverflow.com/a/27852570
scat = ax.scatter(unique_dates, increment)
position = scat.get_offsets().data
pos = {date2node[date]: position[k] for (k, date) in enumerate(unique_dates)}
nx.draw_networkx_edges(G, pos=pos, ax=ax)
ax.grid('on')
ax.tick_params(axis='x',
which='major',
labelbottom=True,
labelleft=True)
ymin, ymax = ax.get_ylim()
for y in range(2015, 2022):
label = 'Jun to Nov' if y == 2016 else None
ax.fill_between([datetime.datetime(y, 6, 1), datetime.datetime(y, 11, 1)],
ymin, ymax,
alpha=.5, color='green', zorder=0, label=label)
plt.legend()
###Output
_____no_output_____
###Markdown
Observe there is a gap in 2018 over our area of interest. This is where our 3 year "temporal_window_days" parameter in our enumeration was essential. Deduplication Pt. 2This is to ensure that previous processing hasn't generated any of the products we have just enumerated. Check CMRThis function checks the ASF DAAC for GUNWs with the same spatial extent and same date pairs as the ones created. At some point, we will be able to check the input SLC ids from CMR, but currently that is not possible.If you are processing a new AOI whose products have not been delivered, you can ignore this step. It is a bit time consuming as the queries are done product by product.
###Code
from s1_enumerator import duplicate_gunw_found
import concurrent
from tqdm import tqdm
n = df_pairs.shape[0]
with concurrent.futures.ThreadPoolExecutor(max_workers=15) as executor:
results = list(tqdm(executor.map(duplicate_gunw_found, df_pairs.to_dict('records')), total=n))
df_pairs['existing_gunw'] = [r != '' for r in results]
df_pairs['existing_gunw_id'] = results
total_existing_gunws = df_pairs['existing_gunw'].sum()
print('existing_gunws: ', total_existing_gunws)
print('Total pairs', df_pairs.shape[0])
df_pairs_filtered = df_pairs[~df_pairs['existing_gunw']].reset_index(drop=True)
# df_pairs_filtered.drop_duplicates(subset=['hash_id'], inplace=True)
print('after filtering, total pairs: ', df_pairs_filtered.shape[0])
###Output
after filtering, total pairs: 140
###Markdown
Check Hyp3 AccountWe are now going to:1. check products in the open s3 bucket2. check running/pending jobsNotes:1. To accomplish step 1, there is some verbose code below. Once we automate delivery, this step will be obsolete. However, until we have delivery, we have to make sure that there are no existing products. Additionally, if we are using a separate (non-operational) account, it would be good to use this.2. If we are debugging products and some of our previously generated products were made incorrectly, we will want to ignore this step.
###Code
import hyp3_sdk
# uses .netrc; add `prompt=True` to prompt for credentials;
hyp3_isce = hyp3_sdk.HyP3('https://hyp3-isce.asf.alaska.edu/')
pending_jobs = hyp3_isce.find_jobs(status_code='PENDING') + hyp3_isce.find_jobs(status_code='RUNNING')
all_jobs = hyp3_isce.find_jobs()
print(all_jobs)
###Output
416 HyP3 Jobs: 416 succeeded, 0 failed, 0 running, 0 pending.
###Markdown
1. Get existing products in s3 bucket
###Code
job_data = [j.to_dict() for j in all_jobs]
job_data[0]
###Output
_____no_output_____
###Markdown
Get bucket (there is only one)
###Code
job_data_s3 = list(filter(lambda job: 'files' in job.keys(), job_data))
len(job_data_s3)
bucket = job_data_s3[0]['files'][0]['s3']['bucket']
bucket
###Output
_____no_output_____
###Markdown
Get all keys
###Code
job_keys = [job['files'][0]['s3']['key'] for job in job_data_s3]
job_keys[0]
import boto3

s3 = boto3.resource('s3')
prod_bucket = s3.Bucket(bucket)
objects = list(prod_bucket.objects.all())
ncs = list(filter(lambda x: x.key.endswith('.nc'), objects))
ncs[:10]
###Output
_____no_output_____
###Markdown
We need to physically check that the products are still in the bucket (they could have been deleted!)
###Code
nc_keys = [nc_ob.key for nc_ob in ncs]
jobs_with_prods_in_s3 = [job for (k, job) in enumerate(job_data_s3) if job_keys[k] in nc_keys]
len(jobs_with_prods_in_s3)
slcs = [(job['job_parameters']['granules'],
job['job_parameters']['secondary_granules'])
for job in jobs_with_prods_in_s3]
slcs[:2]
hash_ids_of_prods_in_s3 = [get_gunw_hash_id(*slc) for slc in slcs]
hash_ids_of_prods_in_s3[0]
f"We are removing {df_pairs_filtered['hash_id'].isin(hash_ids_of_prods_in_s3).sum()} GUNWs for submission"
items = hash_ids_of_prods_in_s3
df_pairs_filtered = df_pairs_filtered[~df_pairs_filtered['hash_id'].isin(items)].reset_index(drop=True)
f"Current # of GUNWs: {df_pairs_filtered.shape[0]}"
###Output
_____no_output_____
###Markdown
2. Running or Pending Jobs
###Code
pending_job_data = [j.to_dict() for j in pending_jobs]
pending_slcs = [(job['job_parameters']['granules'],
job['job_parameters']['secondary_granules'])
for job in pending_job_data]
hash_ids_of_pending_jobs = [get_gunw_hash_id(*slc) for slc in pending_slcs]
hash_ids_of_pending_jobs[:4]
items = hash_ids_of_pending_jobs
f"We are removing {df_pairs_filtered['hash_id'].isin(items).sum()} GUNWs for submission"
items = hash_ids_of_pending_jobs
df_pairs_filtered = df_pairs_filtered[~df_pairs_filtered['hash_id'].isin(items)].reset_index(drop=True)
f"Current # of GUNWs: {df_pairs_filtered.shape[0]}"
###Output
_____no_output_____
###Markdown
Submit jobs to Hyp3
###Code
records_to_submit = df_pairs_filtered.to_dict('records')
records_to_submit[0]
import hyp3_sdk
# uses .netrc; add `prompt=True` to prompt for credentials;
hyp3_isce = hyp3_sdk.HyP3('https://hyp3-isce.asf.alaska.edu/')
###Output
_____no_output_____
###Markdown
The below puts the records in a format that we can submit to the Hyp3 API.**Note 1**: the list of records to submit is sliced (see the index below) to ensure we don't over-submit jobs for generating GUNWs. \**Note 2**: uncomment the code to *actually* submit the jobs.
###Code
job_dicts = [{'name': 'test-aleutian-95-new',
# NOTE: we are still using the `dev` branch. Change this to "INSAR_ISCE" to use the `main` branch.
'job_type': 'INSAR_ISCE_TEST',
'job_parameters': {'granules': r['reference'],
'secondary_granules': r['secondary']}}
# NOTE THERE IS AN INDEX - this is to submit only a subset of Jobs
for r in records_to_submit[:5]]
# UNCOMMENT TO SUBMIT
# submitted_jobs = hyp3_isce.submit_prepared_jobs(job_dicts)
jobs = hyp3_isce.find_jobs()
print(jobs)
###Output
416 HyP3 Jobs: 416 succeeded, 0 failed, 0 running, 0 pending.
###Markdown
Below, we show how to download files. The multi-threading example will download products in parallel much faster than `jobs.download_files()`.
###Code
# import concurrent.futures
# from tqdm import tqdm
# with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:
# results = list(tqdm(executor.map(lambda job: job.download_files(), jobs), total=len(jobs)))
###Output
_____no_output_____ |
Jud Taylor - reference_regression_classification_1.ipynb | ###Markdown
Lambda School Data Science*Unit 2, Sprint 1, Module 1*--- Regression 1- Begin with baselines for regression- Use scikit-learn to fit a linear regression- Explain the coefficients from a linear regression Brandon Rohrer wrote a good blog post, ["What questions can machine learning answer?"](https://brohrer.github.io/five_questions_data_science_answers.html)We'll focus on two of these questions in Unit 2. These are both types of "supervised learning."- "How Much / How Many?" (Regression)- "Is this A or B?" (Classification)This unit, you'll build supervised learning models with "tabular data" (data in tables, like spreadsheets). Including, but not limited to:- Predict New York City real estate prices <-- **Today, we'll start this!**- Predict which water pumps in Tanzania need repairs- Choose your own labeled, tabular dataset, train a predictive model, and publish a blog post or web app with visualizations to explain your model! SetupRun the code cell below. You can work locally (follow the [local setup instructions](https://lambdaschool.github.io/ds/unit2/local/)) or on Colab.Libraries:- ipywidgets- pandas- plotly- scikit-learn
###Code
import sys
# If you're on Colab:
if 'google.colab' in sys.modules:
DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Applied-Modeling/master/data/'
# If you're working locally:
else:
DATA_PATH = '../data/'
# Ignore this Numpy warning when using Plotly Express:
# FutureWarning: Method .ptp is deprecated and will be removed in a future version. Use numpy.ptp instead.
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning, module='numpy')
###Output
_____no_output_____
###Markdown
Begin with baselines for regression Overview Predict how much a NYC condo costs Regression models output continuous numbers, so we can use regression to answer questions like "How much?" or "How many?" Often, the question is "How much will this cost? How many dollars?" For example, here's a fun YouTube video, which we'll use as our scenario for this lesson:[Amateurs & Experts Guess How Much a NYC Condo With a Private Terrace Costs](https://www.youtube.com/watch?v=JQCctBOgH9I)> Real Estate Agent Leonard Steinberg just sold a pre-war condo in New York City's Tribeca neighborhood. We challenged three people - an apartment renter, an apartment owner and a real estate expert - to try to guess how much the apartment sold for. Leonard reveals more and more details to them as they refine their guesses. The condo from the video is **1,497 square feet**, built in 1852, and is in a desirable neighborhood. According to the real estate agent, _"Tribeca is known to be one of the most expensive ZIP codes in all of the United States of America."_How can we guess what this condo sold for? Let's look at 3 methods:1. Heuristics2. Descriptive Statistics3. Predictive Model Follow Along 1. HeuristicsHeuristics are "rules of thumb" that people use to make decisions and judgments. The video participants discussed their heuristics: **Participant 1**, Chinwe, is a real estate amateur. She rents her apartment in New York City. Her first guess was \$8 million, and her final guess was \$15 million.[She said](https://youtu.be/JQCctBOgH9I?t=465), _"People just go crazy for numbers like 1852. You say **'pre-war'** to anyone in New York City, they will literally sell a kidney. They will just give you their children."_ **Participant 3**, Pam, is an expert. She runs a real estate blog. Her first guess was \$1.55 million, and her final guess was \$2.2 million.[She explained](https://youtu.be/JQCctBOgH9I?t=280) her first guess: _"I went with a number that I think is kind of the going rate in the location, and that's **a thousand bucks a square foot.**"_ **Participant 2**, Mubeen, is between the others in his expertise level. He owns his apartment in New York City. His first guess was \$1.7 million, and his final guess was also \$2.2 million. 2. Descriptive Statistics We can use data to try to do better than these heuristics. How much have other Tribeca condos sold for?Let's answer this question with a relevant dataset, containing most of the single residential unit, elevator apartment condos sold in Tribeca, from January through April 2019.We can get descriptive statistics for the dataset's `SALE_PRICE` column.How many condo sales are in this dataset? What was the average sale price? The median? Minimum? Maximum?
###Code
import pandas as pd
df = pd.read_csv(DATA_PATH+'condos/tribeca.csv')
pd.options.display.float_format = '{:,.0f}'.format
df['SALE_PRICE'].describe()
###Output
_____no_output_____
###Markdown
On average, condos in Tribeca have sold for \$3.9 million. So that could be a reasonable first guess.In fact, here's the interesting thing: **we could use this one number as a "prediction", if we didn't have any data except for sales price...** Imagine we didn't have any other information about condos, then what would you tell somebody? If you had some sales prices like this but you didn't have any of these other columns. If somebody asked you, "How much do you think a condo in Tribeca costs?"You could say, "Well, I've got 90 sales prices here, and I see that on average they cost \$3.9 million."So we do this all the time in the real world. We use descriptive statistics for prediction. And that's not wrong or bad, in fact **that's where you should start. This is called the _mean baseline_.** **Baseline** is an overloaded term, with multiple meanings:1. [**The score you'd get by guessing**](https://twitter.com/koehrsen_will/status/1088863527778111488)2. [**Fast, first models that beat guessing**](https://blog.insightdatascience.com/always-start-with-a-stupid-model-no-exceptions-3a22314b9aaa) 3. **Complete, tuned "simpler" model** (Simpler mathematically, computationally. Or less work for you, the data scientist.)4. **Minimum performance that "matters"** to go to production and benefit your employer and the people you serve.5. **Human-level performance** Baseline type 1 is what we're doing now.(Linear models can be great for 2, 3, 4, and [sometimes even 5 too!](http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.188.5825)) ---Let's go back to our mean baseline for Tribeca condos. If we just guessed that every Tribeca condo sold for \$3.9 million, how far off would we be, on average?
###Code
guess = df['SALE_PRICE'].mean()
errors = guess - df['SALE_PRICE']
mean_absolute_error = errors.abs().mean()
print(f'If we just guessed every Tribeca condo sold for ${guess:,.0f},')
print(f'we would be off by ${mean_absolute_error:,.0f} on average.')
###Output
_____no_output_____
###Markdown
That sounds like a lot of error! But fortunately, we can do better than this first baseline - we can use more data. For example, the condo's size.Could sale price be **dependent** on square feet? To explore this relationship, let's make a scatterplot, using [Plotly Express](https://plot.ly/python/plotly-express/):
###Code
import plotly.express as px
px.scatter(df, x='GROSS_SQUARE_FEET', y='SALE_PRICE')
###Output
_____no_output_____
###Markdown
3. Predictive ModelTo go from a _descriptive_ [scatterplot](https://www.plotly.express/plotly_express/plotly_express.scatter) to a _predictive_ regression, just add a _line of best fit:_
###Code
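# One way to add the line of best fit (a sketch): Plotly Express can fit and draw
# an OLS trendline via the `trendline` argument (requires the statsmodels package).
px.scatter(df, x='GROSS_SQUARE_FEET', y='SALE_PRICE', trendline='ols')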
###Output
_____no_output_____
###Markdown
Roll over the Plotly regression line to see its equation and predictions for sale price, dependent on gross square feet.Linear Regression helps us **interpolate.** For example, in this dataset, there's a gap between 4016 sq ft and 4663 sq ft. There were no 4300 sq ft condos sold, but what price would you predict, using this line of best fit?Linear Regression also helps us **extrapolate.** For example, in this dataset, there were no 6000 sq ft condos sold, but what price would you predict? The line of best fit tries to summarize the relationship between our x variable and y variable in a way that enables us to use the equation for that line to make predictions. **Synonyms for "y variable"**- **Dependent Variable**- Response Variable- Outcome Variable - Predicted Variable- Measured Variable- Explained Variable- **Label**- **Target** **Synonyms for "x variable"**- **Independent Variable**- Explanatory Variable- Regressor- Covariate- Correlate- **Feature** The bolded terminology will be used most often by your instructors this unit. ChallengeIn your assignment, you will practice how to begin with baselines for regression, using a new dataset! Use scikit-learn to fit a linear regression Overview We can use visualization libraries to do simple linear regression ("simple" means there's only one independent variable). But during this unit, we'll usually use the scikit-learn library for predictive models, and we'll usually have multiple independent variables. In [_Python Data Science Handbook,_ Chapter 5.2: Introducing Scikit-Learn](https://jakevdp.github.io/PythonDataScienceHandbook/05.02-introducing-scikit-learn.html#Basics-of-the-API), Jake VanderPlas explains **how to structure your data** for scikit-learn:> The best way to think about data within Scikit-Learn is in terms of tables of data. >> >>The features matrix is often stored in a variable named `X`. The features matrix is assumed to be two-dimensional, with shape `[n_samples, n_features]`, and is most often contained in a NumPy array or a Pandas `DataFrame`.>>We also generally work with a label or target array, which by convention we will usually call `y`. The target array is usually one dimensional, with length `n_samples`, and is generally contained in a NumPy array or Pandas `Series`. The target array may have continuous numerical values, or discrete classes/labels. >>The target array is the quantity we want to _predict from the data:_ in statistical terms, it is the dependent variable. VanderPlas also lists a **5 step process** for scikit-learn's "Estimator API":> Every machine learning algorithm in Scikit-Learn is implemented via the Estimator API, which provides a consistent interface for a wide range of machine learning applications.>> Most commonly, the steps in using the Scikit-Learn estimator API are as follows:>> 1. Choose a class of model by importing the appropriate estimator class from Scikit-Learn.> 2. Choose model hyperparameters by instantiating this class with desired values.> 3. Arrange data into a features matrix and target vector following the discussion above.> 4. Fit the model to your data by calling the `fit()` method of the model instance.> 5. Apply the Model to new data: For supervised learning, often we predict labels for unknown data using the `predict()` method.Let's try it! Follow AlongFollow the 5 step process, and refer to [Scikit-Learn LinearRegression documentation](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html).
###Code
# 1. Import the appropriate estimator class from Scikit-Learn
# 2. Instantiate this class
# 3. Arrange X features matrix & y target vector
# 4. Fit the model
# 5. Apply the model to new data
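
# One possible solution sketch for the 5 steps above (not the only way):
from sklearn.linear_model import LinearRegression  # step 1

model = LinearRegression()                         # step 2

features = ['GROSS_SQUARE_FEET']                   # step 3
target = 'SALE_PRICE'
X = df[features]
y = df[target]

model.fit(X, y)                                    # step 4

model.predict([[1497]])                            # step 5: the 1,497 sq ft condo from the video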
###Output
_____no_output_____
###Markdown
So, we used scikit-learn to fit a linear regression, and predicted the sales price for a 1,497 square foot Tribeca condo, like the one from the video.Now, what did that condo actually sell for? ___The final answer is revealed in [the video at 12:28](https://youtu.be/JQCctBOgH9I?t=748)!___
###Code
###Output
_____no_output_____
###Markdown
What was the error for our prediction, versus the video participants?Let's use [scikit-learn's mean absolute error function](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_absolute_error.html).
###Code
chinwe_final_guess = [15000000]
mubeen_final_guess = [2200000]
pam_final_guess = [2200000]
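
# A sketch of the comparison with sklearn's mean_absolute_error. The true sale
# price is revealed in the video; `actual_price` below is a hypothetical
# placeholder, NOT the real answer.
from sklearn.metrics import mean_absolute_error

actual_price = [2000000]  # placeholder - replace with the price from the video
y_pred = model.predict([[1497]])

print('Model error: ', mean_absolute_error(actual_price, y_pred))
print('Chinwe error:', mean_absolute_error(actual_price, chinwe_final_guess))
print('Mubeen error:', mean_absolute_error(actual_price, mubeen_final_guess))
print('Pam error:   ', mean_absolute_error(actual_price, pam_final_guess))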
###Output
_____no_output_____
###Markdown
This [diagram](https://ogrisel.github.io/scikit-learn.org/sklearn-tutorial/tutorial/text_analytics/general_concepts.html#supervised-learning-model-fit-x-y) shows what we just did! Don't worry about understanding it all now. But can you start to match some of these boxes/arrows to the corresponding lines of code from above? Here's [another diagram](https://livebook.manning.com/book/deep-learning-with-python/chapter-1/), which shows how machine learning is a "new programming paradigm":> A machine learning system is "trained" rather than explicitly programmed. It is presented with many "examples" relevant to a task, and it finds statistical structure in these examples which eventually allows the system to come up with rules for automating the task. - [Francois Chollet](https://livebook.manning.com/book/deep-learning-with-python/chapter-1/) Wait, are we saying that *linear regression* could be considered a *machine learning algorithm*? Maybe it depends? What do you think? We'll discuss throughout this unit. ChallengeIn your assignment, you will use scikit-learn for linear regression with one feature. For a stretch goal, you can do linear regression with two or more features. Explain the coefficients from a linear regression OverviewWhat pattern did the model "learn", about the relationship between square feet & price? Follow Along To help answer this question, we'll look at the `coef_` and `intercept_` attributes of the `LinearRegression` object. (Again, [here's the documentation](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html).)
###Code
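# A minimal sketch (assumes the `model` fit earlier in this notebook):
print('Intercept:', model.intercept_)
print('Coefficient for GROSS_SQUARE_FEET:', model.coef_[0])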
###Output
_____no_output_____
###Markdown
We can repeatedly apply the model to new/unknown data, and explain the coefficient:
###Code
def predict(square_feet):
y_pred = model.predict([[square_feet]])
estimate = y_pred[0]
coefficient = model.coef_[0]
result = f'${estimate:,.0f} estimated price for {square_feet:,.0f} square foot condo in Tribeca.'
explanation = f'In this linear regression, each additional square foot adds ${coefficient:,.0f}.'
return result + '\n' + explanation
predict(1497)
# What does the model predict for low square footage?
predict(500)
# For high square footage?
predict(10000)
###Output
_____no_output_____ |
tutorials/opf_dcline.ipynb | ###Markdown
DC Line dispatch with pandapower OPFThis is an introduction to the usage of the pandapower optimal power flow with DC lines. Example NetworkWe use the following five-bus example network for this tutorial:We first create this network in pandapower:
###Code
import pandapower as pp
from numpy import array
net = pp.create_empty_network()
b1 = pp.create_bus(net, 380)
b2 = pp.create_bus(net, 380)
b3 = pp.create_bus(net, 380)
b4 = pp.create_bus(net, 380)
b5 = pp.create_bus(net, 380)
l1 = pp.create_line(net, b1, b2, 30, "490-AL1/64-ST1A 380.0")
l2 = pp.create_line(net, b3, b4, 20, "490-AL1/64-ST1A 380.0")
l3 = pp.create_line(net, b4, b5, 20, "490-AL1/64-ST1A 380.0")
dcl1 = pp.create_dcline(net, name="dc line", from_bus=b2, to_bus=b3, p_kw=0.2e6, loss_percent=1.0,
loss_kw=500, vm_from_pu=1.01, vm_to_pu=1.012, max_p_kw=1e6,
in_service=True)
eg1 = pp.create_ext_grid(net, b1, 1.02, max_p_kw=0.)
eg2 = pp.create_ext_grid(net, b5, 1.02, max_p_kw=0.)
l1 = pp.create_load(net, bus=b4, p_kw=800e3, controllable = False)
###Output
_____no_output_____
###Markdown
We now run a regular load flow to check out the DC line model:
###Code
pp.runpp(net)
###Output
_____no_output_____
###Markdown
The transmission power of the DC line is defined in the loadflow as given by the p_kw parameter, which was set to 0.2 GW:
###Code
net.res_dcline
###Output
_____no_output_____
###Markdown
The losses amount to 2500 kW, which are made up of 500 kW conversion loss and 200 MW * 0.01 = 2 MW transmission losses. The voltage setpoints defined at from and to bus are complied with.
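###Code
# A quick sanity check of the dcline loss model (a sketch added for illustration):
# total losses = fixed conversion loss (loss_kw) + transmitted power * loss_percent / 100
expected_loss_kw = 500 + 0.2e6 * 1.0 / 100
expected_loss_kw  # 2500 kW, matching the loadflow result above
###Output
_____no_output_____
###Markdown
Now let's define costs for the external grids to run an OPF: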
###Code
costeg0 = pp.create_polynomial_cost(net, 0, 'ext_grid', array([.1, 0]))
costeg1 = pp.create_polynomial_cost(net, 1, 'ext_grid', array([.08, 0]))
net.bus['max_vm_pu'] = 1.5
net.line['max_loading_percent'] = 1000
pp.runopp(net)
###Output
_____no_output_____
###Markdown
Since we defined lower costs for Ext Grid 2, it fully services the load:
###Code
net.res_ext_grid
###Output
_____no_output_____
###Markdown
While the DC line does not transmit any power:
###Code
net.res_dcline
###Output
_____no_output_____
###Markdown
If we set the costs of the left grid to a lower value than the right grid and run the loadflow again:
###Code
net.polynomial_cost.c.at[costeg0]= array([[0.08, 0]])
net.polynomial_cost.c.at[costeg1]= array([[0.1, 0]])
pp.runopp(net)
###Output
_____no_output_____
###Markdown
We can see that the power now comes from the left ext_grid:
###Code
net.res_ext_grid
###Output
_____no_output_____
###Markdown
And is transmitted over the DC line:
###Code
net.res_dcline
###Output
_____no_output_____
###Markdown
We can however see that the lines on the left hand side are now overloaded:
###Code
net.res_line
###Output
_____no_output_____
###Markdown
If we set the maximum line loading to 100% and run the OPF again:
###Code
net.line["max_loading_percent"] = 100
pp.runopp(net)
###Output
_____no_output_____
###Markdown
We can see that the lines are no longer overloaded:
###Code
net.res_line
###Output
_____no_output_____
###Markdown
Because the load is serviced from both grids:
###Code
net.res_ext_grid
###Output
_____no_output_____
###Markdown
And the DC line transmits only part of the power needed to service the load:
###Code
net.res_dcline
###Output
_____no_output_____
###Markdown
Finally, we can also define transmission costs for the DC line:
###Code
costeg1 = pp.create_polynomial_cost(net, 0, 'dcline', array([.03, 0]))
pp.runopp(net)
###Output
_____no_output_____
###Markdown
Because the sum of the costs for generating power on the left hand side (0.08) and transmitting it to the right side (0.03) is now larger than for generating on the right side (0.1), the OPF draws as much power from the right side as is possible without violating line loading constraints:
###Code
net.res_line
net.res_dcline
###Output
_____no_output_____
###Markdown
If we relax the line loading constraint and run the OPF again:
###Code
net.line["max_loading_percent"] = 1000
pp.runopp(net)
###Output
_____no_output_____
###Markdown
The load is once again fully serviced by the grid on the right hand side:
###Code
net.res_ext_grid
###Output
_____no_output_____
###Markdown
And the DC line is in open loop operation:
###Code
net.res_dcline
###Output
_____no_output_____
###Markdown
DC Line dispatch with pandapower OPFThis is an introduction to the usage of the pandapower optimal power flow with DC lines. Example NetworkWe use the following five-bus example network for this tutorial:We first create this network in pandapower:
###Code
import pandapower as pp
from numpy import array
net = pp.create_empty_network()
b1 = pp.create_bus(net, 380)
b2 = pp.create_bus(net, 380)
b3 = pp.create_bus(net, 380)
b4 = pp.create_bus(net, 380)
b5 = pp.create_bus(net, 380)
l1 = pp.create_line(net, b1, b2, 30, "490-AL1/64-ST1A 380.0")
l2 = pp.create_line(net, b3, b4, 20, "490-AL1/64-ST1A 380.0")
l3 = pp.create_line(net, b4, b5, 20, "490-AL1/64-ST1A 380.0")
dcl1 = pp.create_dcline(net, name="dc line", from_bus=b2, to_bus=b3, p_kw=0.2e6, loss_percent=1.0,
loss_kw=500, vm_from_pu=1.01, vm_to_pu=1.012, max_p_kw=1e6,
in_service=True)
eg1 = pp.create_ext_grid(net, b1, 1.02, max_p_kw=0.)
eg2 = pp.create_ext_grid(net, b5, 1.02, max_p_kw=0.)
l1 = pp.create_load(net, bus=b4, p_kw=800e3, controllable = False)
###Output
_____no_output_____
###Markdown
We now run a regular load flow to check out the DC line model:
###Code
pp.runpp(net)
###Output
_____no_output_____
###Markdown
The transmission power of the DC line is defined in the loadflow as given by the p_kw parameter, which was set to 0.2 GW:
###Code
net.res_dcline
###Output
_____no_output_____
###Markdown
The losses amount to 2500 kW, which are made up of 500 kW conversion loss and 200 MW * 0.01 = 2 MW transmission losses. The voltage setpoints defined at from and to bus are complied with. Now let's define costs for the external grids to run an OPF:
###Code
costeg0 = pp.create_polynomial_cost(net, 0, 'ext_grid', array([-.1, 0]))
costeg1 = pp.create_polynomial_cost(net, 1, 'ext_grid', array([-.08, 0]))
net.bus['max_vm_pu'] = 1.5
net.line['max_loading_percent'] = 1000
pp.runopp(net)
###Output
hp.pandapower.run - WARNING: The OPF cost definition has changed! Please check out the tutorial 'opf_changes-may18.ipynb' or the documentation!
hp.pandapower.run - INFO: These missing columns in ext_grid are considered in OPF as +- 1000 TW.: ['min_p_kw' 'min_q_kvar' 'max_q_kvar']
hp.pandapower.run - INFO: These elements have missing power constraint values, which are considered in OPF as +- 1000 TW: ['dcline']
hp.pandapower.run - INFO: min_vm_pu is missing in bus table. In OPF these limits are considered as 0.0 pu.
###Markdown
Since we defined lower costs for Ext Grid 2, it fully services the load:
###Code
net.res_ext_grid
###Output
_____no_output_____
###Markdown
While the DC line does not transmit any power:
###Code
net.res_dcline
###Output
_____no_output_____
###Markdown
If we set the costs of the left grid to a lower value than the right grid and run the loadflow again:
###Code
net.polynomial_cost.c.at[costeg0]= array([[-0.08, 0]])
net.polynomial_cost.c.at[costeg1]= array([[-0.1, 0]])
pp.runopp(net)
###Output
hp.pandapower.run - WARNING: The OPF cost definition has changed! Please check out the tutorial 'opf_changes-may18.ipynb' or the documentation!
hp.pandapower.run - INFO: These missing columns in ext_grid are considered in OPF as +- 1000 TW.: ['min_p_kw' 'min_q_kvar' 'max_q_kvar']
hp.pandapower.run - INFO: These elements have missing power constraint values, which are considered in OPF as +- 1000 TW: ['dcline']
hp.pandapower.run - INFO: min_vm_pu is missing in bus table. In OPF these limits are considered as 0.0 pu.
###Markdown
We can see that the power now comes from the left ext_grid:
###Code
net.res_ext_grid
###Output
_____no_output_____
###Markdown
And is transmitted over the DC line:
###Code
net.res_dcline
###Output
_____no_output_____
###Markdown
We can however see that the lines on the left hand side are now overloaded:
###Code
net.res_line
###Output
_____no_output_____
###Markdown
If we set the maximum line loading to 100% and run the OPF again:
###Code
net.line["max_loading_percent"] = 100
pp.runopp(net)
###Output
hp.pandapower.run - WARNING: The OPF cost definition has changed! Please check out the tutorial 'opf_changes-may18.ipynb' or the documentation!
hp.pandapower.run - INFO: These missing columns in ext_grid are considered in OPF as +- 1000 TW.: ['min_p_kw' 'min_q_kvar' 'max_q_kvar']
hp.pandapower.run - INFO: These elements have missing power constraint values, which are considered in OPF as +- 1000 TW: ['dcline']
hp.pandapower.run - INFO: min_vm_pu is missing in bus table. In OPF these limits are considered as 0.0 pu.
###Markdown
We can see that the lines are no longer overloaded:
###Code
net.res_line
###Output
_____no_output_____
###Markdown
Because the load is serviced from both grids:
###Code
net.res_ext_grid
###Output
_____no_output_____
###Markdown
And the DC line transmits only part of the power needed to service the load:
###Code
net.res_dcline
###Output
_____no_output_____
###Markdown
Finally, we can also define transmission costs for the DC line:
###Code
costeg1 = pp.create_polynomial_cost(net, 0, 'dcline', array([.03, 0]))
pp.runopp(net)
###Output
hp.pandapower.run - WARNING: The OPF cost definition has changed! Please check out the tutorial 'opf_changes-may18.ipynb' or the documentation!
hp.pandapower.run - INFO: These missing columns in ext_grid are considered in OPF as +- 1000 TW.: ['min_p_kw' 'min_q_kvar' 'max_q_kvar']
hp.pandapower.run - INFO: These elements have missing power constraint values, which are considered in OPF as +- 1000 TW: ['dcline']
hp.pandapower.run - INFO: min_vm_pu is missing in bus table. In OPF these limits are considered as 0.0 pu.
###Markdown
Because the sum of the costs for generating power on the left hand side (0.08) and transmitting it to the right side (0.03) is now larger than for generating on the right side (0.1), the OPF draws as much power from the right side as is possible without violating line loading constraints:
###Code
net.res_line
net.res_dcline
###Output
_____no_output_____
###Markdown
If we relax the line loading constraint and run the OPF again:
###Code
net.line["max_loading_percent"] = 1000
pp.runopp(net)
###Output
hp.pandapower.run - WARNING: The OPF cost definition has changed! Please check out the tutorial 'opf_changes-may18.ipynb' or the documentation!
hp.pandapower.run - INFO: These missing columns in ext_grid are considered in OPF as +- 1000 TW.: ['min_p_kw' 'min_q_kvar' 'max_q_kvar']
hp.pandapower.run - INFO: These elements have missing power constraint values, which are considered in OPF as +- 1000 TW: ['dcline']
hp.pandapower.run - INFO: min_vm_pu is missing in bus table. In OPF these limits are considered as 0.0 pu.
###Markdown
The load is once again fully serviced by the grid on the right hand side:
###Code
net.res_ext_grid
###Output
_____no_output_____
###Markdown
And the DC line is in open loop operation:
###Code
net.res_dcline
###Output
_____no_output_____
###Markdown
Little consistency check:
###Code
net.res_ext_grid.p_kw.at[0]*-0.08 + net.res_ext_grid.p_kw.at[1]*-0.1 + net.res_dcline.p_from_kw.at[0]*0.03
net.res_cost
###Output
_____no_output_____
###Markdown
DC Line dispatch with pandapower OPFThis is an introduction to the usage of the pandapower optimal power flow with DC lines. Example NetworkWe use the following five-bus example network for this tutorial:We first create this network in pandapower:
###Code
import pandapower as pp
from numpy import array
net = pp.create_empty_network()
b1 = pp.create_bus(net, 380)
b2 = pp.create_bus(net, 380)
b3 = pp.create_bus(net, 380)
b4 = pp.create_bus(net, 380)
b5 = pp.create_bus(net, 380)
l1 = pp.create_line(net, b1, b2, 30, "490-AL1/64-ST1A 380.0")
l2 = pp.create_line(net, b3, b4, 20, "490-AL1/64-ST1A 380.0")
l3 = pp.create_line(net, b4, b5, 20, "490-AL1/64-ST1A 380.0")
dcl1 = pp.create_dcline(net, name="dc line", from_bus=b2, to_bus=b3, p_mw=200, loss_percent=1.0,
loss_mw=0.5, vm_from_pu=1.01, vm_to_pu=1.012, max_p_mw=1000,
in_service=True)
eg1 = pp.create_ext_grid(net, b1, 1.02, min_p_mw=0.)
eg2 = pp.create_ext_grid(net, b5, 1.02, min_p_mw=0.)
l1 = pp.create_load(net, bus=b4, p_mw=800, controllable = False)
###Output
_____no_output_____
###Markdown
We now run a regular load flow to check out the DC line model:
###Code
pp.runpp(net)
###Output
_____no_output_____
###Markdown
The transmission power of the DC line is defined in the loadflow as given by the p_mw parameter, which was set to 200 MW:
###Code
net.res_dcline
###Output
_____no_output_____
###Markdown
The losses amount to 2.5 MW, which are made up of 0.5 MW conversion loss and 200 MW * 0.01 = 2 MW transmission losses. The voltage setpoints defined at from and to bus are complied with.
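###Code
# A quick sanity check of the dcline loss model (a sketch added for illustration):
# total losses = fixed conversion loss (loss_mw) + transmitted power * loss_percent / 100
expected_loss_mw = 0.5 + 200 * 1.0 / 100
expected_loss_mw  # 2.5 MW, matching the loadflow result above
###Output
_____no_output_____
###Markdown
Now let's define costs for the external grids to run an OPF: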
###Code
costeg0 = pp.create_poly_cost(net, 0, 'ext_grid', cp1_eur_per_mw=10)
costeg1 = pp.create_poly_cost(net, 1, 'ext_grid', cp1_eur_per_mw=8)
net.bus['max_vm_pu'] = 1.5
net.line['max_loading_percent'] = 1000
pp.runopp(net)
###Output
tazan.pandapower.run - INFO: These elements have missing power constraint values, which are considered in OPF as +- 1000 TW: ['dcline']
tazan.pandapower.run - INFO: min_vm_pu is missing in bus table. In OPF these limits are considered as 0.0 pu.
###Markdown
Since we defined lower costs for Ext Grid 2, it fully services the load:
###Code
net.res_ext_grid
###Output
_____no_output_____
###Markdown
While the DC line does not transmit any power:
###Code
net.res_dcline
###Output
_____no_output_____
###Markdown
If we set the costs of the left grid to a lower value than the right grid and run the loadflow again:
###Code
net.poly_cost.cp1_eur_per_mw.at[costeg0] = 8
net.poly_cost.cp1_eur_per_mw.at[costeg1] = 10
pp.runopp(net)
###Output
tazan.pandapower.run - INFO: These elements have missing power constraint values, which are considered in OPF as +- 1000 TW: ['dcline']
tazan.pandapower.run - INFO: min_vm_pu is missing in bus table. In OPF these limits are considered as 0.0 pu.
###Markdown
We can see that the power now comes from the left ext_grid:
###Code
net.res_ext_grid
###Output
_____no_output_____
###Markdown
And is transmitted over the DC line:
###Code
net.res_dcline
###Output
_____no_output_____
###Markdown
We can however see that the lines on the left hand side are now overloaded:
###Code
net.res_line.loading_percent
###Output
_____no_output_____
###Markdown
If we set the maximum line loading to 100% and run the OPF again:
###Code
net.line["max_loading_percent"] = 100
pp.runopp(net)
###Output
tazan.pandapower.run - INFO: These elements have missing power constraint values, which are considered in OPF as +- 1000 TW: ['dcline']
tazan.pandapower.run - INFO: min_vm_pu is missing in bus table. In OPF these limits are considered as 0.0 pu.
###Markdown
We can see that the lines are no longer overloaded:
###Code
net.res_line.loading_percent
###Output
_____no_output_____
###Markdown
Because the load is serviced from both grids:
###Code
net.res_ext_grid
###Output
_____no_output_____
###Markdown
And the DC line transmits only part of the power needed to service the load:
###Code
net.res_dcline
###Output
_____no_output_____
###Markdown
Finally, we can also define transmission costs for the DC line:
###Code
costeg1 = pp.create_poly_cost(net, 0, 'dcline', cp1_eur_per_mw=3)
pp.runopp(net)
###Output
tazan.pandapower.run - INFO: These elements have missing power constraint values, which are considered in OPF as +- 1000 TW: ['dcline']
tazan.pandapower.run - INFO: min_vm_pu is missing in bus table. In OPF these limits are considered as 0.0 pu.
###Markdown
Because the sum of the costs for generating power on the left hand side (8) and transmitting it to the right side (3) is now larger than for generating on the right side (10), the OPF draws as much power from the right side as is possible without violating line loading constraints:
###Code
net.res_line.loading_percent
net.res_dcline
###Output
_____no_output_____
###Markdown
If we relax the line loading constraint and run the OPF again:
###Code
net.line["max_loading_percent"] = 1000
pp.runopp(net)
###Output
tazan.pandapower.run - INFO: These elements have missing power constraint values, which are considered in OPF as +- 1000 TW: ['dcline']
tazan.pandapower.run - INFO: min_vm_pu is missing in bus table. In OPF these limits are considered as 0.0 pu.
###Markdown
The load is once again fully serviced by the grid on the right hand side:
###Code
net.res_ext_grid
###Output
_____no_output_____
###Markdown
And the DC line is in open loop operation:
###Code
net.res_dcline
###Output
_____no_output_____
###Markdown
Little consistency check:
###Code
net.res_ext_grid.p_mw.at[0]*8 + net.res_ext_grid.p_mw.at[1]*10 + net.res_dcline.p_from_mw.at[0]*3
net.res_cost
###Output
_____no_output_____
###Markdown
DC Line dispatch with pandapower OPFThis is an introduction to the usage of the pandapower optimal power flow with DC lines. Example NetworkWe use the following five-bus example network for this tutorial:We first create this network in pandapower:
###Code
import pandapower as pp
from numpy import array
net = pp.create_empty_network()
b1 = pp.create_bus(net, 380)
b2 = pp.create_bus(net, 380)
b3 = pp.create_bus(net, 380)
b4 = pp.create_bus(net, 380)
b5 = pp.create_bus(net, 380)
l1 = pp.create_line(net, b1, b2, 30, "490-AL1/64-ST1A 380.0")
l2 = pp.create_line(net, b3, b4, 20, "490-AL1/64-ST1A 380.0")
l3 = pp.create_line(net, b4, b5, 20, "490-AL1/64-ST1A 380.0")
dcl1 = pp.create_dcline(net, name="dc line", from_bus=b2, to_bus=b3, p_mw=200, loss_percent=1.0,
loss_mw=0.5, vm_from_pu=1.01, vm_to_pu=1.012, max_p_mw=1000,
in_service=True)
eg1 = pp.create_ext_grid(net, b1, 1.02, min_p_mw=0.)
eg2 = pp.create_ext_grid(net, b5, 1.02, min_p_mw=0.)
l1 = pp.create_load(net, bus=b4, p_mw=800, controllable = False)
###Output
_____no_output_____
###Markdown
We now run a regular load flow to check out the DC line model:
###Code
pp.runpp(net)
###Output
_____no_output_____
###Markdown
The transmission power of the DC line is defined in the loadflow as given by the p_mw parameter, which was set to 200 MW:
###Code
net.res_dcline
###Output
_____no_output_____
###Markdown
The losses amount to 2.5 MW, which are made up of 0.5 MW conversion loss and 200 MW * 0.01 = 2 MW transmission losses. The voltage setpoints defined at from and to bus are complied with. Now let's define costs for the external grids to run an OPF:
###Code
costeg0 = pp.create_poly_cost(net, 0, 'ext_grid', cp1_eur_per_mw=10)
costeg1 = pp.create_poly_cost(net, 1, 'ext_grid', cp1_eur_per_mw=8)
net.bus['max_vm_pu'] = 1.5
net.line['max_loading_percent'] = 1000
pp.runopp(net)
###Output
hp.pandapower.run - INFO: These elements have missing power constraint values, which are considered in OPF as +- 1000 TW: ['dcline']
hp.pandapower.run - INFO: min_vm_pu is missing in bus table. In OPF these limits are considered as 0.0 pu.
###Markdown
Since we defined lower costs for Ext Grid 2, it fully services the load:
###Code
net.res_ext_grid
###Output
_____no_output_____
###Markdown
While the DC line does not transmit any power:
###Code
net.res_dcline
###Output
_____no_output_____
###Markdown
If we set the costs of the left grid to a lower value than the right grid and run the loadflow again:
###Code
net.poly_cost.cp1_eur_per_mw.at[costeg0] = 8
net.poly_cost.cp1_eur_per_mw.at[costeg1] = 10
pp.runopp(net)
###Output
hp.pandapower.run - INFO: These elements have missing power constraint values, which are considered in OPF as +- 1000 TW: ['dcline']
hp.pandapower.run - INFO: min_vm_pu is missing in bus table. In OPF these limits are considered as 0.0 pu.
###Markdown
We can see that the power now comes from the left ext_grid:
###Code
net.res_ext_grid
###Output
_____no_output_____
###Markdown
And is transmitted over the DC line:
###Code
net.res_dcline
###Output
_____no_output_____
###Markdown
We can however see that the lines on the left hand side are now overloaded:
###Code
net.res_line.loading_percent
###Output
_____no_output_____
###Markdown
If we set the maximum line loading to 100% and run the OPF again:
###Code
net.line["max_loading_percent"] = 100
pp.runopp(net)
###Output
hp.pandapower.run - INFO: These elements have missing power constraint values, which are considered in OPF as +- 1000 TW: ['dcline']
hp.pandapower.run - INFO: min_vm_pu is missing in bus table. In OPF these limits are considered as 0.0 pu.
###Markdown
We can see that the lines are no longer overloaded:
###Code
net.res_line.loading_percent
###Output
_____no_output_____
###Markdown
Because the load is serviced from both grids:
###Code
net.res_ext_grid
###Output
_____no_output_____
###Markdown
And the DC line transmits only part of the power needed to service the load:
###Code
net.res_dcline
###Output
_____no_output_____
###Markdown
Finally, we can also define transmission costs for the DC line:
###Code
costeg1 = pp.create_poly_cost(net, 0, 'dcline', cp1_eur_per_mw=3)
pp.runopp(net)
###Output
hp.pandapower.run - INFO: These elements have missing power constraint values, which are considered in OPF as +- 1000 TW: ['dcline']
hp.pandapower.run - INFO: min_vm_pu is missing in bus table. In OPF these limits are considered as 0.0 pu.
###Markdown
Because the sum of the costs for generating power on the left hand side (8) and transmitting it to the right side (3) is now larger than for generating on the right side (10), the OPF draws as much power from the right side as is possible without violating line loading constraints:
###Code
net.res_line.loading_percent
net.res_dcline
###Output
_____no_output_____
###Markdown
If we relax the line loading constraint and run the OPF again:
###Code
net.line["max_loading_percent"] = 1000
pp.runopp(net)
###Output
hp.pandapower.run - INFO: These elements have missing power constraint values, which are considered in OPF as +- 1000 TW: ['dcline']
hp.pandapower.run - INFO: min_vm_pu is missing in bus table. In OPF these limits are considered as 0.0 pu.
###Markdown
The load is once again fully serviced by the grid on the right hand side:
###Code
net.res_ext_grid
###Output
_____no_output_____
###Markdown
And the DC line is in open loop operation:
###Code
net.res_dcline
###Output
_____no_output_____
###Markdown
Little consistency check:
###Code
net.res_ext_grid.p_mw.at[0]*8 + net.res_ext_grid.p_mw.at[1]*10 + net.res_dcline.p_from_mw.at[0]*3
net.res_cost
###Output
_____no_output_____ |
Mini_Project_Linear_Regression.ipynb | ###Markdown
Regression in Python***This is a very quick run-through of some basic statistical concepts, adapted from [Lab 4 in Harvard's CS109](https://github.com/cs109/2015lab4) course. Please feel free to try the original lab if you're feeling ambitious :-) The CS109 git repository also has the solutions if you're stuck.* Linear Regression Models* Prediction using linear regression* Some re-sampling methods * Train-Test splits * Cross ValidationLinear regression is used to model and predict continuous outcomes while logistic regression is used to model binary outcomes. We'll see some examples of linear regression as well as Train-test splits.The packages we'll cover are: `statsmodels`, `seaborn`, and `scikit-learn`. While we don't explicitly teach `statsmodels` and `seaborn` in the Springboard workshop, those are great libraries to know.*** ***
###Code
# special IPython command to prepare the notebook for matplotlib and other libraries
%pylab inline
import numpy as np
import pandas as pd
import scipy.stats as stats
import matplotlib.pyplot as plt
import sklearn
import seaborn as sns
# special matplotlib argument for improved plots
from matplotlib import rcParams
sns.set_style("whitegrid")
sns.set_context("poster")
###Output
Populating the interactive namespace from numpy and matplotlib
###Markdown
*** Part 1: Linear Regression Purpose of linear regression*** Given a dataset $X$ and $Y$, linear regression can be used to: Build a predictive model to predict future values of $X_i$ without a $Y$ value. Model the strength of the relationship between each dependent variable $X_i$ and $Y$ Sometimes not all $X_i$ will have a relationship with $Y$ Need to figure out which $X_i$ contributes most information to determine $Y$ Linear regression is used in so many applications that I won't warrant this with examples. It is, in many cases, the first-pass prediction algorithm for continuous outcomes. A brief recap (feel free to skip if you don't care about the math)***[Linear Regression](http://en.wikipedia.org/wiki/Linear_regression) is a method to model the relationship between a set of independent variables $X$ (also known as explanatory variables, features, predictors) and a dependent variable $Y$. This method assumes that each predictor $X$ is linearly related to the dependent variable $Y$. $$ Y = \beta_0 + \beta_1 X + \epsilon$$where $\epsilon$ is considered as an unobservable random variable that adds noise to the linear relationship. This is the simplest form of linear regression (one variable), we'll call this the simple model. * $\beta_0$ is the intercept of the linear model* Multiple linear regression is when you have more than one independent variable * $X_1$, $X_2$, $X_3$, $\ldots$$$ Y = \beta_0 + \beta_1 X_1 + \ldots + \beta_p X_p + \epsilon$$ * Back to the simple model. The model in linear regression is that the *conditional mean* of $Y$ given the values in $X$ is expressed as a linear function. $$ y = f(x) = E(Y | X = x)$$ http://www.learner.org/courses/againstallodds/about/glossary.html* The goal is to estimate the coefficients (e.g. $\beta_0$ and $\beta_1$). We represent the estimates of the coefficients with a "hat" on top of the letter. $$ \hat{\beta}_0, \hat{\beta}_1 $$* Once you estimate the coefficients $\hat{\beta}_0$ and $\hat{\beta}_1$, you can use these to predict new values of $Y$$$\hat{y} = \hat{\beta}_0 + \hat{\beta}_1 x_1$$* How do you estimate the coefficients? * There are many ways to fit a linear regression model * The method called **least squares** is one of the most common methods * We will discuss least squares today Estimating $\hat\beta$: Least squares***[Least squares](http://en.wikipedia.org/wiki/Least_squares) is a method that estimates the coefficients of a linear model by minimizing the sum of the squared residuals: $$ S = \sum_{i=1}^N r_i^2 = \sum_{i=1}^N (y_i - (\beta_0 + \beta_1 x_i))^2 $$where $N$ is the number of observations. * We will not go into the mathematical details, but the least squares estimates $\hat{\beta}_0$ and $\hat{\beta}_1$ minimize the sum of the squared residuals $r_i = y_i - (\beta_0 + \beta_1 x_i)$ in the model (i.e. makes the difference between the observed $y_i$ and linear model $\beta_0 + \beta_1 x_i$ as small as possible). The solution can be written in compact matrix notation as$$\hat\beta = (X^T X)^{-1}X^T Y$$ We wanted to show you this in case you remember linear algebra; in order for this solution to exist we need $X^T X$ to be invertible. Of course this requires a few extra assumptions, $X$ must be full rank so that $X^T X$ is invertible, etc.
**This is important for us because this means that having redundant features in our regression models will lead to poorly fitting (and unstable) models.** We'll see an implementation of this in the extra linear regression example.**Note**: The "hat" means it is an estimate of the coefficient. *** Part 2: Boston Housing Data SetThe [Boston Housing data set](https://archive.ics.uci.edu/ml/datasets/Housing) contains information about the housing values in suburbs of Boston. This dataset was originally taken from the StatLib library which is maintained at Carnegie Mellon University and is now available on the UCI Machine Learning Repository. Load the Boston Housing data set from `sklearn`***This data set is available in the [sklearn](http://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_boston.html#sklearn.datasets.load_boston) python module which is how we will access it today.
###Code
from sklearn.datasets import load_boston
boston = load_boston()
boston.keys()
boston.data.shape
# Print column names
print (boston.feature_names)
# Print description of Boston housing data set
print (boston.DESCR)
###Output
Boston House Prices dataset
Notes
------
Data Set Characteristics:
:Number of Instances: 506
:Number of Attributes: 13 numeric/categorical predictive
:Median Value (attribute 14) is usually the target
:Attribute Information (in order):
- CRIM per capita crime rate by town
- ZN proportion of residential land zoned for lots over 25,000 sq.ft.
- INDUS proportion of non-retail business acres per town
- CHAS Charles River dummy variable (= 1 if tract bounds river; 0 otherwise)
- NOX nitric oxides concentration (parts per 10 million)
- RM average number of rooms per dwelling
- AGE proportion of owner-occupied units built prior to 1940
- DIS weighted distances to five Boston employment centres
- RAD index of accessibility to radial highways
- TAX full-value property-tax rate per $10,000
- PTRATIO pupil-teacher ratio by town
- B 1000(Bk - 0.63)^2 where Bk is the proportion of blacks by town
- LSTAT % lower status of the population
- MEDV Median value of owner-occupied homes in $1000's
:Missing Attribute Values: None
:Creator: Harrison, D. and Rubinfeld, D.L.
This is a copy of UCI ML housing dataset.
http://archive.ics.uci.edu/ml/datasets/Housing
This dataset was taken from the StatLib library which is maintained at Carnegie Mellon University.
The Boston house-price data of Harrison, D. and Rubinfeld, D.L. 'Hedonic
prices and the demand for clean air', J. Environ. Economics & Management,
vol.5, 81-102, 1978. Used in Belsley, Kuh & Welsch, 'Regression diagnostics
...', Wiley, 1980. N.B. Various transformations are used in the table on
pages 244-261 of the latter.
The Boston house-price data has been used in many machine learning papers that address regression
problems.
**References**
- Belsley, Kuh & Welsch, 'Regression diagnostics: Identifying Influential Data and Sources of Collinearity', Wiley, 1980. 244-261.
- Quinlan,R. (1993). Combining Instance-Based and Model-Based Learning. In Proceedings on the Tenth International Conference of Machine Learning, 236-243, University of Massachusetts, Amherst. Morgan Kaufmann.
- many more! (see http://archive.ics.uci.edu/ml/datasets/Housing)
###Markdown
Now let's explore the data set itself.
###Code
bos = pd.DataFrame(boston.data)
bos.head()
###Output
_____no_output_____
###Markdown
There are no column names in the DataFrame. Let's add those.
###Code
bos.columns = boston.feature_names
bos.head()
###Output
_____no_output_____
###Markdown
Now we have a pandas DataFrame called `bos` containing all the data we want to use to predict Boston Housing prices. Let's create a variable called `PRICE` which will contain the prices. This information is contained in the `target` data.
###Code
print (boston.target.shape)
bos['PRICE'] = boston.target
bos.head()
###Output
_____no_output_____
###Markdown
EDA and Summary Statistics***Let's explore this data set. First we use `describe()` to get basic summary statistics for each of the columns.
###Code
bos.describe()
###Output
_____no_output_____
###Markdown
Scatter plots***Let's look at some scatter plots for three variables: 'CRIM', 'RM' and 'PTRATIO'. What kind of relationship do you see? e.g. positive, negative? linear? non-linear?
###Code
plt.scatter(bos.CRIM, bos.PRICE)
plt.xlabel("Per capita crime rate by town (CRIM)")
plt.ylabel("Housing Price")
plt.title("Relationship between CRIM and Price")
###Output
_____no_output_____
###Markdown
**Your turn**: Create scatter plots between *RM* and *PRICE*, and *PTRATIO* and *PRICE*. What do you notice?
###Code
#your turn: scatter plot between *RM* and *PRICE*
plt.scatter(bos.RM, bos.PRICE)
plt.xlabel("average number of rooms per dwelling (RM)")
plt.ylabel("Housing Price")
plt.title("Relationship between RM and Price")
#your turn: scatter plot between *PTRATIO* and *PRICE*
plt.scatter(bos.PTRATIO, bos.PRICE)
plt.xlabel("pupil-teacher ratio by town (PTRATIO)")
plt.ylabel("Housing Price")
plt.title("Relationship between PTRATIO and Price")
###Output
_____no_output_____
###Markdown
**Your turn**: What are some other numeric variables of interest? Plot scatter plots with these variables and *PRICE*.
###Code
#your turn: create some other scatter plots
plt.scatter(bos.AGE, bos.PRICE)
plt.xlabel("proportion of owner-occupied units built prior to 1940 (AGE)")
plt.ylabel("Housing Price")
plt.title("Relationship between House Ages and Price")
###Output
_____no_output_____
###Markdown
Scatter Plots using Seaborn***[Seaborn](https://stanford.edu/~mwaskom/software/seaborn/) is a cool Python plotting library built on top of matplotlib. It provides convenient syntax and shortcuts for many common types of plots, along with better-looking defaults. We can also use [seaborn regplot](https://stanford.edu/~mwaskom/software/seaborn/tutorial/regression.html#functions-to-draw-linear-regression-models) for the scatterplot above. This provides automatic linear regression fits (useful for data exploration later on). Here's one example below.
###Code
sns.regplot(y="PRICE", x="RM", data=bos, fit_reg = True)
###Output
_____no_output_____
###Markdown
Histograms*** Histograms are a useful way to visually summarize the statistical properties of numeric variables. They can give you an idea of the mean and the spread of the variables as well as outliers.
###Code
plt.hist(bos.CRIM)
plt.title("CRIM")
plt.xlabel("Crime rate per capita")
plt.ylabel("Frequency")
plt.show()
###Output
_____no_output_____
###Markdown
**Your turn**: Plot separate histograms, one for *RM* and one for *PTRATIO*. Any interesting observations?
###Code
#your turn
plt.hist(bos.RM)
plt.title("RM")
plt.xlabel("average number of rooms per dwelling")
plt.ylabel("Frequency")
plt.show()
# Histogram for pupil-teacher ratio by town
plt.hist(bos.PTRATIO)
plt.title("PTRATIO")
plt.xlabel("pupil-teacher ratio by town")
plt.ylabel("Frequency")
plt.show()
###Output
_____no_output_____
###Markdown
Linear regression with Boston housing data example***Here, $Y$ = Boston housing prices (also called "target" data in python) and $X$ = all the other features (or independent variables), which we will use to fit a linear regression model and predict Boston housing prices. We will use the least squares method as the way to estimate the coefficients. We'll use two ways of fitting a linear regression. We recommend the first, but the second is also powerful in its features. Fitting Linear Regression using `statsmodels`***[Statsmodels](http://statsmodels.sourceforge.net/) is a great Python library for a lot of basic and inferential statistics. It also provides basic regression functions using an R-like syntax, so it's commonly used by statisticians. While we don't cover statsmodels officially in the Data Science Intensive, it's a good library to have in your toolbox. Here's a quick example of what you could do with it.
###Code
# Import regression modules
# ols - stands for Ordinary least squares, we'll use this
import statsmodels.api as sm
from statsmodels.formula.api import ols
# statsmodels works nicely with pandas dataframes
# The thing inside the "quotes" is called a formula, a bit on that below
m = ols('PRICE ~ RM',bos).fit()
print (m.summary())
###Output
OLS Regression Results
==============================================================================
Dep. Variable: PRICE R-squared: 0.484
Model: OLS Adj. R-squared: 0.483
Method: Least Squares F-statistic: 471.8
Date: Sat, 17 Jun 2017 Prob (F-statistic): 2.49e-74
Time: 14:39:37 Log-Likelihood: -1673.1
No. Observations: 506 AIC: 3350.
Df Residuals: 504 BIC: 3359.
Df Model: 1
Covariance Type: nonrobust
==============================================================================
coef std err t P>|t| [0.025 0.975]
------------------------------------------------------------------------------
Intercept -34.6706 2.650 -13.084 0.000 -39.877 -29.465
RM 9.1021 0.419 21.722 0.000 8.279 9.925
==============================================================================
Omnibus: 102.585 Durbin-Watson: 0.684
Prob(Omnibus): 0.000 Jarque-Bera (JB): 612.449
Skew: 0.726 Prob(JB): 1.02e-133
Kurtosis: 8.190 Cond. No. 58.4
==============================================================================
Warnings:
[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.
###Markdown
Interpreting coefficientsThere is a ton of information in this output. But we'll concentrate on the coefficient table (middle table). We can interpret the `RM` coefficient (9.1021) by first noticing that the p-value (under `P>|t|`) is so small, basically zero. We can interpret the coefficient as follows: if we compare two groups of towns, one where the average number of rooms is say $5$ and the other group is the same except that they all have $6$ rooms, then for these two groups the average difference in house prices is about $9.1$ (in thousands), so about a $\$9,100$ difference. The confidence interval gives us a range of plausible values for this difference, about ($\$8,279, \$9,925$), definitely not chump change. `statsmodels` formulas***This formula notation will seem familiar to `R` users, but will take some getting used to for people coming from other languages or who are new to statistics.The formula gives instruction for a general structure for a regression call. For `statsmodels` (`ols` or `logit`) calls you need to have a Pandas dataframe with column names that you will add to your formula. In the below example you need a pandas data frame that includes the columns named (`Outcome`, `X1`,`X2`, ...), but you don't need to build a new dataframe for every regression. Use the same dataframe with all these things in it. The structure is very simple:`Outcome ~ X1`But of course we want to be able to handle more complex models, for example multiple regression is done like this:`Outcome ~ X1 + X2 + X3`This is the very basic structure but it should be enough to get you through the homework. Things can get much more complex, for a quick run-down of further uses see the `statsmodels` [help page](http://statsmodels.sourceforge.net/devel/example_formulas.html). Let's see how our model actually fit our data. We can see below that there is a ceiling effect, we should probably look into that. Also, for large values of $Y$ we get underpredictions, most predictions are below the 45-degree gridlines. **Your turn:** Create a scatterplot between the predicted prices, available in `m.fittedvalues`, and the original prices. How does the plot look?
###Code
# your turn
plt.scatter(bos.PRICE, m.fittedvalues)
plt.xlabel("Housing Price")
plt.ylabel("Predicted Housing Price")
plt.title("Relationship between Predicted and Actual Price")
###Output
_____no_output_____
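###Markdown
As a quick illustration of the multiple-regression formula syntax described above, here is a minimal sketch; `m_multi` is a hypothetical name, and only columns already in `bos` are used:
###Code
# Hedged sketch: a multiple regression with the statsmodels formula interface
m_multi = ols('PRICE ~ RM + PTRATIO + CRIM', bos).fit()
# One fitted coefficient per predictor, plus the intercept
print (m_multi.params)
###Output
_____no_output_____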
###Markdown
Fitting Linear Regression using `sklearn`
###Code
from sklearn.linear_model import LinearRegression
X = bos.drop('PRICE', axis = 1)
# This creates a LinearRegression object
lm = LinearRegression()
lm
###Output
_____no_output_____
###Markdown
What can you do with a LinearRegression object? ***Check out the scikit-learn [docs here](http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html). We have listed the main functions here. Main functions | Description--- | --- `lm.fit()` | Fit a linear model`lm.predict()` | Predict Y using the linear model with estimated coefficients`lm.score()` | Returns the coefficient of determination (R^2). *A measure of how well observed outcomes are replicated by the model, as the proportion of total variation of outcomes explained by the model* What output can you get?
###Code
# Look inside lm object
#lm.<tab>
###Output
_____no_output_____
###Markdown
Output | Description--- | --- `lm.coef_` | Estimated coefficients`lm.intercept_` | Estimated intercept Fit a linear model***The `lm.fit()` function estimates the coefficients of the linear regression using least squares.
###Code
# Use all 13 predictors to fit linear regression model
lm.fit(X, bos.PRICE)
###Output
_____no_output_____
###Markdown
**Your turn:** How would you change the model to not fit an intercept term? Would you recommend not having an intercept? Estimated intercept and coefficientsLet's look at the estimated coefficients from the linear model using `lm.intercept_` and `lm.coef_`. After we have fit our linear regression model using the least squares method, we want to see what are the estimates of our coefficients $\beta_0$, $\beta_1$, ..., $\beta_{13}$: $$ \hat{\beta}_0, \hat{\beta}_1, \ldots, \hat{\beta}_{13} $$
###Code
print ('Estimated intercept coefficient:', lm.intercept_)
print ('Number of coefficients:', len(lm.coef_))
# The coefficients
pd.DataFrame(list(zip(X.columns, lm.coef_)), columns = ['features', 'estimatedCoefficients'])
###Output
_____no_output_____
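###Markdown
Returning to the your-turn question above about dropping the intercept: a minimal sketch, assuming scikit-learn's documented `fit_intercept` parameter (`lm_no_intercept` is a hypothetical name):
###Code
# Hedged sketch: force the regression through the origin
lm_no_intercept = LinearRegression(fit_intercept=False)
lm_no_intercept.fit(X, bos.PRICE)
# With fit_intercept=False the stored intercept is 0.0 by construction
print (lm_no_intercept.intercept_)
###Output
_____no_output_____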
###Markdown
Predict Prices We can calculate the predicted prices ($\hat{Y}_i$) using `lm.predict`. $$ \hat{Y}_i = \hat{\beta}_0 + \hat{\beta}_1 X_1 + \ldots \hat{\beta}_{13} X_{13} $$
###Code
# first five predicted prices
lm.predict(X)[0:5]
###Output
_____no_output_____
###Markdown
**Your turn:** * Histogram: Plot a histogram of all the predicted prices* Scatter Plot: Let's plot the true prices compared to the predicted prices to see if they disagree (we did this with `statsmodels` before).
###Code
# your turn
# Plot a histogram of all the predicted prices
plt.hist(lm.predict(X))
plt.title("Predicted Prices")
plt.xlabel("Predicted Prices")
plt.ylabel("Frequency")
plt.show()
# Let's plot the true prices compared to the predicted prices to see if they disagree
plt.scatter(bos.PRICE, lm.predict(X))
plt.xlabel("Housing Price")
plt.ylabel("Predicted Housing Price")
plt.title("Relationship between Predicted and Actual Price")
###Output
_____no_output_____
###Markdown
Residual sum of squaresLet's calculate the residual sum of squares $$ S = \sum_{i=1}^N r_i^2 = \sum_{i=1}^N (y_i - (\beta_0 + \beta_1 x_i))^2 $$
###Code
print (np.sum((bos.PRICE - lm.predict(X)) ** 2))
###Output
11080.2762841
###Markdown
Mean squared error***This is simply the mean of the squared residuals, i.e. the residual sum of squares divided by the number of observations.**Your turn:** Calculate the mean squared error and print it.
###Code
#your turn
print ('Mean squared error: ', np.mean((bos.PRICE - lm.predict(X)) ** 2))
###Output
Mean squared error: 21.8977792177
###Markdown
Relationship between `PTRATIO` and housing price***Try fitting a linear regression model using only 'PTRATIO' (pupil-teacher ratio by town). Calculate the mean squared error.
###Code
lm = LinearRegression()
lm.fit(X[['PTRATIO']], bos.PRICE)
msePTRATIO = np.mean((bos.PRICE - lm.predict(X[['PTRATIO']])) ** 2)
print (msePTRATIO)
###Output
62.6522000138
###Markdown
We can also plot the fitted linear regression line.
###Code
plt.scatter(bos.PTRATIO, bos.PRICE)
plt.xlabel("Pupil-to-Teacher Ratio (PTRATIO)")
plt.ylabel("Housing Price")
plt.title("Relationship between PTRATIO and Price")
plt.plot(bos.PTRATIO, lm.predict(X[['PTRATIO']]), color='blue', linewidth=3)
plt.show()
###Output
_____no_output_____
###Markdown
Your turn***Try fitting a linear regression model using three independent variables: 1. 'CRIM' (per capita crime rate by town) 2. 'RM' (average number of rooms per dwelling) 3. 'PTRATIO' (pupil-teacher ratio by town). Calculate the mean squared error.
###Code
# your turn
lm.fit(X[['CRIM']], bos.PRICE)
print ('(MSE) Per capita crime rate by town: ', np.mean((bos.PRICE - lm.predict(X[['CRIM']])) ** 2))
lm.fit(X[['RM']], bos.PRICE)
print ('(MSE) Average number of rooms per dwelling: ', np.mean((bos.PRICE - lm.predict(X[['RM']])) ** 2))
lm.fit(X[['PTRATIO']], bos.PRICE)
print ('(MSE) Pupil-teacher ratio by town: ', np.mean((bos.PRICE - lm.predict(X[['PTRATIO']])) ** 2))
###Output
(MSE) Per capita crime rate by town: 71.8523466653
(MSE) Average number of rooms per dwelling: 43.6005517712
(MSE) Pupil-teacher ratio by town: 62.6522000138
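###Markdown
The cell above fits three separate one-variable models; the exercise can also be read as one model using all three predictors jointly. A minimal sketch of that reading, reusing only names already defined:
###Code
# Hedged sketch: one regression on CRIM, RM and PTRATIO together
lm.fit(X[['CRIM', 'RM', 'PTRATIO']], bos.PRICE)
print ('(MSE) CRIM + RM + PTRATIO jointly: ',
       np.mean((bos.PRICE - lm.predict(X[['CRIM', 'RM', 'PTRATIO']])) ** 2))
###Output
_____no_output_____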
###Markdown
Other important things to think about when fitting a linear regression model*** **Linearity**. The dependent variable $Y$ is a linear combination of the regression coefficients and the independent variables $X$. **Constant standard deviation**. The SD of the dependent variable $Y$ should be constant for different values of X. e.g. PTRATIO **Normal distribution for errors**. The $\epsilon$ term we discussed at the beginning are assumed to be normally distributed. $$ \epsilon_i \sim N(0, \sigma^2)$$Sometimes the distributions of responses $Y$ may not be normally distributed at any given value of $X$. e.g. skewed positively or negatively. **Independent errors**. The observations are assumed to be obtained independently. e.g. Observations across time may be correlated
###Code
sns.set(font_scale=.8)
sns.heatmap(X.corr(), vmax=.8, square=True, annot=True)
###Output
_____no_output_____
###Markdown
Part 3: Training and Test Data sets Purpose of splitting data into Training/testing sets*** Let's stick to the linear regression example: We built our model with the requirement that the model fit the data well. As a side-effect, the model will fit THIS dataset well. What about new data? We wanted the model for predictions, right? One simple solution, leave out some data (for testing) and train the model on the rest This also leads directly to the idea of cross-validation, next section. ***One way of doing this is you can create training and testing data sets manually.
###Code
X_train = X[:-50]
X_test = X[-50:]
Y_train = bos.PRICE[:-50]
Y_test = bos.PRICE[-50:]
print (X_train.shape)
print (X_test.shape)
print (Y_train.shape)
print (Y_test.shape)
###Output
(456, 13)
(50, 13)
(456,)
(50,)
###Markdown
Another way is to split the data into random train and test subsets using the function `train_test_split` in `sklearn.cross_validation` (moved to `sklearn.model_selection` in newer scikit-learn releases). Here's the [documentation](http://scikit-learn.org/stable/modules/generated/sklearn.cross_validation.train_test_split.html).
###Code
X_train, X_test, Y_train, Y_test = sklearn.cross_validation.train_test_split(
X, bos.PRICE, test_size=0.33, random_state = 5)
print (X_train.shape)
print (X_test.shape)
print (Y_train.shape)
print (Y_test.shape)
###Output
(339, 13)
(167, 13)
(339,)
(167,)
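###Markdown
Note that `sklearn.cross_validation` was deprecated and later removed; in current scikit-learn releases the same function lives in `sklearn.model_selection`. A minimal sketch of the modern equivalent:
###Code
# Hedged sketch: the same random split with the modern scikit-learn module
from sklearn.model_selection import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(
    X, bos.PRICE, test_size=0.33, random_state=5)
###Output
_____no_output_____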
###Markdown
**Your turn:** Let's build a linear regression model using our new training data sets. * Fit a linear regression model to the training set* Predict the output on the test set
###Code
# your turn
# Fit a linear regression model to the training set
lm.fit(X_train, Y_train)
lm.predict(X_test)
###Output
_____no_output_____
###Markdown
**Your turn:**Calculate the mean squared error * using just the test data* using just the training dataAre they pretty similar or very different? What does that mean?
###Code
# your turn
# Calculate MSE using just the test data
print ('(MSE) using just the test data: ', np.mean((Y_test - lm.predict(X_test)) ** 2))
# Calculate MSE using just the training data
print ('(MSE) using just the training data: ', np.mean((Y_train - lm.predict(X_train)) ** 2))
###Output
(MSE) using just the test data: 28.5413672756
(MSE) using just the training data: 19.5467584735
###Markdown
Are they pretty similar or very different? What does that mean?-> They are quite different: the model was fit on the training data, so it naturally reproduces those observations more closely, while the test data were never seen during fitting and therefore yield a larger mean squared error. The gap suggests the test set contains patterns the model did not learn from the training set. Residual plots
###Code
plt.scatter(lm.predict(X_train), lm.predict(X_train) - Y_train, c='b', s=40, alpha=0.5)
plt.scatter(lm.predict(X_test), lm.predict(X_test) - Y_test, c='g', s=40)
plt.hlines(y = 0, xmin=0, xmax = 50)
plt.title('Residual Plot using training (blue) and test (green) data')
plt.ylabel('Residuals')
###Output
_____no_output_____
###Markdown
**Your turn:** Do you think this linear regression model generalizes well on the test data?-> Not especially: many residuals are far from zero, so the model needs improvement. Check the features for highly correlated predictors and remove one of each correlated pair, or revisit the model's parameters and fine-tune them. K-fold Cross-validation as an extension of this idea*** A simple extension of the Test/train split is called K-fold cross-validation. Here's the procedure: randomly assign your $n$ samples to one of $K$ groups. They'll each have about $n/K$ samples For each group $k$: Fit the model (e.g. run regression) on all data excluding the $k^{th}$ group Use the model to predict the outcomes in group $k$ Calculate your prediction error for each observation in the $k^{th}$ group (e.g. $(Y_i - \hat{Y}_i)^2$ for regression, $\mathbb{1}(Y_i \neq \hat{Y}_i)$ for logistic regression). Calculate the average prediction error across all samples $Err_{CV} = \frac{1}{n}\sum_{i=1}^n (Y_i - \hat{Y}_i)^2$ ***Luckily you don't have to do this entire process by hand (``for`` loops, etc.) every single time; ``scikit-learn`` has a very nice implementation of this, have a look at the [documentation](http://scikit-learn.org/stable/modules/cross_validation.html). **Your turn (extra credit):** Implement K-fold cross-validation using the procedure above on the Boston Housing data set with $K=4$. How does the average prediction error compare to the train-test split above?
###Code
from sklearn import cross_validation, linear_model
# Note: sklearn.cross_validation was later removed; in current scikit-learn
# use sklearn.model_selection.cross_val_score with scoring='neg_mean_squared_error'
# If the estimator is a classifier and y is either binary or multiclass, StratifiedKFold is used.
# In all other cases, KFold is used
scores = cross_validation.cross_val_score(lm, X, bos.PRICE, scoring='mean_squared_error', cv=4)
# scikit-learn reports the MSE negated so that "greater is better" holds for every scorer
print ('(MSE) Using k-fold: ', np.mean(scores))
print ('The K-fold cross-validation error is larger than that of the train-test split above')
###Output
(MSE) Using k-fold: -42.4894695275
The K-fold cross-validation error is larger than that of the train-test split above
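###Markdown
For completeness, here is a minimal sketch of the manual $K$-fold procedure described above, written against the modern `sklearn.model_selection.KFold` API (an assumption relative to this notebook's older imports; `fold_sq_errors` is a hypothetical name):
###Code
# Hedged sketch: manual 4-fold cross-validation for the linear model
from sklearn.model_selection import KFold
kf = KFold(n_splits=4, shuffle=True, random_state=5)
fold_sq_errors = []
for train_idx, test_idx in kf.split(X):
    # Fit on everything except fold k, then predict fold k
    lm.fit(X.iloc[train_idx], bos.PRICE.iloc[train_idx])
    preds = lm.predict(X.iloc[test_idx])
    fold_sq_errors.append((bos.PRICE.iloc[test_idx] - preds) ** 2)
# Average squared prediction error across all held-out samples
print ('Manual K-fold MSE: ', np.mean(np.concatenate(fold_sq_errors)))
###Output
_____no_output_____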
###Markdown
Regression in Python***This is a very quick run-through of some basic statistical concepts, adapted from [Lab 4 in Harvard's CS109](https://github.com/cs109/2015lab4) course. Please feel free to try the original lab if you're feeling ambitious :-) The CS109 git repository also has the solutions if you're stuck.* Linear Regression Models* Prediction using linear regressionLinear regression is used to model and predict continuous outcomes with normal random errors. There are nearly an infinite number of different types of regression models and each regression model is typically defined by the distribution of the prediction errors (called "residuals") of the type of data. Logistic regression is used to model binary outcomes whereas Poisson regression is used to predict counts. In this exercise, we'll see some examples of linear regression as well as Train-test splits.The packages we'll cover are: `statsmodels`, `seaborn`, and `scikit-learn`. While we don't explicitly teach `statsmodels` and `seaborn` in the Springboard workshop, those are great libraries to know.*** ***
###Code
# special IPython command to prepare the notebook for matplotlib and other libraries
%matplotlib inline
import numpy as np
import pandas as pd
import scipy.stats as stats
import matplotlib.pyplot as plt
import sklearn
import seaborn as sns
# special matplotlib argument for improved plots
from matplotlib import rcParams
sns.set_style("whitegrid")
sns.set_context("poster")
###Output
_____no_output_____
###Markdown
*** Part 1: Introduction to Linear Regression Purpose of linear regression*** Given a dataset containing predictor variables $X$ and outcome/response variable $Y$, linear regression can be used to: Build a predictive model to predict future values of $\hat{Y}$, using new data $X^*$ where $Y$ is unknown. Model the strength of the relationship between each independent variable $X_i$ and $Y$ Many times, only a subset of independent variables $X_i$ will have a linear relationship with $Y$ Need to figure out which $X_i$ contributes most information to predict $Y$ It is, in many cases, the first-pass prediction algorithm for continuous outcomes. A Brief Mathematical Recap***[Linear Regression](http://en.wikipedia.org/wiki/Linear_regression) is a method to model the relationship between a set of independent variables $X$ (also known as explanatory variables, features, predictors) and a dependent variable $Y$. This method assumes the relationship between each predictor $X$ is **linearly** related to the dependent variable $Y$. The most basic linear regression model contains one independent variable $X$, we'll call this the simple model. $$ Y = \beta_0 + \beta_1 X + \epsilon$$where $\epsilon$ is considered as an unobservable random variable that adds noise to the linear relationship. In linear regression, $\epsilon$ is assumed to be normally distributed with a mean of 0. In other words, what this means is that on average, if we know $Y$, a roughly equal number of predictions $\hat{Y}$ will be above $Y$ and others will be below $Y$. That is, on average, the error is zero. The residuals, $\epsilon$, are also assumed to be "i.i.d.": independently and identically distributed. Independence means that the residuals are not correlated -- the residual from one prediction has no effect on the residual from another prediction. Correlated errors are common in time series analysis and spatial analyses.* $\beta_0$ is the intercept of the linear model and represents the average of $Y$ when all independent variables $X$ are set to 0.* $\beta_1$ is the slope of the line associated with the regression model and represents the average effect of a one-unit increase in $X$ on $Y$.* Back to the simple model. The model in linear regression is the *conditional mean* of $Y$ given the values in $X$ is expressed as a linear function. $$ y = f(x) = E(Y | X = x)$$ *Image from http://www.learner.org/courses/againstallodds/about/glossary.html. Note this image uses $\alpha$ and $\beta$ instead of $\beta_0$ and $\beta_1$.** The goal is to estimate the coefficients (e.g. $\beta_0$ and $\beta_1$). We represent the estimates of the coefficients with a "hat" on top of the letter. $$ \hat{\beta}_0, \hat{\beta}_1 $$* Once we estimate the coefficients $\hat{\beta}_0$ and $\hat{\beta}_1$, we can use these to predict new values of $Y$ given new data $X$.$$\hat{y} = \hat{\beta}_0 + \hat{\beta}_1 x_1$$* Multiple linear regression is when you have more than one independent variable and the estimation involves matrices * $X_1$, $X_2$, $X_3$, $\ldots$* How do you estimate the coefficients? 
* There are many ways to fit a linear regression model * The method called **least squares** is the most common methods * We will discuss least squares$$ Y = \beta_0 + \beta_1 X_1 + \ldots + \beta_p X_p + \epsilon$$ Estimating $\hat\beta$: Least squares***[Least squares](http://en.wikipedia.org/wiki/Least_squares) is a method that can estimate the coefficients of a linear model by minimizing the squared residuals: $$ \mathscr{L} = \sum_{i=1}^N \epsilon_i^2 = \sum_{i=1}^N \left( y_i - \hat{y}_i \right)^2 = \sum_{i=1}^N \left(y_i - \left(\beta_0 + \beta_1 x_i\right)\right)^2 $$where $N$ is the number of observations and $\epsilon$ represents a residual or error, ACTUAL - PREDICTED. Estimating the intercept $\hat{\beta_0}$ for the simple linear modelWe want to minimize the squared residuals and solve for $\hat{\beta_0}$ so we take the partial derivative of $\mathscr{L}$ with respect to $\hat{\beta_0}$ $\begin{align}\frac{\partial \mathscr{L}}{\partial \hat{\beta_0}} &= \frac{\partial}{\partial \hat{\beta_0}} \sum_{i=1}^N \epsilon^2 \\&= \frac{\partial}{\partial \hat{\beta_0}} \sum_{i=1}^N \left( y_i - \hat{y}_i \right)^2 \\&= \frac{\partial}{\partial \hat{\beta_0}} \sum_{i=1}^N \left( y_i - \left( \hat{\beta}_0 + \hat{\beta}_1 x_i \right) \right)^2 \\&= -2 \sum_{i=1}^N \left( y_i - \left( \hat{\beta}_0 + \hat{\beta}_1 x_i \right) \right) \hspace{25mm} \mbox{(by chain rule)} \\&= -2 \sum_{i=1}^N (y_i - \hat{\beta}_0 - \hat{\beta}_1 x_i) \\&= -2 \left[ \left( \sum_{i=1}^N y_i \right) - N \hat{\beta_0} - \hat{\beta}_1 \left( \sum_{i=1}^N x_i\right) \right] \\& 2 \left[ N \hat{\beta}_0 + \hat{\beta}_1 \sum_{i=1}^N x_i - \sum_{i=1}^N y_i \right] = 0 \hspace{20mm} \mbox{(Set equal to 0 and solve for $\hat{\beta}_0$)} \\& N \hat{\beta}_0 + \hat{\beta}_1 \sum_{i=1}^N x_i - \sum_{i=1}^N y_i = 0 \\& N \hat{\beta}_0 = \sum_{i=1}^N y_i - \hat{\beta}_1 \sum_{i=1}^N x_i \\& \hat{\beta}_0 = \frac{\sum_{i=1}^N y_i - \hat{\beta}_1 \sum_{i=1}^N x_i}{N} \\& \hat{\beta}_0 = \frac{\sum_{i=1}^N y_i}{N} - \hat{\beta}_1 \frac{\sum_{i=1}^N x_i}{N} \\& \boxed{\hat{\beta}_0 = \bar{y} - \hat{\beta}_1 \bar{x}}\end{align}$ Using this new information, we can compute the estimate for $\hat{\beta}_1$ by taking the partial derivative of $\mathscr{L}$ with respect to $\hat{\beta}_1$. 
$\begin{align}\frac{\partial \mathscr{L}}{\partial \hat{\beta_1}} &= \frac{\partial}{\partial \hat{\beta_1}} \sum_{i=1}^N \epsilon^2 \\&= \frac{\partial}{\partial \hat{\beta_1}} \sum_{i=1}^N \left( y_i - \hat{y}_i \right)^2 \\&= \frac{\partial}{\partial \hat{\beta_1}} \sum_{i=1}^N \left( y_i - \left( \hat{\beta}_0 + \hat{\beta}_1 x_i \right) \right)^2 \\&= 2 \sum_{i=1}^N \left( y_i - \left( \hat{\beta}_0 + \hat{\beta}_1 x_i \right) \right) \left( -x_i \right) \hspace{25mm}\mbox{(by chain rule)} \\&= -2 \sum_{i=1}^N x_i \left( y_i - \hat{\beta}_0 - \hat{\beta}_1 x_i \right) \\&= -2 \sum_{i=1}^N (x_i y_i - \hat{\beta}_0 x_i - \hat{\beta}_1 x_i^2) \\&= -2 \sum_{i=1}^N (x_i y_i - \left( \bar{y} - \hat{\beta}_1 \bar{x} \right) x_i - \hat{\beta}_1 x_i^2) \\&= -2 \sum_{i=1}^N (x_i y_i - \bar{y}x_i + \hat{\beta}_1\bar{x}x_i - \hat{\beta}_1 x_i^2) \\&= -2 \left[ \sum_{i=1}^N x_i y_i - \bar{y} \sum_{i=1}^N x_i + \hat{\beta}_1\bar{x}\sum_{i=1}^N x_i - \hat{\beta}_1 \sum_{i=1}^N x_i^2 \right] \\&= -2 \left[ \hat{\beta}_1 \left\{ \bar{x} \sum_{i=1}^N x_i - \sum_{i=1}^N x_i^2 \right\} + \left\{ \sum_{i=1}^N x_i y_i - \bar{y} \sum_{i=1}^N x_i \right\}\right] \\& 2 \left[ \hat{\beta}_1 \left\{ \sum_{i=1}^N x_i^2 - \bar{x} \sum_{i=1}^N x_i \right\} + \left\{ \bar{y} \sum_{i=1}^N x_i - \sum_{i=1}^N x_i y_i \right\} \right] = 0 \\& \hat{\beta}_1 = \frac{-\left( \bar{y} \sum_{i=1}^N x_i - \sum_{i=1}^N x_i y_i \right)}{\sum_{i=1}^N x_i^2 - \bar{x}\sum_{i=1}^N x_i} \\&= \frac{\sum_{i=1}^N x_i y_i - \bar{y} \sum_{i=1}^N x_i}{\sum_{i=1}^N x_i^2 - \bar{x} \sum_{i=1}^N x_i} \\& \boxed{\hat{\beta}_1 = \frac{\sum_{i=1}^N x_i y_i - \bar{x}\bar{y}n}{\sum_{i=1}^N x_i^2 - n \bar{x}^2}}\end{align}$ The solution can be written in compact matrix notation as$$\hat\beta = (X^T X)^{-1}X^T Y$$ We wanted to show you this in case you remember linear algebra, in order for this solution to exist we need $X^T X$ to be invertible. Of course this requires a few extra assumptions, $X$ must be full rank so that $X^T X$ is invertible, etc. Basically, $X^T X$ is full rank if all rows and columns are linearly independent. This has a loose relationship to variables and observations being independent, respectively. **This is important for us because this means that having redundant features in our regression models will lead to poorly fitting (and unstable) models.** We'll see an implementation of this in the extra linear regression example. *** Part 2: Exploratory Data Analysis for Linear RelationshipsThe [Boston Housing data set](https://archive.ics.uci.edu/ml/datasets/Housing) contains information about the housing values in suburbs of Boston. This dataset was originally taken from the StatLib library which is maintained at Carnegie Mellon University and is now available on the UCI Machine Learning Repository. Load the Boston Housing data set from `sklearn`***This data set is available in the [sklearn](http://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_boston.html#sklearn.datasets.load_boston) python module which is how we will access it today.
###Code
from sklearn.datasets import load_boston
import pandas as pd
boston = load_boston()
boston.keys()
boston.data.shape
# Print column names
print(boston.feature_names)
# Print description of Boston housing data set
print(boston.DESCR)
###Output
.. _boston_dataset:
Boston house prices dataset
---------------------------
**Data Set Characteristics:**
:Number of Instances: 506
:Number of Attributes: 13 numeric/categorical predictive. Median Value (attribute 14) is usually the target.
:Attribute Information (in order):
- CRIM per capita crime rate by town
- ZN proportion of residential land zoned for lots over 25,000 sq.ft.
- INDUS proportion of non-retail business acres per town
- CHAS Charles River dummy variable (= 1 if tract bounds river; 0 otherwise)
- NOX nitric oxides concentration (parts per 10 million)
- RM average number of rooms per dwelling
- AGE proportion of owner-occupied units built prior to 1940
- DIS weighted distances to five Boston employment centres
- RAD index of accessibility to radial highways
- TAX full-value property-tax rate per $10,000
- PTRATIO pupil-teacher ratio by town
- B 1000(Bk - 0.63)^2 where Bk is the proportion of blacks by town
- LSTAT % lower status of the population
- MEDV Median value of owner-occupied homes in $1000's
:Missing Attribute Values: None
:Creator: Harrison, D. and Rubinfeld, D.L.
This is a copy of UCI ML housing dataset.
https://archive.ics.uci.edu/ml/machine-learning-databases/housing/
This dataset was taken from the StatLib library which is maintained at Carnegie Mellon University.
The Boston house-price data of Harrison, D. and Rubinfeld, D.L. 'Hedonic
prices and the demand for clean air', J. Environ. Economics & Management,
vol.5, 81-102, 1978. Used in Belsley, Kuh & Welsch, 'Regression diagnostics
...', Wiley, 1980. N.B. Various transformations are used in the table on
pages 244-261 of the latter.
The Boston house-price data has been used in many machine learning papers that address regression
problems.
.. topic:: References
- Belsley, Kuh & Welsch, 'Regression diagnostics: Identifying Influential Data and Sources of Collinearity', Wiley, 1980. 244-261.
- Quinlan,R. (1993). Combining Instance-Based and Model-Based Learning. In Proceedings on the Tenth International Conference of Machine Learning, 236-243, University of Massachusetts, Amherst. Morgan Kaufmann.
###Markdown
Now let's explore the data set itself.
###Code
bos = pd.DataFrame(boston.data)
bos.head()
###Output
_____no_output_____
###Markdown
There are no column names in the DataFrame. Let's add those.
###Code
bos.columns = boston.feature_names
bos.head()
###Output
_____no_output_____
###Markdown
Now we have a pandas DataFrame called `bos` containing all the data we want to use to predict Boston Housing prices. Let's create a variable called `PRICE` which will contain the prices. This information is contained in the `target` data.
###Code
print(boston.target.shape)
bos['PRICE'] = boston.target
bos.head()
###Output
_____no_output_____
###Markdown
EDA and Summary Statistics***Let's explore this data set. First we use `describe()` to get basic summary statistics for each of the columns.
###Code
bos.describe()
###Output
_____no_output_____
###Markdown
Scatterplots***Let's look at some scatter plots for three variables: 'CRIM' (per capita crime rate), 'RM' (number of rooms) and 'PTRATIO' (pupil-to-teacher ratio in schools).
###Code
plt.scatter(bos.CRIM, bos.PRICE, s=10)
plt.xlabel("Per capita crime rate by town (CRIM)")
plt.ylabel("Housing Price")
plt.title("Relationship between CRIM and Price")
###Output
_____no_output_____
###Markdown
Part 2 Checkup Exercise Set IExercise: What kind of relationship do you see? e.g. positive, negative? linear? non-linear? Is there anything else strange or interesting about the data? What about outliers?Exercise: Create scatter plots between *RM* and *PRICE*, and *PTRATIO* and *PRICE*. Label your axes appropriately using human readable labels. Tell a story about what you see.Exercise: What are some other numeric variables of interest? Why do you think they are interesting? Plot scatterplots with these variables and *PRICE* (house price) and tell a story about what you see. -- your turn: describe relationshipIt seems clear that there is a negative relationship between crime rate and price. It might be better fitted with a curve but, in general, it's fairly linear. It is somewhat interesting that houses at the bottom of the pricing scale have a large range of crime ratios.
###Code
# your turn: scatter plot between *RM* and *PRICE*
plt.scatter(bos.RM, bos.PRICE, s=12)
plt.xlabel('Number of Rooms')
plt.ylabel('Housing Price')
plt.title('Relationship between number of rooms and price')
plt.show()
# your turn: scatter plot between *PTRATIO* and *PRICE*
plt.scatter(bos.PTRATIO, bos.PRICE, s=12)
plt.xlabel('Pupil-Teacher ratio')
plt.ylabel('Housing Price')
plt.title('Relationship between PTRATIO and PRICE')
plt.show()
# your turn: create some other scatter plots
plt.scatter(bos.B, bos.PRICE, s=12)
plt.xlabel('Proportion of blacks')
plt.ylabel('Housing Price')
plt.show()
plt.scatter(bos.NOX, bos.PRICE, s=12)
plt.xlabel('Nitric Oxide Level')
plt.ylabel('Housing Price')
###Output
_____no_output_____
###Markdown
Nitric oxide levels do seem to have an effect on pricing. Or it may be that areas without facilities that produce NOX are naturally more appealing, which correlates to price. Scatterplots using Seaborn***[Seaborn](https://stanford.edu/~mwaskom/software/seaborn/) is a cool Python plotting library built on top of matplotlib. It provides convenient syntax and shortcuts for many common types of plots, along with better-looking defaults.We can also use [seaborn regplot](https://stanford.edu/~mwaskom/software/seaborn/tutorial/regression.html#functions-to-draw-linear-regression-models) for the scatterplot above. This provides automatic linear regression fits (useful for data exploration later on). Here's one example below.
###Code
_ = sns.regplot(y="PRICE", x="RM", data=bos, fit_reg = True)
###Output
_____no_output_____
###Markdown
Histograms***
###Code
plt.hist(np.log(bos.CRIM))
plt.title("CRIM")
plt.xlabel("Crime rate per capita")
plt.ylabel("Frequencey")
plt.show()
###Output
_____no_output_____
###Markdown
Part 2 Checkup Exercise Set IIExercise: In the above histogram, we took the logarithm of the crime rate per capita. Repeat this histogram without taking the log. What was the purpose of taking the log? What do we gain by making this transformation? What do you now notice about this variable that is not obvious without making the transformation?Exercise: Plot the histogram for *RM* and *PTRATIO* against each other, along with the two variables you picked in the previous section. We are looking for correlations in predictors here.
###Code
#your turn
#histogram without the log
plt.hist(bos.CRIM)
plt.title("CRIM")
plt.xlabel("Crime rate per capita")
plt.ylabel("Frequencey")
plt.show()
plt.subplot(2,2,1)
plt.hist(np.log(bos.RM))
plt.xlabel('RM', fontsize=14)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.title('Room Count', fontsize=14)
plt.subplot(2,2,2)
plt.hist(np.log(bos.PTRATIO))
plt.xlabel('PTRATIO', fontsize=14)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.title('Pupil-Teacher Ratio', fontsize=14)
plt.subplot(2,2,3)
plt.hist(bos.B)
plt.xlabel('B', fontsize=14)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.title('Proportion of Blacks', fontsize=14)
plt.subplot(2,2,4)
plt.hist(np.log(bos.NOX))
plt.xlabel('NOX', fontsize=14)
plt.xticks(fontsize=14, rotation=60)
plt.yticks(fontsize=14)
plt.title('Nitric Oxide Level', fontsize=14)
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Part 3: Linear Regression with Boston Housing Data Example***Here, $Y$ = boston housing prices (called "target" data in python, and referred to as the dependent variable or response variable)and$X$ = all the other features (or independent variables, predictors or explanatory variables)which we will use to fit a linear regression model and predict Boston housing prices. We will use the least-squares method to estimate the coefficients. We'll use two ways of fitting a linear regression. We recommend the first but the second is also powerful in its features. Fitting Linear Regression using `statsmodels`***[Statsmodels](http://statsmodels.sourceforge.net/) is a great Python library for a lot of basic and inferential statistics. It also provides basic regression functions using an R-like syntax, so it's commonly used by statisticians. While we don't cover statsmodels officially in the Data Science Intensive workshop, it's a good library to have in your toolbox. Here's a quick example of what you could do with it. The version of least-squares we will use in statsmodels is called *ordinary least-squares (OLS)*. There are many other versions of least-squares such as [partial least squares (PLS)](https://en.wikipedia.org/wiki/Partial_least_squares_regression) and [weighted least squares (WLS)](https://en.wikipedia.org/wiki/Iteratively_reweighted_least_squares).
###Code
# Import regression modules
import statsmodels.api as sm
from statsmodels.formula.api import ols
# statsmodels works nicely with pandas dataframes
# The thing inside the "quotes" is called a formula, a bit on that below
m = ols('PRICE ~ RM',bos).fit()
print(m.summary())
###Output
OLS Regression Results
==============================================================================
Dep. Variable: PRICE R-squared: 0.484
Model: OLS Adj. R-squared: 0.483
Method: Least Squares F-statistic: 471.8
Date: Sat, 08 Jun 2019 Prob (F-statistic): 2.49e-74
Time: 11:10:27 Log-Likelihood: -1673.1
No. Observations: 506 AIC: 3350.
Df Residuals: 504 BIC: 3359.
Df Model: 1
Covariance Type: nonrobust
==============================================================================
coef std err t P>|t| [0.025 0.975]
------------------------------------------------------------------------------
Intercept -34.6706 2.650 -13.084 0.000 -39.877 -29.465
RM 9.1021 0.419 21.722 0.000 8.279 9.925
==============================================================================
Omnibus: 102.585 Durbin-Watson: 0.684
Prob(Omnibus): 0.000 Jarque-Bera (JB): 612.449
Skew: 0.726 Prob(JB): 1.02e-133
Kurtosis: 8.190 Cond. No. 58.4
==============================================================================
Warnings:
[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.
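###Markdown
Beyond the single-predictor formula used above, the formula interface also supports transformations, categorical terms, and intercept removal, as discussed below. A minimal sketch using only columns already in `bos` (`m_log` is a hypothetical name):
###Code
# Hedged sketch: a numpy transformation, a categorical term, and no intercept
m_log = ols('np.log(PRICE) ~ RM + C(CHAS) - 1', bos).fit()
print(m_log.params)
###Output
_____no_output_____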
###Markdown
Interpreting coefficientsThere is a ton of information in this output. But we'll concentrate on the coefficient table (middle table). We can interpret the `RM` coefficient (9.1021) by first noticing that the p-value (under `P>|t|`) is so small, basically zero. This means that the number of rooms, `RM`, is a statistically significant predictor of `PRICE`. The regression coefficient for `RM` of 9.1021 means that *on average, each additional room is associated with an increase of $\$9,100$ in house price net of the other variables*. The confidence interval gives us a range of plausible values for this average change, about ($\$8,279, \$9,925$), definitely not chump change. In general, the $\hat{\beta_i}, i > 0$ can be interpreted as the following: "A one unit increase in $x_i$ is associated with, on average, a $\hat{\beta_i}$ increase/decrease in $y$ net of all other variables."On the other hand, the interpretation for the intercept, $\hat{\beta}_0$ is the average of $y$ given that all of the independent variables $x_i$ are 0. `statsmodels` formulas***This formula notation will seem familiar to `R` users, but will take some getting used to for people coming from other languages or are new to statistics.The formula gives instruction for a general structure for a regression call. For `statsmodels` (`ols` or `logit`) calls you need to have a Pandas dataframe with column names that you will add to your formula. In the below example you need a pandas data frame that includes the columns named (`Outcome`, `X1`,`X2`, ...), but you don't need to build a new dataframe for every regression. Use the same dataframe with all these things in it. The structure is very simple:`Outcome ~ X1`But of course we want to to be able to handle more complex models, for example multiple regression is done like this:`Outcome ~ X1 + X2 + X3`In general, a formula for an OLS multiple linear regression is`Y ~ X1 + X2 + ... + Xp`This is the very basic structure but it should be enough to get you through the homework. Things can get much more complex. You can force statsmodels to treat variables as categorical with the `C()` function, call numpy functions to transform data such as `np.log` for extremely-skewed data, or fit a model without an intercept by including `- 1` in the formula. For a quick run-down of further uses see the `statsmodels` [help page](http://statsmodels.sourceforge.net/devel/example_formulas.html). Let's see how our model actually fit our data. We can see below that there is a ceiling effect, we should probably look into that. Also, for large values of $Y$ we get underpredictions, most predictions are below the 45-degree gridlines. Part 3 Checkup Exercise Set IExercise: Create a scatterplot between the predicted prices, available in `m.fittedvalues` (where `m` is the fitted model) and the original prices. How does the plot look? Do you notice anything interesting or weird in the plot? Comment on what you see.
###Code
plt.scatter(m.fittedvalues, bos.PRICE, s=12)
plt.xlabel('Predicted Prices')
plt.ylabel('Actual Prices')
plt.title('Predicted vs. Actual')
###Output
_____no_output_____
###Markdown
For the most part, the original prices and the predicted prices correlate to each other. Most of the observations that are predicted in the $20k range actually concentrate in a similar area on the Y-axis. However, there are a few observations where the actual price is much higher than the predicted price, and there is one very noticeable example of the opposite. One final curiosity: there looks to be one house with a predicted negative value, which is odd, especially given that the actual price is above the median. Fitting Linear Regression using `sklearn`
###Code
from sklearn.linear_model import LinearRegression
X = bos.drop('PRICE', axis = 1)
# This creates a LinearRegression object
lm = LinearRegression()
lm
###Output
_____no_output_____
###Markdown
What can you do with a LinearRegression object? ***Check out the scikit-learn [docs here](http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html). We have listed the main functions here. Most machine learning models in scikit-learn follow this same API of fitting a model with `fit`, making predictions with `predict` and the appropriate scoring function `score` for each model. Main functions | Description--- | --- `lm.fit()` | Fit a linear model`lm.predict()` | Predict Y using the linear model with estimated coefficients`lm.score()` | Returns the coefficient of determination (R^2). *A measure of how well observed outcomes are replicated by the model, as the proportion of total variation of outcomes explained by the model* What output can you get?
###Code
# Look inside lm object
# lm.<tab>
###Output
_____no_output_____
###Markdown
Output | Description--- | --- `lm.coef_` | Estimated coefficients`lm.intercept_` | Estimated intercept Fit a linear model***The `lm.fit()` function estimates the coefficients of the linear regression using least squares.
###Code
# Use all 13 predictors to fit linear regression model
lm.fit(X, bos.PRICE)
###Output
_____no_output_____
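###Markdown
As a connection back to the least-squares algebra from Part 1, the closed-form normal-equations solution $\hat\beta = (X^T X)^{-1} X^T Y$ should reproduce `sklearn`'s coefficients. A minimal sketch (`Xmat` and `beta_hat` are hypothetical names; the column of ones stands in for the intercept):
###Code
# Hedged sketch: solve the normal equations directly and compare to lm.coef_
Xmat = np.column_stack([np.ones(len(X)), X.values])
beta_hat = np.linalg.solve(Xmat.T @ Xmat, Xmat.T @ bos.PRICE.values)
print(beta_hat[0])   # should match lm.intercept_
print(beta_hat[1:])  # should match lm.coef_
###Output
_____no_output_____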
###Markdown
Part 3 Checkup Exercise Set IIExercise: How would you change the model to not fit an intercept term? Would you recommend not having an intercept? Why or why not? For more information on why to include or exclude an intercept, look [here](https://stats.idre.ucla.edu/other/mult-pkg/faq/general/faq-what-is-regression-through-the-origin/).Exercise: One of the assumptions of the linear model is that the residuals must be i.i.d. (independently and identically distributed). To satisfy this, is it enough that the residuals are normally distributed? Explain your answer.Exercise: True or false. To use linear regression, $Y$ must be normally distributed. Explain your answer. -- your turn: If we wanted not to fit an intercept, we could pass `fit_intercept=False` when constructing `LinearRegression()`. However, the intercept captures the baseline value of $Y$ when all $X$ variables are zero, and we would usually want to keep that information. In the housing example, even an empty plot of land (zero rooms) still has value, and the intercept lets the model express it. Normality of the residuals alone is not enough for the i.i.d. assumption: the residuals must also be independent of one another and identically distributed (e.g. constant variance), and neither property follows from normality. As for $Y$: false, the dependent variable does not need to be normally distributed. Its distribution can be bi-modal or otherwise and the model can still 'fit the line,' since the normality assumption applies to the errors, not to $Y$ itself. Estimated intercept and coefficientsLet's look at the estimated coefficients from the linear model using `lm.intercept_` and `lm.coef_`. After we have fit our linear regression model using the least squares method, we want to see what are the estimates of our coefficients $\beta_0$, $\beta_1$, ..., $\beta_{13}$: $$ \hat{\beta}_0, \hat{\beta}_1, \ldots, \hat{\beta}_{13} $$
###Code
print('Estimated intercept coefficient: {}'.format(lm.intercept_))
print('Number of coefficients: {}'.format(len(lm.coef_)))
# The coefficients
coef_df = pd.DataFrame({'features': X.columns, 'estimatedCoefficients': lm.coef_})[['features', 'estimatedCoefficients']]
coef_df
###Output
_____no_output_____
###Markdown
Predict Prices We can calculate the predicted prices ($\hat{Y}_i$) using `lm.predict`. $$ \hat{Y}_i = \hat{\beta}_0 + \hat{\beta}_1 X_1 + \ldots \hat{\beta}_{13} X_{13} $$
###Code
# first five predicted prices
lm.predict(X)[0:5]
###Output
_____no_output_____
###Markdown
Part 3 Checkup Exercise Set IIIExercise: Histogram: Plot a histogram of all the predicted prices. Write a story about what you see. Describe the shape, center and spread of the distribution. Are there any outliers? What might be the reason for them? Should we do anything special with them?Exercise: Scatterplot: Let's plot the true prices compared to the predicted prices to see if they disagree (we did this with `statsmodels` before).Exercise: We have looked at fitting a linear model in both `statsmodels` and `scikit-learn`. What are the advantages and disadvantages of each based on your exploration? Based on the information provided by both packages, what advantage does `statsmodels` provide?
###Code
# your turn
#histogram of predicted prices
_ = plt.hist(lm.predict(X))
_ = plt.xlabel('Predicted prices')
_ = plt.title('Predicted home values')
_ = plt.scatter(lm.predict(X), bos.PRICE, s=12)
_ = plt.xlabel('Predicted Prices')
_ = plt.ylabel('Actual Prices')
###Output
_____no_output_____
###Markdown
Evaluating the Model: Sum-of-SquaresThe partitioning of the sum-of-squares shows the variance in the predictions explained by the model and the variance that is attributed to error.$$TSS = ESS + RSS$$ Residual Sum-of-Squares (aka $RSS$)The residual sum-of-squares is one of the basic ways of quantifying how much error exists in the fitted model. We will revisit this in a bit.$$ RSS = \sum_{i=1}^N r_i^2 = \sum_{i=1}^N \left(y_i - \left(\beta_0 + \beta_1 x_i\right)\right)^2 $$
###Code
print(np.sum((bos.PRICE - lm.predict(X)) ** 2))
###Output
11078.784577954977
###Markdown
Explained Sum-of-Squares (aka $ESS$)The explained sum-of-squares measures the variance explained by the regression model.$$ESS = \sum_{i=1}^N \left( \hat{y}_i - \bar{y} \right)^2 = \sum_{i=1}^N \left( \left( \hat{\beta}_0 + \hat{\beta}_1 x_i \right) - \bar{y} \right)^2$$
###Code
print(np.sum((lm.predict(X) - np.mean(bos.PRICE)) ** 2))
###Output
31637.510837065056
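###Markdown
Since $TSS = ESS + RSS$, we can sanity-check the two numbers above by computing the total sum-of-squares directly:
###Code
# TSS: total variation of PRICE around its mean; should equal RSS + ESS
print(np.sum((bos.PRICE - np.mean(bos.PRICE)) ** 2))
###Output
_____no_output_____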
###Markdown
Evaluating the Model: The Coefficient of Determination ($R^2$)The coefficient of determination, $R^2$, tells us the percentage of the variance in the response variable $Y$ that can be explained by the linear regression model.$$ R^2 = \frac{ESS}{TSS} $$The $R^2$ value is one of the most common metrics that people use in describing the quality of a model, but it is important to note that *$R^2$ increases artificially as a side-effect of increasing the number of independent variables.* While $R^2$ is reported in almost all statistical packages, another metric called the *adjusted $R^2$* is also provided as it takes into account the number of variables in the model, and can sometimes even be used for non-linear regression models!$$R_{adj}^2 = 1 - \left( 1 - R^2 \right) \frac{N - 1}{N - K - 1} = R^2 - \left( 1 - R^2 \right) \frac{K}{N - K - 1} = 1 - \frac{\frac{RSS}{DF_R}}{\frac{TSS}{DF_T}}$$where $N$ is the number of observations, $K$ is the number of variables, $DF_R = N - K - 1$ is the degrees of freedom associated with the residual error and $DF_T = N - 1$ is the degrees of freedom of the total error. Evaluating the Model: Mean Squared Error and the $F$-Statistic***The mean squared errors are just the *averages* of the sum-of-squares errors over their respective degrees of freedom.$$MSE = \frac{RSS}{N-K-1}$$$$MSR = \frac{ESS}{K}$$**Remember:** Notation may vary across resources, particularly the use of $R$ and $E$ in $RSS/ESS$ and $MSR/MSE$. In some resources, E = explained and R = residual. In other resources, E = error and R = regression (explained). **This is a very important distinction that requires looking at the formula to determine which naming scheme is being used.**Given the MSR and MSE, we can now determine whether or not the entire model we just fit is even statistically significant. We use an $F$-test for this. The null hypothesis is that all of the $\beta$ coefficients are zero, that is, none of them have any effect on $Y$. The alternative is that *at least one* $\beta$ coefficient is nonzero, but it doesn't tell us which one in a multiple regression:$$H_0: \beta_i = 0, \mbox{for all $i$} \\H_A: \beta_i \neq 0, \mbox{for some $i$}$$ $$F = \frac{MSR}{MSE} = \left( \frac{R^2}{1 - R^2} \right) \left( \frac{N - K - 1}{K} \right)$$ Once we compute the $F$-statistic, we can use the $F$-distribution with $K$ and $N-K-1$ degrees of freedom to get a p-value.**Warning!** The $F$-statistic mentioned in this section is NOT the same as the F1-measure or F1-value discussed in Unit 7. Part 3 Checkup Exercise Set IVLet's look at the relationship between `PTRATIO` and housing price.Exercise: Try fitting a linear regression model using only the 'PTRATIO' (pupil-teacher ratio by town) and interpret the intercept and the coefficients.Exercise: Calculate (or extract) the $R^2$ value. What does it tell you?Exercise: Compute the $F$-statistic. What does it tell you?Exercise: Take a close look at the $F$-statistic and the $t$-statistic for the regression coefficient. What relationship do you notice? Note that this relationship only applies in *simple* linear regression models.
###Code
# your turn
#reshape the PTRATIO data and instantiate the regression model
X = np.array(bos['PTRATIO']).reshape(-1,1)
pt_reg = LinearRegression()
#fit and predict the model, extracting the first 5 observations
pt_reg.fit(X, bos.PRICE)
pt_reg.predict(X)[:5]
print('Intercept is {}'.format(pt_reg.intercept_))
print('Estimated coefficient is {}'.format(pt_reg.coef_))
#The R2 value is fairly low, indicating a weak predictor
print('The R2 value is {}'.format(pt_reg.score(X, bos.PRICE)))
#use the summary to find the F-statistic
f = ols('PRICE ~ PTRATIO',bos).fit()
print(f.summary())
###Output
OLS Regression Results
==============================================================================
Dep. Variable: PRICE R-squared: 0.258
Model: OLS Adj. R-squared: 0.256
Method: Least Squares F-statistic: 175.1
Date: Tue, 07 May 2019 Prob (F-statistic): 1.61e-34
Time: 19:39:12 Log-Likelihood: -1764.8
No. Observations: 506 AIC: 3534.
Df Residuals: 504 BIC: 3542.
Df Model: 1
Covariance Type: nonrobust
==============================================================================
coef std err t P>|t| [0.025 0.975]
------------------------------------------------------------------------------
Intercept 62.3446 3.029 20.581 0.000 56.393 68.296
PTRATIO -2.1572 0.163 -13.233 0.000 -2.477 -1.837
==============================================================================
Omnibus: 92.924 Durbin-Watson: 0.725
Prob(Omnibus): 0.000 Jarque-Bera (JB): 191.444
Skew: 1.001 Prob(JB): 2.68e-42
Kurtosis: 5.252 Cond. No. 160.
==============================================================================
Warnings:
[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.
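###Markdown
The summary quantities can also be rebuilt by hand from the formulas above, which makes the relationship between the $F$- and $t$-statistics easy to see. A minimal sketch using only objects already defined in this notebook (`N`, `K`, `r2` and `f_from_r2` are hypothetical names):
###Code
# Hedged sketch: rebuild the F-statistic from R^2 and compare it to t^2
N, K = len(bos), 1
r2 = pt_reg.score(X, bos.PRICE)
f_from_r2 = (r2 / (1 - r2)) * ((N - K - 1) / K)
print('F from R^2: {}'.format(f_from_r2))            # ~175.1, matching the summary
print('t^2 for PTRATIO: {}'.format((-13.233) ** 2))  # ~175.1 as well: F = t^2 here
###Output
_____no_output_____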
###Markdown
Based on this summary and the earlier summary from the 'RM' model, the $t$-statistic and the $F$-statistic are directly related: in a simple linear regression the $F$-statistic is the square of the coefficient's $t$-statistic (here $(-13.233)^2 \approx 175.1$). Part 3 Checkup Exercise Set VFit a linear regression model using three independent variables 'CRIM' (per capita crime rate by town) 'RM' (average number of rooms per dwelling) 'PTRATIO' (pupil-teacher ratio by town)Exercise: Compute or extract the $F$-statistic. What does it tell you about the model?Exercise: Compute or extract the $R^2$ statistic. What does it tell you about the model?Exercise: Which variables in the model are significant in predicting house price? Write a story that interprets the coefficients.
###Code
# your turn
from sklearn.metrics import r2_score
X = bos[['CRIM', 'RM', 'PTRATIO']]
reg_3 = LinearRegression()
#Fit the three variables against price
reg_3.fit(X, bos.PRICE)
r2 = r2_score(bos.PRICE, reg_3.predict(X))
r2
X = bos[['CRIM', 'RM', 'PTRATIO']]
f_3 = ols('PRICE ~ X', bos).fit()
f_3.summary()
###Output
_____no_output_____
###Markdown
The model performs better with multiple predictor variables.
###Code
#Re-view the coefficients table
coef_df
###Output
_____no_output_____
###Markdown
The strongest coefficients appear to be RM and NOX. Even though NOX is negative, that information is still useful. Part 4: Comparing Models During modeling, there will be times when we want to compare models to see which one is more predictive or fits the data better. There are many ways to compare models, but we will focus on two. The $F$-Statistic RevisitedThe $F$-statistic can also be used to compare two *nested* models, that is, two models trained on the same dataset where one of the models contains a *subset* of the variables of the other model. The *full* model contains $K$ variables and the *reduced* model contains a subset of these $K$ variables. This allows us to add additional variables to a base model and then test if adding the variables helped the model fit.$$F = \frac{\left( \frac{RSS_{reduced} - RSS_{full}}{DF_{reduced} - DF_{full}} \right)}{\left( \frac{RSS_{full}}{DF_{full}} \right)}$$where $DF_x = N - K_x - 1$ where $K_x$ is the number of variables in model $x$. Akaike Information Criterion (AIC)Another statistic for comparing two models is AIC, which is based on the likelihood function and takes into account the number of variables in the model.$$AIC = 2 K - 2 \log_e{L}$$where $L$ is the likelihood of the model. AIC is meaningless in the absolute sense, and is only meaningful when compared to AIC values from other models. Lower values of AIC indicate better fitting models.`statsmodels` provides the AIC in its output. Part 4 Checkup ExercisesExercise: Find another variable (or two) to add to the model we built in Part 3. Compute the $F$-test comparing the two models as well as the AIC. Which model is better?
###Code
X = bos[['CRIM', 'RM', 'PTRATIO', 'NOX', 'CHAS']]
f_5 = ols('PRICE ~ X', bos).fit()
f_5.summary()
###Output
_____no_output_____
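###Markdown
The nested-model $F$-test described above can be computed directly; `statsmodels` exposes it as `anova_lm`. A minimal sketch, assuming the reduced model `f_3` fitted in Part 3 is still in scope:
###Code
# Hedged sketch: F-test comparing the reduced (3-variable) and full (5-variable) models
from statsmodels.stats.anova import anova_lm
print(anova_lm(f_3, f_5))
# AIC comparison: lower values indicate a better-fitting model
print('AIC reduced: {}  AIC full: {}'.format(f_3.aic, f_5.aic))
###Output
_____no_output_____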
###Markdown
With more variables the F-statistic is lower, but R-squared and AIC indicate a better fit. Part 5: Evaluating the Model via Model Assumptions and Other Issues***Linear regression makes several assumptions. It is always best to check that these assumptions are valid after fitting a linear regression model. **Linearity**. The dependent variable $Y$ is a linear combination of the regression coefficients and the independent variables $X$. This can be verified with a scatterplot of each $X$ vs. $Y$ and plotting correlations among $X$. Nonlinearity can sometimes be resolved by [transforming](https://onlinecourses.science.psu.edu/stat501/node/318) one or more independent variables, the dependent variable, or both. In other cases, a [generalized linear model](https://en.wikipedia.org/wiki/Generalized_linear_model) or a [nonlinear model](https://en.wikipedia.org/wiki/Nonlinear_regression) may be warranted. **Constant standard deviation**. The SD of the dependent variable $Y$ should be constant for different values of X. We can check this by plotting each $X$ against $Y$ and verifying that there is no "funnel" shape showing data points fanning out as $X$ increases or decreases. Some techniques for dealing with non-constant variance include weighted least squares (WLS), [robust standard errors](https://en.wikipedia.org/wiki/Heteroscedasticity-consistent_standard_errors), or variance stabilizing transformations. **Normal distribution for errors**. The $\epsilon$ term we discussed at the beginning are assumed to be normally distributed. This can be verified with a fitted values vs. residuals plot and verifying that there is no pattern, and with a quantile plot. $$ \epsilon_i \sim N(0, \sigma^2)$$Sometimes the distributions of responses $Y$ may not be normally distributed at any given value of $X$. e.g. skewed positively or negatively. **Independent errors**. The observations are assumed to be obtained independently. e.g. Observations across time may be correlated There are some other issues that are important investigate with linear regression models. **Correlated Predictors:** Care should be taken to make sure that the independent variables in a regression model are not too highly correlated. Correlated predictors typically do not majorly affect prediction, but do inflate standard errors of coefficients making interpretation unreliable. Common solutions are dropping the least important variables involved in the correlations, using regularlization, or, when many predictors are highly correlated, considering a dimension reduction technique such as principal component analysis (PCA). **Influential Points:** Data points that have undue influence on the regression model. These points can be high leverage points or outliers. Such points are typically removed and the regression model rerun. Part 5 Checkup ExercisesTake the reduced model from Part 3 to answer the following exercises. Take a look at [this blog post](http://mpastell.com/2013/04/19/python_regression/) for more information on using statsmodels to construct these plots. Exercise: Construct a fitted values versus residuals plot. What does the plot tell you? Are there any violations of the model assumptions?Exercise: Construct a quantile plot of the residuals. What does the plot tell you?Exercise: What are some advantages and disadvantages of the fitted vs. 
residual and quantile plot compared to each other?Exercise: Identify any outliers (if any) in your model and write a story describing what these outliers might represent.Exercise: Construct a leverage plot and identify high leverage points in the model. Write a story explaining possible reasons for the high leverage points.Exercise: Remove the outliers and high leverage points from your model and run the regression again. How do the results change?
###Code
# Your turn.
X_3 = bos[['CRIM', 'RM', 'PTRATIO']]
f_3 = ols('PRICE ~ X_3', bos).fit()
f_3.summary()
_ = plt.hist(f_3.resid)
_ = plt.title('Residual Histogram', fontsize=16)
import scipy.stats as stats
stats.probplot(f_3.resid, plot=plt)
plt.show()
from statsmodels.graphics.regressionplots import *
plot_leverage_resid2(f_3)
bos_adj = bos.drop(bos.index[[365, 367, 368, 369, 371, 372, 373, 380, 405, 418]])  # same rows as before, with the duplicated 373 removed
res_adj = ols('PRICE ~ CRIM + RM + PTRATIO', bos_adj).fit()
res_adj.summary()
###Output
_____no_output_____
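###Markdown
The exercises above also ask for a fitted values versus residuals plot, which the cell does not draw explicitly; a minimal sketch using the refitted reduced model `f_3`:
###Code
# Hedged sketch: fitted values vs. residuals for the reduced model
_ = plt.scatter(f_3.fittedvalues, f_3.resid, s=12)
_ = plt.axhline(y=0, color='gray')
_ = plt.xlabel('Fitted values')
_ = plt.ylabel('Residuals')
_ = plt.title('Fitted vs. Residuals')
###Output
_____no_output_____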
###Markdown
Regression in Python***This is a very quick run-through of some basic statistical concepts, adapted from [Lab 4 in Harvard's CS109](https://github.com/cs109/2015lab4) course. Please feel free to try the original lab if you're feeling ambitious :-) The CS109 git repository also has the solutions if you're stuck.* Linear Regression Models* Prediction using linear regression* Some re-sampling methods * Train-Test splits * Cross ValidationLinear regression is used to model and predict continuous outcomes while logistic regression is used to model binary outcomes. We'll see some examples of linear regression as well as Train-test splits.The packages we'll cover are: `statsmodels`, `seaborn`, and `scikit-learn`. While we don't explicitly teach `statsmodels` and `seaborn` in the Springboard workshop, those are great libraries to know.*** ***
###Code
# special IPython command to prepare the notebook for matplotlib and other libraries
%pylab inline
import numpy as np
import pandas as pd
import scipy.stats as stats
import matplotlib.pyplot as plt
import sklearn
import seaborn as sns
# special matplotlib argument for improved plots
from matplotlib import rcParams
sns.set_style("whitegrid")
sns.set_context("poster")
###Output
Populating the interactive namespace from numpy and matplotlib
###Markdown
*** Part 1: Linear Regression Purpose of linear regression*** Given a dataset $X$ and $Y$, linear regression can be used to: Build a predictive model to predict future values of $Y$ from new observations $X_i$ (for which $Y$ is unknown). Model the strength of the relationship between each independent variable $X_i$ and $Y$ Sometimes not all $X_i$ will have a relationship with $Y$ Need to figure out which $X_i$ contributes most information to determine $Y$ Linear regression is used in so many applications that I won't warrant this with examples. It is in many cases, the first pass prediction algorithm for continuous outcomes. A brief recap (feel free to skip if you don't care about the math)***[Linear Regression](http://en.wikipedia.org/wiki/Linear_regression) is a method to model the relationship between a set of independent variables $X$ (also known as explanatory variables, features, predictors) and a dependent variable $Y$. This method assumes that each predictor $X$ is linearly related to the dependent variable $Y$. $$ Y = \beta_0 + \beta_1 X + \epsilon$$where $\epsilon$ is considered an unobservable random variable that adds noise to the linear relationship. This is the simplest form of linear regression (one variable); we'll call this the simple model. * $\beta_0$ is the intercept of the linear model* Multiple linear regression is when you have more than one independent variable * $X_1$, $X_2$, $X_3$, $\ldots$$$ Y = \beta_0 + \beta_1 X_1 + \ldots + \beta_p X_p + \epsilon$$ * Back to the simple model. The model in linear regression is that the *conditional mean* of $Y$ given the values in $X$ is expressed as a linear function. $$ y = f(x) = E(Y | X = x)$$ http://www.learner.org/courses/againstallodds/about/glossary.html* The goal is to estimate the coefficients (e.g. $\beta_0$ and $\beta_1$). We represent the estimates of the coefficients with a "hat" on top of the letter. $$ \hat{\beta}_0, \hat{\beta}_1 $$* Once you estimate the coefficients $\hat{\beta}_0$ and $\hat{\beta}_1$, you can use these to predict new values of $Y$$$\hat{y} = \hat{\beta}_0 + \hat{\beta}_1 x_1$$* How do you estimate the coefficients? * There are many ways to fit a linear regression model * The method called **least squares** is one of the most common methods * We will discuss least squares today Estimating $\hat\beta$: Least squares***[Least squares](http://en.wikipedia.org/wiki/Least_squares) is a method that can estimate the coefficients of a linear model by minimizing the sum of squared residuals: $$ S = \sum_{i=1}^N r_i^2 = \sum_{i=1}^N (y_i - (\beta_0 + \beta_1 x_i))^2 $$where $N$ is the number of observations. * We will not go into the mathematical details, but the least squares estimates $\hat{\beta}_0$ and $\hat{\beta}_1$ minimize the sum of the squared residuals $r_i = y_i - (\beta_0 + \beta_1 x_i)$ in the model (i.e. makes the difference between the observed $y_i$ and linear model $\beta_0 + \beta_1 x_i$ as small as possible). The solution can be written in compact matrix notation as$$\hat\beta = (X^T X)^{-1}X^T Y$$ We wanted to show you this in case you remember linear algebra; in order for this solution to exist we need $X^T X$ to be invertible. Of course this requires a few extra assumptions, $X$ must be full rank so that $X^T X$ is invertible, etc.
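As a quick sanity check of the closed-form solution, here is a minimal sketch that builds a synthetic simple-regression data set and solves the normal equations directly with NumPy (all names in the cell are made up for illustration):
###Code
# Hedged sketch: normal-equations estimate beta_hat = (X^T X)^{-1} X^T Y
import numpy as np

rng = np.random.RandomState(0)
x = rng.uniform(0, 10, size=100)
y = 2.0 + 3.0 * x + rng.normal(scale=1.0, size=100)  # true intercept 2, slope 3

X_design = np.column_stack([np.ones_like(x), x])  # column of ones for the intercept
beta_hat = np.linalg.solve(X_design.T @ X_design, X_design.T @ y)
print(beta_hat)  # should land close to [2.0, 3.0]
###Output
_____no_output_____
###Markdown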
**This is important for us because this means that having redundant features in our regression models will lead to poorly fitting (and unstable) models.** We'll see an implementation of this in the extra linear regression example.**Note**: The "hat" means it is an estimate of the coefficient. *** Part 2: Boston Housing Data SetThe [Boston Housing data set](https://archive.ics.uci.edu/ml/datasets/Housing) contains information about the housing values in suburbs of Boston. This dataset was originally taken from the StatLib library which is maintained at Carnegie Mellon University and is now available on the UCI Machine Learning Repository. Load the Boston Housing data set from `sklearn`***This data set is available in the [sklearn](http://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_boston.htmlsklearn.datasets.load_boston) python module which is how we will access it today.
###Code
from sklearn.datasets import load_boston
boston = load_boston()
boston.keys()
boston.data.shape
# Print column names
print(boston.feature_names)
# Print description of Boston housing data set
print(boston.DESCR)
###Output
Boston House Prices dataset
===========================
Notes
------
Data Set Characteristics:
:Number of Instances: 506
:Number of Attributes: 13 numeric/categorical predictive
:Median Value (attribute 14) is usually the target
:Attribute Information (in order):
- CRIM per capita crime rate by town
- ZN proportion of residential land zoned for lots over 25,000 sq.ft.
- INDUS proportion of non-retail business acres per town
- CHAS Charles River dummy variable (= 1 if tract bounds river; 0 otherwise)
- NOX nitric oxides concentration (parts per 10 million)
- RM average number of rooms per dwelling
- AGE proportion of owner-occupied units built prior to 1940
- DIS weighted distances to five Boston employment centres
- RAD index of accessibility to radial highways
- TAX full-value property-tax rate per $10,000
- PTRATIO pupil-teacher ratio by town
- B 1000(Bk - 0.63)^2 where Bk is the proportion of blacks by town
- LSTAT % lower status of the population
- MEDV Median value of owner-occupied homes in $1000's
:Missing Attribute Values: None
:Creator: Harrison, D. and Rubinfeld, D.L.
This is a copy of UCI ML housing dataset.
http://archive.ics.uci.edu/ml/datasets/Housing
This dataset was taken from the StatLib library which is maintained at Carnegie Mellon University.
The Boston house-price data of Harrison, D. and Rubinfeld, D.L. 'Hedonic
prices and the demand for clean air', J. Environ. Economics & Management,
vol.5, 81-102, 1978. Used in Belsley, Kuh & Welsch, 'Regression diagnostics
...', Wiley, 1980. N.B. Various transformations are used in the table on
pages 244-261 of the latter.
The Boston house-price data has been used in many machine learning papers that address regression
problems.
**References**
- Belsley, Kuh & Welsch, 'Regression diagnostics: Identifying Influential Data and Sources of Collinearity', Wiley, 1980. 244-261.
- Quinlan,R. (1993). Combining Instance-Based and Model-Based Learning. In Proceedings on the Tenth International Conference of Machine Learning, 236-243, University of Massachusetts, Amherst. Morgan Kaufmann.
- many more! (see http://archive.ics.uci.edu/ml/datasets/Housing)
###Markdown
Now let's explore the data set itself.
###Code
bos = pd.DataFrame(boston.data)
bos.head()
###Output
_____no_output_____
###Markdown
There are no column names in the DataFrame. Let's add those.
###Code
bos.columns = boston.feature_names
bos.head()
###Output
_____no_output_____
###Markdown
Now we have a pandas DataFrame called `bos` containing all the data we want to use to predict Boston Housing prices. Let's create a variable called `PRICE` which will contain the prices. This information is contained in the `target` data.
###Code
print(boston.target.shape)
bos['PRICE'] = boston.target
bos.head()
###Output
_____no_output_____
###Markdown
EDA and Summary Statistics***Let's explore this data set. First we use `describe()` to get basic summary statistics for each of the columns.
###Code
bos.describe()
###Output
_____no_output_____
###Markdown
Scatter plots***Let's look at some scatter plots for three variables: 'CRIM', 'RM' and 'PTRATIO'. What kind of relationship do you see? e.g. positive, negative? linear? non-linear?
###Code
plt.scatter(bos.CRIM, bos.PRICE)
plt.xlabel("Per capita crime rate by town (CRIM)")
plt.ylabel("Housing Price")
plt.title("Relationship between CRIM and Price")
###Output
_____no_output_____
###Markdown
**Your turn**: Create scatter plots between *RM* and *PRICE*, and *PTRATIO* and *PRICE*. What do you notice?
###Code
#your turn: scatter plot between *RM* and *PRICE*
plt.scatter(bos.RM, bos.PRICE)
plt.xlabel("Average Number of Rooms per Dwelling")
plt.ylabel("Housing Price")
plt.title("Relationship between No. of Rooms and Price");
#your turn: scatter plot between *PTRATIO* and *PRICE*
plt.scatter(bos.PTRATIO, bos.PRICE)
plt.xlabel("Pupil-Teacher Ratio by town")
plt.ylabel("Housing Price")
plt.title("Relationship between PTRatio and Price");
###Output
_____no_output_____
###Markdown
**Your turn**: What are some other numeric variables of interest? Plot scatter plots with these variables and *PRICE*.
###Code
#your turn: create some other scatter plots
###Output
_____no_output_____
###Markdown
Scatter Plots using Seaborn***[Seaborn](https://stanford.edu/~mwaskom/software/seaborn/) is a cool Python plotting library built on top of matplotlib. It provides convenient syntax and shortcuts for many common types of plots, along with better-looking defaults.We can also use [seaborn regplot](https://stanford.edu/~mwaskom/software/seaborn/tutorial/regression.htmlfunctions-to-draw-linear-regression-models) for the scatterplot above. This provides automatic linear regression fits (useful for data exploration later on). Here's one example below.
###Code
sns.regplot(y="PRICE", x="RM", data=bos, fit_reg = True)
###Output
_____no_output_____
###Markdown
Histograms*** Histograms are a useful way to visually summarize the statistical properties of numeric variables. They can give you an idea of the mean and the spread of the variables as well as outliers.
###Code
plt.hist(bos.CRIM)
plt.title("CRIM")
plt.xlabel("Crime rate per capita")
plt.ylabel("Frequency")
plt.show()
###Output
_____no_output_____
###Markdown
**Your turn**: Plot separate histograms: one for *RM* and one for *PTRATIO*. Any interesting observations?
###Code
#your turn
plt.hist(bos.RM)
plt.title("RM")
plt.xlabel("Average number of rooms per dwelling")
plt.ylabel("Frequency")
plt.show()
plt.hist(bos.PTRATIO)
plt.title("PTRATIO")
plt.xlabel("Pupil-Teacher Ratio")
plt.ylabel("Frequency")
plt.show()
###Output
_____no_output_____
###Markdown
Linear regression with Boston housing data example***Here, $Y$ = boston housing prices (also called "target" data in python)and$X$ = all the other features (or independent variables)which we will use to fit a linear regression model and predict Boston housing prices. We will use the least squares method as the way to estimate the coefficients. We'll use two ways of fitting a linear regression. We recommend the first but the second is also powerful in its features. Fitting Linear Regression using `statsmodels`***[Statsmodels](http://statsmodels.sourceforge.net/) is a great Python library for a lot of basic and inferential statistics. It also provides basic regression functions using an R-like syntax, so it's commonly used by statisticians. While we don't cover statsmodels officially in the Data Science Intensive, it's a good library to have in your toolbox. Here's a quick example of what you could do with it.
###Code
# Import regression modules
# ols - stands for Ordinary least squares, we'll use this
import statsmodels.api as sm
from statsmodels.formula.api import ols
# statsmodels works nicely with pandas dataframes
# The thing inside the "quotes" is called a formula, a bit on that below
m = ols('PRICE ~ RM',bos).fit()
print(m.summary())
###Output
OLS Regression Results
==============================================================================
Dep. Variable: PRICE R-squared: 0.484
Model: OLS Adj. R-squared: 0.483
Method: Least Squares F-statistic: 471.8
Date: Tue, 04 Jul 2017 Prob (F-statistic): 2.49e-74
Time: 13:38:43 Log-Likelihood: -1673.1
No. Observations: 506 AIC: 3350.
Df Residuals: 504 BIC: 3359.
Df Model: 1
Covariance Type: nonrobust
==============================================================================
coef std err t P>|t| [95.0% Conf. Int.]
------------------------------------------------------------------------------
Intercept -34.6706 2.650 -13.084 0.000 -39.877 -29.465
RM 9.1021 0.419 21.722 0.000 8.279 9.925
==============================================================================
Omnibus: 102.585 Durbin-Watson: 0.684
Prob(Omnibus): 0.000 Jarque-Bera (JB): 612.449
Skew: 0.726 Prob(JB): 1.02e-133
Kurtosis: 8.190 Cond. No. 58.4
==============================================================================
Warnings:
[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.
###Markdown
Interpreting coefficientsThere is a ton of information in this output. But we'll concentrate on the coefficient table (middle table). We can interpret the `RM` coefficient (9.1021) by first noticing that the p-value (under `P>|t|`) is so small, basically zero. We can interpret the coefficient as follows: if we compare two groups of towns, one where the average number of rooms is say $5$ and the other group is the same except that they all have $6$ rooms, then for these two groups the average difference in house prices is about $9.1$ (in thousands), so about a $\$9,100$ difference. The confidence interval gives us a range of plausible values for this difference, about ($\$8,279, \$9,925$), definitely not chump change. `statsmodels` formulas***This formula notation will seem familiar to `R` users, but will take some getting used to for people coming from other languages or who are new to statistics.The formula gives instructions for a general structure for a regression call. For `statsmodels` (`ols` or `logit`) calls you need to have a Pandas dataframe with column names that you will add to your formula. In the below example you need a pandas data frame that includes the columns named (`Outcome`, `X1`,`X2`, ...), but you don't need to build a new dataframe for every regression. Use the same dataframe with all these things in it. The structure is very simple:`Outcome ~ X1`But of course we want to be able to handle more complex models, for example multiple regression is done like this:`Outcome ~ X1 + X2 + X3`This is the very basic structure but it should be enough to get you through the homework. Things can get much more complex; for a quick run-down of further uses see the `statsmodels` [help page](http://statsmodels.sourceforge.net/devel/example_formulas.html). Let's see how our model actually fit our data. We can see below that there is a ceiling effect, we should probably look into that. Also, for large values of $Y$ we get underpredictions; most predictions are below the 45-degree line. **Your turn:** Create a scatterplot between the predicted prices, available in `m.fittedvalues`, and the original prices. How does the plot look?
###Code
# your turn
plt.scatter(m.fittedvalues, bos.PRICE)
plt.xlabel("Predicted Price")
plt.ylabel("Original Housing Price")
plt.title("Predicted vs. Original Prices");
###Output
_____no_output_____
###Markdown
Fitting Linear Regression using `sklearn`
###Code
from sklearn.linear_model import LinearRegression
X = bos.drop('PRICE', axis = 1)
# This creates a LinearRegression object
lm = LinearRegression()
lm
###Output
_____no_output_____
###Markdown
What can you do with a LinearRegression object? ***Check out the scikit-learn [docs here](http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html). We have listed the main functions here. Main functions | Description--- | --- `lm.fit()` | Fit a linear model`lm.predict()` | Predict Y using the linear model with estimated coefficients`lm.score()` | Returns the coefficient of determination (R^2). *A measure of how well observed outcomes are replicated by the model, as the proportion of total variation of outcomes explained by the model* What output can you get?
###Code
# Look inside lm object
#lm.
###Output
_____no_output_____
###Markdown
Output | Description--- | --- `lm.coef_` | Estimated coefficients`lm.intercept_` | Estimated intercept Fit a linear model***The `lm.fit()` function estimates the coefficients of the linear regression using least squares.
###Code
# Use all 13 predictors to fit linear regression model
lm.fit(X, bos.PRICE)
###Output
_____no_output_____
###Markdown
**Your turn:** How would you change the model to not fit an intercept term? Would you recommend not having an intercept? Estimated intercept and coefficientsLet's look at the estimated coefficients from the linear model using `lm.intercept_` and `lm.coef_`. After we have fit our linear regression model using the least squares method, we want to see the estimates of our coefficients $\beta_0$, $\beta_1$, ..., $\beta_{13}$: $$ \hat{\beta}_0, \hat{\beta}_1, \ldots, \hat{\beta}_{13} $$
###Code
print('Estimated intercept coefficient:', lm.intercept_)
print('Number of coefficients:', len(lm.coef_))
# The coefficients
pd.DataFrame(list(zip(X.columns, lm.coef_)), columns = ['features', 'estimatedCoefficients'])
###Output
_____no_output_____
###Markdown
Predict Prices We can calculate the predicted prices ($\hat{Y}_i$) using `lm.predict`. $$ \hat{Y}_i = \hat{\beta}_0 + \hat{\beta}_1 X_1 + \ldots \hat{\beta}_{13} X_{13} $$
###Code
# first five predicted prices
lm.predict(X)[0:5]
###Output
_____no_output_____
###Markdown
**Your turn:** * Histogram: Plot a histogram of all the predicted prices* Scatter Plot: Let's plot the true prices compared to the predicted prices to see where they disagree (we did this with `statsmodels` before).
###Code
# your turn
plt.hist(lm.predict(X))
plt.title("Predicted Prices")
plt.xlabel("Price")
plt.ylabel("Frequency")
plt.show()
###Output
_____no_output_____
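###Markdown
The scatter-plot half of the exercise, as a minimal sketch (it reuses the `lm` object fit on all 13 predictors above):
###Code
# Hedged sketch: true prices vs. sklearn-predicted prices.
plt.scatter(bos.PRICE, lm.predict(X))
plt.xlabel("Original Housing Price")
plt.ylabel("Predicted Price")
plt.title("True vs. Predicted Prices")
plt.show()
###Output
_____no_output_____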
###Markdown
Residual sum of squaresLet's calculate the residual sum of squares $$ S = \sum_{i=1}^N r_i^2 = \sum_{i=1}^N (y_i - (\beta_0 + \beta_1 x_i))^2 $$
###Code
print(np.sum((bos.PRICE - lm.predict(X)) ** 2))
###Output
11080.2762841
###Markdown
Mean squared error***This is simply the mean of the squared residuals (the residual sum of squares divided by $N$).**Your turn:** Calculate the mean squared error and print it.
###Code
#your turn
print(np.mean((bos.PRICE - lm.predict(X)) ** 2))
###Output
21.8977792177
###Markdown
Relationship between `PTRATIO` and housing price***Try fitting a linear regression model using only 'PTRATIO' (pupil-teacher ratio by town). Calculate the mean squared error.
###Code
lm = LinearRegression()
lm.fit(X[['PTRATIO']], bos.PRICE)
#pd.DataFrame(zip(X.columns, lm.coef_), columns = ['features', 'estimatedCoefficients'])
msePTRATIO = np.mean((bos.PRICE - lm.predict(X[['PTRATIO']])) ** 2)
print(msePTRATIO)
###Output
62.6522000138
###Markdown
We can also plot the fitted linear regression line.
###Code
plt.scatter(bos.PTRATIO, bos.PRICE)
plt.xlabel("Pupil-to-Teacher Ratio (PTRATIO)")
plt.ylabel("Housing Price")
plt.title("Relationship between PTRATIO and Price")
plt.plot(bos.PTRATIO, lm.predict(X[['PTRATIO']]), color='blue', linewidth=3)
plt.show()
###Output
_____no_output_____
###Markdown
Your turn***Try fitting a linear regression model using three independent variables: 1. 'CRIM' (per capita crime rate by town), 2. 'RM' (average number of rooms per dwelling), 3. 'PTRATIO' (pupil-teacher ratio by town). Calculate the mean squared error.
###Code
# your turn
# Single-predictor fit on CRIM first (used for the plot below)...
lm = LinearRegression()
lm.fit(X[['CRIM']], bos.PRICE)
print(np.mean((bos.PRICE - lm.predict(X[['CRIM']])) ** 2))
plt.scatter(bos.CRIM, bos.PRICE)
plt.xlabel("Crime per Capita")
plt.ylabel("Housing Price")
plt.title("Relationship between CRIM and Price")
plt.plot(bos.CRIM, lm.predict(X[['CRIM']]), color='blue', linewidth=3)
plt.show()
# ...and the three-predictor fit that the prompt asks for.
lm3 = LinearRegression()
lm3.fit(X[['CRIM', 'RM', 'PTRATIO']], bos.PRICE)
print(np.mean((bos.PRICE - lm3.predict(X[['CRIM', 'RM', 'PTRATIO']])) ** 2))
###Output
_____no_output_____
###Markdown
Other important things to think about when fitting a linear regression model*** **Linearity**. The dependent variable $Y$ is a linear combination of the regression coefficients and the independent variables $X$. **Constant standard deviation**. The SD of the dependent variable $Y$ should be constant for different values of X. e.g. PTRATIO **Normal distribution for errors**. The $\epsilon$ term we discussed at the beginning are assumed to be normally distributed. $$ \epsilon_i \sim N(0, \sigma^2)$$Sometimes the distributions of responses $Y$ may not be normally distributed at any given value of $X$. e.g. skewed positively or negatively. **Independent errors**. The observations are assumed to be obtained independently. e.g. Observations across time may be correlated Part 3: Training and Test Data sets Purpose of splitting data into Training/testing sets*** Let's stick to the linear regression example: We built our model with the requirement that the model fit the data well. As a side-effect, the model will fit THIS dataset well. What about new data? We wanted the model for predictions, right? One simple solution, leave out some data (for testing) and train the model on the rest This also leads directly to the idea of cross-validation, next section. ***One way of doing this is you can create training and testing data sets manually.
###Code
X_train = X[:-50]
X_test = X[-50:]
Y_train = bos.PRICE[:-50]
Y_test = bos.PRICE[-50:]
print(X_train.shape)
print(X_test.shape)
print(Y_train.shape)
print(Y_test.shape)
X_train.head()
###Output
(456, 13)
(50, 13)
(456,)
(50,)
###Markdown
Another way is to split the data into random train and test subsets using the function `train_test_split`, found in `sklearn.cross_validation` in older scikit-learn versions and in `sklearn.model_selection` in newer ones. Here's the [documentation](http://scikit-learn.org/stable/modules/generated/sklearn.cross_validation.train_test_split.html).
###Code
# train_test_split lives in sklearn.model_selection in newer scikit-learn versions:
#X_train, X_test, Y_train, Y_test = sklearn.model_selection.train_test_split(
# X, bos.PRICE, test_size=0.33, random_state = 5)
print(X_train.shape)
print(X_test.shape)
print(Y_train.shape)
print(Y_test.shape)
X_train
###Output
(456, 13)
(50, 13)
(456,)
(50,)
###Markdown
**Your turn:** Let's build a linear regression model using our new training data sets. * Fit a linear regression model to the training set* Predict the output on the test set
###Code
# your turn
lm = LinearRegression()
lm.fit(X_train[['RM']], Y_train)
print(np.mean((Y_train - lm.predict(X_train[['RM']])) ** 2))
plt.scatter(X_train.RM, Y_train)
plt.xlabel("No. of Rooms")
plt.ylabel("Housing Price")
plt.title("Relationship between No. of Rooms and Price")
plt.plot(X_train.RM, lm.predict(X_train[['RM']]), color='blue', linewidth=3)
plt.show()
###Output
46.2932242688
###Markdown
**Your turn:**Calculate the mean squared error * using just the test data* using just the training dataAre they pretty similar or very different? What does that mean?
###Code
# your turn
# NOTE: refitting on the test set (as below) measures in-sample fit on those
# 50 points rather than true out-of-sample error; the cleaner comparison keeps
# the model fit on the training set and evaluates its predictions on both sets.
lm = LinearRegression()
lm.fit(X_test[['RM']], Y_test)
print(np.mean((Y_test - lm.predict(X_test[['RM']])) ** 2))
plt.scatter(X_test.RM, Y_test)
plt.xlabel("No. of Rooms")
plt.ylabel("Housing Price")
plt.title("Relationship between No. of Rooms and Price")
plt.plot(X_test.RM, lm.predict(X_test[['RM']]), color='blue', linewidth=3)
plt.show()
###Output
12.9000419523
###Markdown
Residual plots
###Code
# Refit on all 13 training features so the residual plot reflects the full
# model (the previous cell left `lm` fit on the single RM column, which would
# make lm.predict(X_train) fail on the 13-column frame).
lm = LinearRegression()
lm.fit(X_train, Y_train)
plt.scatter(lm.predict(X_train), lm.predict(X_train) - Y_train, c='b', s=40, alpha=0.5)
plt.scatter(lm.predict(X_test), lm.predict(X_test) - Y_test, c='g', s=40)
plt.hlines(y = 0, xmin=0, xmax = 50)
plt.title('Residual Plot using training (blue) and test (green) data')
plt.ylabel('Residuals')
###Output
_____no_output_____
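###Markdown
The single train/test split above generalizes to k-fold cross-validation, which is where the next section is headed. A minimal sketch with scikit-learn (reusing `X` and `bos.PRICE`; in newer scikit-learn versions `cross_val_score` lives in `sklearn.model_selection`):
###Code
# Hedged sketch: 5-fold cross-validated MSE for the full linear model.
from sklearn.model_selection import cross_val_score
scores = cross_val_score(LinearRegression(), X, bos.PRICE,
                         scoring='neg_mean_squared_error', cv=5)
print(-scores)         # per-fold MSE
print(-scores.mean())  # average MSE across the 5 folds
###Output
_____no_output_____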
###Markdown
Regression in Python***This is a very quick run-through of some basic statistical concepts, adapted from [Lab 4 in Harvard's CS109](https://github.com/cs109/2015lab4) course. Please feel free to try the original lab if you're feeling ambitious :-) The CS109 git repository also has the solutions if you're stuck.* Linear Regression Models* Prediction using linear regression* Some re-sampling methods * Train-Test splits * Cross ValidationLinear regression is used to model and predict continuous outcomes while logistic regression is used to model binary outcomes. We'll see some examples of linear regression as well as Train-test splits.The packages we'll cover are: `statsmodels`, `seaborn`, and `scikit-learn`. While we don't explicitly teach `statsmodels` and `seaborn` in the Springboard workshop, those are great libraries to know.*** ***
###Code
# special IPython command to prepare the notebook for matplotlib and other libraries
%pylab inline
import numpy as np
import pandas as pd
import scipy.stats as stats
import matplotlib.pyplot as plt
import sklearn
import seaborn as sns
# special matplotlib argument for improved plots
from matplotlib import rcParams
sns.set_style("whitegrid")
sns.set_context("poster")
###Output
Populating the interactive namespace from numpy and matplotlib
###Markdown
*** Part 1: Linear Regression Purpose of linear regression*** Given a dataset $X$ and $Y$, linear regression can be used to: Build a predictive model to predict future values of $Y$ from new observations $X_i$ (for which $Y$ is unknown). Model the strength of the relationship between each independent variable $X_i$ and $Y$ Sometimes not all $X_i$ will have a relationship with $Y$ Need to figure out which $X_i$ contributes most information to determine $Y$ Linear regression is used in so many applications that I won't warrant this with examples. It is in many cases, the first pass prediction algorithm for continuous outcomes. A brief recap (feel free to skip if you don't care about the math)***[Linear Regression](http://en.wikipedia.org/wiki/Linear_regression) is a method to model the relationship between a set of independent variables $X$ (also known as explanatory variables, features, predictors) and a dependent variable $Y$. This method assumes that each predictor $X$ is linearly related to the dependent variable $Y$. $$ Y = \beta_0 + \beta_1 X + \epsilon$$where $\epsilon$ is considered an unobservable random variable that adds noise to the linear relationship. This is the simplest form of linear regression (one variable); we'll call this the simple model. * $\beta_0$ is the intercept of the linear model* Multiple linear regression is when you have more than one independent variable * $X_1$, $X_2$, $X_3$, $\ldots$$$ Y = \beta_0 + \beta_1 X_1 + \ldots + \beta_p X_p + \epsilon$$ * Back to the simple model. The model in linear regression is that the *conditional mean* of $Y$ given the values in $X$ is expressed as a linear function. $$ y = f(x) = E(Y | X = x)$$ http://www.learner.org/courses/againstallodds/about/glossary.html* The goal is to estimate the coefficients (e.g. $\beta_0$ and $\beta_1$). We represent the estimates of the coefficients with a "hat" on top of the letter. $$ \hat{\beta}_0, \hat{\beta}_1 $$* Once you estimate the coefficients $\hat{\beta}_0$ and $\hat{\beta}_1$, you can use these to predict new values of $Y$$$\hat{y} = \hat{\beta}_0 + \hat{\beta}_1 x_1$$* How do you estimate the coefficients? * There are many ways to fit a linear regression model * The method called **least squares** is one of the most common methods * We will discuss least squares today Estimating $\hat\beta$: Least squares***[Least squares](http://en.wikipedia.org/wiki/Least_squares) is a method that can estimate the coefficients of a linear model by minimizing the sum of squared residuals: $$ S = \sum_{i=1}^N r_i^2 = \sum_{i=1}^N (y_i - (\beta_0 + \beta_1 x_i))^2 $$where $N$ is the number of observations. * We will not go into the mathematical details, but the least squares estimates $\hat{\beta}_0$ and $\hat{\beta}_1$ minimize the sum of the squared residuals $r_i = y_i - (\beta_0 + \beta_1 x_i)$ in the model (i.e. makes the difference between the observed $y_i$ and linear model $\beta_0 + \beta_1 x_i$ as small as possible). The solution can be written in compact matrix notation as$$\hat\beta = (X^T X)^{-1}X^T Y$$ We wanted to show you this in case you remember linear algebra; in order for this solution to exist we need $X^T X$ to be invertible. Of course this requires a few extra assumptions, $X$ must be full rank so that $X^T X$ is invertible, etc.
**This is important for us because this means that having redundant features in our regression models will lead to poorly fitting (and unstable) models.** We'll see an implementation of this in the extra linear regression example.**Note**: The "hat" means it is an estimate of the coefficient. *** Part 2: Boston Housing Data SetThe [Boston Housing data set](https://archive.ics.uci.edu/ml/datasets/Housing) contains information about the housing values in suburbs of Boston. This dataset was originally taken from the StatLib library which is maintained at Carnegie Mellon University and is now available on the UCI Machine Learning Repository. Load the Boston Housing data set from `sklearn`***This data set is available in the [sklearn](http://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_boston.htmlsklearn.datasets.load_boston) python module which is how we will access it today.
###Code
from sklearn.datasets import load_boston
boston = load_boston()
boston.keys()
boston.data.shape
# Print column names
print(boston.feature_names)
# Print description of Boston housing data set
print(boston.DESCR)
###Output
Boston House Prices dataset
===========================
Notes
------
Data Set Characteristics:
:Number of Instances: 506
:Number of Attributes: 13 numeric/categorical predictive
:Median Value (attribute 14) is usually the target
:Attribute Information (in order):
- CRIM per capita crime rate by town
- ZN proportion of residential land zoned for lots over 25,000 sq.ft.
- INDUS proportion of non-retail business acres per town
- CHAS Charles River dummy variable (= 1 if tract bounds river; 0 otherwise)
- NOX nitric oxides concentration (parts per 10 million)
- RM average number of rooms per dwelling
- AGE proportion of owner-occupied units built prior to 1940
- DIS weighted distances to five Boston employment centres
- RAD index of accessibility to radial highways
- TAX full-value property-tax rate per $10,000
- PTRATIO pupil-teacher ratio by town
- B 1000(Bk - 0.63)^2 where Bk is the proportion of blacks by town
- LSTAT % lower status of the population
- MEDV Median value of owner-occupied homes in $1000's
:Missing Attribute Values: None
:Creator: Harrison, D. and Rubinfeld, D.L.
This is a copy of UCI ML housing dataset.
http://archive.ics.uci.edu/ml/datasets/Housing
This dataset was taken from the StatLib library which is maintained at Carnegie Mellon University.
The Boston house-price data of Harrison, D. and Rubinfeld, D.L. 'Hedonic
prices and the demand for clean air', J. Environ. Economics & Management,
vol.5, 81-102, 1978. Used in Belsley, Kuh & Welsch, 'Regression diagnostics
...', Wiley, 1980. N.B. Various transformations are used in the table on
pages 244-261 of the latter.
The Boston house-price data has been used in many machine learning papers that address regression
problems.
**References**
- Belsley, Kuh & Welsch, 'Regression diagnostics: Identifying Influential Data and Sources of Collinearity', Wiley, 1980. 244-261.
- Quinlan,R. (1993). Combining Instance-Based and Model-Based Learning. In Proceedings on the Tenth International Conference of Machine Learning, 236-243, University of Massachusetts, Amherst. Morgan Kaufmann.
- many more! (see http://archive.ics.uci.edu/ml/datasets/Housing)
###Markdown
Now let's explore the data set itself.
###Code
bos = pd.DataFrame(boston.data)
bos.head()
###Output
_____no_output_____
###Markdown
There are no column names in the DataFrame. Let's add those.
###Code
bos.columns = boston.feature_names
bos.head()
###Output
_____no_output_____
###Markdown
Now we have a pandas DataFrame called `bos` containing all the data we want to use to predict Boston Housing prices. Let's create a variable called `PRICE` which will contain the prices. This information is contained in the `target` data.
###Code
print(boston.target.shape)
bos['PRICE'] = boston.target
bos.head()
###Output
_____no_output_____
###Markdown
EDA and Summary Statistics***Let's explore this data set. First we use `describe()` to get basic summary statistics for each of the columns.
###Code
bos.describe()
###Output
_____no_output_____
###Markdown
Scatter plots***Let's look at some scatter plots for three variables: 'CRIM', 'RM' and 'PTRATIO'. What kind of relationship do you see? e.g. positive, negative? linear? non-linear?
###Code
plt.scatter(bos.CRIM, bos.PRICE)
plt.xlabel("Per capita crime rate by town (CRIM)")
plt.ylabel("Housing Price")
plt.title("Relationship between CRIM and Price")
###Output
_____no_output_____
###Markdown
**Your turn**: Create scatter plots between *RM* and *PRICE*, and *PTRATIO* and *PRICE*. What do you notice?
###Code
#your turn: scatter plot between *RM* and *PRICE*
plt.scatter(bos.RM, bos.PRICE)
plt.xlabel("Average number of rooms per dwelling")
plt.ylabel("Housing Price")
plt.title("Relationship between RM and Price")
#your turn: scatter plot between *PTRATIO* and *PRICE*
sns.regplot(y="PRICE", x="PTRATIO", data=bos, fit_reg = True)
###Output
_____no_output_____
###Markdown
**Your turn**: What are some other numeric variables of interest? Plot scatter plots with these variables and *PRICE*.
###Code
#your turn: create some other scatter plots
sns.regplot(y="PRICE", x="TAX", data=bos, fit_reg = True)
###Output
_____no_output_____
###Markdown
Scatter Plots using Seaborn***[Seaborn](https://stanford.edu/~mwaskom/software/seaborn/) is a cool Python plotting library built on top of matplotlib. It provides convenient syntax and shortcuts for many common types of plots, along with better-looking defaults.We can also use [seaborn regplot](https://stanford.edu/~mwaskom/software/seaborn/tutorial/regression.htmlfunctions-to-draw-linear-regression-models) for the scatterplot above. This provides automatic linear regression fits (useful for data exploration later on). Here's one example below.
###Code
sns.regplot(y="PRICE", x="RM", data=bos, fit_reg = True)
###Output
_____no_output_____
###Markdown
Histograms*** Histograms are a useful way to visually summarize the statistical properties of numeric variables. They can give you an idea of the mean and the spread of the variables as well as outliers.
###Code
plt.hist(bos.CRIM)
plt.title("CRIM")
plt.xlabel("Crime rate per capita")
plt.ylabel("Frequency")
plt.show()
###Output
_____no_output_____
###Markdown
**Your turn**: Plot separate histograms: one for *RM* and one for *PTRATIO*. Any interesting observations?
###Code
#your turn
plt.hist(bos.RM)
plt.title("Avg Rooms")
plt.xlabel("Average number of rooms per dwelling")
plt.ylabel("Frequency")
plt.show()
plt.hist(bos.PTRATIO)
plt.title("Pupil-Teacher Ratio")
plt.xlabel("Pupil-Teacher Ratio")
plt.ylabel("Frequency")
plt.show()
###Output
_____no_output_____
###Markdown
The distribution of average rooms per dwelling is approximately Gaussian, whereas the pupil-teacher ratio looks more bimodal. Linear regression with Boston housing data example***Here, $Y$ = boston housing prices (also called "target" data in python)and$X$ = all the other features (or independent variables)which we will use to fit a linear regression model and predict Boston housing prices. We will use the least squares method as the way to estimate the coefficients. We'll use two ways of fitting a linear regression. We recommend the first but the second is also powerful in its features. Fitting Linear Regression using `statsmodels`***[Statsmodels](http://statsmodels.sourceforge.net/) is a great Python library for a lot of basic and inferential statistics. It also provides basic regression functions using an R-like syntax, so it's commonly used by statisticians. While we don't cover statsmodels officially in the Data Science Intensive, it's a good library to have in your toolbox. Here's a quick example of what you could do with it.
###Code
# Import regression modules
# ols - stands for Ordinary least squares, we'll use this
import statsmodels.api as sm
from statsmodels.formula.api import ols
# statsmodels works nicely with pandas dataframes
# The thing inside the "quotes" is called a formula, a bit on that below
m = ols('PRICE ~ RM',bos).fit()
print(m.summary())
###Output
OLS Regression Results
==============================================================================
Dep. Variable: PRICE R-squared: 0.484
Model: OLS Adj. R-squared: 0.483
Method: Least Squares F-statistic: 471.8
Date: Fri, 16 Jun 2017 Prob (F-statistic): 2.49e-74
Time: 20:28:10 Log-Likelihood: -1673.1
No. Observations: 506 AIC: 3350.
Df Residuals: 504 BIC: 3359.
Df Model: 1
Covariance Type: nonrobust
==============================================================================
coef std err t P>|t| [95.0% Conf. Int.]
------------------------------------------------------------------------------
Intercept -34.6706 2.650 -13.084 0.000 -39.877 -29.465
RM 9.1021 0.419 21.722 0.000 8.279 9.925
==============================================================================
Omnibus: 102.585 Durbin-Watson: 0.684
Prob(Omnibus): 0.000 Jarque-Bera (JB): 612.449
Skew: 0.726 Prob(JB): 1.02e-133
Kurtosis: 8.190 Cond. No. 58.4
==============================================================================
Warnings:
[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.
###Markdown
Interpreting coefficientsThere is a ton of information in this output. But we'll concentrate on the coefficient table (middle table). We can interpret the `RM` coefficient (9.1021) by first noticing that the p-value (under `P>|t|`) is so small, basically zero. We can interpret the coefficient as follows: if we compare two groups of towns, one where the average number of rooms is say $5$ and the other group is the same except that they all have $6$ rooms, then for these two groups the average difference in house prices is about $9.1$ (in thousands), so about a $\$9,100$ difference. The confidence interval gives us a range of plausible values for this difference, about ($\$8,279, \$9,925$), definitely not chump change. `statsmodels` formulas***This formula notation will seem familiar to `R` users, but will take some getting used to for people coming from other languages or who are new to statistics.The formula gives instructions for a general structure for a regression call. For `statsmodels` (`ols` or `logit`) calls you need to have a Pandas dataframe with column names that you will add to your formula. In the below example you need a pandas data frame that includes the columns named (`Outcome`, `X1`,`X2`, ...), but you don't need to build a new dataframe for every regression. Use the same dataframe with all these things in it. The structure is very simple:`Outcome ~ X1`But of course we want to be able to handle more complex models, for example multiple regression is done like this:`Outcome ~ X1 + X2 + X3`This is the very basic structure but it should be enough to get you through the homework. Things can get much more complex; for a quick run-down of further uses see the `statsmodels` [help page](http://statsmodels.sourceforge.net/devel/example_formulas.html). Let's see how our model actually fit our data. We can see below that there is a ceiling effect, we should probably look into that. Also, for large values of $Y$ we get underpredictions; most predictions are below the 45-degree line. **Your turn:** Create a scatterplot between the predicted prices, available in `m.fittedvalues`, and the original prices. How does the plot look?
###Code
# your turn
predicted_variables=m.fittedvalues
plt.scatter(predicted_variables, bos.PRICE)
plt.xlabel("Predicted_Prices")
plt.ylabel("Housing Price")
plt.title("Predicted vs Original price")
###Output
_____no_output_____
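###Markdown
The formula syntax extends directly to multiple predictors. A minimal sketch that reuses the `bos` DataFrame and the `ols` import from above (`m_multi` is just an illustrative name):
###Code
# Hedged sketch: multiple regression with the statsmodels formula interface.
m_multi = ols('PRICE ~ CRIM + RM + PTRATIO', bos).fit()
print(m_multi.summary())
###Output
_____no_output_____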
###Markdown
Fitting Linear Regression using `sklearn`
###Code
from sklearn.linear_model import LinearRegression
X = bos.drop('PRICE', axis = 1)
# This creates a LinearRegression object
lm = LinearRegression()
lm
###Output
_____no_output_____
###Markdown
What can you do with a LinearRegression object? ***Check out the scikit-learn [docs here](http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html). We have listed the main functions here. Main functions | Description--- | --- `lm.fit()` | Fit a linear model`lm.predict()` | Predict Y using the linear model with estimated coefficients`lm.score()` | Returns the coefficient of determination (R^2). *A measure of how well observed outcomes are replicated by the model, as the proportion of total variation of outcomes explained by the model* What output can you get?
###Code
# Look inside lm object
#lm.coef_
#lm.intercept_
###Output
_____no_output_____
###Markdown
Output | Description--- | --- `lm.coef_` | Estimated coefficients`lm.intercept_` | Estimated intercept Fit a linear model***The `lm.fit()` function estimates the coefficients of the linear regression using least squares.
###Code
# Use all 13 predictors to fit linear regression model
lm.fit(X, bos.PRICE)
lm.coef_
###Output
_____no_output_____
###Markdown
**Your turn:** How would you change the model to not fit an intercept term? Would you recommend not having an intercept?
###Code
# Fit a separate no-intercept model; keep it in its own variable so the
# full model stored in `lm` above is not overwritten.
cm = LinearRegression(fit_intercept=False)
cm.fit(X, bos.PRICE)
###Output
_____no_output_____
###Markdown
You should not assume that the regression line passes through the origin, so it is generally recommended to keep an intercept. Estimated intercept and coefficientsLet's look at the estimated coefficients from the linear model using `lm.intercept_` and `lm.coef_`. After we have fit our linear regression model using the least squares method, we want to see the estimates of our coefficients $\beta_0$, $\beta_1$, ..., $\beta_{13}$: $$ \hat{\beta}_0, \hat{\beta}_1, \ldots, \hat{\beta}_{13} $$
###Code
print('Estimated intercept coefficient:', lm.intercept_)
print('Number of coefficients:', len(lm.coef_))
# The coefficients
#lm.coef_
#X.columns
import pandas as pd
pdata = pd.DataFrame([lm.coef_], columns=X.columns)
pdata
###Output
_____no_output_____
###Markdown
Predict Prices We can calculate the predicted prices ($\hat{Y}_i$) using `lm.predict`. $$ \hat{Y}_i = \hat{\beta}_0 + \hat{\beta}_1 X_1 + \ldots \hat{\beta}_{13} X_{13} $$
###Code
# first five predicted prices
lm.predict(X)[0:5]
###Output
_____no_output_____
###Markdown
**Your turn:** * Histogram: Plot a histogram of all the predicted prices* Scatter Plot: Let's plot the true prices compared to the predicted prices to see where they disagree (we did this with `statsmodels` before).
###Code
# your turn
predicted_prices=lm.predict(X)
predicted_prices
plt.hist(predicted_prices)
plt.title("Predicted prices")
plt.xlabel("Predicted prices")
plt.ylabel("Frequency")
plt.show()
plt.scatter(bos.PRICE,predicted_prices)
plt.xlabel("Predicted_Prices")
plt.ylabel("Housing Price")
plt.title("Predicted vs Original price")
###Output
_____no_output_____
###Markdown
Residual sum of squaresLet's calculate the residual sum of squares $$ S = \sum_{i=1}^N r_i^2 = \sum_{i=1}^N (y_i - (\beta_0 + \beta_1 x_i))^2 $$
###Code
print(np.sum((bos.PRICE - lm.predict(X)) ** 2))
###Output
12231.217345703373
###Markdown
Mean squared error***This is simply the mean of the squared residuals (the residual sum of squares divided by $N$).**Your turn:** Calculate the mean squared error and print it.
###Code
#your turn
print(np.mean((bos.PRICE - lm.predict(X)) ** 2))
###Output
24.17236629585647
###Markdown
Relationship between `PTRATIO` and housing price***Try fitting a linear regression model using only 'PTRATIO' (pupil-teacher ratio by town). Calculate the mean squared error.
###Code
lm = LinearRegression()
lm.fit(X[['PTRATIO']], bos.PRICE)
msePTRATIO = np.mean((bos.PRICE - lm.predict(X[['PTRATIO']])) ** 2)
print(msePTRATIO)
###Output
62.65220001376927
###Markdown
We can also plot the fitted linear regression line.
###Code
plt.scatter(bos.PTRATIO, bos.PRICE)
plt.xlabel("Pupil-to-Teacher Ratio (PTRATIO)")
plt.ylabel("Housing Price")
plt.title("Relationship between PTRATIO and Price")
plt.plot(bos.PTRATIO, lm.predict(X[['PTRATIO']]), color='blue', linewidth=3)
plt.show()
###Output
_____no_output_____
###Markdown
Your turn***Try fitting a linear regression model using three independent variables: 1. 'CRIM' (per capita crime rate by town), 2. 'RM' (average number of rooms per dwelling), 3. 'PTRATIO' (pupil-teacher ratio by town). Calculate the mean squared error.
###Code
# your turn
lm = LinearRegression()
lm.fit(X[['PTRATIO','RM','CRIM']], bos.PRICE)
mse_PTRATIO = np.mean((bos.PRICE - lm.predict(X[['PTRATIO','RM','CRIM']])) ** 2)
print(mse_PTRATIO)
###Output
34.32379656468119
###Markdown
Other important things to think about when fitting a linear regression model*** **Linearity**. The dependent variable $Y$ is a linear combination of the regression coefficients and the independent variables $X$. **Constant standard deviation**. The SD of the dependent variable $Y$ should be constant for different values of X. e.g. PTRATIO **Normal distribution for errors**. The $\epsilon$ term we discussed at the beginning are assumed to be normally distributed. $$ \epsilon_i \sim N(0, \sigma^2)$$Sometimes the distributions of responses $Y$ may not be normally distributed at any given value of $X$. e.g. skewed positively or negatively. **Independent errors**. The observations are assumed to be obtained independently. e.g. Observations across time may be correlated Part 3: Training and Test Data sets Purpose of splitting data into Training/testing sets*** Let's stick to the linear regression example: We built our model with the requirement that the model fit the data well. As a side-effect, the model will fit THIS dataset well. What about new data? We wanted the model for predictions, right? One simple solution, leave out some data (for testing) and train the model on the rest This also leads directly to the idea of cross-validation, next section. ***One way of doing this is you can create training and testing data sets manually.
###Code
X_train = X[:-50]
X_test = X[-50:]
Y_train = bos.PRICE[:-50]
Y_test = bos.PRICE[-50:]
print(X_train.shape)
print(X_test.shape)
print(Y_train.shape)
print(Y_test.shape)
###Output
(456, 13)
(50, 13)
(456,)
(50,)
###Markdown
Another way is to split the data into random train and test subsets using the function `train_test_split`, found in `sklearn.cross_validation` in older scikit-learn versions and in `sklearn.model_selection` in newer ones. Here's the [documentation](http://scikit-learn.org/stable/modules/generated/sklearn.cross_validation.train_test_split.html).
###Code
# train_test_split moved to sklearn.model_selection in newer scikit-learn versions
from sklearn.model_selection import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(
    X, bos.PRICE, test_size=0.33, random_state = 5)
print(X_train.shape)
print(X_test.shape)
print(Y_train.shape)
print(Y_test.shape)
###Output
(339, 13)
(167, 13)
(339,)
(167,)
###Markdown
**Your turn:** Let's build a linear regression model using our new training data sets. * Fit a linear regression model to the training set* Predict the output on the test set
###Code
# your turn
lm = LinearRegression()
lm.fit(X_train,Y_train)
###Output
_____no_output_____
###Markdown
**Your turn:**Calculate the mean squared error * using just the test data* using just the training dataAre they pretty similar or very different? What does that mean?
###Code
predicted_output1=lm.predict(X_test)
train_error = np.mean((Y_train - lm.predict(X_train)) ** 2)
print(train_error)
# your turn
test_error = np.mean((Y_test - lm.predict(X_test)) ** 2)
print(test_error)
###Output
28.541367275619013
###Markdown
Residual plots
###Code
plt.scatter(lm.predict(X_train), lm.predict(X_train) - Y_train, c='b', s=40, alpha=0.5)
plt.scatter(lm.predict(X_test), lm.predict(X_test) - Y_test, c='g', s=40)
plt.hlines(y = 0, xmin=0, xmax = 50)
plt.title('Residual Plot using training (blue) and test (green) data')
plt.ylabel('Residuals')
###Output
_____no_output_____ |
module-2/lab-probability-distribution/your_code/main.ipynb | ###Markdown
Before your start:- Read the README.md file- Comment as much as you can and use the resources (README.md file)- Happy learning!
###Code
# Import your libraries (the cells below use np, pd, stats and plt)
import numpy as np
import pandas as pd
import scipy.stats as stats
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
Challenge 1 - Generate and Plot Normal Distributions Step 1: Generate samples and test normal distributionUse mean=50, standard_deviation=5, and sample_size=[10, 50, 500, 5000] to generate 4 random samples that are normally distributed. Test your normal distributions with [`scipy.stats.normaltest`](https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.normaltest.html).*Hint: Read the documentation for `scipy.stats.normaltest`. The function does not simply return Yes or No for whether your data follow a normal distribution. It returns a test statistic and a p-value.*
###Code
# Your code here
mu, sigma = 50, 5
sample_size = pd.Series([10, 50, 500, 5000, 50000])
dists = sample_size.apply(lambda s: np.random.normal(mu, sigma, s))
tests = dists.apply(lambda d: stats.normaltest(d))
tests
###Output
/usr/local/lib/python3.7/site-packages/scipy/stats/stats.py:1394: UserWarning: kurtosistest only valid for n>=20 ... continuing anyway, n=10
"anyway, n=%i" % int(n))
###Markdown
Interpret the normaltest results and make your observations.
###Code
# Explain the test results here
###Output
_____no_output_____
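###Markdown
One possible way to read the results, as a sketch (it assumes the `tests` Series from the cell above): with a significance level of 0.05, we fail to reject normality whenever the p-value exceeds 0.05.
###Code
# Hedged sketch: print sample size, test statistic and p-value side by side.
for n, res in zip(sample_size, tests):
    verdict = 'consistent with normal' if res.pvalue > 0.05 else 'normality rejected'
    print('n=%6d  statistic=%8.3f  p-value=%.4f  -> %s'
          % (n, res.statistic, res.pvalue, verdict))
###Output
_____no_output_____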
###Markdown
Step 2: Plot normal distributionsUse matplotlib subplots to plot a histogram of each sample. *Hints:*- Use subplots to compare your figures side by side.- Your output should look like below:
###Code
# Your code here
f, ax = plt.subplots(1, 4)
f.set_figwidth(15)
f.subplots_adjust(wspace=1)
for i in range(0, 4):
ax[i].set_title('n=%s' % (sample_size[i]))
count, bins, ignored = ax[i].hist(dists[i], 20, density=True)
###Output
_____no_output_____
###Markdown
Compare the distributions above. What do you observe? Explain with the Central Limit Theorem.
###Code
# Your comment and explanation here
###Output
_____no_output_____
###Markdown
Challenge 2 - Plot Probability Mass Function (PMF) Background knowledge[PMF](https://en.wikipedia.org/wiki/Probability_mass_function) shows the probability distribution of a **discrete random variable**. A [discrete random variable](https://en.wikipedia.org/wiki/Random_variableDiscrete_random_variable) has random numeric values that are not continuous. For example, the number of people in a household can only be integers but not floats. Therefore the number of people in a household is a discrete variable. Question: We assume that the probability of clicking an Ad in a Youtube video is 0.15. We have a sample of 5 people who watched the video and we want to plot the PMF for a binomial distribution.*Hint: use `binom` from `scipy.stats`. Your output should look like below:*
###Code
# Your code here
from scipy.stats import binom
n, p = 5, 0.15
x = range(n + 1)  # include k = n so every possible outcome appears
dist = binom(n, p)
fig, ax = plt.subplots(1, 1)
plt.plot(x, dist.pmf(x))
plt.show()
###Output
_____no_output_____
###Markdown
Explain what you observe from the plot above
###Code
# Your comment here
###Output
_____no_output_____
###Markdown
Now plot the PMF with 50, 500, and 5000 visitors.
###Code
# Your code here
###Output
_____no_output_____
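###Markdown
A minimal sketch of one possible answer, reusing the binomial setup above with the larger sample sizes (`p = 0.15` as before):
###Code
# Hedged sketch: binomial PMFs for n = 50, 500, 5000 with p = 0.15.
from scipy.stats import binom
fig, axes = plt.subplots(1, 3, figsize=(15, 4))
for ax, n in zip(axes, [50, 500, 5000]):
    k = range(n + 1)
    ax.plot(k, binom.pmf(k, n, 0.15))
    ax.set_title('n=%s' % n)
plt.show()
###Output
_____no_output_____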
###Markdown
What did you notice from the distribution plots? Comment your findings.
###Code
# Your comment here
###Output
_____no_output_____
###Markdown
Challenge 3 Reaserch the Poisson distribution. Write about your own understanding of the Poisson distribution.
###Code
# Your comment here
###Output
_____no_output_____
###Markdown
A website has an average of 300 visits per day. What is the probability of getting 320 visitors in a day?*Hint: use `scipy.stats.poisson`*
###Code
# Your code here
###Output
_____no_output_____
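###Markdown
A minimal sketch of one possible answer, following the hint (it assumes a Poisson rate of 300 visits per day):
###Code
# Hedged sketch: P(X = 320) for X ~ Poisson(300).
from scipy.stats import poisson
print(poisson.pmf(320, mu=300))
###Output
_____no_output_____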
###Markdown
What is the probability of getting 60 visits?
###Code
# Your code here
###Output
_____no_output_____
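###Markdown
The same approach works here, as a sketch:
###Code
# Hedged sketch: P(X = 60) for X ~ Poisson(300).
from scipy.stats import poisson
print(poisson.pmf(60, mu=300))  # 60 is far below the mean of 300, so this is vanishingly small
###Output
_____no_output_____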
###Markdown
Plot the distribution of the probability for getting 0-1000 visits.*Hints:* - Create a list to store the Poisson distribution probabilities for n=0 to 1000. Then plot the probabilities.- Your plot should look like below:
###Code
# your code here
from scipy.stats import poisson
N = np.arange(1000)
rv = poisson(300)
plt.plot(N, rv.pmf(N))  # pmf accepts an array, so no explicit Python loop is needed
###Output
_____no_output_____ |
notebooks/TimeEval Bechmark Retry1 result analysis.ipynb | ###Markdown
TimeEval result analysis on the benchmark datasets (retry run 1)
###Code
# imports
import re
import json
import warnings
import pandas as pd
import numpy as np
import scipy as sp
from IPython.display import display, Markdown, Latex
import matplotlib.pyplot as plt
plt.rcParams["figure.figsize"] = (20, 8)
from pathlib import Path
from timeeval import Datasets
###Output
_____no_output_____
###Markdown
Configuration Define data and results folder:
###Code
# constants and configuration
data_path = Path("/home/projects/akita/data") / "benchmark-data" / "data-processed"
result_root_path = Path("/home/projects/akita/results")
experiment_result_folder = "2021-11-16_runtime-benchmark-retry1"
# build paths
result_paths = [d for d in result_root_path.iterdir() if d.is_dir()]
print("Available result directories:")
display(result_paths)
result_path = result_root_path / experiment_result_folder
print("\nSelecting:")
print(f"Data path: {data_path.resolve()}")
print(f"Result path: {result_path.resolve()}")
###Output
Available result directories:
###Markdown
Load results and dataset metadata:
###Code
# load results
print(f"Reading results from {result_path.resolve()}")
df = pd.read_csv(result_path / "results.csv")
# aggregate runtime
df["overall_time"] = df["execute_main_time"].fillna(0) + df["train_main_time"].fillna(0)
# add RANGE_PR_AUC if it is not part of the results
if "RANGE_PR_AUC" not in df.columns:
df["RANGE_PR_AUC"] = np.nan
# remove all duplicates (not necessary, but sometimes, we have some)
df = df.drop_duplicates()
# load dataset metadata
dmgr = Datasets(data_path)
###Output
Reading results from /home/projects/akita/results/2021-11-16_runtime-benchmark-retry1
###Markdown
Define utility functions
###Code
def load_scores_df(algorithm_name, dataset_id, repetition=1):
params_id = df.loc[(df["algorithm"] == algorithm_name) & (df["collection"] == dataset_id[0]) & (df["dataset"] == dataset_id[1]), "hyper_params_id"].item()
path = (
result_path /
algorithm_name /
params_id /
dataset_id[0] /
dataset_id[1] /
str(repetition) /
"anomaly_scores.ts"
)
return pd.read_csv(path, header=None)
###Output
_____no_output_____
###Markdown
Define plotting functions:
###Code
default_use_plotly = True
try:
import plotly.offline
except ImportError:
default_use_plotly = False
def plot_scores(algorithm_name, dataset_id, use_plotly: bool = default_use_plotly, **kwargs):
if not isinstance(algorithm_name, list):
algorithms = [algorithm_name]
else:
algorithms = algorithm_name
# deconstruct dataset ID
collection_name, dataset_name = dataset_id
# load dataset details
df_dataset = dmgr.get_dataset_df(dataset_id)
# check if dataset is multivariate
dataset_dim = df.loc[(df["collection"] == collection_name) & (df["dataset"] == dataset_name), "dataset_input_dimensionality"].unique().item()
dataset_dim = dataset_dim.lower()
auroc = {}
df_scores = pd.DataFrame(index=df_dataset.index)
skip_algos = []
algos = []
for algo in algorithms:
algos.append(algo)
# get algorithm metric results
try:
auroc[algo] = df.loc[(df["algorithm"] == algo) & (df["collection"] == collection_name) & (df["dataset"] == dataset_name), "ROC_AUC"].item()
except ValueError:
warnings.warn(f"No ROC_AUC score found! Probably {algo} was not executed on {dataset_id}.")
auroc[algo] = -1
skip_algos.append(algo)
continue
# load scores
training_type = df.loc[df["algorithm"] == algo, "algo_training_type"].values[0].lower().replace("_", "-")
try:
df_scores[algo] = load_scores_df(algo, dataset_id).iloc[:, 0]
except (ValueError, FileNotFoundError):
warnings.warn(f"No anomaly scores found! Probably {algo} was not executed on {dataset_id}.")
df_scores[algo] = np.nan
skip_algos.append(algo)
algorithms = [a for a in algos if a not in skip_algos]
if use_plotly:
return plot_scores_plotly(algorithms, auroc, df_scores, df_dataset, dataset_dim, dataset_id, **kwargs)
else:
return plot_scores_plt(algorithms, auroc, df_scores, df_dataset, dataset_dim, dataset_id, **kwargs)
def plot_scores_plotly(algorithms, auroc, df_scores, df_dataset, dataset_dim, dataset_id, **kwargs):
import plotly.offline as py
import plotly.graph_objects as go
import plotly.figure_factory as ff
import plotly.express as px
from plotly.subplots import make_subplots
# Create plot
fig = make_subplots(2, 1)
if dataset_dim == "multivariate":
for i in range(1, df_dataset.shape[1]-1):
fig.add_trace(go.Scatter(x=df_dataset.index, y=df_dataset.iloc[:, i], name=df_dataset.columns[i]), 1, 1)
else:
fig.add_trace(go.Scatter(x=df_dataset.index, y=df_dataset.iloc[:, 1], name="timeseries"), 1, 1)
fig.add_trace(go.Scatter(x=df_dataset.index, y=df_dataset["is_anomaly"], name="label"), 2, 1)
for algo in algorithms:
fig.add_trace(go.Scatter(x=df_scores.index, y=df_scores[algo], name=f"{algo}={auroc[algo]:.4f}"), 2, 1)
fig.update_xaxes(matches="x")
fig.update_layout(
title=f"Results of {','.join(np.unique(algorithms))} on {dataset_id}",
height=400
)
return py.iplot(fig)
def plot_scores_plt(algorithms, auroc, df_scores, df_dataset, dataset_dim, dataset_id, **kwargs):
import matplotlib.pyplot as plt
# Create plot
fig, axs = plt.subplots(2, 1, sharex=True, figsize=(20, 8))
if dataset_dim == "multivariate":
for i in range(1, df_dataset.shape[1]-1):
axs[0].plot(df_dataset.index, df_dataset.iloc[:, i], label=df_dataset.columns[i])
else:
        axs[0].plot(df_dataset.index, df_dataset.iloc[:, 1], label="timeseries")
axs[1].plot(df_dataset.index, df_dataset["is_anomaly"], label="label")
for algo in algorithms:
axs[1].plot(df_scores.index, df_scores[algo], label=f"{algo}={auroc[algo]:.4f}")
axs[0].legend()
axs[1].legend()
fig.suptitle(f"Results of {','.join(np.unique(algorithms))} on {dataset_id}")
fig.tight_layout()
return fig
def plot_boxplot(df, n_show = 20, title="Box plots", ax_label="values", fmt_label=lambda x: x, use_plotly=default_use_plotly):
n_show = n_show // 2
title = title + f" (worst {n_show} and best {n_show} algorithms)"
if use_plotly:
import plotly.offline as py
import plotly.graph_objects as go
import plotly.figure_factory as ff
import plotly.express as px
from plotly.subplots import make_subplots
fig = go.Figure()
for i, c in enumerate(df.columns):
fig.add_trace(go.Box(
x=df[c],
name=fmt_label(c),
boxpoints=False,
visible=None if i < n_show or i > len(df.columns)-n_show-1 else "legendonly"
))
fig.update_layout(
title={"text": title, "xanchor": "center", "x": 0.5},
xaxis_title=ax_label,
legend_title="Algorithms"
)
return py.iplot(fig)
else:
df_boxplot = pd.concat([df.iloc[:, :n_show], df.iloc[:, -n_show:]])
labels = df_boxplot.columns
labels = [fmt_label(c) for c in labels]
values = [df_boxplot[c].dropna().values for c in df_boxplot.columns]
fig = plt.figure()
ax = fig.gca()
#ax.boxplot(values, sym="", vert=True, meanline=True, showmeans=True, showfliers=False, manage_ticks=True)
ax.boxplot(values, vert=True, meanline=True, showmeans=True, showfliers=True, manage_ticks=True)
ax.set_ylabel(ax_label)
ax.set_title(title)
ax.set_xticklabels(labels, rotation=-45, ha="left", rotation_mode="anchor")
# add vline to separate bad and good algos
ymin, ymax = ax.get_ylim()
ax.vlines([n_show + 0.5], ymin, ymax, colors="black", linestyles="dashed")
fig.tight_layout()
return fig
def plot_algorithm_bars(df, y_name="ROC_AUC", title="Bar chart for algorithms", use_plotly=default_use_plotly):
    if use_plotly:
        import plotly.offline as py
        import plotly.express as px
        fig = px.bar(df, x="algorithm", y=y_name)
        return py.iplot(fig)
else:
fig = plt.figure()
ax = fig.gca()
ax.bar(df["algorithm"], df[y_name], label=y_name)
ax.set_ylabel(y_name)
ax.set_title(title)
ax.set_xticklabels(df["algorithm"], rotation=-45, ha="left", rotation_mode="anchor")
ax.legend()
return fig
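# Usage sketch (hypothetical algorithm/dataset names - pick ones present in `df`):
# plot_scores(["LOF", "DBStream"], ("GutenTAG", "sinus-diff-count-1"), use_plotly=False)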
###Output
_____no_output_____
###Markdown
Analyze overall results on the GutenTAG datasets

Overview
###Code
df[["algorithm", "collection", "dataset", "status", "ROC_AUC", "AVERAGE_PRECISION", "PR_AUC", "RANGE_PR_AUC", "execute_main_time", "hyper_params"]]
###Output
_____no_output_____
###Markdown
Algorithm problems grouped by algorithm training type
###Code
index_columns = ["algo_training_type", "algo_input_dimensionality", "algorithm"]
df_error_counts = df.pivot_table(index=index_columns, columns=["status"], values="repetition", aggfunc="count")
df_error_counts = df_error_counts.fillna(value=0).astype(np.int64)
df_error_counts = df_error_counts.reset_index().sort_values(by=["algo_input_dimensionality", "Status.ERROR"], ascending=False).set_index(index_columns)
df_error_counts["ALL"] = df_error_counts["Status.ERROR"] + df_error_counts["Status.OK"] + df_error_counts["Status.TIMEOUT"]
for tpe in ["SEMI_SUPERVISED", "SUPERVISED", "UNSUPERVISED"]:
if tpe in df_error_counts.index:
print(tpe)
        if default_use_plotly:
            import plotly.offline as py
            import plotly.figure_factory as ff
            py.iplot(ff.create_table(df_error_counts.loc[tpe], index=True))
else:
display(df_error_counts.loc[tpe])
###Output
SEMI_SUPERVISED
###Markdown
Summary
###Code
df_error_summary = pd.DataFrame(df_error_counts.sum(axis=0))
df_error_summary.columns = ["count"]
all_count = df_error_summary.loc["ALL", "count"]
df_error_summary["percentage"] = df_error_summary / all_count
df_error_summary.style.format({"percentage": "{:06.2%}".format})
###Output
_____no_output_____
###Markdown
Inspect errors of a specific algorithm:
###Code
ok = "- OK -"
oom = "- OOM -"
timeout = "- TIMEOUT -"
error_mapping = {
"TimeoutError": timeout,
"status code '137'": oom,
"MemoryError: Unable to allocate": oom,
"ValueError: Expected 2D array, got 1D array instead": "Wrong shape error",
"could not broadcast input array from shape": "Wrong shape error",
"not aligned": "Wrong shape error", # shapes (20,) and (19,500) not aligned
"array must not contain infs or NaNs": "unexpected Inf or NaN",
"contains NaN": "unexpected Inf or NaN",
"cannot convert float NaN to integer": "unexpected Inf or NaN",
"Error(s) in loading state_dict": "Model loading error",
"EOFError": "Model loading error",
"Restoring from checkpoint failed": "Model loading error",
"RecursionError: maximum recursion depth exceeded in comparison": "Max recursion depth exceeded",
"but PCA is expecting": "BROKEN Exathlon DATASETS", # ValueError: X has 44 features, but PCA is expecting 43 features as input.
"input.size(-1) must be equal to input_size": "BROKEN Exathlon DATASETS",
"ValueError: The condensed distance matrix must contain only finite values.": "LinAlgError",
"LinAlgError": "LinAlgError",
"NameError: name 'nan' is not defined": "Not converged",
"Could not form valid cluster separation": "Not converged",
"contamination must be in": "Invariance/assumption not met",
"Data must not be constant": "Invariance/assumption not met",
"Cannot compute initial seasonals using heuristic method with less than two full seasonal cycles in the data": "Invariance/assumption not met",
"ValueError: Anom detection needs at least 2 periods worth of data": "Invariance/assumption not met",
"`dataset` input should have multiple elements": "Invariance/assumption not met",
"Cannot take a larger sample than population": "Invariance/assumption not met",
"num_samples should be a positive integer value": "Invariance/assumption not met",
"Cannot use heuristic method to compute initial seasonal and levels with less than periods + 10 datapoints": "Invariance/assumption not met",
"ValueError: The window size must be less than or equal to 0": "Invariance/assumption not met",
"The window size must be less than or equal to": "Incompatible parameters",
"window_size has to be greater": "Incompatible parameters",
"Set a higher piecewise_median_period_weeks": "Incompatible parameters",
"OutOfBoundsDatetime: cannot convert input with unit 'm'": "Incompatible parameters",
"`window_size` must be at least 4": "Incompatible parameters",
"elements of 'k' must be between": "Incompatible parameters",
"Expected n_neighbors <= n_samples": "Incompatible parameters",
"PAA size can't be greater than the timeseries size": "Incompatible parameters",
"All window sizes must be greater than or equal to": "Incompatible parameters",
"ValueError: __len__() should return >= 0": "Bug",
"stack expects a non-empty TensorList": "Bug",
"expected non-empty vector": "Bug",
"Found array with 0 feature(s)": "Bug",
"ValueError: On entry to DLASCL parameter number 4 had an illegal value": "Bug",
"Sample larger than population or is negative": "Bug",
"ZeroDivisionError": "Bug",
"IndexError": "Bug",
"status code '139'": "Bug",
"replacement has length zero": "Bug",
"missing value where TRUE/FALSE needed": "Bug",
"invalid subscript type 'list'": "Bug",
"subscript out of bounds": "Bug",
"invalid argument to unary operator": "Bug",
"negative length vectors are not allowed": "Bug",
"negative dimensions are not allowed": "Bug",
"`std` must be positive": "Bug",
"does not have key": "Bug", # State '1' does not have key '1'
"Less than 2 uniques breaks left": "Bug",
"The encoder for value is invalid": "Bug",
"arange: cannot compute length": "Bug",
"n_components=3 must be between 0 and min(n_samples, n_features)": "Bug",
}
def get_folder(index):
series = df.loc[index]
path = (
result_path /
series["algorithm"] /
series["hyper_params_id"] /
series["collection"] /
series["dataset"] /
str(series["repetition"])
)
return path
def category_from_logfile(logfile):
with logfile.open() as fh:
log = fh.read()
for error in error_mapping:
if error in log:
return error_mapping[error]
#print(log)
return "other"
def extract_category(series):
status = series["status"]
msg = series["error_message"]
if status == "Status.OK":
return ok
elif status == "Status.TIMEOUT":
return timeout
# status is ERROR:
elif "DockerAlgorithmFailedError" in msg:
path = get_folder(series.name) / "execution.log"
if path.exists():
return category_from_logfile(path)
return "DockerAlgorithmFailedError"
else:
        m = re.search(r"^([\w]+)\(.*\)", msg)
if m:
error = m.group(1)
else:
error = msg
return f"TimeEval:{error}"
df["error_category"] = df.apply(extract_category, axis="columns", raw=False)
df_error_category_overview = df.pivot_table(index="error_category", columns="algorithm", values="repetition", aggfunc="count")
df_error_category_overview.insert(0, "ALL (sum)", df_error_category_overview.sum(axis=1))
with pd.option_context("display.max_rows", None, "display.max_columns", None):
display(df_error_category_overview.style.format("{:.0f}", na_rep=""))
###Output
_____no_output_____
###Markdown
- SAND "Wrong shape error" --> no anomaly in dataset (anomaly_window_size=0)
- S-H-ESD (Twitter) incompatible parameter errors --> cannot parse datetime index (would require further analysis)
- Left STAMPi incompatible parameter errors --> anomaly_window_size > n_init_train --> fixed, but **needs re-execution**
- normal baseline TimeEval:KilledWorker --> is likely the source of these errors (on LTDB dataset) --> **try with Docker baseline**
- TimeEval:ValueError --> all on the genesis dataset: had an error in labeling --> fixed, but **needs re-execution**
- Invariance/assumption not met errors:
  - Dataset Exathlon 1_2_100000_68-16 has too many anomalies (contamination > 0.5)
  - Left STAMPi has anomaly_window_size > n_init_train; those datasets cannot be processed by this algorithm
  - TripleES fails with `Cannot compute initial seasonals using heuristic method with less than two full seasonal cycles in the data` --> no idea how to fix; possibly just bad datasets for this method
###Code
df[
(df["error_category"] == "Incompatible parameters") &
(df["algorithm"] == "Left STAMPi")
]
df[
#(df["error_category"] == "TimeEval:KilledWorker") &
(df["algorithm"] == "VALMOD")
][["algorithm", "collection", "dataset", "status", "error_message", "error_category"]]
error_category = "Incompatible parameters"
df_invalid_params = df[(df["error_category"] == error_category)].groupby(by="algorithm")[["repetition"]].count().sort_values("repetition", ascending=False)
print(f"{error_category} error algorithms:")
display(df_invalid_params.T)
print(f"{error_category} error datasets:")
df_broken_datasets = df[(df["error_category"] == error_category)].groupby(by="dataset")[["repetition"]].count().sort_values("repetition", ascending=False)
display(df_broken_datasets.T)
###Output
Incompatible parameters error algorithms:
###Markdown
Algorithm quality assessment based on ROC_AUC
###Code
aggregations = ["min", "mean", "median", "max"]
dominant_aggregation = "mean"
df_overall_scores = df.pivot_table(index="algorithm", values="ROC_AUC", aggfunc=aggregations)
df_overall_scores.columns = aggregations
df_overall_scores = df_overall_scores.sort_values(by=dominant_aggregation, ascending=False)
df_asl = df.pivot(index="algorithm", columns=["collection", "dataset"], values="ROC_AUC")
df_asl = df_asl.dropna(axis=0, how="all").dropna(axis=1, how="all")
df_asl[dominant_aggregation] = df_asl.agg(dominant_aggregation, axis=1)
df_asl = df_asl.sort_values(by=dominant_aggregation, ascending=True)
df_asl = df_asl.drop(columns=dominant_aggregation).T
with pd.option_context("display.max_rows", None, "display.max_columns", None):
display(df_overall_scores.T)
n_show = 20
dataset_count_lut = (df_error_counts["Status.OK"] / df_error_counts["ALL"]).reset_index().set_index("algorithm").drop(columns=["algo_training_type", "algo_input_dimensionality"]).iloc[:, 0]
fmt_label = lambda c: f"{c} ({dataset_count_lut[c]:6.2%} of datasets)"
fig = plot_boxplot(df_asl, title="AUC_ROC box plots", ax_label="AUC_ROC score", fmt_label=fmt_label, n_show=n_show)
###Output
_____no_output_____ |
Programs/PyProg/HW3_HUA.ipynb | ###Markdown
Homework 3

Please write a function that returns N samples from a normal distribution with a given mean and standard deviation. The only function you are allowed to use is `numpy.random.rand()`, which returns random samples from a uniform distribution over [0, 1). There are again many ways of doing this. Please implement the code yourself and don't copy from the internet.
###Code
%matplotlib inline
import numpy as np
from numpy.random import rand
import matplotlib.pyplot as plt
import seaborn as sns
###Output
_____no_output_____
###Markdown
Introduction

As we know, the PDF of a 1-D normal distribution is

$$f(x)=\frac{1}{{\sigma \sqrt {2\pi } }}e^{-\frac{(x-\mu)^2}{2{\sigma}^2}}$$

Therefore, the CDF of this normal distribution is

$$F(x)=\frac{1}{{\sigma \sqrt {2\pi } }}\int_{-\infty}^{x}e^{-\frac{(\xi-\mu)^2}{2{\sigma}^2}}d\xi$$

Here, we use a 2-D normal distribution with mean = 0 and standard deviation = 1 to study the CDF:

$$F(x,y)=\frac{1}{{2\pi}} \iint_{-\infty}^{(x,y)} e^{-\frac{\xi^2+\eta^2}{2}}d\xi d\eta$$

Transforming the equation above into polar coordinates, we have

$$F(r)=\frac{1}{{2\pi}} \int_0^{2\pi} \int_0^r \rho e^{-\frac{\rho^2}{2}}d\rho d\theta = 1-e^{-\frac{r^2}{2}}$$

Thus, the inverse function of $F(r)$ is

$$R = F^{-1}(z) = \sqrt{-2 \ln(1-z)}$$

where $z \sim U(0,1)$.

Since $X = R\cos(\theta)$ and $Y = R\sin(\theta)$, we let $U_1 = 1-z$ and $U_2 = \theta/2\pi$, and we get

$$X = \sqrt{-2 \ln(U_1)}\cos(2\pi U_2), \quad Y = \sqrt{-2 \ln(U_1)}\sin(2\pi U_2)$$

where $X$ and $Y$ both follow the 1-D standard normal distribution. So $X\sigma+\mu$ and $Y\sigma+\mu$ follow the 1-D normal distribution with mean $\mu$ and SD $\sigma$.
###Code
def normal_sample(mean, stddev, N):
# put your code here. You can only use rand() for creating random numbers
#mean,stddev = np.double(mean),np.double(stddev)
    U1 = np.random.rand(1,N)
    U2 = np.random.rand(1,N)
    # Box-Muller transform: both outputs use the same (U1, U2) pair,
    # matching the derivation above
    X = np.sqrt(-2*np.log(U1))*np.cos(2*np.pi*U2)
    Y = np.sqrt(-2*np.log(U1))*np.sin(2*np.pi*U2)
    X = np.array(stddev*X+mean)
    Y = np.array(stddev*Y+mean)
    Norm = np.hstack((X,Y))
    Hist = plt.hist(Norm.flatten(), bins=50)
# --------------------verification-------------------------
x0 = np.linspace(mean-4*stddev,mean+4*stddev,200)
Gauss = 1.0/stddev/np.sqrt(2*np.pi)*np.exp(-(x0-mean)**2 / (2*stddev**2))
plt.plot(x0,Gauss/max(Gauss)*max(Hist[0]),color = 'red',label='Verification')
plt.legend(fontsize=10)
plt.show()
return Hist[0]
h = normal_sample(-12,0.5,10000)
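# Sanity check (a sketch): draw raw Box-Muller samples directly and verify that
# their empirical moments match the requested mean and standard deviation.
u1, u2 = np.random.rand(100000), np.random.rand(100000)
z = np.sqrt(-2*np.log(u1)) * np.cos(2*np.pi*u2)
print("mean (should be ~ -12):", (-12 + 0.5*z).mean())
print("std  (should be ~ 0.5):", (0.5*z).std())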
###Output
_____no_output_____ |
notebooks/behavior_of_latent_space.ipynb | ###Markdown
Deblend stamps randomly generated from DC2 data

Load 10 DC2 images centred on a galaxy. They were generated using this notebook: https://github.com/BastienArcelin/dc2_img_generation/blob/main/notebooks/dc2_stamps_and_corresponding_parameters.ipynb
###Code
data_folder_path = pkg_resources.resource_filename('debvader', "data/")
image_path = os.path.join(data_folder_path + 'dc2_imgs/imgs_dc2.npy')
images = np.load(image_path, mmap_mode = 'c')
###Output
_____no_output_____
###Markdown
Visualize some of the images
###Code
fig, axes = plt.subplots(1,3, figsize = (12, 4))
for i in range (3):
axes[i].imshow(images[i,:,:,2]) # We plot only r-band here, but the images are multi-bands (ugrizy)
###Output
_____no_output_____
###Markdown
Now we can load the deblender
###Code
# First, define the parameters of the neural network; for this version of debvader they are as follows:
nb_of_bands = 6
input_shape = (59, 59, nb_of_bands)
latent_dim = 32
filters = [32,64,128,256]
kernels = [3,3,3,3]
# We will load the weights of the network trained on DC2 images
survey = "dc2"
# Load the network using the load_deblender function
net, encoder, decoder, z = load_deblender(survey, input_shape, latent_dim, filters, kernels, return_encoder_decoder_z=True)
# We can visualize the network
net.summary()
###Output
Model: "model_2"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_3 (InputLayer) [(None, 59, 59, 6)] 0
_________________________________________________________________
model (Model) (None, 560) 3741224
_________________________________________________________________
multivariate_normal_tri_l (M ((None, 32), (None, 32)) 0
_________________________________________________________________
model_1 (Model) (None, 59, 59, 6) 4577228
=================================================================
Total params: 8,318,452
Trainable params: 3,741,212
Non-trainable params: 4,577,240
_________________________________________________________________
###Markdown
We now have everything to do the deblending. Let's use our network to deblend the DC2 images
###Code
output_images_mean, output_images_distribution = deblend(net, images)
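# Quick shape check (assuming the deblender returns array-like stamps):
# one output stamp per input image, in all six bands.
print(output_images_mean.shape)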
###Output
_____no_output_____
###Markdown
Here the network outputs a distribution for each pixel in each filter, which is why both the mean image and the corresponding distribution are returned. Let's first visualize the mean images output by debvader.
###Code
fig, axes = plt.subplots(3,3, figsize = (12, 12))
for i in range (3):
axes[i,0].imshow(images[i,:,:,2]) # We plot only r-band here, but the images are multi-bands (ugrizy)
axes[i,1].imshow(output_images_mean[i,:,:,2]) # We plot only r-band here, but the output images are multi-bands (ugrizy)
axes[i,2].imshow(images[i,:,:,2]-output_images_mean[i,:,:,2])
import pandas as pd
root_dir = "/pbs/home/b/barcelin/sps_link/data/dc2_test/24.5/test/"
images_noiseless = np.load(root_dir+'img_noiseless_sample_2.npy', mmap_mode = 'c')
images_noisy = np.load(root_dir+'img_cropped_sample_2.npy', mmap_mode = 'c')
data = pd.read_csv(root_dir+'img_noiseless_data_2.csv')
latent_space_distribution_noiseless = z(tf.cast(images_noiseless[:1000], tf.float32))
latent_space_distribution_noisy = z(tf.cast(images_noisy[:1000], tf.float32))
#print(latent_space_distribution)
plt.plot(np.linspace(0,32, 32), np.mean(latent_space_distribution_noiseless.stddev().numpy(), axis = 0), '.', color = 'blue', alpha = 0.3)
plt.plot(np.linspace(0,32, 32), np.mean(latent_space_distribution_noisy.stddev().numpy(), axis = 0), '.', color = 'red', alpha = 0.3)  # noisy case for comparison
fig, axes = plt.subplots(1,2 ,figsize = (20,5))
for i in range (2):
_ = axes[i].hist(np.concatenate(latent_space_distribution_noiseless.stddev().numpy(), axis = 0), bins = 100, alpha = 0.3 , label = 'noiseless')
_ = axes[i].hist(np.concatenate(latent_space_distribution_noisy.stddev().numpy(), axis = 0), bins = 100, alpha = 0.3, label = 'blended and noisy')
axes[0].legend()
axes[1].set_xlim(0, 0.8)
axes[1].set_ylim(0, 1000)
plt.title('standard deviation of latent space')
fig, axes = plt.subplots(1,2 ,figsize = (20,5))
for i in range (2):
_ = axes[i].hist(np.concatenate(latent_space_distribution_noiseless.mean().numpy(), axis = 0), bins = 100, alpha = 0.3 , label = 'noiseless')
_ = axes[i].hist(np.concatenate(latent_space_distribution_noisy.mean().numpy(), axis = 0), bins = 100, alpha = 0.3, label = 'blended and noisy')
axes[0].legend()
axes[1].set_xlim(-5, 5)
axes[1].set_yscale('log')
plt.title('means of latent space')
###Output
_____no_output_____
###Markdown
Now let's visualize these latent-space statistics as a function of blendedness.
###Code
latent_space_distribution_noiseless.stddev().numpy().shape
fig, axes = plt.subplots(1,2 ,figsize = (20,5))
for i in range (2):
_ = axes[i].plot(data['blendedness'][:1000], np.mean(latent_space_distribution_noiseless.stddev().numpy(), axis = 1), '.',color = 'blue', alpha = 0.3 , label = 'noiseless')
_ = axes[i].plot(data['blendedness'][:1000], np.mean(latent_space_distribution_noisy.stddev().numpy(), axis = 1),'.', color = 'red', alpha = 0.3 , label = 'noisy')
axes[0].legend()
axes[0].set_xscale('log')
axes[0].set_title('mean std in latent space as a function of blendedness')
###Output
_____no_output_____
###Markdown
Now we can look at what the images of the standard deviation look like for each example, and how we can sample this distribution in each pixel.
###Code
output_uncertainty_mean = output_images_distribution.mean().numpy() # Extract the mean of the distribution. Same image as output_images_mean.
output_uncertainty_std = output_images_distribution.stddev().numpy() # Extract the standard deviation of the distribution.
output_uncertainty_sample = tf.math.reduce_mean(output_images_distribution.sample(100), axis = 0).numpy() # Sample 100 times the distribution in each pixel and produce a mean image.
fig, axes = plt.subplots(3,6, figsize = (28, 12))
for i in range (3):
f1 = axes[i,0].imshow(images[i,:,:,2])
f2 = axes[i,1].imshow(output_uncertainty_mean[i,:,:,2])
f3 = axes[i,2].imshow(output_uncertainty_std[i,:,:,2])
f4 = axes[i,3].imshow(output_uncertainty_sample[i,:,:,2])
f5 = axes[i,4].imshow(images[i,:,:,2] - output_uncertainty_mean[i,:,:,2])
f6 = axes[i,5].imshow(images[i,:,:,2] - output_uncertainty_sample[i,:,:,2])
fig.colorbar(f1, ax = axes[i,0])
fig.colorbar(f2, ax = axes[i,1])
fig.colorbar(f3, ax = axes[i,2])
fig.colorbar(f4, ax = axes[i,3])
fig.colorbar(f5, ax = axes[i,4])
fig.colorbar(f6, ax = axes[i,5])
axes[i,0].set_title('Input')
axes[i,1].set_title('output mean flux')
axes[i,2].set_title('output std of flux per pixel')
axes[i,3].set_title('output mean \n of 100 sample')
axes[i,4].set_title('target - output mean')
axes[i,5].set_title('target - output \n 100 sample')
###Output
_____no_output_____ |
Fashion_Data_Notebook.ipynb | ###Markdown
CNN With Fashion Data

A multi-layer CNN for clothing-type classification, trained on the Fashion-MNIST dataset from Zalando Research.
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import seaborn as sns
%matplotlib inline
# `%matplotlib inline` above renders matplotlib figures directly in the notebook
np.random.seed(2) # seed the random number generator for reproducibility (not the number of epochs)
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix # tabulates predicted vs. true labels
import itertools
from keras.utils.np_utils import to_categorical # converts integer labels to one-hot vectors
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D # layers used to build the network
from keras.optimizers import RMSprop # adaptive-learning-rate variant of gradient descent
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ReduceLROnPlateau
sns.set(style = 'dark', context = 'notebook', palette = 'deep')
train = pd.read_csv('fashion-mnist_train.csv')
test = pd.read_csv('fashion-mnist_test.csv')
# digit recognizer tutorial's 'test' doesn't have a 'label'
# column, but this fashion data has a label column on both
# the test and train data, need to drop from both when checking
# checking for missing or corrupted images
Y_train = train['label']
X_train = train.drop(labels = ['label'], axis = 1)
Y_test = test['label']
X_test = test.drop(labels = ['label'], axis = 1)
del train # free the memory held by the raw dataframe; this saves RAM but does not speed up training
del test
g = sns.countplot(Y_train)
g1 = sns.countplot(Y_test)
Y_train.value_counts()
Y_test.value_counts()
# Fashion-MNIST is balanced: 6000 images per class in train and 1000 per class in test.
# Both countplots draw on the same axes, which is why only one combined plot appears.
X_train.isnull().any().describe() # checking train for missing data
X_test.isnull().any().describe() # checking test for missing data
# describe() of the boolean mask: top=False means no column contains missing values
X_train = X_train / 255.0 # 255 is the maximum grayscale pixel value, so dividing by it
X_test = X_test / 255.0   # rescales all inputs into [0, 1] (standard normalization)
X_train = X_train.values.reshape(-1,28,28,1)
X_test = X_test.values.reshape(-1,28,28,1)
# -1 lets NumPy infer the number of samples; each image becomes a 28x28x1 array
# Create dictionary of target classes
label_dict = {
0: 'Cute_lil_top',
1: 'Hot_pants',
2: 'Hoodie',
3: 'Dress_Up',
4: 'Bundles_up_bitches',
5: 'Toe_flaunters',
6: 'Shirt',
7: 'Sneakaaas',
8: 'Hold_my_purse',
9: 'Booties'
}
# one-hot encoding: each integer class label becomes a 10-element binary vector
Y_train = to_categorical(Y_train, num_classes = 10)
random_seed = 2
#Split training and make validation sets
X_train, X_val, Y_train, Y_val = train_test_split(X_train, Y_train, test_size = 0.1, random_state = random_seed)
# ^10% of this data validated, 90% for training
g = plt.imshow(X_train[42985][:,:,0])
#Setting CNN Model
model = Sequential()
model.add(Conv2D(filters = 32, kernel_size = (5,5), padding = 'Same',
activation = 'relu', input_shape = (28,28,1)))
model.add(Conv2D(filters = 32, kernel_size = (5,5), padding = 'Same',
activation = 'relu'))
model.add(MaxPool2D(pool_size = (2,2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters = 64, kernel_size = (3,3), padding = 'Same',
activation = 'relu'))
model.add(Conv2D(filters = 64, kernel_size = (3,3), padding = 'Same',
activation = 'relu'))
model.add(MaxPool2D(pool_size = (2,2), strides = (2,2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(256, activation = 'relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation = 'softmax'))
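# Optional: print a layer-by-layer overview with output shapes and parameter counts
model.summary()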
optimizer = RMSprop(lr = 0.001, rho = 0.9, epsilon = 1e-08, decay = 0.0)
model.compile(optimizer = optimizer, loss = 'categorical_crossentropy', metrics = ['accuracy'])
learning_rate_reduction = ReduceLROnPlateau(monitor='val_acc',
patience=3,
verbose=1,
factor=0.5,
min_lr=0.00001)
epochs = 50 # Changing Epochs doesn't necessarily improve accuracy in this data's case
batch_size = 86 # arbitrary choice carried over from the digit-recognizer tutorial; similar sizes work too
# With Data Augmentation
datagen = ImageDataGenerator(
featurewise_center=False, # set input mean to 0 over the dataset
samplewise_center=False, # set each sample mean to 0
featurewise_std_normalization=False, # divide inputs by std of the dataset
samplewise_std_normalization=False, # divide each input by its std
zca_whitening=False, # apply ZCA whitening
rotation_range=10, # randomly rotate images in the range (degrees, 0 to 180)
zoom_range = 0.1, # Randomly zoom image
width_shift_range=0.1, # randomly shift images horizontally (fraction of total width)
height_shift_range=0.1, # randomly shift images vertically (fraction of total height)
horizontal_flip=False, # randomly flip images
vertical_flip=False) # randomly flip images
datagen.fit(X_train)
# Fitting Model With Data Augmentation
history = model.fit_generator(datagen.flow(X_train,Y_train, batch_size=batch_size),
epochs = epochs, validation_data = (X_val,Y_val),
verbose = 2, steps_per_epoch=X_train.shape[0] // batch_size
, callbacks=[learning_rate_reduction])
# Without Data Augmentation (Fitting model)
history = model.fit(X_train, Y_train, batch_size = batch_size, epochs = epochs, validation_data = (X_val, Y_val), verbose = 2)
# Confusion Matrix (also needed for error display)
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, cm[i, j],
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
# Predict the values from the validation dataset
Y_pred = model.predict(X_val)
# Convert predictions classes to one hot vectors
Y_pred_classes = np.argmax(Y_pred,axis = 1)
# Convert validation observations to one hot vectors
Y_true = np.argmax(Y_val,axis = 1)
# compute the confusion matrix
confusion_mtx = confusion_matrix(Y_true, Y_pred_classes)
# plot the confusion matrix
plot_confusion_matrix(confusion_mtx, classes = range(10))
# Display some error results
# Errors are difference between predicted labels and true labels
errors = (Y_pred_classes - Y_true != 0)
Y_pred_classes_errors = Y_pred_classes[errors]
Y_pred_errors = Y_pred[errors]
Y_true_errors = Y_true[errors]
X_val_errors = X_val[errors]
def display_errors(errors_index,img_errors,pred_errors, obs_errors):
""" This function shows 6 images with their predicted and real labels"""
n = 0
nrows = 2
ncols = 3
fig, ax = plt.subplots(nrows,ncols,sharex=True,sharey=True)
for row in range(nrows):
for col in range(ncols):
error = errors_index[n]
ax[row,col].imshow((img_errors[error]).reshape((28,28)))
ax[row,col].set_title("Predicted label :{}\nTrue label :{}".format(pred_errors[error],obs_errors[error]))
n += 1
# Probabilities of the wrong predicted numbers
Y_pred_errors_prob = np.max(Y_pred_errors,axis = 1)
# Predicted probabilities of the true values in the error set
true_prob_errors = np.diagonal(np.take(Y_pred_errors, Y_true_errors, axis=1))
# Difference between the probability of the predicted label and the true label
delta_pred_true_errors = Y_pred_errors_prob - true_prob_errors
# Sorted list of the delta prob errors
sorted_dela_errors = np.argsort(delta_pred_true_errors)
# Top 6 errors
most_important_errors = sorted_dela_errors[-6:]
# Show the top 6 errors
display_errors(most_important_errors, X_val_errors, Y_pred_classes_errors, Y_true_errors)
# predict results
results = model.predict(X_test)
# select the index with the maximum probability
results = np.argmax(results,axis = 1)
results = pd.Series(results,name="Label")
###Output
_____no_output_____ |
notebook/analisi_ultimi_14_giorni_sud.ipynb | ###Markdown
Analysis of data for the southern Italian regions over the last 14 available days

The processed data are those in the file *dpc-covid19-ita-regioni.json* in the *dati-json* directory.

**Data processing:**
* extract the dataset
* find the latest date contained in the dataset
* compute the reference date used to select the data of the previous 14 days
* extract sub-datasets with the regional data needed for this analysis, restricted to the time period under examination
###Code
import pandas as pd
pd.plotting.register_matplotlib_converters()
dataset = pd.read_json('../dati-json/dpc-covid19-ita-regioni.json')
dataset['data'] = pd.to_datetime(dataset['data'])
dataset.set_index('data', inplace=True)
max_date = dataset.index.max()
from datetime import timedelta
ref_date = max_date - timedelta(days=14)
regioni = ['Abruzzo', 'Basilicata', 'Calabria', 'Campania', 'Molise', 'Puglia', 'Sardegna', 'Sicilia']
data_filter = (dataset.index > ref_date) & (dataset.denominazione_regione.isin(regioni))
filtered_set = dataset[data_filter].loc[:, ['denominazione_regione', 'ricoverati_con_sintomi', 'terapia_intensiva', 'totale_ospedalizzati', 'isolamento_domiciliare', 'totale_positivi', 'variazione_totale_positivi', 'nuovi_positivi', 'dimessi_guariti', 'deceduti', 'casi_da_sospetto_diagnostico', 'casi_da_screening', 'totale_casi', 'tamponi', 'casi_testati']]
subsets = []
for r in regioni:
subsets.append((r, filtered_set[filtered_set.denominazione_regione == r].sort_index()))
import matplotlib.pyplot as plt
plt.rc('xtick', labelsize=15)
plt.rc('ytick', labelsize=15)
default_figsize = (24, 6)
default_titlesize = 20
default_padding = 8
###Output
_____no_output_____
###Markdown
Trend of patients hospitalized with symptoms
###Code
plt.figure(figsize=default_figsize)
for (r, s) in subsets:
plt.plot(s.index, s['ricoverati_con_sintomi'].values, label=r)
plt.title('Ricoverati con sintomi', fontsize=default_titlesize, pad=default_padding)
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
Trend of intensive care unit occupancy
###Code
plt.figure(figsize=default_figsize)
col_name = 'terapia_intensiva'
for (r, s) in subsets:
plt.plot(s.index, s['terapia_intensiva'].values, label=r)
plt.title('Terapia intensiva', fontsize=default_titlesize, pad=default_padding)
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
Trend of total currently positive cases

Computed with the formula: $totale\_ospedalizzati + isolamento\_domiciliare$
###Code
plt.figure(figsize=default_figsize)
for (r, s) in subsets:
plt.plot(s.index, s['totale_positivi'].values, label=r)
plt.title('Totale positivi', fontsize=default_titlesize, pad=default_padding)
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
Trend of the variation in total positive cases

Computed with the formula: $totale\_positivi\ (current\ day) - totale\_positivi\ (previous\ day)$
###Code
plt.figure(figsize=default_figsize)
for (r, s) in subsets:
plt.plot(s.index, s['variazione_totale_positivi'].values, label=r)
plt.title('Variazione totale positivi', fontsize=default_titlesize, pad=default_padding)
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
Trend of new positive cases

Computed with the formula: $totale\_casi\ (current\ day) - totale\_casi\ (previous\ day)$
###Code
plt.figure(figsize=default_figsize)
for (r, s) in subsets:
plt.plot(s.index, s['nuovi_positivi'].values, label=r)
plt.title('Nuovi positivi', fontsize=default_titlesize, pad=default_padding)
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
Trend of deaths

The figure is cumulative.
###Code
plt.figure(figsize=default_figsize)
col_name = 'deceduti'
for (r, s) in subsets:
plt.plot(s.index, s['deceduti'].values, label=r)
plt.title('Deceduti', fontsize=default_titlesize, pad=default_padding)
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
Analysis of the variation of data provided in cumulative form
###Code
diff_col = ['ricoverati_con_sintomi', 'terapia_intensiva', 'totale_ospedalizzati', 'isolamento_domiciliare', 'dimessi_guariti', 'deceduti', 'casi_da_sospetto_diagnostico', 'casi_da_screening', 'tamponi', 'casi_testati']
diff_subsets = []
for (r, s) in subsets:
diff_subsets.append((r, s.loc[:, diff_col].diff()))
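# Note: the first row of each differenced frame is NaN by construction
# (there is no previous day to subtract); inspect one region as a sanity check.
diff_subsets[0][1].head()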
###Output
_____no_output_____
###Markdown
Variation in patients hospitalized with symptoms

Computed with the formula: $ricoverati\_con\_sintomi\ (day\ X) - ricoverati\_con\_sintomi\ (day\ X-1)$
###Code
plt.figure(figsize=default_figsize)
for (r, s) in diff_subsets:
plt.plot(s.index, s['ricoverati_con_sintomi'].values, label=r)
plt.title('Variazione ricoverati con sintomi', fontsize=default_titlesize, pad=default_padding)
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
Variation in intensive care occupancy

Computed with the formula: $terapia\_intensiva\ (day\ X) - terapia\_intensiva\ (day\ X-1)$
###Code
plt.figure(figsize=default_figsize)
for (r, s) in diff_subsets:
plt.plot(s.index, s['terapia_intensiva'].values, label=r)
plt.title('Variazione terapia intensiva', fontsize=default_titlesize, pad=default_padding)
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
Variation in deaths

Computed with the formula: $deceduti\ (day\ X) - deceduti\ (day\ X-1)$
###Code
plt.figure(figsize=default_figsize)
for (r, s) in diff_subsets:
plt.plot(s.index, s['deceduti'].values, label=r)
plt.title('Variazione deceduti', fontsize=default_titlesize, pad=default_padding)
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
Variation in swabs (tests)

Computed with the formula: $tamponi\ (day\ X) - tamponi\ (day\ X-1)$
###Code
plt.figure(figsize=default_figsize)
for (r, s) in diff_subsets:
plt.plot(s.index, s['tamponi'].values, label=r)
plt.title('Variazione tamponi', fontsize=default_titlesize, pad=default_padding)
plt.legend()
plt.show()
###Output
_____no_output_____ |
Chapter_Preprocessing/Wrapper_Methods_backward_SFS.ipynb | ###Markdown
Chapter: Data Preprocessing

Topic: Wrapper Method: Backward SFS
###Code
# read data
import numpy as np
VSdata = np.loadtxt('VSdata.csv', delimiter=',')
# separate X and y
y = VSdata[:,0]
X = VSdata[:,1:]
# scale data
from sklearn.preprocessing import StandardScaler
xscaler = StandardScaler()
X_scaled = xscaler.fit_transform(X)
yscaler = StandardScaler()
y_scaled = yscaler.fit_transform(y[:,None])
# SFS-based variable selection
from sklearn.feature_selection import SequentialFeatureSelector
from sklearn.linear_model import LinearRegression
BSFS = SequentialFeatureSelector(LinearRegression(), n_features_to_select=10, direction='backward', cv=5).fit(X_scaled, y_scaled)
# check selected inputs
print('Inputs selected: ', BSFS.get_support(indices=True)+1) # returns integer index of the features selected
# reduce X to only top relevant inputs
X_relevant = BSFS.transform(X)
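# For comparison (optional sketch), forward selection only changes `direction`:
# FSFS = SequentialFeatureSelector(LinearRegression(), n_features_to_select=10,
#                                  direction='forward', cv=5).fit(X_scaled, y_scaled)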
###Output
_____no_output_____ |
Numpy/More_elaborate_arrays.ipynb | ###Markdown
More data types

Casting

**"Bigger" type wins in mixed-type operations:**
###Code
import numpy as np
np.array([1, 2, 3]) + 1.5
###Output
_____no_output_____
###Markdown
**Assignment never changes the type!**
###Code
a = np.array([1, 2, 3])
a.dtype
a[0] = 1.9 # <-- float is truncated to integer
a
a[0] = 4
a
###Output
_____no_output_____
###Markdown
**Forced casts:**
###Code
a = np.array([1.7, 1.2, 1.6])
b = a.astype(int) # truncates to int
b
###Output
_____no_output_____
###Markdown
**Rounding:**
###Code
a = np.array([1.7, 1.2, 1.6, 2.2, 3.8])
b= np.around(a)
b
c = np.around(a).astype(int)
c
###Output
_____no_output_____
###Markdown
Different data type sizes
###Code
np.array([1], dtype=int).dtype
np.iinfo(np.int32).max, 2**31 - 1
np.iinfo(np.int64).max, 2**63 - 1
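# Floating-point limits are exposed the same way through finfo:
np.finfo(np.float64).max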
###Output
_____no_output_____
###Markdown
Structured data types

* sensor_code (4-character string)
* position (float)
* value (float)
###Code
samples = np.zeros((6,), dtype=[('sensor_code', 'S4'), ('position', float), ('value', float)])
samples.ndim
samples.shape
samples.dtype.names
samples[:] = [('ALFA', 1, 0.37), ('BETA', 1, 0.11), ('TAU', 1, 0.13), ('ALFA', 1.5, 0.37), ('ALFA', 3, 0.11), ('TAU', 1.2, 0.13)]
samples
###Output
_____no_output_____
###Markdown
**Field access works by indexing with field names:**
###Code
samples['sensor_code']
samples['value']
samples[0]
samples[0]['sensor_code']
###Output
_____no_output_____
###Markdown
**Multiple fields at once:**
###Code
samples[['position', 'value']]
###Output
_____no_output_____
###Markdown
**Fancy indexing works, as usual:**
###Code
samples[samples['sensor_code'] == b'ALFA']  # 'S4' fields hold bytes, so compare with a bytes literal
###Output
_____no_output_____
###Markdown
Masked arrays: dealing with (propagation of) missing data

**For floats one could use NaNs, but masks work for all types:**
###Code
x = np.ma.array([1, 2, 3, 4], mask=[0, 1, 0, 1])
x
y = np.ma.array([1, 2, 3, 4], mask=[0, 1, 1, 1])
y
x + y
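# Masked entries can be replaced by a fill value when a plain array is needed:
(x + y).filled(0)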
###Output
_____no_output_____
###Markdown
**Masked versions of common functions:**
###Code
np.ma.sqrt([1, -1, 2, -2])
###Output
_____no_output_____ |
ML_6_Horse_Classificatiol.ipynb | ###Markdown
ML Horse Classification

**Kapil Nagwanshi**
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from pandas.plotting import scatter_matrix
%matplotlib inline
cd 'gdrive/My Drive/Colab Notebooks'
animals = pd.read_csv("horse.csv")
animals.head()
target = animals['outcome']
target.unique()
animals =animals.drop(['outcome'], axis =1)
category_variables = ['surgery', 'age','temp_of_extremities','peripheral_pulse',
'mucous_membrane', 'capillary_refill_time', 'pain', 'peristalsis',
'abdominal_distention', 'nasogastric_tube', 'nasogastric_reflux' ,
'rectal_exam_feces', 'abdomen', 'abdomo_appearance' ,'surgical_lesion',
'cp_data']
for category in category_variables:
    # encode each categorical column as integer codes; assigning the multi-column
    # frame returned by pd.get_dummies to a single column would fail
    animals[category] = animals[category].astype('category').cat.codes
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
X,y = animals.values,target.values
le = LabelEncoder()
y = le.fit_transform(y)
xtrain,xtest,ytrain, ytest =train_test_split(X,y,test_size=0.2,random_state=1)
from sklearn.tree import DecisionTreeClassifier
print(xtrain.shape)
from sklearn.impute import SimpleImputer
imp = SimpleImputer(missing_values = np.nan,strategy='most_frequent')
xtrain = imp.fit_transform(xtrain)
xtest = imp.transform(xtest) # reuse the imputation statistics fitted on the training set
classifier = DecisionTreeClassifier()
classifier.fit(xtrain,ytrain)
ypredict = classifier.predict(xtest)
from sklearn.metrics import accuracy_score
accuracy =accuracy_score(ypredict,ytest)
print(accuracy)
from sklearn.ensemble import RandomForestClassifier
rfc =RandomForestClassifier()
rfc.fit(xtrain,ytrain)
ypredict = rfc.predict(xtest)
print(accuracy_score(ypredict,ytest))
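# A per-class breakdown (precision/recall/F1) is more informative than accuracy alone:
from sklearn.metrics import classification_report
print(classification_report(ytest, ypredict))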
###Output
_____no_output_____ |
site/zh-cn/guide/eager.ipynb | ###Markdown
Copyright 2018 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
Eager Execution

View on TensorFlow.org | Run in Google Colab | View source on GitHub | Download notebook

TensorFlow's eager execution is an imperative programming environment that evaluates operations immediately, without building graphs: operations return concrete values instead of constructing a computational graph to run later. This makes it easy to get started with TensorFlow and to debug models, and it reduces boilerplate as well. To follow along with this guide, run the code samples below in an interactive `python` interpreter.

Eager execution is a flexible machine learning platform for research and experimentation, providing:

- *An intuitive interface* - Structure your code naturally and use Python data structures. Quickly iterate on small models and small data.
- *Easier debugging* - Call ops directly to inspect running models and test changes. Use standard Python debugging tools for immediate error reporting.
- *Natural control flow* - Use Python control flow instead of graph control flow, simplifying the specification of dynamic models.

Eager execution supports most TensorFlow operations and GPU acceleration.

Note: Some models may experience increased overhead with eager execution enabled. Performance improvements are ongoing, but please [file a bug](https://github.com/tensorflow/tensorflow/issues) if you find a problem and share your benchmarks.

Setup and basic usage
###Code
import os
import tensorflow as tf
import cProfile
###Output
_____no_output_____
###Markdown
In TensorFlow 2.0, eager execution is enabled by default.
###Code
tf.executing_eagerly()
###Output
_____no_output_____
###Markdown
Now you can run TensorFlow operations and the results will return immediately:
###Code
x = [[2.]]
m = tf.matmul(x, x)
print("hello, {}".format(m))
###Output
_____no_output_____
###Markdown
Enabling eager execution changes how TensorFlow operations behave - now they immediately evaluate and return their values to Python. `tf.Tensor` objects reference concrete values instead of symbolic handles to nodes in a computational graph. Since there isn't a computational graph to build and run later in a session, it's easy to inspect results using `print()` or a debugger. Evaluating, printing, and checking tensor values does not break the flow for computing gradients.

Eager execution works nicely with [NumPy](http://www.numpy.org/). NumPy operations accept `tf.Tensor` arguments. The TensorFlow `tf.math` operations convert Python objects and NumPy arrays to `tf.Tensor` objects. The `tf.Tensor.numpy` method returns the object's value as a NumPy `ndarray`.
###Code
a = tf.constant([[1, 2],
[3, 4]])
print(a)
# Broadcasting support
b = tf.add(a, 1)
print(b)
# Operator overloading is supported
print(a * b)
# Use NumPy values
import numpy as np
c = np.multiply(a, b)
print(c)
# Obtain numpy value from a tensor:
print(a.numpy())
# => [[1 2]
# [3 4]]
###Output
_____no_output_____
###Markdown
Dynamic control flow

A major benefit of eager execution is that all the functionality of the host language is available while your model is executing. So, for example, it is easy to write [fizzbuzz](https://en.wikipedia.org/wiki/Fizz_buzz):
###Code
def fizzbuzz(max_num):
counter = tf.constant(0)
max_num = tf.convert_to_tensor(max_num)
for num in range(1, max_num.numpy()+1):
num = tf.constant(num)
if int(num % 3) == 0 and int(num % 5) == 0:
print('FizzBuzz')
elif int(num % 3) == 0:
print('Fizz')
elif int(num % 5) == 0:
print('Buzz')
else:
print(num.numpy())
counter += 1
fizzbuzz(15)
###Output
_____no_output_____
###Markdown
This has conditionals that depend on tensor values and it prints these values at runtime.

Eager training

Computing gradients

[Automatic differentiation](https://en.wikipedia.org/wiki/Automatic_differentiation) is useful for implementing machine learning algorithms such as [backpropagation](https://en.wikipedia.org/wiki/Backpropagation) for training neural networks. During eager execution, use `tf.GradientTape` to trace operations for computing gradients later.

You can use `tf.GradientTape` to train and/or compute gradients in eager execution. It is especially useful for complicated training loops.

Since different operations can occur during each call, all forward-pass operations get recorded to a "tape". To compute the gradient, play the tape backwards and then discard it. A particular `tf.GradientTape` can only compute one gradient; subsequent calls throw a runtime error.
###Code
w = tf.Variable([[1.0]])
with tf.GradientTape() as tape:
loss = w * w
grad = tape.gradient(loss, w)
print(grad) # => tf.Tensor([[ 2.]], shape=(1, 1), dtype=float32)
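# A persistent tape (an illustrative sketch of the note above) can be queried
# several times before being released:
with tf.GradientTape(persistent=True) as t:
  y = w * w
  z = y * y
print(t.gradient(y, w))  # d(w^2)/dw = 2w
print(t.gradient(z, w))  # d(w^4)/dw = 4w^3
del t  # drop the reference to free tape resources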
###Output
_____no_output_____
###Markdown
Train a model

The following example creates a multi-layer model that classifies the standard MNIST handwritten digits. It demonstrates the optimizer and layer APIs for building trainable graphs in an eager execution environment.
###Code
# Fetch and format the mnist data
(mnist_images, mnist_labels), _ = tf.keras.datasets.mnist.load_data()
dataset = tf.data.Dataset.from_tensor_slices(
(tf.cast(mnist_images[...,tf.newaxis]/255, tf.float32),
tf.cast(mnist_labels,tf.int64)))
dataset = dataset.shuffle(1000).batch(32)
# Build the model
mnist_model = tf.keras.Sequential([
tf.keras.layers.Conv2D(16,[3,3], activation='relu',
input_shape=(None, None, 1)),
tf.keras.layers.Conv2D(16,[3,3], activation='relu'),
tf.keras.layers.GlobalAveragePooling2D(),
tf.keras.layers.Dense(10)
])
###Output
_____no_output_____
###Markdown
Even without training, call the model and inspect the output in eager execution:
###Code
for images,labels in dataset.take(1):
print("Logits: ", mnist_model(images[0:1]).numpy())
###Output
_____no_output_____
###Markdown
While Keras models have a built-in training loop (using the `fit` method), sometimes you need more customization. Here's an example of a training loop implemented with eager execution:
###Code
optimizer = tf.keras.optimizers.Adam()
loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
loss_history = []
###Output
_____no_output_____
###Markdown
Note: Use the assert functions in `tf.debugging` to check whether a condition holds. This works in both eager and graph execution.
###Code
def train_step(images, labels):
with tf.GradientTape() as tape:
logits = mnist_model(images, training=True)
# Add asserts to check the shape of the output.
tf.debugging.assert_equal(logits.shape, (32, 10))
loss_value = loss_object(labels, logits)
loss_history.append(loss_value.numpy().mean())
grads = tape.gradient(loss_value, mnist_model.trainable_variables)
optimizer.apply_gradients(zip(grads, mnist_model.trainable_variables))
def train(epochs):
for epoch in range(epochs):
for (batch, (images, labels)) in enumerate(dataset):
train_step(images, labels)
print ('Epoch {} finished'.format(epoch))
train(epochs = 3)
import matplotlib.pyplot as plt
plt.plot(loss_history)
plt.xlabel('Batch #')
plt.ylabel('Loss [entropy]')
###Output
_____no_output_____
###Markdown
Variables and optimizers

`tf.Variable` objects store mutable `tf.Tensor`-like values accessed during training, to make automatic differentiation easier.

Collections of variables, together with the methods that operate on them, can be encapsulated into layers or models. See [Custom Keras layers and models](https://render.githubusercontent.com/view/keras/custom_layers_and_models.ipynb) for details. The main difference between layers and models is that models add methods such as `Model.fit`, `Model.evaluate`, and `Model.save`.

For example, the automatic differentiation example above can be rewritten:
###Code
class Linear(tf.keras.Model):
def __init__(self):
super(Linear, self).__init__()
self.W = tf.Variable(5., name='weight')
self.B = tf.Variable(10., name='bias')
def call(self, inputs):
return inputs * self.W + self.B
# A toy dataset of points around 3 * x + 2
NUM_EXAMPLES = 2000
training_inputs = tf.random.normal([NUM_EXAMPLES])
noise = tf.random.normal([NUM_EXAMPLES])
training_outputs = training_inputs * 3 + 2 + noise
# The loss function to be optimized
def loss(model, inputs, targets):
error = model(inputs) - targets
return tf.reduce_mean(tf.square(error))
def grad(model, inputs, targets):
with tf.GradientTape() as tape:
loss_value = loss(model, inputs, targets)
return tape.gradient(loss_value, [model.W, model.B])
###Output
_____no_output_____
###Markdown
Next steps:

1. Create the model.
2. Compute the derivatives of a loss function with respect to the model parameters.
3. Apply a strategy for updating the variables based on the derivatives.
###Code
model = Linear()
optimizer = tf.keras.optimizers.SGD(learning_rate=0.01)
print("Initial loss: {:.3f}".format(loss(model, training_inputs, training_outputs)))
steps = 300
for i in range(steps):
grads = grad(model, training_inputs, training_outputs)
optimizer.apply_gradients(zip(grads, [model.W, model.B]))
if i % 20 == 0:
print("Loss at step {:03d}: {:.3f}".format(i, loss(model, training_inputs, training_outputs)))
print("Final loss: {:.3f}".format(loss(model, training_inputs, training_outputs)))
print("W = {}, B = {}".format(model.W.numpy(), model.B.numpy()))
###Output
_____no_output_____
###Markdown
Note: Variables persist until the last reference to the Python object is removed, at which point the variable is deleted.

Object-based saving

A `tf.keras.Model` includes a convenient `save_weights` method that lets you easily create a checkpoint:
###Code
model.save_weights('weights')
status = model.load_weights('weights')
###Output
_____no_output_____
###Markdown
You can take full control of this process using `tf.train.Checkpoint`. This section is an abbreviated version of the [guide to training checkpoints](https://render.githubusercontent.com/view/checkpoint.ipynb).
###Code
x = tf.Variable(10.)
checkpoint = tf.train.Checkpoint(x=x)
x.assign(2.) # Assign a new value to the variables and save.
checkpoint_path = './ckpt/'
checkpoint.save('./ckpt/')
x.assign(11.) # Change the variable after saving.
# Restore values from the checkpoint
checkpoint.restore(tf.train.latest_checkpoint(checkpoint_path))
print(x) # => 2.0
###Output
_____no_output_____
###Markdown
To save and load models, `tf.train.Checkpoint` stores the internal state of objects, without requiring hidden variables. To record the state of a `model`, an `optimizer`, and a global step, pass them to a `tf.train.Checkpoint`:
###Code
model = tf.keras.Sequential([
tf.keras.layers.Conv2D(16,[3,3], activation='relu'),
tf.keras.layers.GlobalAveragePooling2D(),
tf.keras.layers.Dense(10)
])
optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
checkpoint_dir = 'path/to/model_dir'
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
root = tf.train.Checkpoint(optimizer=optimizer,
model=model)
root.save(checkpoint_prefix)
root.restore(tf.train.latest_checkpoint(checkpoint_dir))
###Output
_____no_output_____
###Markdown
Note: In many training loops, variables are created after `tf.train.Checkpoint.restore` is called. These variables will be restored as soon as they are created, and assertions are available to ensure that a checkpoint has been fully loaded. See the [guide to training checkpoints](https://render.githubusercontent.com/view/checkpoint.ipynb) for details.

Object-oriented metrics

All `tf.keras.metrics` are stored as objects. Update a metric by passing new data to the callable, and retrieve the result with the `tf.keras.metrics.result` method, for example:
###Code
m = tf.keras.metrics.Mean("loss")
m(0)
m(5)
m.result() # => 2.5
m([8, 9])
m.result() # => 5.5
###Output
_____no_output_____
###Markdown
Summaries and TensorBoard

[TensorBoard](https://tensorflow.google.cn/tensorboard) is a visualization tool for understanding, debugging, and optimizing the model training process. It uses summary events that are written while executing the program.

You can use `tf.summary` to record summaries of variables in eager execution. For example, to record summaries of `loss` once every 100 training steps:
###Code
logdir = "./tb/"
writer = tf.summary.create_file_writer(logdir)
steps = 1000
with writer.as_default(): # or call writer.set_as_default() before the loop.
for i in range(steps):
step = i + 1
# Calculate loss with your real train function.
loss = 1 - 0.001 * step
if step % 100 == 0:
tf.summary.scalar('loss', loss, step=step)
!ls tb/
###Output
_____no_output_____
###Markdown
Advanced automatic differentiation topics

Dynamic models

`tf.GradientTape` can also be used in dynamic models. This example for a [backtracking line search](https://wikipedia.org/wiki/Backtracking_line_search) algorithm looks like normal NumPy code, yet there are gradients and it is differentiable, despite the complex control flow:
###Code
def line_search_step(fn, init_x, rate=1.0):
with tf.GradientTape() as tape:
# Variables are automatically tracked.
# But to calculate a gradient from a tensor, you must `watch` it.
tape.watch(init_x)
value = fn(init_x)
grad = tape.gradient(value, init_x)
grad_norm = tf.reduce_sum(grad * grad)
init_value = value
while value > init_value - rate * grad_norm:
x = init_x - rate * grad
value = fn(x)
rate /= 2.0
return x, value
###Output
_____no_output_____
###Markdown
Custom gradients

Custom gradients are an easy way to override gradients. Within the forward function, define the gradient with respect to the inputs, outputs, or intermediate results. For example, here's an easy way to clip the norm of the gradients in the backward pass:
###Code
@tf.custom_gradient
def clip_gradient_by_norm(x, norm):
y = tf.identity(x)
def grad_fn(dresult):
return [tf.clip_by_norm(dresult, norm), None]
return y, grad_fn
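# Usage sketch: gradients flowing back through this op get clipped to the given norm.
v = tf.Variable(3.0)
with tf.GradientTape() as tape:
  out = clip_gradient_by_norm(v * v, 2.0)
print(tape.gradient(out, v))  # chain rule still applies; clipping is a no-op here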
###Output
_____no_output_____
###Markdown
Custom gradients are commonly used to provide a numerically stable gradient for a sequence of operations:
###Code
def log1pexp(x):
return tf.math.log(1 + tf.exp(x))
def grad_log1pexp(x):
with tf.GradientTape() as tape:
tape.watch(x)
value = log1pexp(x)
return tape.gradient(value, x)
# The gradient computation works fine at x = 0.
grad_log1pexp(tf.constant(0.)).numpy()
# However, x = 100 fails because of numerical instability.
grad_log1pexp(tf.constant(100.)).numpy()
###Output
_____no_output_____
###Markdown
Here, the `log1pexp` function can be analytically simplified with a custom gradient. The implementation below reuses the value of `tf.exp(x)` that is computed during the forward pass, making it more efficient by eliminating a redundant calculation:
###Code
@tf.custom_gradient
def log1pexp(x):
e = tf.exp(x)
def grad(dy):
return dy * (1 - 1 / (1 + e))
return tf.math.log(1 + e), grad
def grad_log1pexp(x):
with tf.GradientTape() as tape:
tape.watch(x)
value = log1pexp(x)
return tape.gradient(value, x)
# As before, the gradient computation works fine at x = 0.
grad_log1pexp(tf.constant(0.)).numpy()
# And the gradient computation also works at x = 100.
grad_log1pexp(tf.constant(100.)).numpy()
###Output
_____no_output_____
###Markdown
Performance

Computation is automatically offloaded to GPUs during eager execution. If you want control over where a computation runs, you can enclose it in a `tf.device('/gpu:0')` block (or the CPU equivalent):
###Code
import time
def measure(x, steps):
# TensorFlow initializes a GPU the first time it's used, exclude from timing.
tf.matmul(x, x)
start = time.time()
for i in range(steps):
x = tf.matmul(x, x)
# tf.matmul can return before completing the matrix multiplication
# (e.g., can return after enqueing the operation on a CUDA stream).
# The x.numpy() call below will ensure that all enqueued operations
# have completed (and will also copy the result to host memory,
# so we're including a little more than just the matmul operation
# time).
_ = x.numpy()
end = time.time()
return end - start
shape = (1000, 1000)
steps = 200
print("Time to multiply a {} matrix by itself {} times:".format(shape, steps))
# Run on CPU:
with tf.device("/cpu:0"):
print("CPU: {} secs".format(measure(tf.random.normal(shape), steps)))
# Run on GPU, if available:
if tf.config.experimental.list_physical_devices("GPU"):
with tf.device("/gpu:0"):
print("GPU: {} secs".format(measure(tf.random.normal(shape), steps)))
else:
print("GPU: not found")
###Output
_____no_output_____
###Markdown
A `tf.Tensor` object can be copied to a different device to execute its operations:
###Code
if tf.config.experimental.list_physical_devices("GPU"):
x = tf.random.normal([10, 10])
x_gpu0 = x.gpu()
x_cpu = x.cpu()
_ = tf.matmul(x_cpu, x_cpu) # Runs on CPU
_ = tf.matmul(x_gpu0, x_gpu0) # Runs on GPU:0
###Output
_____no_output_____
###Markdown
Copyright 2018 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
Eager Execution ๅจ TensorFlow.org ไธๆฅ็ ๅจ Google Colab ไธญ่ฟ่ก ๅจ GitHub ไธญๆฅ็ๆบไปฃ็ ไธ่ฝฝ็ฌ่ฎฐๆฌ TensorFlow ็ Eager Execution ๆฏไธ็งๅฝไปคๅผ็ผ็จ็ฏๅข๏ผๅฏ็ซๅณ่ฏไผฐ่ฟ็ฎ๏ผๆ ้ๆๅปบ่ฎก็ฎๅพ๏ผ่ฟ็ฎไผ่ฟๅๅ
ทไฝ็ๅผ๏ผ่้ๆๅปบไพ็จๅ่ฟ่ก็่ฎก็ฎๅพใ่ฟๆ ท่ฝไฝฟๆจ่ฝปๆพๅ
ฅ้จ TensorFlow ๅนถ่ฐ่ฏๆจกๅ๏ผๅๆถไนๅๅฐไบๆ ทๆฟไปฃ็ ใ่ฆ่ท้ๆฌๆๅ่ฟ่กๅญฆไน ๏ผ่ฏทๅจไบคไบๅผ `python` ่งฃ้ๅจไธญ่ฟ่กไปฅไธไปฃ็ ็คบไพใEager Execution ๆฏ็จไบ็ ็ฉถๅๅฎ้ช็็ตๆดปๆบๅจๅญฆไน ๅนณๅฐ๏ผๅ
ทๅคไปฅไธ็นๆง๏ผ- *็ด่ง็็้ข* - ่ช็ถๅฐ็ป็ปไปฃ็ ็ปๆๅนถไฝฟ็จ Python ๆฐๆฎ็ปๆใๅฟซ้่ฟญไปฃๅฐๆจกๅๅๅฐๆฐๆฎใ- *ๆดๆนไพฟ็่ฐ่ฏๅ่ฝ* - ็ดๆฅ่ฐ็จ่ฟ็ฎไปฅๆฃๆฅๆญฃๅจ่ฟ่ก็ๆจกๅๅนถๆต่ฏๆดๆนใไฝฟ็จๆ ๅ Python ่ฐ่ฏๅทฅๅ
ท็ซๅณๆฅๅ้่ฏฏใ- *่ช็ถ็ๆงๅถๆต* - ไฝฟ็จ Python ่้่ฎก็ฎๅพๆงๅถๆต๏ผ็ฎๅไบๅจๆๆจกๅ็่ง่ใEager Execution ๆฏๆๅคง้จๅ TensorFlow ่ฟ็ฎๅ GPU ๅ ้ใๆณจ๏ผๅฏ็จ Eager Execution ๅๅฏ่ฝไผๅขๅ ๆไบๆจกๅ็ๅผ้ใๆไปฌๆญฃๅจๆ็ปญๆน่ฟๅ
ถๆง่ฝ๏ผๅฆๆๆจ้ๅฐ้ฎ้ข๏ผ่ฏท[ๆไบค้่ฏฏๆฅๅ](https://github.com/tensorflow/tensorflow/issues)ๅนถๅไบซๆจ็ๅบๅใ ่ฎพ็ฝฎๅๅบๆฌ็จๆณ
###Code
import os
import tensorflow as tf
import cProfile
###Output
_____no_output_____
###Markdown
ๅจ Tensorflow 2.0 ไธญ๏ผ้ป่ฎคๅฏ็จ Eager Executionใ
###Code
tf.executing_eagerly()
###Output
_____no_output_____
###Markdown
็ฐๅจๆจๅฏไปฅ่ฟ่ก TensorFlow ่ฟ็ฎ๏ผ็ปๆๅฐ็ซๅณ่ฟๅ๏ผ
###Code
x = [[2.]]
m = tf.matmul(x, x)
print("hello, {}".format(m))
###Output
_____no_output_____
###Markdown
Enabling eager execution changes how TensorFlow operations behave: they now evaluate immediately and return their values to Python. `tf.Tensor` objects reference concrete values instead of symbolic handles to nodes in a computational graph. Since there is no computational graph to build and run later in a session, it is easy to inspect results using `print()` or a debugger. Evaluating, printing, and checking tensor values does not interrupt the flow for computing gradients. Eager execution works nicely with [NumPy](http://www.numpy.org/). NumPy operations accept `tf.Tensor` arguments. TensorFlow `tf.math` operations convert Python objects and NumPy arrays to `tf.Tensor` objects. The `tf.Tensor.numpy` method returns the object's value as a NumPy `ndarray`.
###Code
a = tf.constant([[1, 2],
[3, 4]])
print(a)
# Broadcasting support
b = tf.add(a, 1)
print(b)
# Operator overloading is supported
print(a * b)
# Use NumPy values
import numpy as np
c = np.multiply(a, b)
print(c)
# Obtain numpy value from a tensor:
print(a.numpy())
# => [[1 2]
# [3 4]]
###Output
_____no_output_____
###Markdown
Dynamic control flow. A major benefit of eager execution is that all the functionality of the host language is available while your model is executing. So, for example, it is easy to write [fizzbuzz](https://en.wikipedia.org/wiki/Fizz_buzz)-like code:
###Code
def fizzbuzz(max_num):
counter = tf.constant(0)
max_num = tf.convert_to_tensor(max_num)
for num in range(1, max_num.numpy()+1):
num = tf.constant(num)
if int(num % 3) == 0 and int(num % 5) == 0:
print('FizzBuzz')
elif int(num % 3) == 0:
print('Fizz')
elif int(num % 5) == 0:
print('Buzz')
else:
print(num.numpy())
counter += 1
fizzbuzz(15)
###Output
_____no_output_____
###Markdown
This code has conditionals that depend on tensor values, and it prints these values at runtime. Eager training. Computing gradients. [Automatic differentiation](https://en.wikipedia.org/wiki/Automatic_differentiation) is useful for implementing machine learning algorithms such as [backpropagation](https://en.wikipedia.org/wiki/Backpropagation) for training neural networks. During eager execution, use `tf.GradientTape` to trace operations for computing gradients later. You can use `tf.GradientTape` in eager execution to train and/or compute gradients, which is especially useful for complicated training loops. Since different operations can occur during each call, all forward-pass operations get recorded to a "tape". To compute the gradient, play the tape backwards and then discard it. A particular `tf.GradientTape` can only compute one gradient; subsequent calls raise a runtime error.
###Code
w = tf.Variable([[1.0]])
with tf.GradientTape() as tape:
loss = w * w
grad = tape.gradient(loss, w)
print(grad) # => tf.Tensor([[ 2.]], shape=(1, 1), dtype=float32)
###Output
_____no_output_____
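###Markdown
If you need several gradients from the same computation, a persistent tape avoids the "one gradient per tape" limitation mentioned above. The following cell is an illustrative sketch (not part of the original guide) using the public `persistent=True` flag; the tape then holds its resources until it is deleted explicitly:
###Code
x = tf.constant(3.0)
with tf.GradientTape(persistent=True) as tape:
  tape.watch(x)  # x is a constant, so it must be watched explicitly.
  y = x * x      # y = x^2
  z = y * y      # z = x^4
# Multiple gradient calls are allowed because the tape is persistent.
print(tape.gradient(z, x))  # => 108.0 (4 * x^3 at x = 3)
print(tape.gradient(y, x))  # => 6.0 (2 * x at x = 3)
del tape  # Release the resources held by the persistent tape.
###Output
_____no_output_____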
###Markdown
Train a model. The following example creates a multi-layer model that classifies the standard MNIST handwritten digits. It demonstrates the optimizer and layer APIs for building trainable computational graphs in an eager execution environment.
###Code
# Fetch and format the mnist data
(mnist_images, mnist_labels), _ = tf.keras.datasets.mnist.load_data()
dataset = tf.data.Dataset.from_tensor_slices(
(tf.cast(mnist_images[...,tf.newaxis]/255, tf.float32),
tf.cast(mnist_labels,tf.int64)))
dataset = dataset.shuffle(1000).batch(32)
# Build the model
mnist_model = tf.keras.Sequential([
tf.keras.layers.Conv2D(16,[3,3], activation='relu',
input_shape=(None, None, 1)),
tf.keras.layers.Conv2D(16,[3,3], activation='relu'),
tf.keras.layers.GlobalAveragePooling2D(),
tf.keras.layers.Dense(10)
])
###Output
_____no_output_____
###Markdown
Even without training, you can call the model in eager execution and inspect the output:
###Code
for images,labels in dataset.take(1):
print("Logits: ", mnist_model(images[0:1]).numpy())
###Output
_____no_output_____
###Markdown
While Keras models have a built-in training loop (using the `fit` method), sometimes you need more customization. Here is an example of a training loop implemented with eager execution:
###Code
optimizer = tf.keras.optimizers.Adam()
loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
loss_history = []
###Output
_____no_output_____
###Markdown
Note: Use the assert functions in `tf.debugging` to check whether a condition holds. This works in both eager and graph execution.
###Code
def train_step(images, labels):
with tf.GradientTape() as tape:
logits = mnist_model(images, training=True)
# Add asserts to check the shape of the output.
tf.debugging.assert_equal(logits.shape, (32, 10))
loss_value = loss_object(labels, logits)
loss_history.append(loss_value.numpy().mean())
grads = tape.gradient(loss_value, mnist_model.trainable_variables)
optimizer.apply_gradients(zip(grads, mnist_model.trainable_variables))
def train(epochs):
for epoch in range(epochs):
for (batch, (images, labels)) in enumerate(dataset):
train_step(images, labels)
print ('Epoch {} finished'.format(epoch))
train(epochs = 3)
import matplotlib.pyplot as plt
plt.plot(loss_history)
plt.xlabel('Batch #')
plt.ylabel('Loss [entropy]')
###Output
_____no_output_____
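###Markdown
For comparison with the custom loop above, the built-in Keras training loop mentioned earlier would look as follows. This is an illustrative sketch (not part of the original guide) using the standard `compile` and `fit` APIs:
###Code
# Equivalent training using the built-in Keras loop.
mnist_model.compile(
    optimizer=tf.keras.optimizers.Adam(),
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True))
mnist_model.fit(dataset, epochs=3)
###Output
_____no_output_____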
###Markdown
Variables and optimizers. `tf.Variable` objects store mutable, `tf.Tensor`-like values that are accessed during training, making automatic differentiation simpler. Collections of variables, along with their operating methods, can be encapsulated into layers or models. See [Custom Keras layers and models](https://render.githubusercontent.com/view/keras/custom_layers_and_models.ipynb) for details. The main difference between layers and models is that models add methods such as `Model.fit`, `Model.evaluate`, and `Model.save`. For example, the automatic differentiation example above can be rewritten as:
###Code
class Linear(tf.keras.Model):
def __init__(self):
super(Linear, self).__init__()
self.W = tf.Variable(5., name='weight')
self.B = tf.Variable(10., name='bias')
def call(self, inputs):
return inputs * self.W + self.B
# A toy dataset of points around 3 * x + 2
NUM_EXAMPLES = 2000
training_inputs = tf.random.normal([NUM_EXAMPLES])
noise = tf.random.normal([NUM_EXAMPLES])
training_outputs = training_inputs * 3 + 2 + noise
# The loss function to be optimized
def loss(model, inputs, targets):
error = model(inputs) - targets
return tf.reduce_mean(tf.square(error))
def grad(model, inputs, targets):
with tf.GradientTape() as tape:
loss_value = loss(model, inputs, targets)
return tape.gradient(loss_value, [model.W, model.B])
###Output
_____no_output_____
###Markdown
Next steps: 1. Create the model. 2. Compute the derivatives of a loss function with respect to the model parameters. 3. Apply a strategy for updating the variables based on the derivatives.
###Code
model = Linear()
optimizer = tf.keras.optimizers.SGD(learning_rate=0.01)
print("Initial loss: {:.3f}".format(loss(model, training_inputs, training_outputs)))
steps = 300
for i in range(steps):
grads = grad(model, training_inputs, training_outputs)
optimizer.apply_gradients(zip(grads, [model.W, model.B]))
if i % 20 == 0:
print("Loss at step {:03d}: {:.3f}".format(i, loss(model, training_inputs, training_outputs)))
print("Final loss: {:.3f}".format(loss(model, training_inputs, training_outputs)))
print("W = {}, B = {}".format(model.W.numpy(), model.B.numpy()))
###Output
_____no_output_____
###Markdown
Note: Variables persist until the last reference to the Python object is removed and the variable is deleted. Object-based saving. `tf.keras.Model` includes a convenient `save_weights` method that lets you easily create a checkpoint:
###Code
model.save_weights('weights')
status = model.load_weights('weights')
###Output
_____no_output_____
###Markdown
You can take full control of this process using `tf.train.Checkpoint`. This section is an abbreviated version of the [guide to training checkpoints](https://render.githubusercontent.com/view/checkpoint.ipynb).
###Code
x = tf.Variable(10.)
checkpoint = tf.train.Checkpoint(x=x)
x.assign(2.) # Assign a new value to the variables and save.
checkpoint_path = './ckpt/'
checkpoint.save('./ckpt/')
x.assign(11.) # Change the variable after saving.
# Restore values from the checkpoint
checkpoint.restore(tf.train.latest_checkpoint(checkpoint_path))
print(x) # => 2.0
###Output
_____no_output_____
###Markdown
To save and load models, `tf.train.Checkpoint` stores the internal state of objects without requiring hidden variables. To record the state of a `model`, an `optimizer`, and a global step, pass them to a `tf.train.Checkpoint`:
###Code
model = tf.keras.Sequential([
tf.keras.layers.Conv2D(16,[3,3], activation='relu'),
tf.keras.layers.GlobalAveragePooling2D(),
tf.keras.layers.Dense(10)
])
optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
checkpoint_dir = 'path/to/model_dir'
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
root = tf.train.Checkpoint(optimizer=optimizer,
model=model)
root.save(checkpoint_prefix)
root.restore(tf.train.latest_checkpoint(checkpoint_dir))
###Output
_____no_output_____
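###Markdown
The `restore` call above returns a status object that can verify how completely a checkpoint was loaded. A minimal sketch, assuming the standard status assertions of `tf.train.Checkpoint` (this cell is not part of the original guide):
###Code
status = root.restore(tf.train.latest_checkpoint(checkpoint_dir))
# Passes if every object already created in `root` matched a checkpointed value.
# Variables created later (e.g. on the first model call) are restored at
# creation time; `status.assert_consumed()` is the stricter check that also
# requires the checkpoint to contain no unused values.
status.assert_existing_objects_matched()
###Output
_____no_output_____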
###Markdown
Note: In many training loops, variables are created after `tf.train.Checkpoint.restore` is called. These variables will be restored as soon as they are created, and assertions are available to ensure that a checkpoint has been fully loaded. See the [guide to training checkpoints](https://render.githubusercontent.com/view/checkpoint.ipynb) for details. Object-oriented metrics. `tf.keras.metrics` are stored as objects. Update a metric by passing new data to the callable, and retrieve the result with the `tf.keras.metrics.result` method, for example:
###Code
m = tf.keras.metrics.Mean("loss")
m(0)
m(5)
m.result() # => 2.5
m([8, 9])
m.result() # => 5.5
###Output
_____no_output_____
###Markdown
Summaries and TensorBoard. [TensorBoard](https://tensorflow.google.cn/tensorboard) is a visualization tool for understanding, debugging, and optimizing the model training process. It uses summary events that are written while executing the program. You can use `tf.summary` to record summaries of variables in eager execution. For example, to record summaries of `loss` once every 100 training steps, run the following code:
###Code
logdir = "./tb/"
writer = tf.summary.create_file_writer(logdir)
steps = 1000
with writer.as_default(): # or call writer.set_as_default() before the loop.
for i in range(steps):
step = i + 1
# Calculate loss with your real train function.
loss = 1 - 0.001 * step
if step % 100 == 0:
tf.summary.scalar('loss', loss, step=step)
!ls tb/
###Output
_____no_output_____
###Markdown
Advanced automatic differentiation topics. Dynamic models. `tf.GradientTape` can also be used in dynamic models. This example of a [backtracking line search](https://wikipedia.org/wiki/Backtracking_line_search) algorithm looks like normal NumPy code, except that gradients are present and the computation is differentiable, despite the complex control flow:
###Code
def line_search_step(fn, init_x, rate=1.0):
with tf.GradientTape() as tape:
# Variables are automatically tracked.
# But to calculate a gradient from a tensor, you must `watch` it.
tape.watch(init_x)
value = fn(init_x)
grad = tape.gradient(value, init_x)
grad_norm = tf.reduce_sum(grad * grad)
init_value = value
while value > init_value - rate * grad_norm:
x = init_x - rate * grad
value = fn(x)
rate /= 2.0
return x, value
###Output
_____no_output_____
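###Markdown
To see the line search in action, it can be called on a simple convex function. An illustrative usage example (not part of the original guide):
###Code
fn = lambda x: tf.reduce_sum(tf.square(x))  # f(x) = ||x||^2, minimum at 0
x0 = tf.constant([1.0, 2.0])
x1, f1 = line_search_step(fn, x0)
# The returned point has a lower function value than the starting point.
print(x1.numpy(), f1.numpy())
###Output
_____no_output_____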
###Markdown
Custom gradients. Custom gradients are an easy way to override gradients. Within the forward function, define the gradient with respect to the inputs, outputs, or intermediate results. For example, here is an easy way to clip the norm of the gradients in the backward pass:
###Code
@tf.custom_gradient
def clip_gradient_by_norm(x, norm):
y = tf.identity(x)
def grad_fn(dresult):
return [tf.clip_by_norm(dresult, norm), None]
return y, grad_fn
###Output
_____no_output_____
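###Markdown
To see the effect of `clip_gradient_by_norm` on the backward pass, one can compare the clipped gradient with the unclipped one. An illustrative usage example (not part of the original guide):
###Code
v = tf.Variable(3.0)
with tf.GradientTape() as tape:
  y = clip_gradient_by_norm(v * v, 0.5)
# Without clipping the gradient would be 2 * v = 6.0; the upstream gradient
# (1.0) is clipped to norm 0.5 before flowing back, giving 0.5 * 2 * v = 3.0.
print(tape.gradient(y, v).numpy())
###Output
_____no_output_____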
###Markdown
Custom gradients are commonly used to provide numerically stable gradients for a sequence of operations:
###Code
def log1pexp(x):
return tf.math.log(1 + tf.exp(x))
def grad_log1pexp(x):
with tf.GradientTape() as tape:
tape.watch(x)
value = log1pexp(x)
return tape.gradient(value, x)
# The gradient computation works fine at x = 0.
grad_log1pexp(tf.constant(0.)).numpy()
# However, x = 100 fails because of numerical instability.
grad_log1pexp(tf.constant(100.)).numpy()
###Output
_____no_output_____
###Markdown
In this example, the `log1pexp` function can be analytically simplified with a custom gradient. The implementation below reuses the value of `tf.exp(x)` computed during the forward pass, making it more efficient by eliminating redundant calculations:
###Code
@tf.custom_gradient
def log1pexp(x):
e = tf.exp(x)
def grad(dy):
return dy * (1 - 1 / (1 + e))
return tf.math.log(1 + e), grad
def grad_log1pexp(x):
with tf.GradientTape() as tape:
tape.watch(x)
value = log1pexp(x)
return tape.gradient(value, x)
# As before, the gradient computation works fine at x = 0.
grad_log1pexp(tf.constant(0.)).numpy()
# And the gradient computation also works at x = 100.
grad_log1pexp(tf.constant(100.)).numpy()
###Output
_____no_output_____
###Markdown
Performance. During eager execution, computation is automatically offloaded to GPUs. If you want control over where a computation runs, you can enclose it in a `tf.device('/gpu:0')` block (or the CPU equivalent):
###Code
import time
def measure(x, steps):
# TensorFlow initializes a GPU the first time it's used, exclude from timing.
tf.matmul(x, x)
start = time.time()
for i in range(steps):
x = tf.matmul(x, x)
# tf.matmul can return before completing the matrix multiplication
# (e.g., can return after enqueing the operation on a CUDA stream).
# The x.numpy() call below will ensure that all enqueued operations
# have completed (and will also copy the result to host memory,
# so we're including a little more than just the matmul operation
# time).
_ = x.numpy()
end = time.time()
return end - start
shape = (1000, 1000)
steps = 200
print("Time to multiply a {} matrix by itself {} times:".format(shape, steps))
# Run on CPU:
with tf.device("/cpu:0"):
print("CPU: {} secs".format(measure(tf.random.normal(shape), steps)))
# Run on GPU, if available:
if tf.config.experimental.list_physical_devices("GPU"):
with tf.device("/gpu:0"):
print("GPU: {} secs".format(measure(tf.random.normal(shape), steps)))
else:
print("GPU: not found")
###Output
_____no_output_____
###Markdown
A `tf.Tensor` object can be copied to a different device to execute its operations:
###Code
if tf.config.experimental.list_physical_devices("GPU"):
x = tf.random.normal([10, 10])
x_gpu0 = x.gpu()
x_cpu = x.cpu()
_ = tf.matmul(x_cpu, x_cpu) # Runs on CPU
_ = tf.matmul(x_gpu0, x_gpu0) # Runs on GPU:0
###Output
_____no_output_____
docs/notebooks/inference-example.ipynb | ###Markdown
Crop Lungs from dataset. Imagine we created a deep learning network which analyzes a specific lung disease, and we now want to apply the network to a new dataset. To preprocess the dataset, we need to do two things: 1. Filter all volumes where the lungs are present. 2. Crop the lungs from the volume. In this tutorial we will go through the preprocessing, and it will be explained how the bpreg package can help us. 1. Download Data. For this tutorial, a subset of the [TCGA-KIRC](https://wiki.cancerimagingarchive.net/display/Public/TCGA-KIRC580038695f8cd691bda43dda71b4093c69c7318) dataset from the TCIA is used. The DICOM files were transformed to nifti files. To preserve the information in the DICOM metadata, an additional meta-data file was created with the metadata inside. Moreover, to reduce the size of the dataset and to remove CT scans with few slices, nifti files with a size greater than 35 MB or smaller than 5 MB were removed. Please download the data from Zenodo: https://zenodo.org/record/5141950#.YQFiMNaxWEA 2. Analyze dataset. First, we want to analyze the dataset from Zenodo.
###Code
import os
import pandas as pd
# plot_dicomexamined_distribution and bpreg_inference are assumed to be
# imported from the bpreg package in an earlier cell.
input_path = "data/TCGA-KIRC/TCGA-KIRC_nifti"
meta_data = "data/TCGA-KIRC/TCGA-KIRC_meta_data.xlsx"
output_path = "data/TCGA-KIRC/TCGA-KIRC_json"
if not os.path.exists(output_path): os.mkdir(output_path)
print(f"The dataset contains: {len(os.listdir(input_path))} CT volumes.")
###Output
The dataset contains: 291 CT volumes.
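###Markdown
The size-based filtering described in section 1 (removing volumes larger than 35 MB or smaller than 5 MB) can be reproduced with a few lines. A minimal sketch, assuming the nifti files sit directly inside `input_path`:
###Code
def files_within_size(path, min_mb=5, max_mb=35):
    """Return the filenames in path whose size lies within [min_mb, max_mb]."""
    keep = []
    for filename in os.listdir(path):
        size_mb = os.path.getsize(os.path.join(path, filename)) / 1e6
        if min_mb <= size_mb <= max_mb:
            keep.append(filename)
    return keep
print(f"{len(files_within_size(input_path))} files lie within the size limits.")
###Output
_____no_output_____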
###Markdown
2.1 Analyze DICOM tag BodyPartExamined. One approach to obtain the examined body part of a CT volume is to analyze the DICOM tag BodyPartExamined. Unfortunately, the tag is not very precise and is often misleading or empty.
###Code
df = pd.read_excel(meta_data)
plot_dicomexamined_distribution(df, column="BodyPartExamined", count_column="filepath")
###Output
_____no_output_____
###Markdown
3. Body Part Regression Inference. We can create a corresponding json output file for every nifti file. The json file contains metadata about the examined body part. Moreover, an additional `README.md` file is saved in the output path, which explains the metadata inside the json files.
###Code
bpreg_inference(input_path, output_path)
###Output
JSON-file already exists. Skip file: TCGA-B8-4154_07-27-2003-CT-ABDOMEN-PELVIS-W-CONT-78543_2.000000-AbdPelvis--5.0--B31f-92442_.nii.gz
JSON-file already exists. Skip file: TCGA-B8-5551_02-18-2004-CT-RENAL-Mass-79313_3.000000-Arterial--5.0--B31f-51058_.nii.gz
JSON-file already exists. Skip file: TCGA-B8-4151_05-19-2003-CT-RENAL-Mass-57015_2.000000-Unenhanced---5.0--B30f-16470_.nii.gz
JSON-file already exists. Skip file: TCGA-B8-5162_10-06-2003-CT-RENAL-Mass-58198_3.000000-Arterial-Phase--5.0--B30f-49138_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-6096_04-02-1995-Abdomen020APRoutine-Adult-68078_2.000000-kids-wo--5.0--B40s-64573_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-5589_07-24-1992-Abdomen01AbdPelvisRoutine-Adult-36102_6.000000-3-min-delay--5.0--B40f-40093_.nii.gz
JSON-file already exists. Skip file: TCGA-B0-5117_01-16-1989-ABD-RENAL-STONE-96405_2.000000-Helical-5s-89466_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-6096_02-07-1992-Abdomen1-AP-ROUTINE-85865_2.000000-AP-Routine--5.0--B40f-19930_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-5585_09-27-1997-Thorax040CAPRoutine-Adult-43440_5.000000-MIP-Chest-58582_.nii.gz
JSON-file already exists. Skip file: TCGA-B8-5553_04-22-2004-CT-RENAL-Mass-42997_5.000000-Venous-Phase--5.0--B30f-76145_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-5589_02-24-1995-Abdomen020APRoutine-Adult-00352_2.000000-Kids-wo--5.0--B40f-60155_.nii.gz
JSON-file already exists. Skip file: TCGA-B0-4845_06-04-1986-CT-CHEST-W--ABDOMEN-W-91168_2.000000-54093_.nii.gz
JSON-file already exists. Skip file: TCGA-B8-5164_10-23-2003-CT-RENAL-Mass-38123_3.000000-ArterialPhase--5.0--B30f-99784_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-5581_06-07-1999-CT-ABDOMEN-w-91624_4.000000-3-min-KIDS--5.0--B40f-77136_.nii.gz
JSON-file already exists. Skip file: TCGA-B8-A54E_02-02-2005-CT-RENAL-Mass-86462_2.000000-Unenhanced---5.0--B30f-21264_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-5589_10-25-1993-Abdomen020AbdomenRoutine-Adult-50570_4.000000-KIDS-WO--3.0--B40f--cor--cor-00686_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-5589_10-25-1993-Abdomen020AbdomenRoutine-Adult-50570_5.000000-AP-Routine--3.0--B40f--cor-69493_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-5587_12-26-1996-Thorax040CAPRoutine-Adult-05223_4.000000-MIP-Chest-axial-47074_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-5590_03-19-1994-Thorax090ChestPEStudy-Adult-14214_7.000000-APRoutine--5.0--B40f-18675_.nii.gz
JSON-file already exists. Skip file: TCGA-B8-4620_05-09-2002-CT-RENAL-Mass-57135_5.000000-Nephro--5.0--B30f-49838_.nii.gz
JSON-file already exists. Skip file: TCGA-B8-5159_12-05-2003-CT-RENAL-MASS-WPelvis-71904_2.000000-I--Kidneys--5.0--B31f-67607_.nii.gz
JSON-file already exists. Skip file: TCGA-B8-4154_08-13-2003-UROGRAM-CT-57460_5.000000-Delay-5-min--5.0--B31f-83158_.nii.gz
JSON-file already exists. Skip file: TCGA-B0-4845_12-24-1985-CT-CAP-WO-WITH-72944_2.000000-53163_.nii.gz
JSON-file already exists. Skip file: TCGA-B8-5158_04-15-2004-CT-RENAL-Mass-29056_4.000000-ArterialPhase--5.0--B30f-15047_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-5585_06-30-1992-CT-ChestAbdomenPelv-13897_3.000000-AXIAL-15230_.nii.gz
JSON-file already exists. Skip file: TCGA-B8-5165_08-11-2004-CT-RENAL-Mass-44170_5.000000-Nephro--5.0--B31f-42842_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-6088_06-30-1991-Abdomen1-AP-ROUTINE-51457_3.000000-KIDNEY-DEL--5.0--B40f-48679_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-6093_10-11-1991-Vascular06CTAAbdPost-stent-Adult-88752_2.000000-WO-Abd.---5.0--B40f-89347_.nii.gz
JSON-file already exists. Skip file: TCGA-B8-4153_08-14-2003-CT-RENAL-Mass-20519_3.000000-Arterial--5.0--B31f-26032_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-5581_09-09-1990-CT-Urogram-1-04007_4.000000-AXIAL-39899_.nii.gz
JSON-file already exists. Skip file: TCGA-BP-4349_04-29-1990-CT-ABPEL-KIDNEY-PROTOCOL-96920_270.000000-SAG-53915_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-5581_10-14-1991-CT-ABDOMEN--PELVIS-44801_2.000000-AXIAL-06743_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-6088_02-28-1994-Abdomen020APRoutine-Adult-83665_5.000000-coronal-AP-Routine--3.0--B40f-80292_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-5587_10-23-1992-Thorax02CAPRoutine-Adult-65199_5.000000-CAP-Routine--3.0--SPO--cor-10832_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-5581_05-02-1992-Abdomen01APRoutine-Adult-37752_3.000000-AP-Routine--5.0--B40f-32007_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-5581_09-09-1990-CT-Urogram-1-04007_2.000000-AXIAL-06978_.nii.gz
JSON-file already exists. Skip file: TCGA-B0-5709_03-11-1986-CT-ABDOMEN-12191_2.000000-C-90SEC-PREP-7.53.757.5-51603_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-5585_09-25-1998-CT-CHEST-w--3D-Depend-WS-87799_7.000000-cor-45-Sec---cor-33514_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-5590_04-13-1992-Thorax02CAPRoutine-Adult-75223_4.000000-DELAYED--5.0--B40f-23877_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-5590_04-02-1993-Thorax02CAPRoutine-Adult-04146_3.000000-chestliver-wo--20.0--B40f-45044_.nii.gz
JSON-file already exists. Skip file: TCGA-B8-4153_08-14-2003-CT-RENAL-Mass-20519_2.000000-NonContrast--5.0--B31f-49477_.nii.gz
JSON-file already exists. Skip file: TCGA-B0-5707_11-02-1986-CT-ABD-WOW--PELVIS-W-39221_3.000000-AXIAL-SCANS-11369_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-6088_02-28-1994-Abdomen020APRoutine-Adult-83665_2.000000-Kids-wo--5.0--B40f-03834_.nii.gz
JSON-file already exists. Skip file: TCGA-B8-5545_03-13-2004-CT-ABDOMEN-PELVIS-W-CONT-12109_3.000000-AbdPelvis--5.0--B31f-77514_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-5589_04-17-2000-CT-ABDOMEN-w--PELVIS-w-13547_4.000000-90-Sec--5.0--B40f-51214_.nii.gz
JSON-file already exists. Skip file: TCGA-B0-4845_02-25-1986-CT-CAP-WO-WITH-29227_3.000000-92886_.nii.gz
JSON-file already exists. Skip file: TCGA-B8-5551_02-18-2004-CT-RENAL-Mass-79313_2.000000-NonContrast--5.0--B31f-82632_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-6088_02-26-1995-Abdomen083TypeICTUDoseSaving-Adult-94974_8.000000-Coronal-70-sec-AP--2.0--B25f-43558_.nii.gz
JSON-file already exists. Skip file: TCGA-B0-5707_11-02-1986-CT-ABD-WOW--PELVIS-W-39221_4.000000-AXIAL-SCANS-19304_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-5589_04-17-2000-CT-ABDOMEN-w--PELVIS-w-13547_3.000000-45-Sec---5.0--B40f-37791_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-6090_05-07-1993-CT-ChestAbdomenPelv-48148_3.000000-AXIAL-79433_.nii.gz
JSON-file already exists. Skip file: TCGA-B8-5549_03-17-2004-CT-RENAL-Mass-08744_3.000000-Arterial--5.0--B31f-23690_.nii.gz
Create body-part meta data file: TCGA-CW-5585_12-21-1992-Thorax02CAPRoutine-Adult-35967_6.000000-CAP-Routine--3.0--SPO--cor-54385_.json
WARNING: File TCGA-CW-5585_12-21-1992-Thorax02CAPRoutine-Adult-35967_6.000000-CAP-Routine--3.0--SPO--cor-54385_.nii.gz can not be converted to a 3-dimensional volume of the size 128x128xz
JSON-file already exists. Skip file: TCGA-B0-5099_12-29-1984-CT-ABDOMEN-49423_2.000000-AXIAL-SCANS-79032_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-5589_06-05-1998-Abdomen037KidneyBiphase-Adult-87366_2.000000-45-Sec---5.0--B40f-90571_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-5585_03-19-1995-Thorax040CAPRoutine-Adult-80338_4.000000-MIP-axial-58426_.nii.gz
JSON-file already exists. Skip file: TCGA-B0-5115_06-11-1992-AbdomenSTONES-Adult-45957_3.000000-STONES--LUNG-54997_.nii.gz
JSON-file already exists. Skip file: TCGA-B8-5159_04-06-2005-CT-ABDOMEN-PELVIS-W-CONT-88493_2.000000-AbdPelvis--5.0--B31f-20872_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-5589_05-07-1999-CT-ABDOMEN-w--PELVIS-w-37408_2.000000-AP-routine--5.0--B40f-56016_.nii.gz
JSON-file already exists. Skip file: TCGA-B0-4845_12-24-1985-CT-CAP-WO-WITH-72944_4.000000-Recon-2-39633_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-5587_04-30-1993-Thorax02CAPRoutine-Adult-90240_3.000000-CAP-Routine--3.0--B40f--cor-55444_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-5585_03-23-1993-Thorax02CAPRoutine-Adult-15977_3.000000-CAP-Routine--3.0--SPO--cor-07317_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-5585_03-22-1998-Thorax040CAPRoutine-Adult-38862_2.000000-CA-routine--5.0--B40f-42567_.nii.gz
JSON-file already exists. Skip file: TCGA-B0-5085_07-14-1987-AP-WWO-12724_3.000000-19898_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-6096_04-02-1995-Abdomen020APRoutine-Adult-68078_3.000000-Coronal-kids-wo--3.0--B40f-35627_.nii.gz
JSON-file already exists. Skip file: TCGA-B0-4845_02-25-1986-CT-CAP-WO-WITH-29227_2.000000-03341_.nii.gz
JSON-file already exists. Skip file: TCGA-B8-5164_11-27-2003-CT-ABDOMEN-PELVIS-WO-CONT-59966_2.000000-AbdomenPelvis--5.0--B31f-93858_.nii.gz
JSON-file already exists. Skip file: TCGA-B8-4153_08-14-2003-CT-RENAL-Mass-20519_5.000000-Delay-Nephro--5.0--B31f-95636_.nii.gz
JSON-file already exists. Skip file: TCGA-B8-5158_12-28-2003-CT-RENAL-Mass-54722_2.000000-Unenhanced--5.0--B31f-96460_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-6088_04-12-1997-Abdomen037KidneyBiphasePostNephrectomy-Adult-02501_2.000000-KIDNEY-WO--5.0--B40f-48745_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-5585_03-22-1998-Thorax040CAPRoutine-Adult-38862_4.000000-MIP-Chest-axial-28830_.nii.gz
JSON-file already exists. Skip file: TCGA-BP-4351_07-11-1990-FORFILE-CT-CHABPEL---CD-48554_102.000000-ROUTINE-CHEST-97260_.nii.gz
JSON-file already exists. Skip file: TCGA-B0-5117_12-22-1992-CT-ABDOMEN-WITH-CONTRA-36509_2.000000-ABD-WO-86747_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-6096_01-10-1993-Abdomen01AbdomenRoutine-Adult-05857_6.000000-Kidney-Delays--5.0--B40f-14321_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-6088_03-24-1996-Thorax040CAPRoutine-Adult-83575_5.000000-MIP-axial-48684_.nii.gz
JSON-file already exists. Skip file: TCGA-BP-4349_04-29-1990-CT-ABPEL-KIDNEY-PROTOCOL-96920_100.000000-Bind1546851..69-98531_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-5589_10-30-1992-Abdomen020APRoutine-Adult-47614_2.000000-AP-Routine--5.0--B40f-61861_.nii.gz
JSON-file already exists. Skip file: TCGA-B8-5159_12-05-2003-CT-RENAL-MASS-WPelvis-71904_5.000000-Nephro--5.0--B31f-85645_.nii.gz
JSON-file already exists. Skip file: TCGA-B0-5115_06-11-1992-AbdomenSTONES-Adult-45957_2.000000-STONES--ST-16828_.nii.gz
JSON-file already exists. Skip file: TCGA-BP-4349_04-29-1990-CT-ABPEL-KIDNEY-PROTOCOL-96920_3.000000-PRE-CONTRAST-50541_.nii.gz
JSON-file already exists. Skip file: TCGA-B8-4620_05-09-2002-CT-RENAL-Mass-57135_4.000000-cor-thin-mip-15533_.nii.gz
JSON-file already exists. Skip file: TCGA-B8-5546_06-02-2005-CT-ABDOMENPELVIS--RENAL-COLIC-86908_2.000000-Renal-Colic--5.0--B31f-17550_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-5589_10-25-1993-Abdomen020AbdomenRoutine-Adult-50570_3.000000-AP-Routine--5.0--B40f-82698_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-5581_03-26-1991-hernia-59338_2.000000-AXIAL-65789_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-5585_09-03-1991-Thorax02CAPRoutine-Adult-63431_3.000000-DELAYS--5.0--B40f-01380_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-5587_03-28-1997-Thorax040CAPRoutine-Adult-62781_5.000000-MIP-Chest-87509_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-5581_06-07-1999-CT-ABDOMEN-w-91624_2.000000-Abd---5.0--B40f-44734_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-6088_04-12-1997-Abdomen037KidneyBiphasePostNephrectomy-Adult-02501_9.000000-Excretory--5.0--B40f-23428_.nii.gz
JSON-file already exists. Skip file: TCGA-B8-4148_04-02-2003-CT-RENAL-Mass-68197_4.000000-nephrographic--5.0--B30s-23204_.nii.gz
JSON-file already exists. Skip file: TCGA-B8-5549_10-06-2004-CT-ABDOMEN-PELVIS-W-CONT-94129_2.000000-AbdPel--5.0--B31f-61908_.nii.gz
JSON-file already exists. Skip file: TCGA-B8-5549_03-17-2004-CT-RENAL-Mass-08744_2.000000-Unenhanced--5.0--B31f-34129_.nii.gz
JSON-file already exists. Skip file: TCGA-B8-5163_11-18-2003-CT-ABDOMEN-PELVIS-W-CONT-49089_5.000000-Abd-delays-3min--3.0--SPO--cor-84210_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-5589_02-24-1995-Abdomen020APRoutine-Adult-00352_5.000000-APRoutine--5.0--B40f-08656_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-6090_08-21-1993-Thorax02CAPRoutine-Adult-78094_6.000000-AP-ROUTINE--3.0--B40f-15846_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-5581_06-10-1991-Abdomen01APRoutine-Adult-61720_2.000000-70-seconds--5.0--B40f-39135_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-5581_07-08-1994-Thorax040CAPRoutine-Adult-42293_3.000000-Coronal-CAP-Routine--3.0--B40f-47742_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-6090_02-04-1993-Thorax040CAPRoutine-Adult-60088_5.000000-CAP-routine--3.0--SPO--cor--cor-30216_.nii.gz
JSON-file already exists. Skip file: TCGA-B8-5164_10-23-2003-CT-RENAL-Mass-38123_5.000000-VenousPhase--5.0--B30f-56183_.nii.gz
JSON-file already exists. Skip file: TCGA-B8-5165_09-25-2003-CT-RENAL-Mass-21003_3.000000-Arterial--5.0--B31f-43367_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-6088_02-28-1994-Abdomen020APRoutine-Adult-83665_3.000000-coronal-Kids-wo--3.0--B40f--cor-46639_.nii.gz
JSON-file already exists. Skip file: TCGA-B0-5115_06-17-1992-AP-82538_3.000000-NEPHROGRAPHIC-PHASE-11129_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-5589_02-24-1995-Abdomen020APRoutine-Adult-00352_10.000000-coronal-3-min-delay--3.0--B40f--cor-71184_.nii.gz
JSON-file already exists. Skip file: TCGA-B8-4622_01-11-2004-CT-CHEST-WO-CONTRAST-80068_3.000000-Chest-Thin-Mip-Coronal-93724_.nii.gz
JSON-file already exists. Skip file: TCGA-B8-5163_11-18-2003-CT-CHEST---WCONTRAST-11237_2.000000-ChestAbdPel--5.0--B31f-00121_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-6088_04-12-1997-Abdomen037KidneyBiphasePostNephrectomy-Adult-02501_6.000000-MIP-Chest-17246_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-5589_08-30-1991-Abdomen01AbdomenRoutine-Adult-36686_4.000000-Abd-DELAY--5.0--B40f-19406_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-6090_02-04-1993-Thorax040CAPRoutine-Adult-60088_3.000000-kidneys-wo--3.0--SPO--cor--cor--cor-24832_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-6088_04-12-1997-Abdomen037KidneyBiphasePostNephrectomy-Adult-02501_7.000000-Nephro--5.0--B40f-19275_.nii.gz
JSON-file already exists. Skip file: TCGA-B8-4151_05-19-2003-CT-RENAL-Mass-57015_3.000000-Arterial-Phase--5.0--B30f-32552_.nii.gz
JSON-file already exists. Skip file: TCGA-B8-5165_08-11-2004-CT-RENAL-Mass-44170_3.000000-Arterial--5.0--B31f-38930_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-5589_02-24-1995-Abdomen020APRoutine-Adult-00352_11.000000-coronal-3-min-delay--3.0--B40f-44716_.nii.gz
JSON-file already exists. Skip file: TCGA-B0-5106_04-09-1987-AP-86222_3.000000-88339_.nii.gz
JSON-file already exists. Skip file: TCGA-B8-5553_04-22-2004-CT-RENAL-Mass-42997_3.000000-Arterial-Phase--5.0--B30f-94309_.nii.gz
JSON-file already exists. Skip file: TCGA-B8-5550_03-10-2004-CT-RENAL-Mass-76610_3.000000-ArterialPhase--5.0--B30f-84474_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-5585_03-19-1995-Thorax040CAPRoutine-Adult-80338_2.000000-CA-Routine--5.0--B40f-21316_.nii.gz
JSON-file already exists. Skip file: TCGA-B8-4148_06-21-2004-CT-ABDOMEN-PELVIS-W-CONT-73741_2.000000-AbdPelvis--5.0--B31f-47876_.nii.gz
JSON-file already exists. Skip file: TCGA-B8-5550_03-10-2004-CT-RENAL-Mass-76610_2.000000-NonContrast--5.0--B30f-12406_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-5587_04-30-1993-Thorax02CAPRoutine-Adult-90240_2.000000-CAP-Routine--5.0--B40f-28195_.nii.gz
JSON-file already exists. Skip file: TCGA-B8-5162_11-16-2003-CT-ABDOMEN-PELVIS-W-CONT-93993_2.000000-AbdomenPel--5.0--B31f-58516_.nii.gz
JSON-file already exists. Skip file: TCGA-B8-A54E_11-02-2005-CT-ABDOMEN-PELVIS-W-CONT-43122_2.000000-AbdomenPelvis--5.0--B31f-50747_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-5581_12-23-1990-Thorax2-CAP-ROUTINE-07108_2.000000-CAP-routine--5.0--B40f-52760_.nii.gz
JSON-file already exists. Skip file: TCGA-B8-4154_08-13-2003-UROGRAM-CT-57460_6.000000-Delay-5-min--Coronal-Thin-Mip-48956_.nii.gz
JSON-file already exists. Skip file: TCGA-B0-5085_07-14-1987-AP-WWO-12724_2.000000-29915_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-6090_08-03-1991-Abdomen1-AP-ROUTINE-65425_2.000000-AP-Routine--5.0--B40f-43138_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-6090_02-04-1993-Thorax040CAPRoutine-Adult-60088_2.000000-kidneys-wo--5.0--B40f-77027_.nii.gz
JSON-file already exists. Skip file: TCGA-BP-4335_10-01-1986-CT-CHABPEL-KIDNEY-PROTOCOL-64314_5.000000-91388_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-5581_06-10-1991-Abdomen01APRoutine-Adult-61720_3.000000-120-seconds--5.0--B40f-03464_.nii.gz
JSON-file already exists. Skip file: TCGA-B8-4620_05-09-2002-CT-RENAL-Mass-57135_3.000000-Arterial--5.0--B30f-52306_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-5585_09-25-1998-CT-CHEST-w--3D-Depend-WS-87799_6.000000-MIP-Axial-Chest--20-02068_.nii.gz
JSON-file already exists. Skip file: TCGA-B0-4712_08-04-1991-CAP-WO-63405_2.000000-CAP--WO-CONTRAST-89059_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-5589_02-24-1995-Abdomen020APRoutine-Adult-00352_4.000000-coronal-Kids-wo--3.0--B40f--cor-85062_.nii.gz
JSON-file already exists. Skip file: TCGA-B8-A54D_09-19-2005-CT-ABDOMEN-PELVIS-WO-CONT-70571_2.000000-AbdomenPel--5.0--B31f-10967_.nii.gz
JSON-file already exists. Skip file: TCGA-B8-A54D_01-06-2005-CT-ABDOMEN-PELVIS-W-CONT-56869_3.000000-AbdomenPelvis--5.0--B31f-47294_.nii.gz
JSON-file already exists. Skip file: TCGA-B8-4153_05-05-2004-CT-ABDOMEN-PELVIS-WO-CONT-69226_2.000000-AbdPel--5.0--B31f-10886_.nii.gz
JSON-file already exists. Skip file: TCGA-B0-5110_10-28-1990-CT-ABDOMEN-WITHOUT-CON-53530_2.000000-05747_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-5587_06-03-1994-Thorax040CAPRoutine-Adult-15514_3.000000-Coronal-CAP-Routine--3.0--B40f-07109_.nii.gz
JSON-file already exists. Skip file: TCGA-B0-5106_04-09-1987-AP-86222_2.000000-77477_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-6090_08-21-1993-Thorax02CAPRoutine-Adult-78094_3.000000-AP-ROUTINE--5.0--B40f-60622_.nii.gz
JSON-file already exists. Skip file: TCGA-B8-4154_08-13-2003-UROGRAM-CT-57460_2.000000-NonContrast--5.0--B31f-07302_.nii.gz
JSON-file already exists. Skip file: TCGA-B0-5115_06-17-1992-AP-82538_2.000000-WO-ABD-PELVIS-47462_.nii.gz
JSON-file already exists. Skip file: TCGA-B8-5162_10-06-2003-CT-RENAL-Mass-58198_2.000000-Unenhanced---5.0--B30f-12094_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-6096_01-10-1993-Abdomen01AbdomenRoutine-Adult-05857_8.000000-Kidney-Delays--3.0--SPO--cor-88104_.nii.gz
JSON-file already exists. Skip file: TCGA-B0-4845_12-24-1985-CT-CAP-WO-WITH-72944_3.000000-68250_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-5590_03-19-1994-Thorax090ChestPEStudy-Adult-14214_9.000000-PE-MIP-axial-40181_.nii.gz
JSON-file already exists. Skip file: TCGA-BP-4762_03-14-1988-CT-ABPEL-W-CONTRAST-73732_2.000000-CT-ABDOMENPELVIS-49483_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-5589_07-24-1992-Abdomen01AbdPelvisRoutine-Adult-36102_2.000000-kids-wo--5.0--B40f-15746_.nii.gz
JSON-file already exists. Skip file: TCGA-B8-4148_04-02-2003-CT-RENAL-Mass-68197_2.000000-unenhanced--5.0--B30s-14047_.nii.gz
JSON-file already exists. Skip file: TCGA-B0-5110_09-19-1991-CT-ABDOMEN-WITH-AND-WI-12069_2.000000-23176_.nii.gz
JSON-file already exists. Skip file: TCGA-B8-5158_04-15-2004-CT-RENAL-Mass-29056_3.000000-VenousPhase--5.0--B30f-73811_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-6093_06-01-1991-CT-ABDOMEN--PELVIS-66030_2.000000-AXIAL-11107_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-6093_10-11-1991-Vascular06CTAAbdPost-stent-Adult-88752_603.000000-MIP-13823_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-6088_03-16-1991-kidney-ca-87042_2.000000-AXIAL-92330_.nii.gz
JSON-file already exists. Skip file: TCGA-B0-4845_06-04-1986-CT-CHEST-W--ABDOMEN-W-91168_4.000000-Recon-2-48831_.nii.gz
JSON-file already exists. Skip file: TCGA-B8-5159_12-05-2003-CT-RENAL-MASS-WPelvis-71904_3.000000-Arterial--5.0--B31f-96992_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-5581_06-26-1993-Thorax040CAPRoutine-Adult-84537_3.000000-CAP-routine--3.0--SPO--cor-02393_.nii.gz
JSON-file already exists. Skip file: TCGA-B8-5165_09-25-2003-CT-RENAL-Mass-21003_4.000000-Arterial--5.0--SPO--cor-88630_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-5589_10-30-1992-Abdomen020APRoutine-Adult-47614_5.000000-kidney-delays--5.0--B40f-92076_.nii.gz
JSON-file already exists. Skip file: TCGA-B8-5164_10-23-2003-CT-RENAL-Mass-38123_2.000000-NonContrast--5.0--B30f-18825_.nii.gz
JSON-file already exists. Skip file: TCGA-CJ-4642_07-23-1995-CHEST-WITH-06989_2.000000-CHEST-91436_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-5585_03-23-1993-Thorax02CAPRoutine-Adult-15977_4.000000-CAP-Routine--20.0ial-B40f-47604_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-5581_01-23-1997-Thorax040CAPRoutine-Adult-26910_5.000000-MIP-Axial-Chest-76861_.nii.gz
JSON-file already exists. Skip file: TCGA-B0-4821_10-28-1984-ABDOMEN-WWO-67734_3.000000-16317_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-5589_10-25-1993-Abdomen020AbdomenRoutine-Adult-50570_2.000000-KIDS-WO--5.0--B20f-56162_.nii.gz
JSON-file already exists. Skip file: TCGA-B8-5553_04-22-2004-CT-RENAL-Mass-42997_2.000000-Unenhanced---5.0--B30f-63367_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-6096_04-03-1994-Abdomen020AbdomenPelvisRoutine-Adult-58797_2.000000-AbdPel--5.0--B40f-59141_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-5587_08-04-1991-CT-ABDOMEN--PELVIS-14568_3.000000-AXIAL-67739_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-5587_01-25-1992-CT-ABDOMEN--PELVIS-89409_2.000000-AXIAL-04902_.nii.gz
JSON-file already exists. Skip file: TCGA-B8-4622_01-11-2004-CT-ABDOMEN-WO-CONTR-61119_2.000000-ChestAbd--5.0--B31f-03804_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-5590_03-19-1994-Thorax090ChestPEStudy-Adult-14214_101.000000-tmp--PE--RTD-21344_.nii.gz
JSON-file already exists. Skip file: TCGA-B0-4845_02-25-1986-CT-CAP-WO-WITH-29227_4.000000-Recon-2-63479_.nii.gz
JSON-file already exists. Skip file: TCGA-B8-5550_10-13-2004-CT-ABDOMEN-PELVIS-W-CONT-60396_2.000000-AbdPel--5.0--B31f-57043_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-5590_03-19-1994-Thorax090ChestPEStudy-Adult-14214_2.000000-Localizer--5.0--B40f-47573_.nii.gz
JSON-file already exists. Skip file: TCGA-B8-5158_12-28-2003-CT-RENAL-Mass-54722_5.000000-Nephrographic--5.0--B31f-55570_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-6088_07-04-1992-CT-ABDOMEN--PELVIS-49837_2.000000-AXIAL-96761_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-6088_04-12-1997-Abdomen037KidneyBiphasePostNephrectomy-Adult-02501_4.000000-Chest-Routine--5.0--B40f-49176_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-6093_03-16-1991-EVT-12311_2.000000-AXIAL-61927_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-5590_06-04-1991-Thorax2-CAP-ROUTINE-38873_3.000000-delay-kids--5.0--B40f-71793_.nii.gz
JSON-file already exists. Skip file: TCGA-B0-5110_10-26-1990-CT-ABDOMEN-WITH-CONTRA-83943_2.000000-74581_.nii.gz
JSON-file already exists. Skip file: TCGA-B8-5158_12-28-2003-CT-RENAL-Mass-54722_3.000000-Arterial--5.0--B31f-20787_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-6096_01-10-1993-Abdomen01AbdomenRoutine-Adult-05857_2.000000-AP-Routine--5.0--B40f-61286_.nii.gz
JSON-file already exists. Skip file: TCGA-B0-4712_12-09-1991-CT-ABDOMEN-WITH-CONTRA-96104_2.000000-HELICAL-5s-22599_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-5590_04-02-1993-Thorax02CAPRoutine-Adult-04146_4.000000-kidney-wo--5.0--B40f-50992_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-5585_09-27-1997-Thorax040CAPRoutine-Adult-43440_2.000000-CAbd-Routine--5.0--B40f-00455_.nii.gz
JSON-file already exists. Skip file: TCGA-BP-4799_05-21-1991-CT-CHABPEL-WO-CONTRAST-63310_3.000000-Recon-2-40749_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-5585_09-12-1993-Thorax040CAPRoutine-Adult-90155_4.000000-CAP-Routine--3.0--B40f--cor-66680_.nii.gz
JSON-file already exists. Skip file: TCGA-B8-5162_10-06-2003-CT-RENAL-Mass-58198_5.000000-Venous-Phase--5.0--B30f-47461_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-5590_04-02-1993-Thorax02CAPRoutine-Adult-04146_9.000000-90-sec-delay--3.0--B40f--cor--cor--cor-49179_.nii.gz
JSON-file already exists. Skip file: TCGA-B8-4151_05-19-2003-CT-RENAL-Mass-57015_4.000000-Arterial-Phase--5.0--SPO--cor-46490_.nii.gz
JSON-file already exists. Skip file: TCGA-B8-A54E_02-02-2005-CT-RENAL-Mass-86462_5.000000-Venous-Phase--5.0--B30f-28157_.nii.gz
JSON-file already exists. Skip file: TCGA-B8-4151_04-16-2004-CT-ABDOMEN-PELVIS-W-CONT-84908_2.000000-AbdPel--5.0--B31f-57302_.nii.gz
JSON-file already exists. Skip file: TCGA-B8-5158_07-29-2004-CT-ABDOMEN-PELVIS-W-CONT-40508_2.000000-AbdomenPel--5.0--B31f-17467_.nii.gz
JSON-file already exists. Skip file: TCGA-CJ-4642_03-31-1995-CT-CAP-00064_3.000000-Recon-2-C-A-P-62781_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-5590_04-02-1993-Thorax02CAPRoutine-Adult-04146_2.000000-chestliver-wo--5.0--B40f-06996_.nii.gz
JSON-file already exists. Skip file: TCGA-B8-5165_09-25-2003-CT-RENAL-Mass-21003_5.000000-Nephrographic--5.0--B31f-39456_.nii.gz
JSON-file already exists. Skip file: TCGA-B0-5110_09-19-1991-CT-ABDOMEN-WITH-AND-WI-12069_3.000000-04140_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-5587_04-20-1997-Abdomen020APRoutineAbdomenPelvis-Adult-51162_4.000000-Biopsy-Abd--2.4--B30s-58797_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-5588_09-21-1992-Abdomen01APRoutine-Adult-73165_3.000000-AP-W--5.0--B40f-44979_.nii.gz
JSON-file already exists. Skip file: TCGA-B8-5159_12-05-2003-CT-RENAL-MASS-WPelvis-71904_4.000000-Arterial--Coronal-Thin-Mip-07325_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-5581_08-12-1995-Thorax040CAPRoutine-Adult-93399_4.000000-MIP-axial-62276_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-5588_09-21-1992-Abdomen01APRoutine-Adult-73165_2.000000-KIDS-WO--5.0--B40f-10284_.nii.gz
JSON-file already exists. Skip file: TCGA-B8-4148_04-02-2003-CT-RENAL-Mass-68197_3.000000-arterial--5.0--B30s-92347_.nii.gz
JSON-file already exists. Skip file: TCGA-B8-5165_08-11-2004-CT-RENAL-Mass-44170_4.000000-Arterial--Coronal-Thin-Mip-89044_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-6096_04-02-1995-Abdomen020APRoutine-Adult-68078_4.000000-ap-with--5.0--B40s-02132_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-5585_09-23-1994-Thorax040CAPRoutine-Adult-20757_4.000000-Chest-MIP-axial-58684_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-5589_02-24-1995-Abdomen020APRoutine-Adult-00352_7.000000-coronal-APRoutine--3.0--B40f-06552_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-5590_04-13-1992-Thorax02CAPRoutine-Adult-75223_5.000000-CAP-Routine--2.0--B70f-71060_.nii.gz
JSON-file already exists. Skip file: TCGA-B8-4620_03-02-2004-CT-ABDOMEN-PELVIS-WO-CONT-60511_2.000000-AbdPelvis--5.0--B31f-11796_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-6088_06-30-1991-Abdomen1-AP-ROUTINE-51457_2.000000-AP-Routine--5.0--B40f-27592_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-5588_09-21-1992-Abdomen01APRoutine-Adult-73165_4.000000-3-MIN-DELAY--5.0--B40f-43245_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-6088_01-16-1993-Abdomen020APRoutine-Adult-99778_4.000000-AP-wcont--3.0--SPO--cor-85249_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-5581_05-02-1992-Abdomen01APRoutine-Adult-37752_2.000000-AP-Routine--5.0--B40f-89868_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-5589_08-30-1991-Abdomen01AbdomenRoutine-Adult-36686_3.000000-Abd---5.0--B40f-17014_.nii.gz
JSON-file already exists. Skip file: TCGA-CJ-4642_01-06-1995-CAP-02936_2.000000-PRE-06709_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-5589_02-24-1995-Abdomen020APRoutine-Adult-00352_9.000000-3-min-delay--5.0--B40f-18150_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-5588_08-03-1991-CT-ABDOMEN--PELVIS-42602_2.000000-AXIAL-81631_.nii.gz
JSON-file already exists. Skip file: TCGA-B8-5549_03-17-2004-CT-RENAL-Mass-08744_5.000000-Nephrographic--5.0--B31f-56219_.nii.gz
JSON-file already exists. Skip file: TCGA-B8-5553_12-14-2004-CT-ABDOMEN-PELVIS-W-CONT-98595_3.000000-AbdPel--5.0--B31f-06781_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-5590_09-15-1991-Thorax02CAPRoutine-Adult-75442_3.000000-kid-delay--5.0--B40f-40384_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-6088_02-28-1994-Abdomen020APRoutine-Adult-83665_4.000000-AP-Routine--5.0--B40f-23417_.nii.gz
JSON-file already exists. Skip file: TCGA-B0-5707_10-30-1986-CT-ABDOMEN-WITH-CONTRA-51524_2.000000-C-58316_.nii.gz
JSON-file already exists. Skip file: TCGA-B8-5164_10-23-2003-CT-RENAL-Mass-38123_4.000000-ArterialPhase--5.0--B20f--cor-75844_.nii.gz
JSON-file already exists. Skip file: TCGA-BP-4335_10-01-1986-CT-CHABPEL-KIDNEY-PROTOCOL-64314_103.000000-19062_.nii.gz
JSON-file already exists. Skip file: TCGA-B8-5163_02-15-2005-CT-ABDOMEN-PELVIS-WO-CONT-57147_2.000000-AbdPelvis--5.0--B31f-14969_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-6088_05-30-1998-Thorax040CAPRoutine-Adult-75456_4.000000-MIP-Chest-23009_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-5585_12-21-1992-Thorax02CAPRoutine-Adult-35967_3.000000-Kidney-Delays---5.0--B40f-84640_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-5589_05-03-1996-Abdomen020AbdomenRoutine-Adult-79450_2.000000-Kids-WO--5.0--B40f-37462_.nii.gz
JSON-file already exists. Skip file: TCGA-B8-A54F_01-12-2006-CT-ABDOMEN-PELVIS-W-CONT-2-30707_2.000000-AbdPel--5.0--B31f-58305_.nii.gz
JSON-file already exists. Skip file: TCGA-B0-4821_10-28-1984-ABDOMEN-WWO-67734_2.000000-26930_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-6088_10-11-1999-CT-Urogram-w-3D-64223_8.000000-Combined-Abd--3.0--B40f-88234_.nii.gz
JSON-file already exists. Skip file: TCGA-B0-5707_11-02-1986-CT-ABD-WOW--PELVIS-W-39221_2.000000-AXIAL-SCANS-34611_.nii.gz
JSON-file already exists. Skip file: TCGA-B0-5110_04-16-1986-ABDPEL-86938_2.000000-68370_.nii.gz
JSON-file already exists. Skip file: TCGA-B8-5550_03-10-2004-CT-RENAL-Mass-76610_5.000000-VenousPhase--5.0--B30f-32779_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-5581_07-08-1994-Thorax040CAPRoutine-Adult-42293_4.000000-Chest-MIP-axial-27354_.nii.gz
JSON-file already exists. Skip file: TCGA-B0-5712_02-28-1987-A-P-94357_2.000000-62655_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-5585_11-09-1995-Thorax040CAPRoutine-Adult-46851_4.000000-Axial-MIPS-54154_.nii.gz
JSON-file already exists. Skip file: TCGA-B0-4845_06-04-1986-CT-CHEST-W--ABDOMEN-W-91168_3.000000-12863_.nii.gz
JSON-file already exists. Skip file: TCGA-B0-5110_06-29-1988-CT--ABDOMEN-W-CONTRAST-29337_2.000000-27230_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-5585_02-23-1997-Thorax040CAPRoutine-Adult-01591_2.000000-Ch--abd-w--5.0--B40f-60735_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-5588_11-30-1991-CT-ABDOMEN--PELVIS-25295_3.000000-AXIAL-85742_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-6096_03-08-1996-CT-ABDOMEN--PELVIS-58090_2.000000-AP-Routine-85078_.nii.gz
JSON-file already exists. Skip file: TCGA-BP-4349_04-29-1990-CT-ABPEL-KIDNEY-PROTOCOL-96920_269.000000-COR-26295_.nii.gz
JSON-file already exists. Skip file: TCGA-B8-5165_09-25-2003-CT-RENAL-Mass-21003_2.000000-Unenhanced--5.0--B31f-82314_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-5589_06-05-1998-Abdomen037KidneyBiphase-Adult-87366_3.000000-90-Sec--5.0--B40f-52277_.nii.gz
JSON-file already exists. Skip file: TCGA-B8-A54E_02-02-2005-CT-RENAL-Mass-86462_3.000000-Arterial-Phase--5.0--B30f-31739_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-5589_02-24-1995-Abdomen020APRoutine-Adult-00352_8.000000-3-min-delay--5.0--B40f-18304_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-6093_10-11-1991-Vascular06CTAAbdPost-stent-Adult-88752_602.000000-MIP-92049_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-5587_04-20-1997-Abdomen020APRoutineAbdomenPelvis-Adult-51162_2.000000-AP-Routine--5.0--B40f-94102_.nii.gz
JSON-file already exists. Skip file: TCGA-B8-4148_04-02-2003-CT-RENAL-Mass-68197_603.000000-cor-thin-mip-ss-19517_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-6096_04-25-1997-Abdomen020AbdomenRoutine-Adult-75027_2.000000-AP-Routine--5.0--B20f-52705_.nii.gz
JSON-file already exists. Skip file: TCGA-BP-4351_07-11-1990-FORFILE-CT-CHABPEL---CD-48554_2.000000-ROUTINE-CHEST-59786_.nii.gz
JSON-file already exists. Skip file: TCGA-B0-5085_06-18-1987-e1-AP-18854_2.000000-69293_.nii.gz
JSON-file already exists. Skip file: TCGA-CW-6088_01-16-1993-Abdomen020APRoutine-Adult-99778_2.000000-AP-wcont--5.0--B40f-49970_.nii.gz
Create body-part meta data file: TCGA-B0-5115_06-17-1992-AP-82538_350.000000-Reformatted-34987_.json
###Markdown
4. Analyze

Let's analyze the json metadata files. In each json file, several tags are saved.

4.1 Slice Scores

We can, for example, plot the slice scores. The slice scores increase monotonically with slice height. With the help of the `look-up table`, the slice scores can be mapped to concrete anatomies.

4.2 Body Part Examined tag

The body part examined tag is based on the predicted slice scores. For each volume, one tag is calculated. If the slope of the slice scores seems unrealistic, the tag is set to NONE. (A toy version of such a slope check is sketched after the next code cell.)
###Code
json_filepaths = [os.path.join(output_path, f) for f in os.listdir(output_path) if f.endswith(".json")]
nifti_filepaths = [os.path.join(input_path, f.replace(".json", ".nii.gz")) for f in os.listdir(output_path) if f.endswith(".json")]  # filter like json_filepaths so the two lists stay paired
x = load_json(json_filepaths[0])
x.keys()
plot_scores_interactive(json_filepaths, nifti_filepaths)
dftags = get_updated_bodypartexamined_from_json_files(output_path)
plot_dicomexamined_distribution(dftags, fontsize=12)
###Output
_____no_output_____
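###Code
# Toy sketch only (hypothetical, NOT the package's actual implementation) of
# the slope sanity check mentioned above: slice scores should increase with
# slice height, so a non-positive fitted slope would make the tag NONE.
import numpy as np
def plausible_score_slope(z_positions, scores):
    # fit a line to score-vs-position and require a positive slope
    slope = np.polyfit(np.asarray(z_positions, float), np.asarray(scores, float), 1)[0]
    return slope > 0
###Output
_____no_output_____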
###Markdown
Based on the Body Part Examined tag, we are able to filter all CT volumes where the CHEST is present.
###Code
# Filter files which include the chest
dfchest = dftags[dftags.tag.isin(["CHEST", "CHEST-ABDOMEN-PELVIS", "CHEST-ABDOMEN"])]
dfchest.reset_index(drop=False, inplace=True)
dfchest = dfchest.rename({"index": "json"}, axis=1)
dfchest = dfchest.drop(["count"], axis=1)
print(f"Files which include the chest: {dfchest.shape[0]}")
dfchest.sample(5)
###Output
Files which include the chest: 29
###Markdown
5. Crop the lungs

For the remaining CT volumes, we can crop the lungs by finding the expected scores for the landmarks `lung_start` and `lung_end`. With these score boundaries, the appropriate region can be cropped in the CT volumes based on the slice score curves. A small cropping helper is sketched inside the next code cell.
###Code
x = load_json(json_filepaths[0])
lookuptable = pd.DataFrame(x["look-up table"]).T
start_score = x["look-up table"]["lung_start"]["mean"]
end_score = x["look-up table"]["lung_end"]["mean"]
lookuptable.sort_values(by="mean")[["mean"]]
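
# Minimal cropping sketch (illustrative only): `volume` and `slice_scores` are
# hypothetical arrays for one CT volume and its per-slice scores, with slices
# assumed along the last axis.
import numpy as np
def crop_between_scores(volume, slice_scores, low=start_score, high=end_score):
    keep = np.where((np.asarray(slice_scores) >= low) & (np.asarray(slice_scores) <= high))[0]
    return volume[..., keep.min():keep.max() + 1]
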
plot_tailored_volumes_interactive(dfchest,
start_score,
end_score,
json_base_path=output_path,
nifti_base_path=input_path)
###Output
_____no_output_____ |
04_data_integration/3. data_profiling_notebooks_ipynb/02_fd_check/fd_discovery_wines2016.ipynb | ###Markdown
Name: Rachel Fanti Coelho Lima
Date: 12/2021
Subject: LM-18 73005B - Data Profiling - AY 2021-22
Task: Implementation of FD.
###Code
%matplotlib inline
# pip install PyQt5==5.9.2
import time
import pandas as pd
import networkx as nx
import matplotlib.pyplot as plt
import seaborn
import sys
#pip install pandas_profiling --user
#pip install PyQt5==5.9.2
from pandas_profiling import ProfileReport
from pandas_profiling.utils.cache import cache_file
###Output
_____no_output_____
###Markdown
Input Data
###Code
'''
df = pd.DataFrame([ [ 'C', '3', 'X', 722, 112 ],
[ 'A', '1', 'X', 289, 553 ],
[ 'A', '1', 'Y', 189, 583 ],
[ 'B', '1', 'X', 289, 513 ],
[ 'C', '1', 'X', 289, 553 ] ], columns = [a for a in 'abcde'])
'''
df = pd.read_csv('./../00_data/wines2016.csv', sep = ",", index_col=False)
df.columns=['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p']
#df.columns = [x for x in range (0, len(df.columns))]
plot_lattice = True
schema = set(a for a in df.columns)
df.head()
schema
###Output
_____no_output_____
###Markdown
Layout functions
###Code
def layout_lattice(basket):
from itertools import combinations
from scipy.special import binom
G = nx.DiGraph()
len_max, pos = binom(len(basket), len(basket)/2), {}
for k in range(1, len(basket)+1):
sorted_nodes_at_level = sort([set(e) for e in combinations(basket, k)])
G.add_nodes_from(sorted_nodes_at_level)
for p, n in enumerate(sorted_nodes_at_level):
pos[n] = (p + 0.5 * (len_max - len(sorted_nodes_at_level)), k-len(basket))
nx.draw_networkx_nodes(G, pos, nodelist=sorted_nodes_at_level, node_color='tab:grey')
return G, pos
def mark_as_visited_sup(E, F):
if not plot_lattice: return
nx.draw_networkx_nodes(viz[0], viz[1], nodelist=sort(E), node_color='tab:red')
nx.draw_networkx_nodes(viz[0], viz[1], nodelist=sort(F), node_color='tab:green')
nx.draw_networkx_labels(viz[0], viz[1], font_size=8, font_color="whitesmoke")
plt.show()
plt.pause(1)
def mark_as_visited_gen(f, s):
if not plot_lattice: return
edges = [(sort_single(f), sort_single(s))]
viz[0].add_edges_from(edges)
nx.draw_networkx_edges(viz[0], viz[1], edgelist=edges, arrows=False)
###Output
_____no_output_____
###Markdown
Helper functions
###Code
def sort_single(s):
return ''.join(sorted(list(s)))
def sort(F):
return sorted(sort_single(s) for s in F)
# original to keep
def pli_single(a, D):
pli = {}
for row in range(D.shape[0]):
key = D.at[row, a]
if key in pli:
pli[key].add(row)
else:
pli[key] = { row }
return list(v for v in pli.values() if len(v) > 1)
# original to keep
def pli_intersect(p1, p2):
pli, prob = {}, {}
for c, s in enumerate(p1):
for row in s:
prob[row] = c
for c, s in enumerate(p2):
for row in s:
if row in prob:
key = (c, prob[row])
if key in pli:
pli[key].add(row)
else:
pli[key] = { row }
return list(v for v in pli.values() if len(v) > 1)
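
# Quick illustration of the stripped-partition idea used above: for a column
# with values ['x', 'y', 'x', 'y', 'z'], pli_single returns [{0, 2}, {1, 3}];
# the singleton cluster {4} is dropped, which keeps the partitions compact.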
def intersection_sets (set_1, set_2):
'''Finding the common chars for both strings
'''
result = set()
for e in set_1:
if e in set_2:
result.add(e)
return result
#rachel
def set_intersect(X, C):
'''
e.g.
Input:
X = {'b', 'a'}
C = {'d': {'a', 'b', 'c', 'd', 'e'}, 'e': {'a', 'b', 'c', 'd', 'e'}, 'b': {'a', 'b', 'c'}, 'c': {'a', 'b', 'c', 'd', 'e'}, 'a': {'a', 'c', 'e'}}
set_intersect(X, C)
Output:
    {'ab': {'a', 'c'}}
'''
intersection = {}
X = sort_single(X)
for key, value in C.items():
if all (k in X for k in key):
if X in intersection:
intersection[X]= intersection_sets (intersection[X], value)
else:
intersection[X] = value
return intersection
#rachel
def get_cardinality (list_of_sets):
'''calculates the cardinality of a list of sets
e.g.
Input
list_of_sets = [{0, 4}, {1, 2}]
get_cardinality (list_of_sets)
Output
4
'''
count = 0
for s in list_of_sets: #for each set in list of sets
for e in s: # for each element in set
count += 1
return count
#rachel
def is_valid(X, A, pli):
    ''' Check whether X \ A -> A is a functional dependency.
Use key-error and pli to infer FDs (key error can be used with pli and stripped-pli):
X->y
pli(X)=pli(XY)
cr(X)=Cr(XY)
Therefore:
X\A->A
Cr(X\A) = Cr(X)
e.g.
Input:
X = {'b', 'd'}
A = 'b'
pli = {'d': [{1, 3, 4}], 'e': [{1, 4}], 'b': [{1, 2, 3, 4}], 'c': [{0, 1, 3, 4}], 'a': [{0, 4}, {1, 2}], 'ab': [{1, 2}], 'ac': [{0, 4}], 'ad': [], 'ae': [], 'bc': [{1, 3, 4}], 'bd': [{1, 3, 4}], 'be': [{1, 4}], 'cd': [{1, 3, 4}], 'ce': [{1, 4}], 'de': [{1, 4}]}
is_valid(X, A, pli)
Output:
True
'''
pli_X_wth_A = pli[sort_single(X).replace(A, '')]
key_error_X_wth_A = get_cardinality(pli_X_wth_A) - len(pli_X_wth_A)
pli_X = pli[sort_single(X)]
key_error_X = get_cardinality(pli_X) - len(pli_X)
if key_error_X_wth_A == key_error_X:
return True
else:
return False
#rachel
def is_key(X, pli):
''' Check if X is a (super)key
verify if key_error_X = 0
'''
pli_X = pli[sort_single(X)]
key_error_X = get_cardinality(pli_X) - len(pli_X)
if key_error_X == 0:
return True
else:
return False
###Output
_____no_output_____
###Markdown
Functional dependency discovery algorithm
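
The validity test used below rests on the key-error of a stripped partition: writing $\|\pi_X\|$ for the number of tuples in the stripped partition of attribute set $X$ and $|\pi_X|$ for its number of clusters, the key-error is $e(X) = \|\pi_X\| - |\pi_X|$, and (as the `is_valid` docstring notes) the dependency $X \setminus A \rightarrow A$ holds exactly when $e(X \setminus A) = e(X)$.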
###Code
#rachel
def apriori_fd_discovery(schema, table):
'''
Ej: candidates LHSs [of size j?]
Fj: validated LHSs [of size j?]
Cj: candidates RHSs for LHSs [of size j?]
Format:
E: [{'c', 'b', 'a'}, {'d', 'c', 'b'}, {'e', 'c', 'b'}, {'d', 'b', 'e'}, {'d', 'c', 'e'}]
pli: {'ad': [], 'ae': [], 'abc': [], 'bcd': [{1, 3, 4}], 'bce': [{1, 4}], 'bde': [{1, 4}], 'cde': [{1, 4}]}
C = {'d': {'a', 'b', 'c', 'd', 'e'}, 'e': {'a', 'b', 'c', 'd', 'e'}, 'b': {'a', 'b', 'c', 'd', 'e'}, 'c': {'a', 'b', 'c', 'd', 'e'}, 'a': {'a', 'b', 'c', 'd', 'e'}}
'''
k=1
U = []
E, pli = [set(a) for a in schema], {a: pli_single(a, table) for a in schema} # single attribute sets and PLIs
    C = {sort_single(a): schema for a in schema} # candidate RHS sets for single attributes
F, U = prune(E, pli, C) # determine the non-unique ones
# print('\n','E:', E, '\n', 'pli:', pli, '\n', 'C:', C, '\n', 'F:', F)
while len(F)!=0: # while there are non-dependent LHSs
E, pli_next = candidates(F, pli) # create candidate LHSs with one attribute more and PLIs
pli.update(pli_next)
C, U_next_1 = dependencies (E, pli, C) # create candidate RHSs for new candidate LHSs
# print('\n','E:', E, '\n', 'pli:', pli, '\n', 'C:', C)
F, U_next_2 = prune(E, pli, C) # and determine the non-dependent LHSs
# increment again the LHSs size
U = U + U_next_1 + U_next_2
U = sorted(U, key = lambda i: (len(i[0]), i[0]))
# print('\n', 'U:', U)
# print('\n', 'F:', F)
print('--> done with level', k, ' - FDs: ', end ="")
        print(['%s -> %s' % (sort_single(set(u[0])), u[1]) for u in U if len(u[0]) == k])
k=k+1
return U # return the functional dependencies
# original to keep
def candidates(F, pli):
'''
Format:
E: [{'b', 'a'}, {'c', 'a'}, {'d', 'a'}, {'e', 'a'}, {'c', 'b'}]
pli: {'c': [{0, 1, 3, 4}], 'e': [{1, 4}], 'b': [{1, 2, 3, 4}], 'd': [{1, 3, 4}], 'ab': [{1, 2}], 'ac': [{0, 4}], 'ad': [], 'ae': [], 'bc': [{1, 3, 4}]}
'''
pli_next = pli.copy()
Fs, E_next = sort(F), []
for i1 in range(len(Fs)):
for i2 in range(i1+1, len(Fs)):
if Fs[i1][:-1] == Fs[i2][:-1] and Fs[i1][-1] < Fs[i2][-1]:
f = set(fs for fs in Fs[i1]).union({Fs[i2][-1]})
superset = len(f)
for i in f:
if f.difference({i}) in F:
superset -= 1
if not superset:
E_next.append(f)
pli_next[sort_single(f)] = pli_intersect(pli_next[sort_single(Fs[i1])],
pli_next[sort_single(Fs[i2])])
mark_as_visited_gen(f, set(s for s in Fs[i1]))
del pli[sort_single(Fs[i1])]
return E_next, pli_next
#rachel
def dependencies(E, pli, C):
'''
INPUTs:
E = [{'b', 'a'}, {'c', 'a'}, {'d', 'a'}, {'e', 'a'}, {'c', 'b'}, {'d', 'b'}, {'e', 'b'}, {'d', 'c'}, {'c', 'e'}, {'d', 'e'}]
pli = {'d': [{1, 3, 4}], 'e': [{1, 4}], 'b': [{1, 2, 3, 4}], 'c': [{0, 1, 3, 4}], 'a': [{0, 4}, {1, 2}], 'ab': [{1, 2}], 'ac': [{0, 4}], 'ad': [], 'ae': [], 'bc': [{1, 3, 4}], 'bd': [{1, 3, 4}], 'be': [{1, 4}], 'cd': [{1, 3, 4}], 'ce': [{1, 4}], 'de': [{1, 4}]}
C = {'d': {'a', 'b', 'c', 'd', 'e'}, 'e': {'a', 'b', 'c', 'd', 'e'}, 'b': {'a', 'b', 'c', 'd', 'e'}, 'c': {'a', 'b', 'c', 'd', 'e'}, 'a': {'a', 'b', 'c', 'd', 'e'}}
dependencies(E, pli, C)
E.g.
    X = {'b', 'c', 'e'}
    LHS = left hand side = X - A = {'c', 'e'}
    RHS = right hand side = A = {'b'}
'''
Cd = {} # initialize the set of rhs candidates
U = []
for X in E: # traverse the lhs candidates
rhs = set_intersect(X, C) # Add rhs candidates from previous level
Cd.update(rhs)
for X in E: # traverse the lhs candidates (change for x, pli!!)
for A in sort_single(X):
if A in Cd[sort_single(X)]: # traverse the rhs candidates
if is_valid (X, A, pli): # check if valid using key error and pli
                    # print ('***FD***', X,'/',A, '-->', A) # then it is a minimal functional dependency [C1]
                    U_A = [x for x in X if x != A]  # X \ A
                    U.append((sort(U_A), A))
Cd[sort_single(X)] = Cd[sort_single(X)]-{A} # remove from RHS candidates [C1]
# print ('C1 - C[', X, ']:', Cd[sort_single(X)])
for B in (schema-X): # traverse the candidates
Cd[sort_single(X)] = Cd[sort_single(X)]-{B} # remove from RHS candidates [C2]
# print ('C2 - C[', X, ']:', Cd[sort_single(X)])
    return Cd, U # return the pruned RHS candidate sets and the functional dependencies
#rachel
def prune(E, pli, C):
'''
Formats:
E = [{'b', 'a'}, {'c', 'a'}, {'d', 'a'}, {'e', 'a'}, {'c', 'b'}, {'d', 'b'}, {'e', 'b'}, {'d', 'c'}, {'c', 'e'}, {'d', 'e'}]
pli = {'a': [{0, 4}, {1, 2}], 'c': [{0, 1, 3, 4}], 'd': [{1, 3, 4}], 'b': [{1, 2, 3, 4}], 'e': [{1, 4}], 'ad': [], 'ae': [], 'ab': [{1, 2}], 'ac': [{0, 4}], 'bc': [{1, 3, 4}], 'bd': [{1, 3, 4}], 'be': [{1, 4}], 'cd': [{1, 3, 4}], 'ce': [{1, 4}], 'de': [{1, 4}], 'abc': [], 'bcd': [{1, 3, 4}], 'bce': [{1, 4}], 'bde': [{1, 4}], 'cde': [{1, 4}]}
    C = {'ab': {'a', 'b', 'c', 'd', 'e'}, 'ac': {'a', 'b', 'c', 'd', 'e'}, 'ad': {'a', 'b', 'c', 'd', 'e'}, 'ae': {'a', 'b', 'c', 'd', 'e'}, 'bc': {'a', 'b', 'c', 'd', 'e'}, 'bd': {'d'}, 'be': {'e'}, 'cd': {'d'}, 'ce': {'e'}, 'de': {'e'}}
X = {'d', 'a'}
U = []
'''
U=[]
F = E.copy() # initialize the set of LHSs candidates
for X in E: # traverse the candidates
if len(C[sort_single(X)])==0: # if a LHS there is no RHS candidate
            F.remove(X) # remove the LHS from the prefix tree
if is_key(X, pli): # check if is a (super)key using key-error and pli
print (sort_single(X) ,'is key')
for A in ([x for x in C[sort_single(X)] if x not in sort_single(X)]): # traverse the RHSs candidates of X [and iterate in each candidate C[X] - X
keys_XA_B = [sort_single(X.union(A)).replace(B,"") for B in sort_single(X)] #C[X+A-B] if RHS in all C of augmented LHS subsets [C3]
# print ('keys_XA_B', keys_XA_B)
if all (key in C for key in keys_XA_B): # [Check if exist all augmented LHS subsets]
# print ('all keys present')
if all(A in C[k] for k in keys_XA_B): #if RHS in all C of augmented LHS subsets [C3]
# print ('***FD***', sort_single(X), "-->", A, 'Minimal') # then it is a minimal FD
U.append((sort(list(X)), A))
F.remove(X) # remove the LHS from the prefix tree (everything after this will not be minimal) [key-pruning]
mark_as_visited_sup(E, F)
    return F, U # return the pruned set of candidates and the functional dependencies
###Output
_____no_output_____
###Markdown
Run
###Code
if plot_lattice:
viz = layout_lattice(schema)
plt.ion()
# run it
print(df.head())
print('')
print('Functional dependency discovery on', sort_single(schema))
'''
profile = ProfileReport(df, title="dataset", explorative=True)
profile.to_file("./report.html")
'''
fd_start = time.time()
fd = apriori_fd_discovery(schema, df)
# print it
print('\n\ndiscovered', len(fd), 'minimal FDs in', time.time() - fd_start, ' seconds')
print('\n***All FDs:')
print(['%s -> %s' % (sort_single(set(u[0])), u[1]) for u in fd])
if plot_lattice:
plt.ioff()
plt.show()
###Output
a b c d e f g h i \
0 NaN NaN NaN NaN Torre Gaia NaN NaN NaN NaN
1 NaN NaN NaN NaN Terre Stregate NaN NaN NaN NaN
2 NaN NaN NaN NaN Cantine Terranera NaN NaN NaN NaN
3 NaN NaN NaN NaN Santimartini NaN NaN NaN NaN
4 NaN NaN NaN NaN Donnachiara NaN NaN NaN NaN
j k l \
0 Via Boscocupo 11 Dugenta Benevento
1 C.da Santa Lucia SS87 Guardia Sanframondi Benevento
2 Via Sandro Pertini Grottolella Avellino
3 via Bebiana, 107/A Solopaca Benevento
4 Via Stazione Montefalcione Avellino
m n \
0 41.122790, 14.462032 http://www.tenutatorregaia.it/code/home.php
1 41.249499, 14.572098 http://www.terrestregate.it/it/
2 40.967256, 14.790662 http://www.cantineterranera.it/terranera/index...
3 41.194201, 14.528736 http://www.santimartini.it/index.php?lang=it
4 40.955245, 14.878085 http://www.donnachiara.com/
o p
0 0824 906054 NaN
1 0824 817857 NaN
2 0825 671455 NaN
3 0824 971254 NaN
4 0825 977135 NaN
Functional dependency discovery on abcdefghijklmnop
Image_to_Sketch.ipynb | ###Markdown
**LetsGrowMore (VIP) January 2022**

TASK-I: Beginner Level

Image manipulation using OpenCV: converting an image into a sketch.
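
The cells below implement the classic "color dodge" pencil-sketch trick: grayscale, invert, blur the inversion, invert the blur, then divide. With pixel values in $[0, 255]$, the final `cv2.divide` step computes

$$\text{sketch} = \frac{\text{gray}}{255 - \text{blur}(255 - \text{gray})} \times 256,$$

which pushes smooth regions toward white while keeping strong edges dark, giving the pencil-line look.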
###Code
import cv2
import matplotlib.pyplot as plt
from google.colab import files
uploaded=files.upload()
image = cv2.imread("Dog.jpeg")
image
plt.imshow(image[:,:,::-1])
plt.axis(False)
plt.show()
plt.imshow(image)
plt.axis(False)
plt.show()
RGB_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
plt.imshow(RGB_image)
plt.axis(False)
plt.show()
grey_image=cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
plt.imshow(grey_image)
plt.axis(False)
plt.show()
invert_image=cv2.bitwise_not(grey_image)
plt.imshow(invert_image)
plt.axis(False)
plt.show()
blur_image=cv2.GaussianBlur(invert_image, (111,111),0)
plt.imshow(blur_image)
plt.axis(False)
plt.show()
invblur_image=cv2.bitwise_not(blur_image)
plt.imshow(invblur_image)
plt.axis(False)
plt.show()
sketch_image=cv2.divide(grey_image,invblur_image, scale=256.0)
plt.imshow(sketch_image)
plt.axis(False)
plt.show()
cv2.imwrite('Dog.jpeg', sketch_image)  # note: this overwrites the original 'Dog.jpeg' on disk
rgb_sketch=cv2.cvtColor(sketch_image, cv2.COLOR_BGR2RGB)
plt.imshow(rgb_sketch)
plt.axis('off')
plt.show()
plt.figure(figsize=(14,8))
plt.subplot(1,2,1)
plt.title('Original image', size=18)
plt.imshow(RGB_image)
plt.axis('off')
plt.subplot(1,2,2)
plt.title('Sketch', size=18)
rgb_sketch=cv2.cvtColor(sketch_image, cv2.COLOR_BGR2RGB)
plt.imshow(rgb_sketch)
plt.axis('off')
plt.show()
###Output
_____no_output_____ |
11 Naive Bayes/homework/Gruen_Gianna_11_1.ipynb | ###Markdown
2. Use the LabelEncoder to convert the group names to numeric labels
###Code
from sklearn import preprocessing
le = preprocessing.LabelEncoder()
le.fit(newsgroups_df['group'])
le.transform(newsgroups_df['group'])
newsgroups_df['newsgroups_label'] = le.transform(newsgroups_df['group'])
newsgroups_df.head(3)
newsgroups_df.tail(3)
###Output
_____no_output_____
###Markdown
3. Pick out 10 words or phrases to use as manually created features. Doing an 80/20 train/test split, how well does a Naive Bayes classifier do?

The ten chosen words: atheism, graphics, motorcycles, baseball, hockey, crypt, med, space, guns, mideast
###Code
newsgroups_df['has_atheism'] = newsgroups_df['content'].str.contains('atheism')
newsgroups_df['has_graphics'] = newsgroups_df['content'].str.contains('graphics')
newsgroups_df['has_motorcycles'] = newsgroups_df['content'].str.contains('motorcycles')
newsgroups_df['has_baseball'] = newsgroups_df['content'].str.contains('baseball')
newsgroups_df['has_hockey'] = newsgroups_df['content'].str.contains('hockey')
newsgroups_df['has_crypt'] = newsgroups_df['content'].str.contains('crypt')
newsgroups_df['has_med'] = newsgroups_df['content'].str.contains('med')
newsgroups_df['has_space'] = newsgroups_df['content'].str.contains('space')
newsgroups_df['has_guns'] = newsgroups_df['content'].str.contains('guns')
newsgroups_df['has_mideast'] = newsgroups_df['content'].str.contains('mideast')
newsgroups_df.columns
from sklearn.cross_validation import train_test_split
nbx_train, nbx_test, nby_train, nby_test = train_test_split(
newsgroups_df[['has_atheism', 'has_graphics', 'has_motorcycles', 'has_baseball',
'has_hockey', 'has_crypt', 'has_med', 'has_space', 'has_guns',
'has_mideast']], # the first is our FEATURES
    newsgroups_df['newsgroups_label'], # the second parameter is the LABEL (the encoded newsgroup name)
test_size=0.2) # 80% training, 20% testing
from sklearn import naive_bayes
clf = naive_bayes.BernoulliNB()
clf.fit(nbx_train, nby_train)
clf.score(nbx_train, nby_train)
clf.score(nbx_test, nby_test)
###Output
_____no_output_____
###Markdown
4. Use a CountVectorizer to automatically create your list of features. Doing an 80/20 train/test split, how well can a Naive Bayes classifier do?
###Code
from sklearn.feature_extraction.text import CountVectorizer
vectorizer = CountVectorizer(max_features=1000)
#kernel dies without max features
vectorizer.fit(newsgroups_df['content'])
all_word_features = vectorizer.transform(newsgroups_df['content'])
all_word_features
x_train, x_test, y_train, y_test = train_test_split(
all_word_features,
newsgroups_df['newsgroups_label'],
test_size=0.2)
clf = naive_bayes.BernoulliNB()
clf.fit(x_train, y_train)
clf.score(x_train, y_train)
clf.score(x_test, y_test)
clf.predict(x_test)
# So Naive Bayes did better with the manually selected features.
###Output
_____no_output_____
###Markdown
5. PUSH THAT SCORE UP! You can adjust ngrams, max_features and any other options of the vectorizer, or try a decision tree or any other type of classifier. Because I couldn't run the CountVectorizer without max_features, and n-grams don't make sense here (it is more about single words), I would like to try Random Forests. A TF-IDF variant is sketched after the Random Forest cell below.
###Code
from sklearn.ensemble import RandomForestClassifier
tree_clf = RandomForestClassifier()
tree_clf.fit(x_train, y_train)
print("Training score:", tree_clf.score(x_train, y_train))
print("Testing score:", tree_clf.score(x_test, y_test))
#Same results for Naive Bayes and Random forest?
###Output
_____no_output_____
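###Code
# One more idea, sketched but not run here: TF-IDF weighting with a
# multinomial Naive Bayes, reusing the column names from the cells above.
from sklearn.feature_extraction.text import TfidfVectorizer
tfidf = TfidfVectorizer(max_features=1000)
tfidf_features = tfidf.fit_transform(newsgroups_df['content'])
xt_train, xt_test, yt_train, yt_test = train_test_split(
    tfidf_features, newsgroups_df['newsgroups_label'], test_size=0.2)
tfidf_clf = naive_bayes.MultinomialNB()
tfidf_clf.fit(xt_train, yt_train)
print("TF-IDF testing score:", tfidf_clf.score(xt_test, yt_test))
###Output
_____no_output_____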
###Markdown
6. Write 15 sentences that, when run against the predictor, are put in 15 separate newsgroups (list the names of the newsgroups).
###Code
sentences = [
"I believe atheism is also a religion",
"I love graphics",
"We rode a motorcycle on the weekend",
"I never played or watched baseball",
"Who likes hockey, really?!"
"You would want to encrypt your emails",
"I really love the pharma industry for all the medicine provided",
"How does medical care in space work out?",
"Do you have a collection of guns?",
"What's your favourite country in the mideast?",
"The US should pass a anti-gun law to control weapon sales",
"You should teach how to crypt in schools",
"Have you ever fired guns?",
"These are really cool graphics. How did you make them?",
"Mideast politics are really complicated."
]
sentences_words_features = vectorizer.transform(sentences)
sentences_words_features
clf = naive_bayes.BernoulliNB()
clf.fit(vectorizer.transform(newsgroups_df['content']), newsgroups_df['newsgroups_label'])
predictions = clf.predict(sentences_words_features)
predictions
le.inverse_transform(predictions)
# This is predicting badly, but it also predicts badly on the training data, so it is consistent in a way.
# I'd assume that this is a case for improving the training data, to also improve the result
# (i.e., go for word combinations instead of single words).
###Output
_____no_output_____ |
Homework/my_solutions/HW3/hw3.ipynb | ###Markdown
Problem 1

In order to get a start on the problem, I wanted to simply plot out some terms of the sequence to see what was going on.
###Code
import numpy as np
import matplotlib.pyplot as plt

MACHINE_EPSILON = np.finfo(float).eps  # float64 machine epsilon; used as the tolerance in later problems

def sequence(func, x0, num_terms, p):
seq = [x0]
for i in range(num_terms):
if (seq[-1] == p):
return np.array(seq)
seq.append(func(seq[-1]))
return np.array(seq)
###Output
_____no_output_____
###Markdown
Part A
###Code
func = lambda x : -16 + 6*x + (12/x)
p = 2
plt.plot(p - sequence(func, 1, 20, p))
plt.plot(p - sequence(func, 1.5, 20, p))
plt.plot(p - sequence(func, 1.6, 5, p))
plt.xlabel('n')
plt.ylabel('xn')
plt.show()
###Output
_____no_output_____
###Markdown
Part B
###Code
func = lambda x : (2/3)*x + (1/x**2)
p = 3**(1/3)
plt.plot(p - sequence(func, 1, 20, p))
plt.plot(p - sequence(func, 1.5, 20, p))
plt.plot(p - sequence(func, 2, 5, p))
plt.xlabel('n')
plt.ylabel('xn')
plt.show()
###Output
_____no_output_____
###Markdown
Part C
###Code
func = lambda x : 12/(1+x)
p = 3
plt.plot(p - sequence(func, 1, 20, p))
plt.plot(p - sequence(func, 1.5, 20, p))
plt.plot(p - sequence(func, 2, 20, p))
plt.plot(p - sequence(func, 4, 20, p))
plt.xlabel('n')
plt.ylabel('xn')
plt.show()
###Output
_____no_output_____
###Markdown
Problem 2
###Code
def newtons_method(func, func_prime, x0, max_iter=100, tol=1e-8):
history_x = []
history_fx = []
x = x0
fx = func(x)
history_x.append(x)
history_fx.append(fx)
for i in range(max_iter):
fx_prime = func_prime(x)
if (fx_prime == 0):
return x, history_x, history_fx
x -= (fx / fx_prime)
fx = func(x)
history_x.append(x)
history_fx.append(fx)
if np.abs(fx) <= tol or np.isinf(fx):
return x, history_x, history_fx
return x, history_x, history_fx
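
# The history lists are what the plots below use to track |x_n - root| per
# iteration; the fx_prime == 0 early return guards against dividing by zero
# when the derivative vanishes at an iterate.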
###Output
_____no_output_____
###Markdown
Part A
###Code
function = lambda x : (x - 3) * (x - 2)
function_prime = lambda x : 2*x - 5
xs = np.linspace(1, 4, 100)
plt.plot(xs, function_prime(xs))
plt.plot(xs[:-1], np.diff(function(xs)) / (xs[1] - xs[0]))
plt.xlabel('x')
plt.ylabel('y')
plt.title('f(x) = (x-3) * (x-2)')
plt.legend(['Derivative', 'np.diff'])
plt.show()
###Output
_____no_output_____
###Markdown
The two derivative methods seem close enough.

Part B
###Code
function = lambda x : (x - 3) * (x - 2)
function_prime = lambda x : 2*x - 5
root, x_his, fx_his = newtons_method(function, function_prime, 10)
x_his = np.array(x_his)
plt.plot(np.arange(len(x_his)), np.abs(x_his - 3))
plt.xlabel('n')
plt.ylabel('|x_n - 3|')
plt.title('Newtons method for f(x) = (x-3) * (x-2)')
plt.legend(['Error'])
plt.show()
###Output
_____no_output_____
###Markdown
The error does decay to 0, and it does decay at the rate I would think (quadratic convergence). This is because the root is simple, so Newton's Method guarantees at least quadratic convergence.
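For a simple root $p$ (so $f'(p) \neq 0$), the standard Newton error recurrence is

$$e_{n+1} \approx \left|\frac{f''(p)}{2 f'(p)}\right| e_n^2,$$

which is the quadratic decay visible in the plot.

Part C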
###Code
function = lambda x : (x-3)**2
function_prime = lambda x : 2*(x-3)
root, x_his, fx_his = newtons_method(function, function_prime, 10)
x_his = np.array(x_his)
plt.plot(np.arange(len(x_his)), np.abs(x_his - 3))
plt.xlabel('n')
plt.ylabel('|x_n - 3|')
plt.title('Newtons method for f(x) = (x-3)^2')
plt.legend(['Error'])
plt.show()
###Output
_____no_output_____
###Markdown
This error decays more slowly than before, which is what I would expect: the root is not simple here (it has multiplicity 2), and for a multiple root plain Newton's method converges only linearly rather than quadratically, so each step closes a smaller fraction of the remaining error.
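As a standard result (quoted here, not derived): for a root of multiplicity $m$, plain Newton's method satisfies

$$e_{n+1} \approx \frac{m-1}{m}\, e_n,$$

so with $m = 2$ the error is roughly halved each step, matching the plot above.

Part D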
###Code
# (i) 3*(x-3)
function_prime = lambda x : 3*(x-3)
root, x_his, fx_his = newtons_method(function, function_prime, 10)
x_his = np.array(x_his)
plt.plot(np.arange(len(x_his)), np.abs(x_his - 3))
# (ii) 2*(x-3.1)
function_prime = lambda x : 2*(x-3.1)
root, x_his, fx_his = newtons_method(function, function_prime, 10)
x_his = np.array(x_his)
plt.plot(np.arange(len(x_his)), np.abs(x_his - 3))
plt.xlabel('n')
plt.ylabel('|x_n - 3|')
plt.title('Newtons method for f(x) = (x-3)^2')
plt.legend(['Error for f\' = 3*(x-3)', 'Error for f\' = 2*(x-3.1)'])
plt.show()
###Output
_____no_output_____
###Markdown
The errors for both incorrect derivatives eventually converge, but it is interesting to note that the error for 3\*(x-3) smoothly converges, while the 2\*(x-3.1) case has a slight hitch, i.e. it briefly increases after about 5 iterations.

Part E
###Code
function = lambda x : (x-3)**2 + 1
function_prime = lambda x : 2*(x-3)
root, x_his, fx_his = newtons_method(function, function_prime, 10)
x_his = np.array(x_his)
plt.plot(np.arange(len(x_his)), np.abs(x_his - 3))
plt.xlabel('n')
plt.ylabel('|x_n - 3|')
plt.title('Newtons method for f(x) = (x-3)^2')
plt.legend(['Error'])
plt.show()
xs = np.arange(0, 10)
plt.plot(xs, function(xs))
plt.xlabel('x')
plt.ylabel('f(x)')
plt.title('(x-3)^2 + 1')
plt.show()
###Output
_____no_output_____
###Markdown
By plotting the function, it is clear that Newton's method is getting to the closest value to a root (3), and then going up the other side, oscillating back and forth near the minimum. As expected, there is no root, so the error never converges.

Problem 3

Part A
###Code
function = lambda x : (x - (1/3))**5
function_prime = lambda x : 5*((x - (1/3))**4)
root, x_his, fx_his = newtons_method(function, function_prime, 0.4, tol=MACHINE_EPSILON)
x_his = np.array(x_his)
plt.plot(np.arange(len(x_his)), np.abs(x_his - (1/3)))
plt.xlabel('n')
plt.ylabel('|x_n + 1/3|')
plt.title('Newtons method for f(x) = (x-1/3)^5')
plt.legend(['Error'])
plt.show()
###Output
_____no_output_____
###Markdown
The error decays slowly, as expected, since the root has a multiplicity of $5$.

Part B

By hand, it is clear that $\mu(x) = \frac{(x-1/3)^5}{5(x-1/3)^4}$, which simplifies to $\frac{1}{5}(x-\frac{1}{3})$. Thus $\mu'(x) = \frac{1}{5}$.
###Code
mu = lambda x : (1/5)*(x-1/3)
mu_prime = lambda x : 1/5
root, x_his, fx_his = newtons_method(mu, mu_prime, 0.4, tol=MACHINE_EPSILON)
x_his = np.array(x_his)
plt.plot(np.arange(len(x_his)), np.abs(x_his - (1/3)))
plt.xlabel('n')
plt.ylabel('|x_n - 1/3|')
plt.title('Newtons method for mu(x) = 1/5*(x-1/3)')
plt.legend(['Error'])
plt.show()
with np.printoptions(precision=20, suppress=True):
print(np.abs(x_his - (1/3))[:20])
print(root)
###Output
[0.06666666666666671 0. ]
0.3333333333333333
###Markdown
The error converges in one step! This is expected, since modified Newton's method is meant to speed up the convergence.

Part C
###Code
f_coeffs = np.poly(5*[1/3])
f_prime_coeffs = np.polyder(f_coeffs)
f_prime_prime_coeffs = np.polyder(f_prime_coeffs)
f = lambda x : np.polyval(f_coeffs, x)
f_prime = lambda x : np.polyval(f_prime_coeffs, x)
f_prime_prime = lambda x : np.polyval(f_prime_prime_coeffs, x)
mu = lambda x : f(x) / f_prime(x)
mu_prime = lambda x : (f_prime(x)**2 - f(x)*f_prime_prime(x)) / (f_prime(x))**2
root, x_his, fx_his = newtons_method(mu, mu_prime, 0.4, tol=MACHINE_EPSILON)
x_his = np.array(x_his)
plt.plot(np.arange(len(x_his)), np.abs(x_his - (1/3)))
plt.xlabel('n')
plt.yscale('log')
plt.ylabel('|x_n - 1/3| (log)')
plt.title('Newtons method for mu(x) = f(x) / f\'(x) (numpy poly edition)')
plt.legend(['Error'])
plt.show()
with np.printoptions(precision=20, suppress=True):
print(np.abs(x_his - (1/3))[:20])
###Output
[0.06666666666666671 0.00000000000007799317 0.12152777777785578
0.00000000000006511458 0.1956521739131086 0.0000000000000039968
0.20833333333333734 0.00000000000000460743 0.7499999999999953
0.00000000000000160982 0.08333333333333492 0.00000000000027738922
0.0833333333336107 0.00000000000031574743 0.2083333333336491
0.00000000000000272005 0.10500000000000273 0.00000000000016892043
0.08522727272744163 0.00000000000036876058]
###Markdown
The iteration here never converges. It is probably because of subtractive cancellation: the terms will never be quite 0, even though they should be. The error gets really close to $0$, and then bounces back up.

Part D
###Code
mu = lambda x : (1/5)*(x-1/2)
mu_prime = lambda x : 1/5
root, x_his, fx_his = newtons_method(mu, mu_prime, 0.4, tol=MACHINE_EPSILON)
x_his = np.array(x_his)
plt.plot(np.arange(len(x_his)), np.abs(x_his - (1/2)))
plt.xlabel('n')
plt.ylabel('|x_n - 1/2|')
plt.title('Newtons method for mu(x) = 1/5*(x-1/2)')
plt.legend(['Error'])
plt.show()
with np.printoptions(precision=20, suppress=True):
print(np.abs(x_his - (1/2))[:20])
f_coeffs = np.poly(5*[1/2])
f_prime_coeffs = np.polyder(f_coeffs)
f_prime_prime_coeffs = np.polyder(f_prime_coeffs)
f = lambda x : np.polyval(f_coeffs, x)
f_prime = lambda x : np.polyval(f_prime_coeffs, x)
f_prime_prime = lambda x : np.polyval(f_prime_prime_coeffs, x)
mu = lambda x : 0 if f_prime(x) == 0 else f(x) / f_prime(x) # protect against divide by zero
mu_prime = lambda x : (f_prime(x)**2 - f(x)*f_prime_prime(x)) / (f_prime(x))**2
root, x_his, fx_his = newtons_method(mu, mu_prime, 0.4, tol=MACHINE_EPSILON)
x_his = np.array(x_his)
plt.plot(np.arange(len(x_his)), np.abs(x_his - (1/2)))
plt.xlabel('n')
plt.ylabel('|x_n - 1/2|')
plt.title('Newtons method for mu(x) = f(x) / f\'(x) (numpy poly edition)')
plt.legend(['Error'])
plt.show()
with np.printoptions(precision=20, suppress=True):
print(np.abs(x_his - (1/2))[:20])
###Output
[0.09999999999999998 0.00000000000009431345]
###Markdown
Here, the error converges in both implementations. This is probably because $\frac{1}{3}$ cannot be represented exactly in binary floating point, whereas $\frac{1}{2}$ can. Therefore, the precision required is finite, as 0.5 can be represented exactly in a computer.

Problem 4
###Code
happiness = lambda dog : dog**2 - 0.5*(np.exp(dog) - 1)
xs = np.linspace(0, 3)
plt.plot(xs, happiness(xs))
plt.xlabel('# dogs')
plt.ylabel('Happiness')
plt.title('Trying to achieve happiness using man\'s best friend')
plt.show()
###Output
_____no_output_____
###Markdown
A little sad, but understandable, that between owning 0.001 and 0.5ish of a dog, happiness is actually negative! What does negative happiness even look like? Could 2020 have reached this level?? To maximize this function, we can simply find the root of the derivative. If only we had a function to do this... oh wait! Recalling all the way back to the last problem, we have Newton's Method! So we need to find the first derivative and the second derivative, which is easy enough to do by hand
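Working that out by hand: with happiness $H(d) = d^2 - \tfrac{1}{2}(e^d - 1)$,

$$H'(d) = 2d - \tfrac{1}{2}e^d, \qquad H''(d) = 2 - \tfrac{1}{2}e^d,$$

and the code below hands $H'$ and $H''$ to `newtons_method` to locate the critical point.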
###Code
happiness_prime = lambda dog : 2*dog - 0.5*np.exp(dog)
happiness_double_prime = lambda dog : 2 - 0.5*np.exp(dog)
root, x_his, _ = newtons_method(happiness_prime, happiness_double_prime, 1.5, tol=MACHINE_EPSILON)
with np.printoptions(precision=20, suppress=True):
print(np.array(x_his)[:20])
plt.plot(xs, happiness(xs))
plt.plot(root, happiness(root), 'o', markersize=15)
plt.xlabel('# dogs')
plt.ylabel('Happiness')
plt.title('The quest to get maximum utility out of dogs')
plt.legend(['Happiness', 'Maximum happiness'])
plt.show()
print("Time to go buy %s dogs!"%root)
###Output
Time to go buy 2.1532923641103494 dogs!
|
adhoc/predict_calculated_fields.ipynb | ###Markdown
Predict calculated fields

Identify the most important features in predicting key calculated fields, using random forest variable importance.

Setup

Imports
###Code
import pandas as pd
import numpy as np
from sklearn import ensemble
from matplotlib import pyplot as plt
import seaborn as sns
###Output
_____no_output_____
###Markdown
Columns
###Code
COLS = [
'DSI',
'E00200',
'E00300',
'E00400',
'E00600',
'E00650',
'E00700',
'E00800',
'E00900',
'E01100',
'E01200',
'E01400',
'E01500',
'E01700',
'E02000',
'E02100',
'E02300',
'E02400',
'E03150',
'E03210',
'E03220',
'E03230',
'E03240',
'E03270',
'E03290',
'E03300',
'E03400',
'E03500',
'E07240',
'E07260',
'E07300',
'E07400',
'E07600',
'E09700',
'E09800',
'E09900',
'E11200',
'E17500',
'E18400',
'E18500',
'E19200',
'E19800',
'E20100',
'E20400',
'E24515',
'E24518',
'E26270',
'E27200',
'E32800',
'E58990',
'E62900',
'E87521',
'E87530',
'EIC',
'F2441',
'F6251',
'FDED',
'MARS',
'MIDR',
'N24',
'P08000',
'P22250',
'P23250',
'S006',
'XTOT']
CALCULATED_COLS = [
'E00100',
'E04600',
'P04470',
'E04800',
'E62100',
'E05800',
'E08800',
'E59560',
'E26190'
]
AGG_RECIDS = [999996, 999997, 999998, 999999]
###Output
_____no_output_____
###Markdown
Load
###Code
puf = pd.read_csv('~/puf2011.csv', usecols=COLS + CALCULATED_COLS + ['RECID'])
puf = puf[~puf.RECID.isin(AGG_RECIDS)].drop('RECID', axis=1)
###Output
_____no_output_____
###Markdown
Predict
###Code
rf = ensemble.RandomForestRegressor(n_estimators=50, random_state=0)
importances_list = []
%%time
for col in CALCULATED_COLS:
print('Analyzing ' + col + '...')
rf.fit(puf[COLS], puf[col])
importance = pd.DataFrame({
'x': COLS,
'y': col,
'importance': rf.feature_importances_
})
importances_list.append(importance)
importances = pd.concat(importances_list)
importances.sort_values('importance', ascending=False).head(10)
importances['importance_rank'] = importances.groupby('y').importance.rank(ascending=False)
max_imp = importances.groupby('x').importance_rank.max()
mean_imp = importances.groupby('x').importance_rank.mean()
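# importance_rank: 1 = most important raw feature for that calculated field;
# max_imp and mean_imp summarize each feature's worst and average rank across
# the calculated fields, so sorting by mean_imp puts the strongest overall
# predictors at the top of the pivot below.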
imp_pivot = importances.pivot_table(
index='x', columns='y', values='importance_rank').loc[
mean_imp.sort_values(ascending=True).index.values]
imp_pivot.head(10)
sns.heatmap(imp_pivot.head(5), annot=True)
plt.title('Feature importance rank of raw Xs predicting calculated Xs')
plt.show()
###Output
_____no_output_____
###Markdown
Again without `S006`

Since we're not using the synthesized version anyway.
###Code
cols_no_s006 = [c for c in COLS if c != 'S006']  # keep list order so it pairs with feature_importances_
importances_list2 = []
%%time
for col in CALCULATED_COLS:
print('Analyzing ' + col + '...')
    rf.fit(puf[cols_no_s006], puf[col])
importance = pd.DataFrame({
        'x': cols_no_s006,
'y': col,
'importance': rf.feature_importances_
})
importances_list2.append(importance)
importances2 = pd.concat(importances_list2)
importances2.sort_values('importance', ascending=False).head(10)
importances2['importance_rank'] = importances2.groupby('y').importance.rank(ascending=False)
max_imp2 = importances2.groupby('x').importance_rank.max()
mean_imp2 = importances2.groupby('x').importance_rank.mean()
imp_pivot2 = importances2.pivot_table(
index='x', columns='y', values='importance_rank').loc[
mean_imp2.sort_values(ascending=True).index.values]
imp_pivot2.head(10)
sns.heatmap(imp_pivot2.head(5), annot=True)
plt.title('Feature importance rank of raw Xs predicting calculated Xs')
plt.show()
###Output
_____no_output_____ |
Data-Science-Virtual-Machine/Samples/Notebooks/SQLDW_Explorations.ipynb | ###Markdown
License Information
###Code
This sample Jupyter Notebook is shared by Microsoft under the MIT license. Please check the LICENSE.txt file in the directory where this Jupyter Notebook is stored for license information and additional details.
###Output
_____no_output_____
###Markdown
NYC Data wrangling using Jupyter Notebook and SQL Data Warehouse This notebook demonstrates data exploration and feature generation using Python and SQL queries for data stored in Azure SQL Data Warehouse. We start with reading a sample of the data into a Pandas data frame and visualizing and exploring the data. We show how to use Python to execute SQL queries against the data and manipulate data directly within the Azure SQL Data Warehouse.This IPNB is accompanying material to the data Azure Data Science in Action walkthrough document (https://azure.microsoft.com/en-us/documentation/articles/machine-learning-data-science-process-sql-walkthrough/) and uses a sample of the New York City Taxi dataset (http://www.andresmh.com/nyctaxitrips/). Read data in Pandas frame for visualizations We start with loading a sample of the data in a Pandas data frame and performing some explorations on the sample. We assume that the nyctaxi table have been created and loaded from the taxi dataset mentioned earlier. If you haven't done this already please refer to the "LoadDataintoDW" notebook. Import required packages in this experiment
###Code
import pandas as pd
from pandas import Series, DataFrame
import numpy as np
import matplotlib.pyplot as plt
from time import time
import pyodbc
import os
import tables
import time
###Output
_____no_output_____
###Markdown
Set plot inline
###Code
%matplotlib inline
###Output
_____no_output_____
###Markdown
Initialize Database Credentials
###Code
SERVER_NAME = 'ENTER SERVER_NAME '
DATABASE_NAME = 'ENTER DATABASE_NAME'
USERID = 'ENTER USERID'
PASSWORD = 'ENTER PASSWORD'
DB_DRIVER = 'SQL Server Native Client 11.0'
NYCTAXITABLE_NAME = "nyctaxi"
NYCTAXISCHEMA = "nyctaxinb"
###Output
_____no_output_____
###Markdown
Create Database Connection
###Code
driver = 'DRIVER={' + DB_DRIVER + '}'
server = 'SERVER=' + SERVER_NAME
database = 'DATABASE=' + DATABASE_NAME
uid = 'UID=' + USERID
pwd = 'PWD=' + PASSWORD
CONNECTION_STRING = ';'.join([driver,server,database,uid,pwd, 'Encrypt=yes;TrustServerCertificate=no'])
print CONNECTION_STRING
conn = pyodbc.connect(CONNECTION_STRING)
###Output
_____no_output_____
###Markdown
Report number of rows and columns in table
###Code
nrows = pd.read_sql('''SELECT SUM(rows) FROM sys.partitions WHERE object_id = OBJECT_ID('nyctaxinb.nyctaxi')''', conn)
print 'Total number of rows = %d' % nrows.iloc[0,0]
ncols = pd.read_sql('''SELECT count(*) FROM information_schema.columns WHERE table_name = ('nyctaxi') AND
table_schema = ('nyctaxinb')''', conn)
print 'Total number of columns = %d' % ncols.iloc[0,0]
###Output
Total number of rows = 170399
Total number of columns = 23
###Markdown
Read-in data from SQL Data Warehouse
###Code
t0 = time.time()
#load only a small percentage of the data for some quick visuals
df1 = pd.read_sql('''select top 10000 * from nyctaxinb.nyctaxi t ''', conn)
t1 = time.time()
print 'Time to read the sample table is %f seconds' % (t1-t0)
print 'Number of rows and columns retrieved = (%d, %d)' % (df1.shape[0], df1.shape[1])
###Output
Time to read the sample table is 1.036000 seconds
Number of rows and columns retrieved = (10000, 23)
###Markdown
Descriptive Statistics Now we can explore the sample data. We start with looking at descriptive statistics for trip distance:
###Code
df1['trip_distance'].describe()
###Output
_____no_output_____
###Markdown
Box Plot Next we look at the box plot for trip distance to visualize quantiles
###Code
df1.boxplot(column='trip_distance',return_type='dict')
###Output
_____no_output_____
###Markdown
Distribution Plot
###Code
fig = plt.figure()
ax1 = fig.add_subplot(1,2,1)
ax2 = fig.add_subplot(1,2,2)
df1['trip_distance'].plot(ax=ax1,kind='kde', style='b-')
df1['trip_distance'].hist(ax=ax2, bins=100, color='k')
###Output
_____no_output_____
###Markdown
Binning trip_distance
###Code
trip_dist_bins = [0, 1, 2, 4, 10, 1000]
df1['trip_distance']
trip_dist_bin_id = pd.cut(df1['trip_distance'], trip_dist_bins)
trip_dist_bin_id
###Output
_____no_output_____
###Markdown
Bar and Line Plots The distribution of the trip distance values after binning looks like the following:
###Code
pd.Series(trip_dist_bin_id).value_counts()
###Output
_____no_output_____
###Markdown
We can plot the above bin distribution in a bar or line plot as below
###Code
pd.Series(trip_dist_bin_id).value_counts().plot(kind='bar')
pd.Series(trip_dist_bin_id).value_counts().plot(kind='line')
###Output
_____no_output_____
###Markdown
We can also use bar plots for visualizing the sum of passengers for each vendor as follows
###Code
vendor_passenger_sum = df1.groupby('vendor_id').passenger_count.sum()
print vendor_passenger_sum
vendor_passenger_sum.plot(kind='bar')
###Output
_____no_output_____
###Markdown
Scatterplot We plot a scatter plot between trip_time_in_secs and trip_distance to see if there is any correlation between them.
###Code
plt.scatter(df1['trip_time_in_secs'], df1['trip_distance'])
###Output
_____no_output_____
###Markdown
To further drill down on the relationship we can plot distribution side by side with the scatter plot (while flipping independentand dependent variables) as follows
###Code
df1_2col = df1[['trip_time_in_secs','trip_distance']]
pd.scatter_matrix(df1_2col, diagonal='hist', color='b', alpha=0.7, hist_kwds={'bins':100})
###Output
_____no_output_____
###Markdown
Similarly we can check the relationship between rate_code and trip_distance using a scatter plot
###Code
plt.scatter(df1['passenger_count'], df1['trip_distance'])
###Output
_____no_output_____
###Markdown
Correlation Pandas 'corr' function can be used to compute the correlation between trip_time_in_secs and trip_distance as follows:
###Code
df1[['trip_time_in_secs', 'trip_distance']].corr()
###Output
_____no_output_____
###Markdown
Sub-Sampling the Data in SQL In this section we used a sampled table we pregenerated by joining Trip and Fare data and taking a sub-sample of the full dataset. The sample data table named '' has been created and the data is loaded when you run the PowerShell script. Report number of rows and columns in the sampled table
###Code
nrows = pd.read_sql('''SELECT SUM(rows) FROM sys.partitions WHERE object_id = OBJECT_ID('nyctaxinb.nyctaxi')''', conn)
print 'Number of rows in sample = %d' % nrows.iloc[0,0]
ncols = pd.read_sql('''SELECT count(*) FROM information_schema.columns WHERE table_name = ('nyctaxi') AND
table_schema = ('nyctaxinb')''', conn)
print 'Number of columns in sample = %d' % ncols.iloc[0,0]
###Output
Number of rows in sample = 170399
Number of columns in sample = 23
###Markdown
We show some examples of exploring data using SQL in the sections below. We also show some useful visualizatios that you can use below. Note that you can read the sub-sample data in the table above in Azure Machine Learning directly using the SQL code in the reader module. Exploration in SQL In this section, we would be doing some explorations using SQL on the 1% sample data (that we created above). Tipped/Not Tipped Distribution
###Code
query = '''
SELECT tipped, count(*) AS tip_freq
FROM nyctaxinb.nyctaxi
GROUP BY tipped
'''
pd.read_sql(query, conn)
###Output
_____no_output_____
###Markdown
Tip Class Distribution
###Code
query = '''
SELECT tip_class, count(*) AS tip_freq
FROM nyctaxinb.nyctaxi
GROUP BY tip_class
'''
tip_class_dist = pd.read_sql(query, conn)
tip_class_dist
###Output
_____no_output_____
###Markdown
Plot the tip distribution by class
###Code
tip_class_dist['tip_freq'].plot(kind='bar')
###Output
_____no_output_____
###Markdown
Daily distribution of trips
###Code
query = '''
SELECT CONVERT(date, dropoff_datetime) as date, count(*) as c
from nyctaxinb.nyctaxi
group by CONVERT(date, dropoff_datetime)
'''
pd.read_sql(query,conn)
###Output
_____no_output_____
###Markdown
Trip distribution per medallion
###Code
query = '''select medallion,count(*) as c from nyctaxinb.nyctaxi group by medallion'''
pd.read_sql(query,conn)
###Output
_____no_output_____
###Markdown
Trip distribution by medallion and hack license
###Code
query = '''select medallion, hack_license,count(*) from nyctaxinb.nyctaxi group by medallion, hack_license'''
pd.read_sql(query,conn)
###Output
_____no_output_____
###Markdown
Trip time distribution
###Code
query = '''select trip_time_in_secs, count(*) from nyctaxinb.nyctaxi group by trip_time_in_secs order by count(*) desc'''
pd.read_sql(query,conn)
###Output
_____no_output_____
###Markdown
Trip distance distribution
###Code
query = '''select floor(trip_distance/5)*5 as tripbin, count(*) from nyctaxinb.nyctaxi
group by floor(trip_distance/5)*5 order by count(*) desc'''
pd.read_sql(query,conn)
###Output
_____no_output_____
###Markdown
Payment type distribution
###Code
query = '''select payment_type,count(*) from nyctaxinb.nyctaxi group by payment_type'''
pd.read_sql(query,conn)
query = '''select TOP 10 * from nyctaxinb.nyctaxi'''
pd.read_sql(query,conn)
###Output
_____no_output_____ |
jupyter_notebook/humam_sr_ch2.ipynb | ###Markdown
Statistical Rethinking Second EditionIni adalah kesimpulan yang didapatkan dari buku *Statistical Rethinking* tulisan Professor Richard McElreath. Buku ini berisi tentang pendekatan statistik untuk penelitian dengan cara Bayesian. Dalam buku ini, dapat diketemukan beberapa contoh kasus dan penerapan-nya dalam kode R yang akan kami terjemahkan ke dalam python.
###Code
import numpy as np
import itertools
###Output
_____no_output_____
###Markdown
Chapter 2: Small Worlds and Large WorldsPada bab ini, penulis berargumen bahwa dalam setiap penelitian selalu ada dua buah dunia. Pertama adalah dunia nyata dimana kasus terjadi dan yang kedua adalah dunia tempat penelitian dijalankan. Beberapa penelitian dalam buku ini adalah membentuk model yang juga menjadi representasi dari *small word*. Dalam upaya mengambil beberapa bagian dari dunia nyata dan memodelkannya dalam ruang isolasi, dibutuhkan asumsi-asumsi yang berkaitan dengan upaya tersebut. Asumsi ini ada karena *small world* tidak mungkin mereplikasi apa yang sebenernya terjadi--entah karena memang tidak ada model yang cocok atau membutuhkan usaha komputasi yang besar--dan memudahkan perhitungan untuk beberapa kasus. **Soal 1** Ada empat buah kelereng bewarna biru dan putih dan sebuah kantung. Tidak diketahui berapa jumlah kelerang yang biru dan yang putih. Estimasi jumlah kelereng berwarna biru dan putih berdasarkan hasil yang ditarik (dan dikembalikan lagi) dari kantung.
###Code
collection = itertools.combinations_with_replacement("WB", 4)
for num, val in enumerate(collection):
print(num, val)
###Output
0 ('W', 'W', 'W', 'W')
1 ('W', 'W', 'W', 'B')
2 ('W', 'W', 'B', 'B')
3 ('W', 'B', 'B', 'B')
4 ('B', 'B', 'B', 'B')
###Markdown
Setidaknya ada 5 kemungkinan bagaimana komposisi kelereng dalam kantung. Dalam buku ini, penulis mengatakan ini adalah dugaan dari komposisi dalam kantung. Tugas kami adalah mencari mana yang paling masuk akal. Asumsilah bahwa penarikan kelereng menghasilkan **Biru**, **Putih**, dan **Biru**.Mari kami telaah satu dari lima dugaan yaitu nomor 1. Dalam dugaan nomor 1, ada 4 kemungkinan yang terjadi saat pengambilan kelereng. dimana ada 3 warna putih dan satu warna biru. Pada pengambilan kelereng yang kedua, masing-masing kemungkinan akan memiliki 4 kemungkinan juga sehingga total kemungkinan menjadi $4^2=16$. Pada pengambilan ketiga masing masing dari 16 kemungikinan ini akan memiliki 4 kemungkinan lagi sehingga total kemungkinan adalah $4^3=64$
###Code
collection1 = itertools.product('WWWB', repeat=1)
print("First draw")
for num, val in enumerate(collection1):
print(num, val)
collection2 = itertools.product('WWWB', repeat=2)
print("Second draw")
for num, val in enumerate(collection2):
print(num, val)
collection3 = itertools.product('WWWB', repeat=3)
print("Third draw")
for num, val in enumerate(collection3):
print(num, val)
###Output
First draw
0 ('W',)
1 ('W',)
2 ('W',)
3 ('B',)
Second draw
0 ('W', 'W')
1 ('W', 'W')
2 ('W', 'W')
3 ('W', 'B')
4 ('W', 'W')
5 ('W', 'W')
6 ('W', 'W')
7 ('W', 'B')
8 ('W', 'W')
9 ('W', 'W')
10 ('W', 'W')
11 ('W', 'B')
12 ('B', 'W')
13 ('B', 'W')
14 ('B', 'W')
15 ('B', 'B')
Third draw
0 ('W', 'W', 'W')
1 ('W', 'W', 'W')
2 ('W', 'W', 'W')
3 ('W', 'W', 'B')
4 ('W', 'W', 'W')
5 ('W', 'W', 'W')
6 ('W', 'W', 'W')
7 ('W', 'W', 'B')
8 ('W', 'W', 'W')
9 ('W', 'W', 'W')
10 ('W', 'W', 'W')
11 ('W', 'W', 'B')
12 ('W', 'B', 'W')
13 ('W', 'B', 'W')
14 ('W', 'B', 'W')
15 ('W', 'B', 'B')
16 ('W', 'W', 'W')
17 ('W', 'W', 'W')
18 ('W', 'W', 'W')
19 ('W', 'W', 'B')
20 ('W', 'W', 'W')
21 ('W', 'W', 'W')
22 ('W', 'W', 'W')
23 ('W', 'W', 'B')
24 ('W', 'W', 'W')
25 ('W', 'W', 'W')
26 ('W', 'W', 'W')
27 ('W', 'W', 'B')
28 ('W', 'B', 'W')
29 ('W', 'B', 'W')
30 ('W', 'B', 'W')
31 ('W', 'B', 'B')
32 ('W', 'W', 'W')
33 ('W', 'W', 'W')
34 ('W', 'W', 'W')
35 ('W', 'W', 'B')
36 ('W', 'W', 'W')
37 ('W', 'W', 'W')
38 ('W', 'W', 'W')
39 ('W', 'W', 'B')
40 ('W', 'W', 'W')
41 ('W', 'W', 'W')
42 ('W', 'W', 'W')
43 ('W', 'W', 'B')
44 ('W', 'B', 'W')
45 ('W', 'B', 'W')
46 ('W', 'B', 'W')
47 ('W', 'B', 'B')
48 ('B', 'W', 'W')
49 ('B', 'W', 'W')
50 ('B', 'W', 'W')
51 ('B', 'W', 'B')
52 ('B', 'W', 'W')
53 ('B', 'W', 'W')
54 ('B', 'W', 'W')
55 ('B', 'W', 'B')
56 ('B', 'W', 'W')
57 ('B', 'W', 'W')
58 ('B', 'W', 'W')
59 ('B', 'W', 'B')
60 ('B', 'B', 'W')
61 ('B', 'B', 'W')
62 ('B', 'B', 'W')
63 ('B', 'B', 'B')
###Markdown
Tentu saja tidak semua kemungkinan diatas didukung oleh data yang kami miliki. Oleh karena itu, hasil yang terakhir yang berjumlah 64 akan disaring dan akan dikumpulkan mana yang cocok dengan data yang dimiliki. Muncul 3 buah kemungkinan yang didukung oleh data. Ini hanya satu dari banyak lima dugaan yang ada. Mari kami periksa bagaimana dugaan-dugaan yang kami miliki.
###Code
def conjecture_fitting(conjecture, data):
collection = itertools.product(conjecture, repeat=len(data))
arr_buff = []
for num, val in enumerate(collection):
arr = list(val)
if arr == data:
arr_buff.append(1)
return len(arr_buff)
print('Conjecture WWWW ',conjecture_fitting('WWWW', ['B', 'W', 'B']))
print('Conjecture WWWB ',conjecture_fitting('WWWB', ['B', 'W', 'B']))
print('Conjecture WWBB ',conjecture_fitting('WWBB', ['B', 'W', 'B']))
print('Conjecture WBBB ',conjecture_fitting('WBBB', ['B', 'W', 'B']))
print('Conjecture BBBB ',conjecture_fitting('BBBB', ['B', 'W', 'B']))
###Output
Conjecture WWWW 0
Conjecture WWWB 3
Conjecture WWBB 8
Conjecture WBBB 9
Conjecture BBBB 0
###Markdown
Menurut perhitungan yang kami buat, dugaan yang paling sesuai dengan data adalah dugaan ke 3 karena memiliki jumlah kemungkinan kombinasi yang sama dengan data paling tinggi dibandingkan dugaan lain. Oleh karena itu, memilih komposisi kelereng dugaan ke 3 adalah hal yang paling masuk akal. Asumsilah kami mengambil satu buah kelereng dan warananya adalah biru. Kita dapat menghitung ulang menggunakan cara yang sama dengan total kemungkinan adalah $4^4= 256$. Kita akan membandingkan dua cara, yaitu mengulang ulang perhitungan untuk 4 buah penarikan dan mengalikan kemungkinan sebelumnya (saat hanya memiliki data 3 buah penarikan) dengan kemungkinan yang baru (3 + 1 penarikan).
###Code
print("Recounting")
data = ['B', 'W', 'B', 'B']
print('Conjecture WWWW ',conjecture_fitting('WWWW', data))
print('Conjecture WWWB ',conjecture_fitting('WWWB', data))
print('Conjecture WWBB ',conjecture_fitting('WWBB', data))
print('Conjecture WBBB ',conjecture_fitting('WBBB', data))
print('Conjecture BBBB ',conjecture_fitting('BBBB', data))
print("\nUpdate Counting P N T")
prev_data = ['B', 'W', 'B']
next_data = ['B']
def update_conjecture(conjecture, prev_data, next_data):
prev_con = conjecture_fitting(conjecture, prev_data)
next_con = conjecture_fitting(conjecture, next_data)
return prev_con, next_con
prev_con, next_con = update_conjecture('WWWW', prev_data, next_data)
print('Conjecture WWWW ', prev_con, next_con, prev_con * next_con)
prev_con, next_con = update_conjecture('WWWB', prev_data, next_data)
print('Conjecture WWWB ', prev_con, next_con, prev_con * next_con)
prev_con, next_con = update_conjecture('WWBB', prev_data, next_data)
print('Conjecture WWBB ', prev_con, next_con, prev_con * next_con)
prev_con, next_con = update_conjecture('WBBB', prev_data, next_data)
print('Conjecture WBBB ', prev_con, next_con, prev_con * next_con)
prev_con, next_con = update_conjecture('BBBB', prev_data, next_data)
print('Conjecture BBBB ', prev_con, next_con, prev_con * next_con)
###Output
Recounting
Conjecture WWWW 0
Conjecture WWWB 3
Conjecture WWBB 16
Conjecture WBBB 27
Conjecture BBBB 0
Update Counting P N T
Conjecture WWWW 0 0 0
Conjecture WWWB 3 1 3
Conjecture WWBB 8 2 16
Conjecture WBBB 9 3 27
Conjecture BBBB 0 4 0
###Markdown
It turns out that recounting from scratch and updating the previous count give the same result. Computationally, the updating method is superior because it avoids the $4^n$ complexity that a full recount incurs.

Next, we are asked for the proportion of water to land on a globe. The information given is the result of tossing the globe and catching it: the position of the right-hand ring finger determines whether the toss landed on land or on water. This is similar to the coin-flipping case, except that for a coin people usually assume fairness — heads and tails are equally likely — whereas for the globe the common assumption is only that there is more water than land. We will model this tossing while translating the author's code from R into Python.

The number of tosses landing on water $W$ is governed by the proportion $p$, while landing on land $L$ is governed by $1-p$. Since this is a binomial setting, we apply the binomial distribution, which can be written as

$$Pr(W, L|p) = \frac{(W+L)!}{W!L!}p^W(1-p)^L$$

Below is a class for the tossing experiment, with helper methods to make the analysis easier.
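As a quick sanity check of the formula (a sketch with hypothetical counts, $W = 6$ and $L = 3$ at $p = 0.5$), the explicit expression agrees with `scipy.stats.binom`:

```python
from math import comb
from scipy.stats import binom

W, L, p = 6, 3, 0.5
manual = comb(W + L, W) * p**W * (1 - p)**L
print(manual, binom.pmf(W, W + L, p))  # both print 0.1640625
```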
###Code
import numpy as np
from scipy.stats import binom, norm, beta
from sklearn.preprocessing import MinMaxScaler
from scipy.special import comb
import matplotlib.pyplot as plt
class globe_toss:
def __init__(self):
self.toss_history = []
self.probability_land = 0.5
self.probability_water = 0.5
self.prior_function = None
def toss(self, number_toss):
result = np.random.choice(["W", "L"], number_toss, p=[self.probability_water, self.probability_land])
self.toss_history = np.append(self.toss_history, result)
return
def reset_history(self):
self.toss_history = []
return
def get_water_total(self):
return np.count_nonzero(self.toss_history == "W")
def get_land_total(self):
return np.count_nonzero(self.toss_history == "L")
def get_binom_toss(self):
k = self.get_water_total()
n = len(self.toss_history)
p = self.probability_water
return binom.pmf(k, n, p)
def get_binom_grid(self, x_grid):
k = self.get_water_total()
n = len(self.toss_history)
return [binom.pmf(k, n, p) for p in x_grid]
def analytical_solution(self, n_grid=100):
a = self.get_water_total()
b = self.get_land_total()
return [beta.pdf(i, a + 1, b + 1) for i in np.linspace(0,1,n_grid)]
def grid_calculate_posterior(self, n_grid=100):
x_grid = np.linspace(0, 1, n_grid)
if self.prior_function is None:
y_prior = np.ones(n_grid)
else:
y_prior = np.array([self.prior_function(i) for i in x_grid])
likelihood = self.get_binom_grid(x_grid)
posterior = np.multiply(y_prior, likelihood) / np.sum(np.multiply(y_prior, likelihood))
return x_grid, self.normalize_array(posterior)
def normalize_array(self, array):
array = np.array(array)
arr_max = np.max(array)
arr_min = np.min(array)
interval = arr_max - arr_min
result = (array - arr_min) / interval
return result
def register_prior_function(self, prior_function):
self.prior_function = prior_function
def grid_approx_plot(self):
plt.figure(1, figsize = (11,6))
plt.title("Posterior Distribution (Grid Approximation)")
axes = plt.gca()
axes.set_xlim([-0.1, 1.1])
axes.set_xlabel("Probability of Water")
axes.set_ylabel("Posterior")
axes.grid()
x_grid, posterior = self.grid_calculate_posterior(5)
plt.plot(x_grid, posterior, label='5 Approx Normalized')
x_grid, posterior = self.grid_calculate_posterior(10)
plt.plot(x_grid, posterior, label='10 Approx Normalized')
x_grid, posterior = self.grid_calculate_posterior(25)
plt.plot(x_grid, posterior, label='25 Approx Normalized')
x_grid, posterior = self.grid_calculate_posterior(100)
plt.plot(x_grid, posterior, label='100 Approx Normalized')
x_grid = np.linspace(0,1,100)
plt.plot(
x_grid,
self.normalize_array(np.array(self.analytical_solution(100))),
label='Analytical Normalized'
)
plt.legend()
    def quadratic_approximation_plot(self, n_grid=100):
        # Approximate the posterior with a normal curve: the mean sits at
        # the posterior mode (MAP, assuming the flat prior) and the spread
        # is borrowed from the exact Beta posterior.
        w = self.get_water_total()
        l = self.get_land_total()
        mode = w / (w + l)
        std = beta.std(w + 1, l + 1)
        x_grid = np.linspace(0, 1, n_grid)
        posterior = norm.pdf(x_grid, mode, std)
        plt.figure(1, figsize = (11,6))
        plt.title("Posterior Distribution (Quadratic Approximation)")
        axes = plt.gca()
        axes.set_xlim([-0.1, 1.1])
        axes.set_xlabel("Probability of Water")
        axes.set_ylabel("Posterior")
        axes.grid()
        plt.plot(x_grid, self.normalize_array(posterior))
toss1 = globe_toss()
# Override the history so it match with book's example
toss1.toss_history = np.array(['W', 'L', 'W', 'W', 'W', 'L', 'W', 'L', 'W'])
toss1.get_binom_toss()
###Output
_____no_output_____
###Markdown
In probability theory, the probability of two events occurring together can be written as

$$Pr(a, b) = Pr(a|b) Pr(b)$$

> The probability that events $a$ and $b$ both occur is the product of the probability of $a$ given that $b$ occurs and the probability that $b$ occurs.

In our case $a$ corresponds to $W, L$, the proportions of water and land — they can be combined, because if $W$ is known then $L$ is known too — and $b$ is the probability of landing on water, $p$. What we want is the probability of landing on water, $p$, given the observed $W$ and $L$. So we can write

$$\begin{align} &Pr(W, L, p) = Pr(W, L|p) Pr(p) \\ &Pr(W, L, p) = Pr(p | W, L) Pr(W, L) \\ &Pr(W, L|p) Pr(p) = Pr(p | W, L) Pr(W, L) \\ &Pr(p|W, L) = \frac{Pr(W, L| p) Pr(p)}{Pr(W, L)} \end{align}$$

There are four components in the equation above:
- $Pr(W, L|p)$ is the **likelihood** of the data we currently have
- $Pr(p)$ is the **prior**, the probability before updating
- $Pr(W, L)$ is the **averaging** (normalizing) term that keeps the probability from exceeding one
- $Pr(p|W, L)$ is the **posterior**, the probability obtained after updating with the available data

Armed with this, we can estimate the probability of landing on water. Three numerical methods can be applied for this estimation: *Grid Approximation*, *Quadratic Approximation*, and *Markov Chain Monte Carlo*. We start with Grid Approximation, as it is quite intuitive.
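Condensed to its essentials, grid approximation is only a few lines. A minimal sketch, assuming a flat prior and hypothetical counts of 6 water landings in 9 tosses (the `globe_toss` class above wraps these same steps):

```python
import numpy as np

W, L = 6, 3                               # hypothetical data: 6 water in 9 tosses
p_grid = np.linspace(0, 1, 20)            # candidate values for p
prior = np.ones_like(p_grid)              # flat prior Pr(p)
likelihood = p_grid**W * (1 - p_grid)**L  # Pr(W, L | p), up to a constant
posterior = prior * likelihood
posterior /= posterior.sum()              # normalize so the grid sums to one
print(p_grid[posterior.argmax()])         # the peak lands near W / (W + L)
```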
###Code
toss1.grid_approx_plot()
###Output
_____no_output_____
###Markdown
Grid approximation builds a vector of $n$ points on $[0, 1]$ and plugs each point into the equation. The finer the grid, the more accurate the result.

> Why is it that, without normalization, the curve droops lower and lower as the grid size grows?

Quadratic approximation instead searches for the normal distribution whose shape best matches the posterior. This amounts to finding the most representative mean and standard deviation. We take the mean to be the peak of the estimate: that peak becomes the mean of the normal distribution.

Chapter 2 Practice

**2M1**
###Code
toss2 = globe_toss()
toss2.toss_history = np.array(['W', 'W', 'W'])
toss2.grid_approx_plot()
toss3 = globe_toss()
toss3.toss_history = np.array(['W', 'W', 'W', 'L'])
toss3.grid_approx_plot()
toss4 = globe_toss()
toss4.toss_history = np.array(['L', 'W', 'W', 'L', 'W', 'W', 'L'])
toss4.grid_approx_plot()
###Output
_____no_output_____
###Markdown
**Note** Why does a grid approximation with a small grid size such as 5 have its peak at a different location than the approximations with larger sizes? This happens because of the normalization, which forces every curve's peak to the value 1.

**2M2**
###Code
def step_function(x):
if x < 0.5:
return 0
elif x >= 0.5:
return 1
toss5 = globe_toss()
toss5.register_prior_function(step_function)
toss5.toss_history = np.array(['W', 'W', 'W', 'L'])
toss5.grid_approx_plot()
###Output
_____no_output_____
###Markdown
As the approximation result shows, there is a cutoff at 0.5: the prior is 0 for values below 0.5, which produces a plot that is flat at 0 before 0.5.

**2M3**. The description above maps onto two probabilities. The first is which globe is tossed, Earth or Mars, each with probability 0.5. The second is where the toss lands, on land or on water. For Earth the probability of landing on land is 0.3 and on water 0.7; for Mars, landing on water is 0 and landing on land is 1.

$Pr(Earth|land)$ can be obtained from $\frac{Pr(Earth, land)}{Pr(land)}$. $Pr(land)$ is obtained by summing the land probabilities over the globes and dividing by their number:

$$Pr_{land} = \frac{Pr_{land}^{mars} + Pr_{land}^{earth}}{2} = \frac{1 + 0.3}{2} = 0.65$$

$$Pr(Earth, land) = Pr(Earth) Pr_{land}^{earth} = 0.15$$

$$Pr(Earth|land) = \frac{0.15}{0.65} = 0.23$$

**2M4** Enumerating the cards gives 6 possibilities — the product of the number of cards and the number of sides per card. The question is the probability that the bottom side of the drawn card is black given that the top side is black.

The probability of drawing a black side is $0.5$. But because what is asked is the probability that the bottom side is black, the card sample reduces to just 2 cards: the one that is black on both sides and the one with one black and one white side.

$$\begin{align}Pr(Black, Black) &= Pr(Black | Black) Pr(Black) \\ \frac{1}{3} &= Pr(Black | Black) \frac{1}{2} \\ Pr(Black | Black) &= \frac{1}{3} \cdot 2 \\ Pr(Black | Black) &= \frac{2}{3} \end{align}$$
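That $\frac{2}{3}$ can also be verified by brute-force enumeration of card sides — a minimal sketch (the cell below takes a more general counting route):

```python
cards = [("B", "B"), ("B", "W"), ("W", "W")]
# A draw picks a card and an orientation; each side is equally likely on top.
sides = [pair for card in cards for pair in (card, card[::-1])]
top_black = [s for s in sides if s[0] == "B"]
both_black = [s for s in top_black if s[1] == "B"]
print(len(both_black), "/", len(top_black))  # 2 / 3
```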
###Code
import itertools, pprint
import numpy as np
pp = pprint.PrettyPrinter(indent=4)
possible_draw = list(itertools.product('BW', repeat=2))  # all four ordered color pairs
first_draw_possibilities = [np.count_nonzero(np.array(i)[0] == 'B') for i in possible_draw]   # 1 if the first side is 'B'
second_draw_possibilities = [np.count_nonzero(np.array(i)[1] == 'B') for i in possible_draw]  # 1 if the second side is 'B'
for num, i in enumerate(possible_draw):
    print(num, i, first_draw_possibilities[num], second_draw_possibilities[num])
###Output
_____no_output_____ |
tf/examples/Bonsai/bonsai_example.ipynb | ###Markdown
Bonsai in Tensorflow

This is a simple notebook that illustrates the usage of the Tensorflow implementation of Bonsai. We are using the USPS dataset. Please refer to `fetch_usps.py` and run it to download and clean up the dataset.
###Code
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.
import helpermethods
import tensorflow as tf
import numpy as np
import sys
import os
sys.path.insert(0, '../../')
#Provide the GPU number to be used
os.environ['CUDA_VISIBLE_DEVICES'] =''
#Bonsai imports
from edgeml.trainer.bonsaiTrainer import BonsaiTrainer
from edgeml.graph.bonsai import Bonsai
# Fixing seeds for reproducibility
tf.set_random_seed(42)
np.random.seed(42)
###Output
_____no_output_____
###Markdown
USPS Data

It is assumed that the USPS data has already been downloaded and set up with the help of [fetch_usps.py](fetch_usps.py) and is present in the `./usps10` subdirectory.
###Code
#Loading and Pre-processing dataset for Bonsai
dataDir = "usps10/"
(dataDimension, numClasses, Xtrain, Ytrain, Xtest, Ytest) = helpermethods.preProcessData(dataDir)
print("Feature Dimension: ", dataDimension)
print("Num classes: ", numClasses)
###Output
Feature Dimension: 257
Num classes: 10
###Markdown
Model Parameters

Note that Bonsai is designed for a low-memory setting and the best results are obtained when operating in that setting. Use the sparsity, projection dimension, and tree depth to vary the model size; a rough size estimate is sketched after the next cell.
###Code
sigma = 1.0 #Sigmoid parameter for tanh
depth = 3 #Depth of Bonsai Tree
projectionDimension = 28 #Lower Dimensional space for Bonsai to work on
#Regularizers for Bonsai Parameters
regZ = 0.0001
regW = 0.001
regV = 0.001
regT = 0.001
totalEpochs = 100
learningRate = 0.01
outFile = None
#Sparsity for Bonsai Parameters. x => 100*x % are non-zeros
sparZ = 0.2
sparW = 0.3
sparV = 0.3
sparT = 0.62
batchSize = np.maximum(100, int(np.ceil(np.sqrt(Ytrain.shape[0]))))
useMCHLoss = True #only for Multiclass cases True: Multiclass-Hinge Loss, False: Cross Entropy.
#Bonsai uses one classifier for Binary, thus this condition
if numClasses == 2:
numClasses = 1
###Output
_____no_output_____
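###Markdown
Before training, it can help to estimate how large the resulting model will be. The sketch below is an assumption based on the Bonsai paper's parameterization (a shared sparse projection Z, per-node predictors W and V, and a branching vector θ per internal node); the authoritative layout lives in `edgeml.graph.bonsai`, so treat `bonsai_param_count` as a hypothetical helper:

```python
def bonsai_param_count(data_dim, proj_dim, depth, num_classes,
                       spar_z, spar_w, spar_v, spar_t):
    # Assumed layout: Z is proj_dim x data_dim; W and V are
    # num_classes x proj_dim per tree node; Theta is proj_dim per
    # internal node. Sparsity fractions scale the non-zero counts.
    total_nodes = 2 ** (depth + 1) - 1
    internal_nodes = 2 ** depth - 1
    z = spar_z * proj_dim * data_dim
    w = spar_w * num_classes * proj_dim * total_nodes
    v = spar_v * num_classes * proj_dim * total_nodes
    t = spar_t * proj_dim * internal_nodes
    return int(z + w + v + t)

n = bonsai_param_count(257, 28, 3, 10, 0.2, 0.3, 0.3, 0.62)
print(n, "non-zeros ~", round(4 * n / 1024, 1), "KB as float32")
```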
###Markdown
Placeholders for data feeding during training and inference
###Code
X = tf.placeholder("float32", [None, dataDimension])
Y = tf.placeholder("float32", [None, numClasses])
###Output
_____no_output_____
###Markdown
Creating a directory for the current model in the data directory using a timestamp
###Code
currDir = helpermethods.createTimeStampDir(dataDir)
helpermethods.dumpCommand(sys.argv, currDir)
###Output
_____no_output_____
###Markdown
Bonsai Graph Object

Instantiating the Bonsai Graph which will be used for training and inference.
###Code
bonsaiObj = Bonsai(numClasses, dataDimension, projectionDimension, depth, sigma)
###Output
_____no_output_____
###Markdown
Bonsai Trainer ObjectInstantiating the Bonsai Trainer which will be used for 3 phase training.
###Code
bonsaiTrainer = BonsaiTrainer(bonsaiObj, regW, regT, regV, regZ, sparW, sparT, sparV, sparZ,
learningRate, X, Y, useMCHLoss, outFile)
###Output
/home/t-dodenn/.virtualenvs/tfsource/lib/python3.5/site-packages/tensorflow/python/ops/gradients_impl.py:98: UserWarning: Converting sparse IndexedSlices to a dense Tensor of unknown shape. This may consume a large amount of memory.
"Converting sparse IndexedSlices to a dense Tensor of unknown shape. "
###Markdown
Session declaration and variable initialization. Interactive Session doesn't clog the entire GPU.
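If you do hand the process a GPU, a common alternative (a sketch using the TF 1.x API, matching this notebook's TensorFlow version) is to let a regular `Session` grow its GPU memory on demand instead:

```python
config = tf.ConfigProto()
config.gpu_options.allow_growth = True  # allocate GPU memory as needed
sess = tf.Session(config=config)
```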
###Code
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
###Output
_____no_output_____
###Markdown
Bonsai Training Routine

The method to run the 3-phase training, followed by reporting the best early-stopping model and accuracy, along with saving the parameters.
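The IHT phase visible in the log below enforces the sparsity targets by iterated hard thresholding: after a gradient step, each parameter matrix is projected back onto its sparsity budget by keeping only its largest-magnitude entries. A minimal sketch of that projection step (an illustration, not the trainer's actual code; `hard_threshold` is a hypothetical helper):

```python
import numpy as np

def hard_threshold(w, sparsity):
    # Keep the largest-magnitude `sparsity` fraction of w, zero the rest.
    k = int(np.ceil(sparsity * w.size))
    cutoff = np.sort(np.abs(w).ravel())[-k]  # k-th largest magnitude
    return np.where(np.abs(w) >= cutoff, w, 0.0)

w = np.random.randn(4, 4)
print(hard_threshold(w, 0.3))  # roughly 5 of 16 entries survive
```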
###Code
bonsaiTrainer.train(batchSize, totalEpochs, sess,
Xtrain, Xtest, Ytrain, Ytest, dataDir, currDir)
###Output
Epoch Number: 0
******************** Dense Training Phase Started ********************
Train Loss: 6.389298193984562 Train accuracy: 0.6244444446638227
Test accuracy 0.724464
MarginLoss + RegLoss: 1.4452395 + 3.6491284 = 5.094368
Epoch Number: 1
Train Loss: 3.688335908783807 Train accuracy: 0.8611111144224802
Test accuracy 0.769307
MarginLoss + RegLoss: 0.9957626 + 2.7783642 = 3.7741268
Epoch Number: 2
Train Loss: 2.6678174800342984 Train accuracy: 0.9186111175351672
Test accuracy 0.760339
MarginLoss + RegLoss: 0.8745117 + 2.0955048 = 2.9700165
Epoch Number: 3
Train Loss: 1.9926944921414058 Train accuracy: 0.941527778075801
Test accuracy 0.776283
MarginLoss + RegLoss: 0.7323152 + 1.5899123 = 2.3222275
Epoch Number: 4
Train Loss: 1.5220159557130601 Train accuracy: 0.9556944477889273
Test accuracy 0.809666
MarginLoss + RegLoss: 0.58971727 + 1.2277353 = 1.8174525
Epoch Number: 5
Train Loss: 1.1967213302850723 Train accuracy: 0.9623611138926612
Test accuracy 0.839063
MarginLoss + RegLoss: 0.49087143 + 0.9732404 = 1.4641118
Epoch Number: 6
Train Loss: 0.9660082765751414 Train accuracy: 0.9694444487492243
Test accuracy 0.858495
MarginLoss + RegLoss: 0.4301353 + 0.79288167 = 1.223017
Epoch Number: 7
Train Loss: 0.8048144280910492 Train accuracy: 0.9725000088413557
Test accuracy 0.884405
MarginLoss + RegLoss: 0.34885734 + 0.6637592 = 1.0126165
Epoch Number: 8
Train Loss: 0.6851460528042581 Train accuracy: 0.9747222305999862
Test accuracy 0.899851
MarginLoss + RegLoss: 0.3091187 + 0.5690259 = 0.87814456
Epoch Number: 9
Train Loss: 0.5977631327178743 Train accuracy: 0.979305564529366
Test accuracy 0.899851
MarginLoss + RegLoss: 0.28436783 + 0.49890098 = 0.7832688
Epoch Number: 10
Train Loss: 0.535357097370757 Train accuracy: 0.9794444549414847
Test accuracy 0.910314
MarginLoss + RegLoss: 0.2635972 + 0.44721544 = 0.7108126
Epoch Number: 11
Train Loss: 0.48886746995978886 Train accuracy: 0.9802777866522471
Test accuracy 0.919781
MarginLoss + RegLoss: 0.23250574 + 0.40637112 = 0.63887686
Epoch Number: 12
Train Loss: 0.4487115616599719 Train accuracy: 0.9806944512658649
Test accuracy 0.923767
MarginLoss + RegLoss: 0.22620481 + 0.37376344 = 0.59996825
Epoch Number: 13
Train Loss: 0.41764777070946163 Train accuracy: 0.9812500104308128
Test accuracy 0.923269
MarginLoss + RegLoss: 0.22121286 + 0.34504575 = 0.5662586
Epoch Number: 14
Train Loss: 0.3893965221941471 Train accuracy: 0.9837500088744693
Test accuracy 0.924763
MarginLoss + RegLoss: 0.21379846 + 0.3230402 = 0.53683865
Epoch Number: 15
Train Loss: 0.3666431142224206 Train accuracy: 0.9838888976309035
Test accuracy 0.921276
MarginLoss + RegLoss: 0.21161106 + 0.30337086 = 0.5149819
Epoch Number: 16
Train Loss: 0.3501228694286611 Train accuracy: 0.9830555650922987
Test accuracy 0.926258
MarginLoss + RegLoss: 0.19876844 + 0.28749332 = 0.48626176
Epoch Number: 17
Train Loss: 0.32852235808968544 Train accuracy: 0.9845833430687586
Test accuracy 0.929746
MarginLoss + RegLoss: 0.18692228 + 0.27008235 = 0.45700464
Epoch Number: 18
Train Loss: 0.3183121085166931 Train accuracy: 0.9823611204822859
Test accuracy 0.931241
MarginLoss + RegLoss: 0.18857315 + 0.25766617 = 0.44623932
Epoch Number: 19
Train Loss: 0.3019753938747777 Train accuracy: 0.9845833430687586
Test accuracy 0.930244
MarginLoss + RegLoss: 0.1913568 + 0.24436466 = 0.43572146
Epoch Number: 20
Train Loss: 0.2879051696509123 Train accuracy: 0.9852777851952447
Test accuracy 0.926258
MarginLoss + RegLoss: 0.19720516 + 0.23309413 = 0.43029928
Epoch Number: 21
Train Loss: 0.2760823153787189 Train accuracy: 0.9848611222373115
Test accuracy 0.929746
MarginLoss + RegLoss: 0.19013962 + 0.22110444 = 0.41124406
Epoch Number: 22
Train Loss: 0.265699431921045 Train accuracy: 0.9843055639002058
Test accuracy 0.927255
MarginLoss + RegLoss: 0.18394011 + 0.21136439 = 0.3953045
Epoch Number: 23
Train Loss: 0.2586811340103547 Train accuracy: 0.9841666767994562
Test accuracy 0.932237
MarginLoss + RegLoss: 0.18472062 + 0.20268525 = 0.38740587
Epoch Number: 24
Train Loss: 0.2515812249233325 Train accuracy: 0.9834722330172857
Test accuracy 0.932237
MarginLoss + RegLoss: 0.17988203 + 0.19471233 = 0.37459436
Epoch Number: 25
Train Loss: 0.2439375292095873 Train accuracy: 0.9819444533851411
Test accuracy 0.936223
MarginLoss + RegLoss: 0.17650211 + 0.1861265 = 0.3626286
Epoch Number: 26
Train Loss: 0.23371089560290179 Train accuracy: 0.9833333450886939
Test accuracy 0.932735
MarginLoss + RegLoss: 0.18389018 + 0.17848735 = 0.36237752
Epoch Number: 27
Train Loss: 0.22481733912395108 Train accuracy: 0.983611119290193
Test accuracy 0.93423
MarginLoss + RegLoss: 0.16752453 + 0.1700884 = 0.33761293
Epoch Number: 28
Train Loss: 0.21649042848083708 Train accuracy: 0.9833333442608515
Test accuracy 0.931241
MarginLoss + RegLoss: 0.18024354 + 0.16205509 = 0.34229863
Epoch Number: 29
Train Loss: 0.2109515176465114 Train accuracy: 0.983194451365206
Test accuracy 0.932735
MarginLoss + RegLoss: 0.18075311 + 0.15641537 = 0.33716848
Epoch Number: 30
Train Loss: 0.20864862451950708 Train accuracy: 0.9813889016707739
Test accuracy 0.930742
MarginLoss + RegLoss: 0.18594784 + 0.15273505 = 0.3386829
Epoch Number: 31
Train Loss: 0.21935068257153034 Train accuracy: 0.975833343134986
Test accuracy 0.940708
MarginLoss + RegLoss: 0.17358617 + 0.15079822 = 0.3243844
Epoch Number: 32
Train Loss: 0.20754448076089224 Train accuracy: 0.9801388954122862
Test accuracy 0.93722
MarginLoss + RegLoss: 0.17546712 + 0.14778407 = 0.3232512
Epoch Number: 33
******************** IHT Phase Started ********************
Train Loss: 0.2142104934900999 Train accuracy: 0.9783333390951157
Test accuracy 0.92576
MarginLoss + RegLoss: 0.19405848 + 0.11986786 = 0.31392634
Epoch Number: 34
Train Loss: 0.1957869732545482 Train accuracy: 0.9804166762365235
Test accuracy 0.933732
MarginLoss + RegLoss: 0.17486349 + 0.12441108 = 0.29927456
Epoch Number: 35
Train Loss: 0.20117665910058552 Train accuracy: 0.9798611212107871
Test accuracy 0.933234
MarginLoss + RegLoss: 0.18556419 + 0.12760702 = 0.3131712
Epoch Number: 36
Train Loss: 0.19235002787576783 Train accuracy: 0.9818055654565493
Test accuracy 0.932237
MarginLoss + RegLoss: 0.18422216 + 0.12556252 = 0.30978468
Epoch Number: 37
Train Loss: 0.18417591125600868 Train accuracy: 0.9818055654565493
Test accuracy 0.933234
MarginLoss + RegLoss: 0.18428192 + 0.122980185 = 0.3072621
Epoch Number: 38
Train Loss: 0.18028576651381123 Train accuracy: 0.9831944563322597
Test accuracy 0.933732
MarginLoss + RegLoss: 0.17935511 + 0.12061933 = 0.29997444
Epoch Number: 39
Train Loss: 0.1745260620696677 Train accuracy: 0.9833333442608515
Test accuracy 0.932735
MarginLoss + RegLoss: 0.18099774 + 0.11807041 = 0.29906815
Epoch Number: 40
Train Loss: 0.17143954140030676 Train accuracy: 0.9840277888708644
Test accuracy 0.93722
MarginLoss + RegLoss: 0.17120871 + 0.11562464 = 0.28683335
Epoch Number: 41
Train Loss: 0.16799314061386716 Train accuracy: 0.9834722355008125
Test accuracy 0.93423
MarginLoss + RegLoss: 0.1756582 + 0.113046676 = 0.28870487
Epoch Number: 42
Train Loss: 0.16445335994164148 Train accuracy: 0.9845833447244432
Test accuracy 0.936223
MarginLoss + RegLoss: 0.17149687 + 0.11167378 = 0.28317064
Epoch Number: 43
Train Loss: 0.1617109359552463 Train accuracy: 0.983194457160102
Test accuracy 0.938216
MarginLoss + RegLoss: 0.16902651 + 0.109498546 = 0.27852505
Epoch Number: 44
Train Loss: 0.1589122601888246 Train accuracy: 0.9848611214094691
Test accuracy 0.93722
MarginLoss + RegLoss: 0.16804701 + 0.10741738 = 0.2754644
Epoch Number: 45
Train Loss: 0.1585660283971164 Train accuracy: 0.9852777885066138
Test accuracy 0.937718
MarginLoss + RegLoss: 0.17146519 + 0.10648606 = 0.27795124
Epoch Number: 46
Train Loss: 0.15569559753768974 Train accuracy: 0.9843055663837327
Test accuracy 0.941206
MarginLoss + RegLoss: 0.16175063 + 0.104837134 = 0.26658776
Epoch Number: 47
Train Loss: 0.1528543213175403 Train accuracy: 0.985138900578022
Test accuracy 0.940209
MarginLoss + RegLoss: 0.1619776 + 0.10374366 = 0.26572126
Epoch Number: 48
Train Loss: 0.1517944024461839 Train accuracy: 0.9856944547759162
Test accuracy 0.93722
MarginLoss + RegLoss: 0.16866311 + 0.10247567 = 0.2711388
###Markdown
Bonsai in Tensorflow

This is a simple notebook that illustrates the usage of the Tensorflow implementation of Bonsai. We are using the USPS dataset. Please refer to `fetch_usps.py` and run it to download and clean up the dataset.
###Code
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.
import helpermethods
import tensorflow as tf
import numpy as np
import sys
import os
#Provide the GPU number to be used
os.environ['CUDA_VISIBLE_DEVICES'] =''
#Bonsai imports
from edgeml.trainer.bonsaiTrainer import BonsaiTrainer
from edgeml.graph.bonsai import Bonsai
# Fixing seeds for reproducibility
tf.set_random_seed(42)
np.random.seed(42)
###Output
_____no_output_____
###Markdown
USPS Data

It is assumed that the USPS data has already been downloaded and set up with the help of [fetch_usps.py](fetch_usps.py) and is present in the `./usps10` subdirectory.
###Code
#Loading and Pre-processing dataset for Bonsai
dataDir = "usps10/"
(dataDimension, numClasses, Xtrain, Ytrain, Xtest, Ytest, mean, std) = helpermethods.preProcessData(dataDir, isRegression=False)
print("Feature Dimension: ", dataDimension)
print("Num classes: ", numClasses)
###Output
Feature Dimension: 257
Num classes: 10
###Markdown
Model Parameters

Note that Bonsai is designed for a low-memory setting and the best results are obtained when operating in that setting. Use the sparsity, projection dimension, and tree depth to vary the model size.
###Code
sigma = 1.0 #Sigmoid parameter for tanh
depth = 3 #Depth of Bonsai Tree
projectionDimension = 28 #Lower Dimensional space for Bonsai to work on
#Regularizers for Bonsai Parameters
regZ = 0.0001
regW = 0.001
regV = 0.001
regT = 0.001
totalEpochs = 100
learningRate = 0.01
outFile = None
#Sparsity for Bonsai Parameters. x => 100*x % are non-zeros
sparZ = 0.2
sparW = 0.3
sparV = 0.3
sparT = 0.62
batchSize = np.maximum(100, int(np.ceil(np.sqrt(Ytrain.shape[0]))))
useMCHLoss = True #only for Multiclass cases True: Multiclass-Hinge Loss, False: Cross Entropy.
#Bonsai uses one classifier for Binary, thus this condition
if numClasses == 2:
numClasses = 1
###Output
_____no_output_____
###Markdown
Placeholders for data feeding during training and inference
###Code
X = tf.placeholder("float32", [None, dataDimension])
Y = tf.placeholder("float32", [None, numClasses])
###Output
_____no_output_____
###Markdown
Creating a directory for the current model in the data directory using a timestamp
###Code
currDir = helpermethods.createTimeStampDir(dataDir)
helpermethods.dumpCommand(sys.argv, currDir)
###Output
_____no_output_____
###Markdown
Bonsai Graph Object

Instantiating the Bonsai Graph which will be used for training and inference.
###Code
bonsaiObj = Bonsai(numClasses, dataDimension, projectionDimension, depth, sigma)
###Output
_____no_output_____
###Markdown
Bonsai Trainer Object

Instantiating the Bonsai Trainer which will be used for the 3-phase training.
###Code
bonsaiTrainer = BonsaiTrainer(bonsaiObj, regW, regT, regV, regZ, sparW, sparT, sparV, sparZ,
learningRate, X, Y, useMCHLoss, outFile)
###Output
C:\Users\t-vekusu\AppData\Local\Continuum\anaconda3\envs\tensorflow\lib\site-packages\tensorflow\python\ops\gradients_impl.py:100: UserWarning: Converting sparse IndexedSlices to a dense Tensor of unknown shape. This may consume a large amount of memory.
"Converting sparse IndexedSlices to a dense Tensor of unknown shape. "
###Markdown
Session declaration and variable initialization. Interactive Session doesn't clog the entire GPU.
###Code
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
###Output
_____no_output_____
###Markdown
Bonsai Training Routine

The method to run the 3-phase training, followed by reporting the best early-stopping model and accuracy, along with saving the parameters.
###Code
bonsaiTrainer.train(batchSize, totalEpochs, sess,
Xtrain, Xtest, Ytrain, Ytest, dataDir, currDir)
###Output
Epoch Number: 0
******************** Dense Training Phase Started ********************
Classification Train Loss: 6.388934433460236
Training accuracy (Classification): 0.6250000005174015
Test accuracy 0.726956
MarginLoss + RegLoss: 1.4466879 + 3.6487768 = 5.0954647
Epoch Number: 1
Classification Train Loss: 3.6885906954606376
Training accuracy (Classification): 0.8623611107468605
Test accuracy 0.758346
MarginLoss + RegLoss: 1.0173264 + 2.778634 = 3.7959604
Epoch Number: 2
Classification Train Loss: 2.667721450328827
Training accuracy (Classification): 0.9184722271230485
Test accuracy 0.7429
MarginLoss + RegLoss: 0.92546654 + 2.095467 = 3.0209336
Epoch Number: 3
Classification Train Loss: 1.9921080254846149
Training accuracy (Classification): 0.941944446000788
Test accuracy 0.767314
MarginLoss + RegLoss: 0.7603649 + 1.5889603 = 2.3493252
Epoch Number: 4
Classification Train Loss: 1.5233625107341342
Training accuracy (Classification): 0.9563888907432556
Test accuracy 0.791231
MarginLoss + RegLoss: 0.6496898 + 1.2271981 = 1.8768879
Epoch Number: 5
Classification Train Loss: 1.1950715631246567
Training accuracy (Classification): 0.9650000035762787
Test accuracy 0.810164
MarginLoss + RegLoss: 0.54003507 + 0.97295314 = 1.5129882
Epoch Number: 6
Classification Train Loss: 0.9672323316335678
Training accuracy (Classification): 0.968333340353436
Test accuracy 0.855007
MarginLoss + RegLoss: 0.44149697 + 0.79325426 = 1.2347512
Epoch Number: 7
Classification Train Loss: 0.8014380658666292
Training accuracy (Classification): 0.9722222313284874
Test accuracy 0.874938
MarginLoss + RegLoss: 0.37062877 + 0.6628879 = 1.0335166
Epoch Number: 8
Classification Train Loss: 0.684503066043059
Training accuracy (Classification): 0.976111119820012
Test accuracy 0.899851
MarginLoss + RegLoss: 0.3099702 + 0.5688073 = 0.8787775
Epoch Number: 9
Classification Train Loss: 0.5987317487597466
Training accuracy (Classification): 0.9794444565971693
Test accuracy 0.907324
MarginLoss + RegLoss: 0.2689218 + 0.49965328 = 0.7685751
Epoch Number: 10
Classification Train Loss: 0.5343128165437115
Training accuracy (Classification): 0.9804166778922081
Test accuracy 0.9143
MarginLoss + RegLoss: 0.24538836 + 0.44663915 = 0.6920275
Epoch Number: 11
Classification Train Loss: 0.48874612069792217
Training accuracy (Classification): 0.9801388987236552
Test accuracy 0.916293
MarginLoss + RegLoss: 0.23703864 + 0.40629783 = 0.6433365
Epoch Number: 12
Classification Train Loss: 0.44733552055226433
Training accuracy (Classification): 0.98097223126226
Test accuracy 0.918286
MarginLoss + RegLoss: 0.23851919 + 0.37269312 = 0.6112123
Epoch Number: 13
Classification Train Loss: 0.4165669356783231
Training accuracy (Classification): 0.9822222317258517
Test accuracy 0.917289
MarginLoss + RegLoss: 0.23061273 + 0.345445 = 0.57605773
Epoch Number: 14
Classification Train Loss: 0.39181090601616436
Training accuracy (Classification): 0.9812500087751282
Test accuracy 0.92277
MarginLoss + RegLoss: 0.2121576 + 0.32245666 = 0.53461426
Epoch Number: 15
Classification Train Loss: 0.36949437111616135
Training accuracy (Classification): 0.9820833446251022
Test accuracy 0.926258
MarginLoss + RegLoss: 0.19854721 + 0.30341443 = 0.50196165
Epoch Number: 16
Classification Train Loss: 0.3469446731938256
Training accuracy (Classification): 0.9831944538487328
Test accuracy 0.927255
MarginLoss + RegLoss: 0.19628116 + 0.28535655 = 0.48163772
Epoch Number: 17
Classification Train Loss: 0.329777576857143
Training accuracy (Classification): 0.984166675971614
Test accuracy 0.92277
MarginLoss + RegLoss: 0.20166817 + 0.26965213 = 0.4713203
Epoch Number: 18
Classification Train Loss: 0.317672994815641
Training accuracy (Classification): 0.9815277879436811
Test accuracy 0.925262
MarginLoss + RegLoss: 0.20086277 + 0.2559616 = 0.45682436
Epoch Number: 19
Classification Train Loss: 0.3000084459781647
Training accuracy (Classification): 0.9843055655558904
Test accuracy 0.931739
MarginLoss + RegLoss: 0.18073215 + 0.24324338 = 0.42397553
Epoch Number: 20
Classification Train Loss: 0.2897499371320009
Training accuracy (Classification): 0.9827777867515882
Test accuracy 0.921276
MarginLoss + RegLoss: 0.20172484 + 0.23221089 = 0.43393573
Epoch Number: 21
Classification Train Loss: 0.2821065636558665
Training accuracy (Classification): 0.9812500096029706
Test accuracy 0.928749
MarginLoss + RegLoss: 0.18990344 + 0.22147894 = 0.41138238
Epoch Number: 22
Classification Train Loss: 0.2660716378854381
Training accuracy (Classification): 0.9844444559680091
Test accuracy 0.928251
MarginLoss + RegLoss: 0.17955597 + 0.21111046 = 0.39066643
Epoch Number: 23
Classification Train Loss: 0.2567368100086848
Training accuracy (Classification): 0.9852777885066138
Test accuracy 0.928251
MarginLoss + RegLoss: 0.18770447 + 0.20248988 = 0.39019436
Epoch Number: 24
Classification Train Loss: 0.25224825532899964
Training accuracy (Classification): 0.9823611204822859
Test accuracy 0.932735
MarginLoss + RegLoss: 0.18552671 + 0.19460817 = 0.38013488
Epoch Number: 25
Classification Train Loss: 0.24661735258996487
Training accuracy (Classification): 0.9804166762365235
Test accuracy 0.931241
MarginLoss + RegLoss: 0.18796808 + 0.18610859 = 0.37407666
Epoch Number: 26
Classification Train Loss: 0.23342499737110403
Training accuracy (Classification): 0.9829166763358645
Test accuracy 0.932735
MarginLoss + RegLoss: 0.17906994 + 0.17793566 = 0.3570056
Epoch Number: 27
Classification Train Loss: 0.22210048822065195
Training accuracy (Classification): 0.9851388972666528
Test accuracy 0.934728
MarginLoss + RegLoss: 0.17679122 + 0.16876754 = 0.34555876
Epoch Number: 28
Classification Train Loss: 0.2189549288402001
Training accuracy (Classification): 0.9831944538487328
Test accuracy 0.932237
MarginLoss + RegLoss: 0.19115414 + 0.16296963 = 0.35412377
Epoch Number: 29
Classification Train Loss: 0.21842483865718046
Training accuracy (Classification): 0.9805555658208
Test accuracy 0.936722
MarginLoss + RegLoss: 0.17462157 + 0.15921564 = 0.3338372
Epoch Number: 30
Classification Train Loss: 0.21449942576388517
Training accuracy (Classification): 0.9804166754086813
Test accuracy 0.939711
MarginLoss + RegLoss: 0.17741902 + 0.15273981 = 0.33015883
Epoch Number: 31
Classification Train Loss: 0.20739994280868107
Training accuracy (Classification): 0.9825000100665622
Test accuracy 0.933732
MarginLoss + RegLoss: 0.17381513 + 0.1498537 = 0.32366884
Epoch Number: 32
Classification Train Loss: 0.20110303929282558
Training accuracy (Classification): 0.9840277888708644
Test accuracy 0.93423
MarginLoss + RegLoss: 0.18619148 + 0.14583017 = 0.33202165
Epoch Number: 33
******************** IHT Phase Started ********************
Classification Train Loss: 0.21433907147083017
Training accuracy (Classification): 0.9801388987236552
Test accuracy 0.927255
MarginLoss + RegLoss: 0.19979775 + 0.12088289 = 0.32068065
Epoch Number: 34
Classification Train Loss: 0.1990115779141585
Training accuracy (Classification): 0.980694454577234
Test accuracy 0.933234
MarginLoss + RegLoss: 0.17835513 + 0.12438774 = 0.30274287
Epoch Number: 35
Classification Train Loss: 0.20429682172834873
Training accuracy (Classification): 0.9788888974322213
Test accuracy 0.929248
MarginLoss + RegLoss: 0.19013074 + 0.12853864 = 0.31866938
Epoch Number: 36
Classification Train Loss: 0.19357945707937083
Training accuracy (Classification): 0.9816666767001152
Test accuracy 0.932735
MarginLoss + RegLoss: 0.18534705 + 0.12509713 = 0.31044418
Epoch Number: 37
Classification Train Loss: 0.18653404754069117
Training accuracy (Classification): 0.9818055638008647
Test accuracy 0.929746
MarginLoss + RegLoss: 0.18708317 + 0.12236847 = 0.30945164
Epoch Number: 38
Classification Train Loss: 0.18141362298693922
Training accuracy (Classification): 0.9815277871158388
Test accuracy 0.933234
MarginLoss + RegLoss: 0.18262453 + 0.11991154 = 0.30253607
Epoch Number: 39
Classification Train Loss: 0.17729416727605793
Training accuracy (Classification): 0.9820833429694176
Test accuracy 0.932735
MarginLoss + RegLoss: 0.1798804 + 0.11748926 = 0.29736966
|
examples/notebooks/cartoee_projections.ipynb | ###Markdown
Uncomment the following line to install [geemap](https://geemap.org) if needed.
###Code
# !pip install geemap
###Output
_____no_output_____
###Markdown
Working with projections in cartoee
###Code
import ee
from geemap import cartoee
import cartopy.crs as ccrs
%pylab inline
ee.Initialize()
###Output
_____no_output_____
###Markdown
Plotting an image on a map

Here we are going to show another example of creating a map with EE results. We will use global sea surface temperature data for Jan-Mar 2018.
###Code
# get an earth engine image of ocean data for Jan-Mar 2018
ocean = (
ee.ImageCollection('NASA/OCEANDATA/MODIS-Terra/L3SMI')
.filter(ee.Filter.date('2018-01-01', '2018-03-01'))
.median()
.select(["sst"],["SST"])
)
# set parameters for plotting
# will plot the Sea Surface Temp with specific range and colormap
visualization = {'bands':"SST",'min':-2,'max':30}
# specify region to focus on
bbox = [-180,-88,180,88]
fig = plt.figure(figsize=(10,7))
# plot the result with cartoee using a PlateCarree projection (default)
ax = cartoee.get_map(ocean,cmap='plasma',vis_params=visualization,region=bbox)
cb = cartoee.add_colorbar(ax,vis_params=visualization,loc='right',cmap='plasma')
ax.coastlines()
plt.show()
###Output
_____no_output_____
###Markdown
Mapping with different projections

You can specify whatever projection is available within `cartopy` to display the results from Earth Engine. Here are a couple of examples of global and regional maps using the sea surface temperature example. Please refer to the [`cartopy` projection documentation](https://scitools.org.uk/cartopy/docs/latest/crs/projections.html) for more examples with different projections.
###Code
fig = plt.figure(figsize=(10,7))
# create a new Mollweide projection centered on the Pacific
projection = ccrs.Mollweide(central_longitude=-180)
# plot the result with cartoee using the Mollweide projection
ax = cartoee.get_map(ocean,vis_params=visualization,region=bbox,
cmap='plasma',proj=projection)
cb = cartoee.add_colorbar(ax,vis_params=visualization,loc='bottom',cmap='plasma',
orientation='horizontal')
ax.set_title("Mollweide projection")
ax.coastlines()
plt.show()
fig = plt.figure(figsize=(10,7))
# create a new Goode homolosine projection centered on the Pacific
projection = ccrs.InterruptedGoodeHomolosine(central_longitude=-180)
# plot the result with cartoee using the Goode homolosine projection
ax = cartoee.get_map(ocean,vis_params=visualization,region=bbox,
cmap='plasma',proj=projection)
cb = cartoee.add_colorbar(ax,vis_params=visualization,loc='bottom',cmap='plasma',
orientation='horizontal')
ax.set_title("Goode homolosine projection")
ax.coastlines()
plt.show()
fig = plt.figure(figsize=(10,7))
# create a new orthographic projection focused on the Pacific
projection = ccrs.Orthographic(-130,-10)
# plot the result with cartoee using the orthographic projection
ax = cartoee.get_map(ocean,vis_params=visualization,region=bbox,
cmap='plasma',proj=projection)
cb = cartoee.add_colorbar(ax,vis_params=visualization,loc='right',cmap='plasma',
orientation='vertical')
ax.set_title("Orographic projection")
ax.coastlines()
plt.show()
###Output
_____no_output_____
###Markdown
Warping artifacts

Oftentimes a global projection is not needed, so we use a specific projection that provides the best view of the geographic region of interest. When we do, image warping effects sometimes occur. This is because `cartoee` only requests data for the region of interest, and when mapping with `cartopy` the pixels get warped to fit the view extent as best as possible. Consider the following example where we want to map SST over the south pole:
###Code
fig = plt.figure(figsize=(10,7))
# Create a new region to focus on
spole = [-180,-88,180,0]
projection = ccrs.SouthPolarStereo()
# plot the result with cartoee focusing on the south pole
ax = cartoee.get_map(ocean,cmap='plasma',vis_params=visualization,region=spole,proj=projection)
cb = cartoee.add_colorbar(ax,vis_params=visualization,loc='right',cmap='plasma')
ax.coastlines()
ax.set_title('The South Pole')
plt.show()
###Output
_____no_output_____
###Markdown
As you can see from the result, there are warping effects on the plotted image. There is really no way of getting around this (other than requesting a larger extent of data, which may not always be the case). So, what we can do is set the extent of the map to a more realistic view after plotting the image, as in the following example:
###Code
fig = plt.figure(figsize=(10,7))
# plot the result with cartoee focusing on the south pole
ax = cartoee.get_map(ocean,cmap='plasma',vis_params=visualization,region=spole,proj=projection)
cb = cartoee.add_colorbar(ax,vis_params=visualization,loc='right',cmap='plasma')
ax.coastlines()
ax.set_title('The South Pole')
# get bounding box coordinates of a zoom area
zoom = spole
zoom[-1] = -20
# convert bbox coordinate from [W,S,E,N] to [W,E,S,N] as matplotlib expects
zoom_extent = cartoee.bbox_to_extent(zoom)
# set the extent of the map to the zoom area
ax.set_extent(zoom_extent,ccrs.PlateCarree())
plt.show()
###Output
_____no_output_____
###Markdown
Uncomment the following line to install [geemap](https://geemap.org) if needed.
###Code
# !pip install geemap
###Output
_____no_output_____
###Markdown
Working with projections in cartoee
###Code
import ee
from geemap import cartoee
import cartopy.crs as ccrs
%pylab inline
ee.Initialize()
###Output
_____no_output_____
###Markdown
Plotting an image on a map

Here we are going to show another example of creating a map with EE results. We will use global sea surface temperature data for Jan-Mar 2018.
###Code
# get an earth engine image of ocean data for Jan-Mar 2018
ocean = (
ee.ImageCollection('NASA/OCEANDATA/MODIS-Terra/L3SMI')
.filter(ee.Filter.date('2018-01-01', '2018-03-01'))
.median()
.select(["sst"], ["SST"])
)
# set parameters for plotting
# will plot the Sea Surface Temp with specific range and colormap
visualization = {'bands': "SST", 'min': -2, 'max': 30}
# specify region to focus on
bbox = [-180, -88, 180, 88]
fig = plt.figure(figsize=(10, 7))
# plot the result with cartoee using a PlateCarree projection (default)
ax = cartoee.get_map(ocean, cmap='plasma', vis_params=visualization, region=bbox)
cb = cartoee.add_colorbar(ax, vis_params=visualization, loc='right', cmap='plasma')
ax.coastlines()
plt.show()
###Output
_____no_output_____
###Markdown
Mapping with different projections

You can specify whatever projection is available within `cartopy` to display the results from Earth Engine. Here are a couple of examples of global and regional maps using the sea surface temperature example. Please refer to the [`cartopy` projection documentation](https://scitools.org.uk/cartopy/docs/latest/crs/projections.html) for more examples with different projections.
###Code
fig = plt.figure(figsize=(10, 7))
# create a new Mollweide projection centered on the Pacific
projection = ccrs.Mollweide(central_longitude=-180)
# plot the result with cartoee using the Mollweide projection
ax = cartoee.get_map(
ocean, vis_params=visualization, region=bbox, cmap='plasma', proj=projection
)
cb = cartoee.add_colorbar(
ax, vis_params=visualization, loc='bottom', cmap='plasma', orientation='horizontal'
)
ax.set_title("Mollweide projection")
ax.coastlines()
plt.show()
fig = plt.figure(figsize=(10, 7))
# create a new Goode homolosine projection centered on the Pacific
projection = ccrs.InterruptedGoodeHomolosine(central_longitude=-180)
# plot the result with cartoee using the Goode homolosine projection
ax = cartoee.get_map(
ocean, vis_params=visualization, region=bbox, cmap='plasma', proj=projection
)
cb = cartoee.add_colorbar(
ax, vis_params=visualization, loc='bottom', cmap='plasma', orientation='horizontal'
)
ax.set_title("Goode homolosine projection")
ax.coastlines()
plt.show()
fig = plt.figure(figsize=(10, 7))
# create a new orthographic projection focused on the Pacific
projection = ccrs.Orthographic(-130, -10)
# plot the result with cartoee using the orthographic projection
ax = cartoee.get_map(
ocean, vis_params=visualization, region=bbox, cmap='plasma', proj=projection
)
cb = cartoee.add_colorbar(
ax, vis_params=visualization, loc='right', cmap='plasma', orientation='vertical'
)
ax.set_title("Orographic projection")
ax.coastlines()
plt.show()
###Output
_____no_output_____
###Markdown
Warping artifacts

Oftentimes a global projection is not needed, so we use a specific projection that provides the best view of the geographic region of interest. When we do, image warping effects sometimes occur. This is because `cartoee` only requests data for the region of interest, and when mapping with `cartopy` the pixels get warped to fit the view extent as best as possible. Consider the following example where we want to map SST over the south pole:
###Code
fig = plt.figure(figsize=(10, 7))
# Create a new region to focus on
spole = [-180, -88, 180, 0]
projection = ccrs.SouthPolarStereo()
# plot the result with cartoee focusing on the south pole
ax = cartoee.get_map(
ocean, cmap='plasma', vis_params=visualization, region=spole, proj=projection
)
cb = cartoee.add_colorbar(ax, vis_params=visualization, loc='right', cmap='plasma')
ax.coastlines()
ax.set_title('The South Pole')
plt.show()
###Output
_____no_output_____
###Markdown
As you can see from the result, there are warping effects on the plotted image. There is really no way of getting around this (other than requesting a larger extent of data, which may not always be the case). So, what we can do is set the extent of the map to a more realistic view after plotting the image, as in the following example:
###Code
fig = plt.figure(figsize=(10, 7))
# plot the result with cartoee focusing on the south pole
ax = cartoee.get_map(
ocean, cmap='plasma', vis_params=visualization, region=spole, proj=projection
)
cb = cartoee.add_colorbar(ax, vis_params=visualization, loc='right', cmap='plasma')
ax.coastlines()
ax.set_title('The South Pole')
# get bounding box coordinates of a zoom area
zoom = spole
zoom[-1] = -20
# convert bbox coordinate from [W,S,E,N] to [W,E,S,N] as matplotlib expects
zoom_extent = cartoee.bbox_to_extent(zoom)
# set the extent of the map to the zoom area
ax.set_extent(zoom_extent, ccrs.PlateCarree())
plt.show()
###Output
_____no_output_____
|
beginner/sympy_in_10_minutes.ipynb | ###Markdown
Tutorial - SymPy in 10 minutes

Introduction

In this tutorial we will learn the basics of [Jupyter](http://jupyter.org/) and [SymPy](https://es.wikipedia.org/wiki/SymPy). SymPy is a Python library for symbolic computation. It provides computer algebra capabilities either as a standalone application, as a library for other applications, or live on the web as [SymPy Live](http://live.sympy.org/) or [SymPy Gamma](http://www.sympygamma.com/). SymPy is similar to other CAS (Computer Algebra Software) like Mathematica, Maple or Maxima.

A more complete tutorial can be found at [http://docs.sympy.org/latest/tutorial/index.html](http://docs.sympy.org/latest/tutorial/index.html).

Before using SymPy we should load it, like any other Python library. We will use

```python
init_session()
```

to make some imports; this will help us in its interactive use. For scripting it would be better to do the imports differently, for example

```python
import sympy as sym
```

and then call the functions from SymPy in the following manner

```python
x = sym.symbols("x")
expr = sym.cos(x)**2 + 3*x
deriv = expr.diff(x)
```

where we computed the derivative of $\cos^2(x) + 3x$, which should be $-2\sin(x)\cos(x) + 3$.

For further information on the Jupyter Notebook you can refer to the [User Manual](https://athena.brynmawr.edu/jupyter/hub/dblank/public/Jupyter%20Notebook%20Users%20Manual.ipynb).
###Code
%matplotlib notebook
import numpy as np
import matplotlib.pyplot as plt
from sympy import *
init_session()
plt.style.use("seaborn-notebook")
plt.rcParams["figure.figsize"] = 6, 4
###Output
_____no_output_____
###Markdown
Let us start with some simple calculations. Below we have a _code cell_ with an addition. Place the cursor on it and press SHIFT + ENTER to evaluate it.
###Code
1 + 3
###Output
_____no_output_____
###Markdown
Let us make some calculations
###Code
factorial(5)
Out[6]* 10
1 // 3
1 / 3
S(1) / 3
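# S(1) turns 1 into a SymPy Integer, so the division stays exact (Rational(1, 3))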
###Output
_____no_output_____
###Markdown
We can evaluate an expression to its floating point version
###Code
sqrt(2*pi)
float(Out[11])
###Output
_____no_output_____
###Markdown
In the previous line we used the expression **Out[11]** that stores the output from the evaluation in cell 11 (In[11]). We can also store expressions as variables, just as any Python variable.
###Code
radius = 10
height = 100
area = pi * radius**2
volume = area * height
volume
float(volume)
###Output
_____no_output_____
###Markdown
So far, we have just used SymPy as a calculator. Let us try more advanced calculations.

Definite and indefinite integrals
###Code
integrate(sin(x), x)
integrate(sin(x), (x, 0, pi))
###Output
_____no_output_____
###Markdown
We can define a function and integrate it
###Code
f = lambda x: x**2 + 5
f(5)
integrate(f(z), z)
integrate(1/(x**2 + y), x)
###Output
_____no_output_____
###Markdown
If we assume that the constant in the denominator is positive, the integral takes a simpler (arctangent) form
###Code
a = symbols("a", positive=True)
integrate(1/(x**2 + a), x)
###Output
_____no_output_____
###Markdown
We just learnt the basics; we can try some examples now.

**Note:** If you want to know more about a specific function you can use ``help()`` or the IPython magic command ``??``
###Code
help(integrate)
integrate??
###Output
_____no_output_____
###Markdown
Examples

Solving algebraic equations

Right now, there are two main options for the solution of algebraic systems, namely: [``solveset`` and ``solve``](http://docs.sympy.org/latest/tutorial/solvers.html). The preferred method is ``solveset`` (see this [explanation](http://docs.sympy.org/latest/modules/solvers/solveset.html)), although there are systems that can be solved using ``solve`` and not ``solveset``. To solve equations using ``solveset``:
###Code
a, b, c = symbols("a b c")
solveset(a*x**2 + b*x + c, x)
###Output
_____no_output_____
###Markdown
We should enter each equation either as an expression equated to 0, or as an ``Eq`` object
###Code
solveset(Eq(a*x**2 + b*x, -c), x)
###Output
_____no_output_____
###Markdown
Currently ``solveset`` is not capable of solving the following types of equations:

- Non-linear multivariate systems
- Equations solvable by LambertW (transcendental equation solver)

``solve`` can be used for such cases:
###Code
solve([x*y - 1, x - 2], x, y)
solve(x*exp(x) - 1, x )
###Output
_____no_output_____
###Markdown
Linear Algebra

We use ``Matrix`` to create matrices; matrices can contain expressions. We use the method ``.inv()`` to compute the inverse and ``*`` to multiply matrices.
###Code
A = Matrix([
[1, -1],
[1, sin(c)]
])
display(A)
B = A.inv()
display(B)
A * B
###Output
_____no_output_____
###Markdown
This should be the identity matrix; let us simplify the expression. There are several simplification functions, and ``simplify`` is the most general one. Simplifying is a complicated matter... if you are uncertain, use ``simplify``.
###Code
simplify(A * B)
###Output
_____no_output_____
###Markdown
Plotting

We can make 2D and 3D plots
###Code
from sympy.plotting import plot3d
plot(sin(x), (x, -pi, pi));
monkey_saddle = x**3 - 3*x*y**2
p = plot3d(monkey_saddle, (x, -2, 2), (y, -2, 2))
###Output
_____no_output_____
###Markdown
Derivatives and differential equations

We can use the function ``diff`` or the method ``.diff()`` to compute derivatives.
###Code
f = lambda x: x**2
diff(f(x), x)
f(x).diff(x)
g = lambda x: sin(x)
diff(g(f(x)), x)
###Output
_____no_output_____
###Markdown
Yes, SymPy knows about the chain rule. To finish, let us solve a second-order ODE

$$ u''(t) + \omega^2 u(t) = 0$$
###Code
u = symbols("u", cls=Function)
omega = symbols("omega", positive=True)
ode = u(t).diff(t, 2) + omega**2 * u(t)
dsolve(ode, u(t))
###Output
_____no_output_____
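###Markdown
The solution can be checked by substituting it back into the equation; a quick sketch using SymPy's ``checkodesol`` (available through the ``from sympy import *`` above):

```python
sol = dsolve(ode, u(t))
checkodesol(ode, sol)  # returns (True, 0) when the solution satisfies the ODE
```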
###Markdown
Turning SymPy expressions into evaluable functions

``lambdify`` provides a convenient way to transform SymPy expressions into lambda functions, which can be used to calculate numerical values very fast.

Let's try a first example
###Code
f = lambdify(x, x**2, "numpy")
f(3)
f(np.array([1, 2, 3]))
###Output
_____no_output_____
###Markdown
We can try a more difficult one
###Code
fun = diff(sin(x)*cos(x**3) - sin(x)/x, x)
fun
fun_numpy = lambdify(x, fun, "numpy")
###Output
_____no_output_____
###Markdown
and then evaluate it for some points in, let's say, $[0, 5]$
###Code
pts = np.linspace(0, 5, 1000)
fun_pts = fun_numpy(pts + 1e-6) # To avoid division by 0
plt.figure()
plt.plot(pts, fun_pts);
###Output
_____no_output_____
###Markdown
References

- Žiga Lenarčič. ["10 minute (wx)Maxima tutorial"](https://andrejv.github.io/wxmaxima/tutorials/10minute.zip), (2008). Accessed on: July 25, 2017.
- Borland, David, and Russell M. Taylor II. ["Rainbow color map (still) considered harmful."](https://data3.mprog.nl/course/15%20Readings/40%20Reading%204/Borland_Rainbow_Color_Map.pdf) IEEE Computer Graphics and Applications 27.2 (2007): 14-17.

The following cell changes the style of the notebook.
###Code
from IPython.core.display import HTML
def css_styling():
styles = open('../styles/sympy.css', 'r').read()
return HTML(styles)
css_styling()
###Output
_____no_output_____ |
ykbp_model_comparison_to_hipparcos.ipynb | ###Markdown
Introduction

[Yamaguchi et al 2017](http://adsabs.harvard.edu/cgi-bin/bib_query?arXiv:1710.09839) estimated the number of binaries with a black hole and a stellar companion which will be detected by the [Gaia mission](http://sci.esa.int/gaia/). In this notebook we want to check whether their model reproduces the non-detection of such binaries by the [Hipparcos mission](http://sci.esa.int/hipparcos/).

Loading libraries and setting up environment
###Code
import sympy
import numpy
sympy.init_printing()
###Output
_____no_output_____
###Markdown
Reproducing Paper Results

As a first step, we try to reproduce the results in the paper, specifically Table 2. We start with the integrand of the large integral in equation 15.
###Code
f_bin = sympy.Symbol('f_bin') # Binary fraction
Gamma_0 = sympy.Symbol('Gamma_0') # Slope of binary separation pdf
Phi = sympy.Symbol('Phi') # Mass ratio distribution
t_L1 = sympy.Symbol(r't_{L,1}') # Lifetime of the star that collapsed to a black hole
t_L2 = sympy.Symbol(r't_{L,2}') # Lifetime of the companion
A_bar = sympy.Symbol('\overline{A}') # Separation between stars
b = sympy.Symbol('b') # Viewing angle relative to galactic plane
D = sympy.Symbol('D') # Distance from Earth
rho_d = sympy.Symbol('rho_d') # Star formation number density
eqn_15_integrand_raw = (2*f_bin/(1-f_bin))*Gamma_0*Phi*(t_L2-t_L1)*(1/A_bar)*sympy.cos(b)*D**2*rho_d
eqn_15_integrand_raw
###Output
_____no_output_____
###Markdown
Spatial distribution of stars

Bahcall-Soneira star formation number density (equation 11)
###Code
Psi = sympy.Symbol('Psi')
rho_d0 = sympy.Symbol(r'\rho_{d,0}') # Normalisation factor
z = sympy.Symbol('z') # Distance from the galactic plane
h_z = sympy.Symbol('h_z', positive=True) # Scale height of the galactic disc
x = sympy.Symbol('x') # Distance from the rotational axis of the galaxy
r_0 = sympy.Symbol('r_0') # Distance between the sun and the galactic centre
h_r = sympy.Symbol('h_r', positive=True) # Scale radius of the galaxy
eqn_11 = sympy.Eq(rho_d,Psi*rho_d0*sympy.exp(-z/h_z-(x-r_0)/h_r))
eqn_11
###Output
_____no_output_____
###Markdown
Definition of the normalisation (equation 12)
###Code
x_max = sympy.Symbol(r'x_{\max}')
z_max = sympy.Symbol(r'z_{\max}')
temp = eqn_11.rhs/ Psi
temp = sympy.integrate(4*sympy.pi*x*temp,(z,0,z_max),(x,0,x_max)).simplify()
eqn_12 = sympy.Eq(rho_d0,sympy.solve(temp-1,rho_d0)[0])
eqn_12
###Output
_____no_output_____
###Markdown
Actually, the bounds $x_{\max}$ and $z_{\max}$ are not defined in the paper, so I assume $x_{\max}, z_{\max} \rightarrow \infty$
###Code
temp = eqn_11.rhs/Psi*4*sympy.pi*x
temp = sympy.integrate(temp, (z, 0, sympy.oo))
temp = sympy.integrate(temp, (x, 0, sympy.oo))
temp = sympy.solve(temp-1,rho_d0)[0]
eqn_12_var = sympy.Eq(rho_d0, temp)
eqn_12_var
###Output
_____no_output_____
###Markdown
Geocentric coordinates
###Code
l = sympy.Symbol('l') # Viewing angle relative to the rotation axis of the galaxy
eqn_13 = sympy.Eq(x,sympy.sqrt(r_0**2+D**2*sympy.cos(b)**2-2*D*r_0*sympy.cos(b)*sympy.cos(l)))
eqn_13
eqn_14 = sympy.Eq(z, D*sympy.sin(b))
eqn_14
###Output
_____no_output_____
###Markdown
Initial mass function
###Code
Psi_1a = sympy.Symbol(r'\Psi_{1,a}') # Coefficient
Psi_1b = sympy.Symbol(r'\Psi_{1,b}') # Coefficient
M_sol = sympy.Symbol('M_{\odot}')
M_1_bar = sympy.Symbol(r'\overline{M}_1')
m_1_bar = sympy.Symbol(r'\overline{m}_1') # M_1_bar divided by a solar mass
temp = sympy.Piecewise((Psi_1a*m_1_bar**(-1.3), sympy.And(m_1_bar < 0.5, m_1_bar>0.08)),
(Psi_1b*m_1_bar**(-2.3), sympy.And(m_1_bar>0.5, m_1_bar<100)),
(0,True))
eqn_1 = sympy.Eq(Psi, temp)
eqn_1
Psi_2a = sympy.Symbol(r'\Psi_{2,a}') # Coefficient
Psi_2b = sympy.Symbol(r'\Psi_{2,b}') # Coefficient
Psi_2c = sympy.Symbol(r'\Psi_{2,c}') # Coefficient
temp = sympy.Piecewise((Psi_2a*m_1_bar**(-1.3), sympy.And(m_1_bar < 0.5, m_1_bar>0.08)),
(Psi_2b*m_1_bar**(-2.2), sympy.And(m_1_bar>0.5, m_1_bar<1)),
(Psi_2c*m_1_bar**(-2.7), sympy.And(m_1_bar>1.0, m_1_bar<100)),
(0,True))
eqn_2 = sympy.Eq(Psi,temp)
eqn_2
###Output
_____no_output_____
###Markdown
The coefficients can be determined from continuity and the normalisation condition
###Code
cond_1 = sympy.Eq(eqn_1.rhs.subs(m_1_bar,0.5-1e-6),eqn_1.rhs.subs(m_1_bar,0.5+1e-6))
cond_1
Upsilon = sympy.Symbol('Upsilon') # Star formation rate in the entire galaxy, roughly 3.5 M_sol/year
temp = sympy.integrate(eqn_1.rhs*m_1_bar,m_1_bar)
temp = ((temp.subs(m_1_bar, 0.5-1e-6) - temp.subs(m_1_bar, 0.08+1e-6))+
(temp.subs(m_1_bar, 100-1e-6) - temp.subs(m_1_bar, 0.5+1e-6)))
cond_2 = sympy.Eq(temp, Upsilon)
cond_2
eqn_1_coefficients = sympy.solve([cond_1, cond_2], [Psi_1a, Psi_1b])
eqn_1_coefficients
cond_3 = sympy.Eq(eqn_2.rhs.subs(m_1_bar,0.5-1e-6), eqn_2.rhs.subs(m_1_bar, 0.5+1e-6))
cond_3
cond_4 = sympy.Eq(eqn_2.rhs.subs(m_1_bar,1-1e-6), eqn_2.rhs.subs(m_1_bar, 1+1e-6))
cond_4
temp = sympy.integrate(eqn_2.rhs*m_1_bar, m_1_bar)
temp = ((temp.subs(m_1_bar,0.5-1e-6) - temp.subs(m_1_bar,0.08+1e-6))+
(temp.subs(m_1_bar,1.0-1e-6) - temp.subs(m_1_bar,0.5+1e-6))+
(temp.subs(m_1_bar,100-1e-6) - temp.subs(m_1_bar,1+1e-6)))
cond_5 = sympy.Eq(temp, Upsilon)
cond_5
eqn_2_coefficients = sympy.solve([cond_3, cond_4, cond_5], [Psi_2a, Psi_2b, Psi_2c])
eqn_2_coefficients
###Output
_____no_output_____
###Markdown
Distribution of mass ratios
###Code
M_min_bar = sympy.Symbol(r'\overline{M}_{\min}')
eqn_3 = sympy.Eq(Phi, 1/(1-M_min_bar/M_1_bar))
eqn_3
###Output
_____no_output_____
###Markdown
Distribution of semi-major axes

Minimum semi-major axis
###Code
A_bar_min = sympy.Symbol(r'\overline{A}_{\min}')
q = sympy.Symbol('q', positive=True) # Mass ratio
R_1 = sympy.Symbol('R_1') # Radios of primary
eqn_7 = sympy.Eq(A_bar_min,
(0.6*q**sympy.Rational(-2,3)+sympy.log(1+q**sympy.Rational(-1,3)))*R_1/(0.49*q**sympy.Rational(-2,3)))
eqn_7
###Output
_____no_output_____
###Markdown
Mass-radius relation, from [Demircan and Kahraman 1991](http://adsabs.harvard.edu/abs/1991Ap%26SS.181..313D)
###Code
R_sol = sympy.Symbol('R_{\odot}') # Solar radius
mass_radius_relation = 1.61*m_1_bar**0.83*R_sol
###Output
_____no_output_____
###Markdown
Determination of the semi-major axis distribution
###Code
A_bar_max = sympy.Symbol(r'\overline{A}_{\max}')
temp = Gamma_0/A_bar
temp = sympy.integrate(temp, (A_bar, A_bar_min, A_bar_max))
temp = temp.subs(A_bar_max, 1e5*R_sol).subs(eqn_7.lhs, eqn_7.rhs).subs(R_1, mass_radius_relation)
Gamma_0_expr = sympy.solve(temp-1,Gamma_0)[0]
Gamma_0_expr
###Output
_____no_output_____
###Markdown
Lifetimes
###Code
tau_r = sympy.Symbol('tau_r') # Reference lifetime (10 Gy)
lifetime_mass_relation = tau_r*m_1_bar**(-2.5)
lifetime_mass_relation
###Output
_____no_output_____
###Markdown
Putting it all together

Carrying out the integral in equation 15
###Code
solar_radius_in_pc = 2.25e-8
D_max = sympy.Symbol(r'D_{\max}')
temp = eqn_15_integrand_raw.subs(eqn_11.lhs, eqn_11.rhs).subs(eqn_12_var.lhs, eqn_12_var.rhs)
temp = temp.subs(eqn_13.lhs, eqn_13.rhs)
temp = temp.subs(eqn_14.lhs, eqn_14.rhs)
temp = temp.subs(eqn_3.lhs, eqn_3.rhs)
temp = temp.subs(Gamma_0, Gamma_0_expr)
temp = temp.subs(Psi, eqn_1.rhs.args[1][0])
temp = temp.subs(eqn_1_coefficients)
temp = temp.subs(t_L1,lifetime_mass_relation)
temp = temp.subs(t_L2,lifetime_mass_relation.subs(m_1_bar,m_1_bar*q))
temp = temp.subs(M_min_bar, 0.08*M_sol)
temp = temp.subs(M_1_bar, m_1_bar*M_sol)
temp = temp.subs(h_z, 250)
temp = temp.subs(h_r, 3500)
temp = temp.subs(r_0, 8000)
temp = temp.subs(b, 1)
temp = temp.subs(l, sympy.Rational(1,2))
temp = temp.subs(D, 100)
temp = temp.subs(f_bin, 0.5)
temp = temp.subs(q, 0.1)
temp = temp.subs(R_sol, solar_radius_in_pc)
temp = temp.subs(m_1_bar, 30)
temp.n()
###Output
_____no_output_____
###Markdown
Step by Step Integration

In this section we break up the integral in equation 15 into different categories, and consider each category separately to obtain insight into the problem. We begin by eliminating the units. From the star formation rate $3.5 \, M_{\odot} / \rm year$, a typical timescale $\tau_r = 10 \, \rm Gyr$ and a typical mass $M_{\odot}$ we obtain the total number of stars $3.5 \cdot 10^{10}$. From this point, each integral filters out the irrelevant stars and reduces this number.

Primary mass

In this section we filter out all primaries below the mass needed to collapse to a black hole
###Code
def integrand_func_mk_1(m1):
if m1<20:
return 0
return 3.5e10*0.22*m1**(-2.3)
m_range = numpy.linspace(20,100) # Primary mass range
dm = m_range[1] - m_range[0]
numpy.sum([integrand_func_mk_1(m1) for m1 in m_range])*dm
###Output
_____no_output_____
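###Markdown
As a quick cross-check (a sketch, assuming `scipy` is available in this environment), the same quantity can be computed with adaptive quadrature; it should agree with the coarse Riemann sum above:

```python
from scipy.integrate import quad

# integrate the m1 > 20 branch of the integrand over [20, 100] solar masses
total, abserr = quad(lambda m1: 3.5e10 * 0.22 * m1**(-2.3), 20, 100)
print("%.3e" % total)
```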
###Markdown
Mass ratio

We filter out large companion masses because their lifetime is too short, and low masses because they are too dim
###Code
def integrand_func_mk_2(m1, q):
m2min = 0.4
if m2min/m1>q:
return 0
return integrand_func_mk_1(m1)/(1-m2min/m1)*m1**-2.5*(q**-2.5-1)
m_range = numpy.linspace(20,100) # Range of primary masses
q_range = numpy.linspace(0, 1) # Range of mass ratios
dm = m_range[1] - m_range[0] # Mass interval
dq = q_range[1] - q_range[0] # Mass ratio interval
numpy.sum([[integrand_func_mk_2(m1, q) for m1 in m_range] for q in q_range])*dm*dq
###Output
_____no_output_____
###Markdown
Semi-major axis
###Code
def integrand_func_mk_3(m1,q,lnA):
A_max_val = 1e5
D_ref = 4e10 # Reference distance, 1 kpc in solar radii
    uas = 5e-12 # micro arcsecond in radians
gaia_angular_res = 300*uas # Gaia angular sensitivity
A_min_val = 0.66*m1**0.83*(3.0+5*q**0.67*numpy.log(1+q**0.33))
A_period = 2000 # http://www.wolframalpha.com/input/?i=((solar+mass)*(gravitation+constant)*(5+year)%5E2)%5E(1%2F3)%2F(solar+radius)
if lnA>numpy.log10(max(A_period, A_max_val)) or lnA<numpy.log10(A_min_val) or lnA<numpy.log10(gaia_angular_res)+numpy.log10(D_ref):
return 0
G0_val = 1.0/numpy.log10(A_max_val/A_min_val)
return integrand_func_mk_2(m1,q)*G0_val
m_range = numpy.linspace(20,100,10) # Range of primary masses
q_range = numpy.linspace(0, 1, 10) # Range of mass ratios
lnA_range = numpy.linspace(-1, 5, 10) # Range of log10(semi-major axis)
dm = m_range[1] - m_range[0] # Mass interval
dq = q_range[1] - q_range[0] # Mass ratio interval
dlnA = lnA_range[1] - lnA_range[0] # semi major axis interval
numpy.sum([[[integrand_func_mk_3(m1, q, lnA) for m1 in m_range]
for q in q_range]
for lnA in lnA_range])*dm*dq*dlnA
###Output
_____no_output_____ |
doc/app/evo-model-with-tree.ipynb | ###Markdown
Apply a non-stationary nucleotide model to an alignment with a tree

We analyse an alignment with sequences from 6 primates.
###Code
from cogent3.app import io
reader = io.load_aligned(format="fasta", moltype="dna")
aln = reader("../data/primate_brca1.fasta")
aln.names
###Output
_____no_output_____
###Markdown
Specify the tree via a tree instance
###Code
from cogent3 import load_tree
from cogent3.app import evo
tree = load_tree("../data/primate_brca1.tree")
gn = evo.model("GN", tree=tree)
gn
###Output
_____no_output_____
###Markdown
Specify the tree via a path.
###Code
gn = evo.model("GN", tree="../data/primate_brca1.tree")
gn
###Output
_____no_output_____
###Markdown
Apply the model to an alignment
###Code
fitted = gn(aln)
fitted
###Output
_____no_output_____
###Markdown
In the above, no value is shown for `unique_Q`. This can happen because of numerical precision issues.

**NOTE:** in the display of the `lf` below, the "length" parameter is not the ENS. It is, instead, just a scalar.
###Code
fitted.lf
###Output
_____no_output_____ |
bacteria_archaea/marine/cell_num/.ipynb_checkpoints/marine_prokaryote_cell_number-checkpoint.ipynb | ###Markdown
Estimating the total number of marine bacteria and archaea

This notebook details the procedure for estimating the total number of marine bacteria and archaea. The estimate is based on three data sources: [Aristegui et al.](http://dx.doi.org/10.4319/lo.2009.54.5.1501), [Buitenhuis et al.](http://dx.doi.org/10.5194/essd-4-101-2012), and [Lloyd et al.](http://dx.doi.org/10.1128/AEM.02090-13)
###Code
import pandas as pd
import numpy as np
from scipy.stats import gmean
pd.options.display.float_format = '{:,.1e}'.format
import sys
sys.path.insert(0, '../../../statistics_helper')
from CI_helper import *
# Genaral parameters used in the estimate
ocean_area = 3.6e14
liters_in_m3 = 1e3
ml_in_m3 = 1e6
# Load the datasets
buitenhuis = pd.read_excel('marine_prok_cell_num_data.xlsx','Buitenhuis')
aristegui = pd.read_excel('marine_prok_cell_num_data.xlsx','Aristegui')
aristegui[['Cell abundance (cells m-2)','SE']] = aristegui[['Cell abundance (cells m-2)','SE']].astype(float)
lloyd = pd.read_excel('marine_prok_cell_num_data.xlsx','Lloyd')
###Output
_____no_output_____
###Markdown
Here are samples from the data in Aristegui et al.:
###Code
aristegui.head()
###Output
_____no_output_____
###Markdown
From the data in Buitenhuis et al.:
###Code
buitenhuis.head()
###Output
_____no_output_____
###Markdown
And from Llyod et al.:
###Code
lloyd.head()
###Output
_____no_output_____
###Markdown
For Aristegui et al. we estimate the total number of cells by multiplying each layer by the surface area of the ocean
###Code
aristegui_total = (aristegui['Cell abundance (cells m-2)']*ocean_area).sum()
print('Total number of cells based on Aristegui et al.: %.1e' % aristegui_total)
###Output
Total number of cells based on Aristegui et al.: 1.7e+29
###Markdown
For Buitenhuis et al. we bin the data along 100 meter depth bins, and estimate the average cell abundance in each bin. We then multiply the total number of cells per liter by the volume at each depth and sum across layers.
###Code
# Define depth range every 100 m from 0 to 4000 meters
depth_range = np.linspace(0,4000,41)
#Bin data along depth bins
buitenhuis['Depth_bin'] = pd.cut(buitenhuis['Depth'], depth_range)
#For each bin, calculate the average number of cells per liter
buitenhuis_bins = buitenhuis.groupby(['Depth_bin']).mean()['Bact/L']
#Multiply each average concentration by the total volume at each bin: 100 meters depth times the surface area of the ocean
buitenhuis_bins *= 100*ocean_area*liters_in_m3
#Sum across all bins to get the total estimate for the number of cells of marine prokaryotes
buitenhuis_total = buitenhuis_bins.sum()
print('Total number of cells based on Buitenhuis et al.: %.1e' % buitenhuis_total)
###Output
Total number of cells based on Buitenhuis et al.: 1.3e+29
###Markdown
For Lloyd et al., we rely on the sum of the total number of bacteria and archaea. The estimate for the number of bacteria and archaea is based on the regression of the concentration of bacteria and archaea with depth. We use the equations reported in Lloyd et al. to extrapolate the number of cells of bacteria and archaea across the average ocean depth of 4,000 m.
###Code
# Define the regression equation for the number of bacteria in the top 64 m:
def bac_surf(depth):
result = np.zeros_like(depth)
for i,x in enumerate(depth):
if x==0 :
result[i] = 5.54
else:
result[i] = np.log10(x)*0.08+5.54
return 10**result
# Define the regression equation for the number of bacteria in water deeper than 64 m:
bac_deep = lambda x: 10**(np.log10(x)*-1.09+7.66)
# Define the regression equation for the number of archaea in the top 389 m:
def arch_surf(depth):
result = np.zeros_like(depth)
for i,x in enumerate(depth):
if x==0 :
result[i] = 4.1
else:
result[i] = np.log10(x)*0.1+4.1
return 10**result
# Define the regression equation for the number of archaea in water below 389 m:
arch_deep = lambda x: 10**(np.log10(x)*-0.8+6.43)
# Estimate the total number of bacteria in the top 64 m by first estimating the concentration using the
# regression equation, multiplying by the volume at each depth, which is 1 m^3 times the surface
# Area of the ocean, and finally summing across different depths
total_bac_surf = (bac_surf(np.linspace(0,64,65))*ml_in_m3*ocean_area).sum()
# We repeat the same procedure for the total number of bacteria in waters deeper than 64 m, and for the total
# Number of archaea
total_bac_deep = (bac_deep(np.linspace(65,4000,4000-65+1))*ml_in_m3*ocean_area).sum()
total_arch_surf = (arch_surf(np.linspace(0,389,390))*ml_in_m3*ocean_area).sum()
total_arch_deep = (arch_deep(np.linspace(390,4000,4000-390+1))*ml_in_m3*ocean_area).sum()
# Sum across bacteria and archaea to get the estimate for the total number of bacteria and archaea in the ocean
lloyd_total = total_bac_surf+total_bac_deep+total_arch_surf+total_arch_deep
print('Total number of cells based on Lloyd et al.: %.1e' % lloyd_total)
###Output
Total number of cells based on Lloyd et al.: 6.2e+28
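###Markdown
The explicit loop in `bac_surf` (and `arch_surf`) can also be written as a vectorized expression; a minimal sketch (the `np.where` guard reproduces the special case at zero depth):

```python
def bac_surf_vectorized(depth):
    depth = np.asarray(depth, dtype=float)
    # avoid log10(0): the piecewise definition pins depth == 0 to 10**5.54
    safe = np.where(depth == 0, 1.0, depth)
    return 10**np.where(depth == 0, 5.54, np.log10(safe)*0.08 + 5.54)

np.allclose(bac_surf_vectorized(np.linspace(0, 64, 65)),
            bac_surf(np.linspace(0, 64, 65)))
```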
###Markdown
The estimate of the total number of cells in Lloyd et al. is based on FISH measurements, but in general not all cells which are DAPI positive are also stained with FISH. To correct for this effect, we estimate the average FISH yield across samples, and divide our estimate from the FISH measurements by the average FISH yield.
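Concretely, the cell below averages the yield in log-odds space and transforms back:

$$\bar{y} = \frac{1}{1 + 10^{-\overline{\log_{10}\left(y_i/(1-y_i)\right)}}},$$

i.e. the mean yield is the back-transformed geometric mean of the odds, rather than the arithmetic mean of the yields.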
###Code
fish_yield = lloyd['FISH yield'].dropna()
# Values which are not feasible are turned to the maximal value. We do not use 1 because of numerical reasons
fish_yield[fish_yield >=1] = 0.999
# calculate the statistics on the fish_visible/fish_invisible value and not the
# fish_visible/(fish_visible+fish_invisible) value because the first is not bound by 0 and 1
# We transform the values to log space to calculate the geometric mean
alpha_fish_yield = np.log10(1./(1./fish_yield[fish_yield<1]-1.))
mean_alpha_yield = np.average(-alpha_fish_yield.dropna())
mean_yield = 1./(1.+10**mean_alpha_yield)
print('The mean yield of FISH is %.1f' % mean_yield)
lloyd_total /= mean_yield
print('After correcting for FISH yield, the estimate for the total number of bacteria and archaea based on Lloyd et al is %.1e' % lloyd_total)
###Output
The mean yield of FISH is 0.8
After correcting for FISH yield, the estimate for the total number of bacteria and archaea based on Lloyd et al is 8.1e+28
###Markdown
Our best estimate for the total number of marine bacteria and archaea is the geometric mean of the estimates from Aristegui et al., Buitenhuis et al. and Lloyd et al.
###Code
estimates = [aristegui_total,buitenhuis_total,lloyd_total]
best_estimate = 10**(np.log10(estimates).mean())
print('Our best estimate for the total number of marine bacteria and archaea is %.1e' %best_estimate)
###Output
Our best estimate for the total number of marine bacteria and archaea is 1.2e+29
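###Markdown
Equivalently, the `gmean` helper already imported from `scipy.stats` at the top of the notebook computes the same quantity, since $\sqrt[3]{abc} = 10^{(\log_{10}a + \log_{10}b + \log_{10}c)/3}$:

```python
best_estimate = gmean(estimates)  # same value as 10**(np.log10(estimates).mean())
```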
###Markdown
Uncertainty analysis

To calculate the uncertainty associated with the estimate for the total number of bacteria and archaea, we first collect all available uncertainties and then take the largest value as our best projection for the uncertainty.

Intra-study uncertainties

We first survey the uncertainties reported in each of the studies. Aristegui et al. report a standard error of ≈10% for the average cell concentration per unit area. Buitenhuis et al. and Lloyd et al. do not report uncertainties.

Interstudy uncertainties

We estimate the 95% multiplicative error of the geometric mean of the values from the three studies.
###Code
mul_CI = geo_CI_calc(estimates)
print('The interstudy uncertainty is about %.1f' % mul_CI)
###Output
The interstudy uncertainty is about 1.5
###Markdown
We thus take the highest uncertainty from our collection, which is the ≈1.5-fold interstudy uncertainty.

Our final parameters are:
###Code
print('Total number of marine bacteria and archaea: %.1e' % best_estimate)
print('Uncertainty associated with the total number of marine bacteria and archaea: %.1f-fold' % mul_CI)
old_results = pd.read_excel('../marine_prok_biomass_estimate.xlsx')
result = old_results.copy()
result.loc[0] = pd.Series({
'Parameter': 'Total number of marine bacteria and archaea',
'Value': int(best_estimate),
'Units': 'Cells',
'Uncertainty': "{0:.1f}".format(mul_CI)
})
result.to_excel('../marine_prok_biomass_estimate.xlsx',index=False)
###Output
Total number of marine bacteria and archaea: 1.2e+29
Uncertainty associated with the total number of marine bacteria and archaea: 1.5-fold
|
Introduction to Tensorflow for AI ML and DL/Week 2/Exercise2_Question.ipynb | ###Markdown
Exercise 2

In the course you learned how to do classification using Fashion MNIST, a data set containing items of clothing. There's another, similar dataset called MNIST which has items of handwriting -- the digits 0 through 9.

Write an MNIST classifier that trains to 99% accuracy or above, and does it without a fixed number of epochs -- i.e. you should stop training once you reach that level of accuracy.

Some notes:

1. It should succeed in less than 10 epochs, so it is okay to change epochs to 10, but nothing larger
2. When it reaches 99% or greater it should print out the string "Reached 99% accuracy so cancelling training!"
3. If you add any additional variables, make sure you use the same names as the ones used in the class

I've started the code for you below -- how would you finish it?
###Code
# YOUR CODE SHOULD START HERE
# YOUR CODE SHOULD END HERE
import tensorflow as tf
mnist = tf.keras.datasets.mnist
(x_train, y_train),(x_test, y_test) = mnist.load_data()
# YOUR CODE SHOULD START HERE
class myCallback(tf.keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs={}):
if(logs.get('accuracy') >= 0.99):
print("\nReached 99% accuracy so cancelling training!")
self.model.stop_training = True
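# NB: the metric key in `logs` is "accuracy" in recent TF/Keras releases but
# "acc" in older ones; logs.get() returns None for a missing key, which makes
# the >= comparison raise, so adjust the key to your TF version if needed.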
x_train = x_train / 255.0
x_test = x_test / 255.0
callbacks = myCallback()
# YOUR CODE SHOULD END HERE
model = tf.keras.models.Sequential([
# YOUR CODE SHOULD START HERE
tf.keras.layers.Flatten(input_shape=(28, 28)),
tf.keras.layers.Dense(512, activation=tf.nn.relu),
tf.keras.layers.Dense(10, activation=tf.nn.softmax)
# YOUR CODE SHOULD END HERE
])
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
# YOUR CODE SHOULD START HERE
model.fit(x_train, y_train, epochs=10, callbacks=[callbacks])
# YOUR CODE SHOULD END HERE
###Output
_____no_output_____ |
2_Curso/Laboratorio/SAGE-noteb/IPYNB/CRIPT/82-CRIPT-cesar.ipynb | ###Markdown
Caesar cipher
###Code
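# Shift every character code by k modulo 256 (a 256-symbol alphabet);
# shifting by 256-k undoes the encryption, as the cells below show with k=5, 251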
def encriptar_cesar(C,k):
L = list(C)
L1 = map(ord,L)
L2 = [(item+k)%256 for item in L1]
C1 = join(map(chr,L2),sep = "")
return C1
encriptar_cesar(texto,5)
encriptar_cesar(encriptar_cesar(texto,5),251)
encriptar_cesar(limpiar(texto,alfb),5)
encriptar_cesar(encriptar_cesar(limpiar(texto,alfb),5),251)
###Output
_____no_output_____
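###Markdown
Note that applying the shift twice with $k$ and $256-k$ recovers the original text, since $(c + k) + (256 - k) \equiv c \pmod{256}$; this is what the cells above demonstrate with $k=5$ and $251 = 256 - 5$. A minimal plain-Python sketch of the same cipher (the Sage-specific `join` replaced by `str.join`; the message is a made-up example):

```python
def caesar(text, k):
    # shift each character code by k, wrapping around a 256-symbol alphabet
    return "".join(chr((ord(ch) + k) % 256) for ch in text)

msg = "attack at dawn"
assert caesar(caesar(msg, 5), 251) == msg
```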
###Markdown
Frequency analysis
###Code
def analisis_frec(T):
frecuencias = {}
N = len(T)
for letra in T:
if letra in frecuencias:
frecuencias[letra] += (1/N).n()
else:
frecuencias[letra]=(1/N).n()
return frecuencias
dicc = analisis_frec(encriptar_cesar(limpiar(texto,alfb),5));dicc
def invertir(dicc):
dict_inv = {}
for key in dicc:
dict_inv[dicc[key]] = ord(key)
return dict_inv
dicc2 = invertir(dicc);dicc2
L = list(dicc2.items()); L
L.sort(reverse = True); L
ord('E')
###Output
_____no_output_____ |
notebook/ontune-neural.ipynb | ###Markdown
One day
###Code
df = pd.read_csv(
"../input/10min/ontune2016.csv", usecols=["date", "value"], parse_dates=["date"]
)
print(df.shape)
df.head()
df.rename(columns={"date": "ds", "value": "y"}, inplace=True)
df.tail()
train = df.iloc[:int(df.shape[0] * 0.8)]
valid = df.iloc[int(df.shape[0] * 0.8):]  # hold out the last 20% of the rows for validation
valid["days"] = valid["ds"].dt.date
valid = valid.groupby("days")["y"].agg("mean")
def objective(trial: Trial) -> float:
params = {
"epochs": trial.suggest_categorical("epochs", [50, 100, 300, 500]),
"batch_size": trial.suggest_categorical("batch_size", [32, 64, 128, 256]),
"num_hidden_layers": trial.suggest_int("num_hidden_layers", 0, 5),
"learning_rate": trial.suggest_float("learning_rate", 1e-3, 0.1),
"changepoints_range": trial.suggest_discrete_uniform(
"changepoints_range", 0.8, 0.95, 0.001
),
"n_changepoints": trial.suggest_int("n_changepoints", 20, 35),
"seasonality_mode": "additive",
"yearly_seasonality": False,
"weekly_seasonality": True,
"daily_seasonality": True,
"loss_func": "MSE",
}
# fit_model
m = NeuralProphet(**params)
m.fit(train, freq="1D")
future = m.make_future_dataframe(train, periods=len(valid), n_historic_predictions=True)
forecast = m.predict(future)
valid_forecast = forecast[forecast.y.isna()]
val_rmse = mean_squared_error(valid_forecast.yhat1, valid, squared=False)
return val_rmse
study = optuna.create_study(direction="minimize", sampler=optuna.samplers.TPESampler(seed=42))
study.optimize(objective, n_trials=20)
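# study.best_params only holds the sampled hyperparameters; the fixed
# settings used during the search are re-attached below before refitting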
prophet_params = study.best_params
prophet_params["batch_size"] = 64
prophet_params["seasonality_mode"] = "additive"
prophet_params["loss_func"] = "MSE"
prophet_params["weekly_seasonality"] = True
prophet_params["daily_seasonality"] = True
prophet_params["yearly_seasonality"] = False
# model = NeuralProphet() if you're using default variables below.
model = NeuralProphet(**prophet_params)
metrics = model.fit(train, freq="1D")
future = model.make_future_dataframe(train, periods=len(valid), n_historic_predictions=True)
forecast = model.predict(future)
fig, ax = plt.subplots(figsize=(14, 10))
model.plot(forecast, ax=ax)
plt.show()
# model = NeuralProphet() if you're using default variables below.
model = NeuralProphet(**prophet_params)
metrics = model.fit(df, freq="1D")
future = model.make_future_dataframe(df, periods=144, n_historic_predictions=True)
forecast = model.predict(future)
fig, ax = plt.subplots(figsize=(14, 10))
model.plot(forecast, ax=ax)
plt.show()
###Output
_____no_output_____ |
study_roadmaps/4_image_classification_zoo/Classifier - Monkey Species.ipynb | ###Markdown
Table of contents

- Install Monk
- Using pretrained model for classifying Monkey species types
- Training a classifier from scratch

Install Monk

Using pip (Recommended)

- colab (gpu)
    - All backends: `pip install -U monk-colab`
- kaggle (gpu)
    - All backends: `pip install -U monk-kaggle`
- cuda 10.2
    - All backends: `pip install -U monk-cuda102`
    - Gluon backend: `pip install -U monk-gluon-cuda102`
    - Pytorch backend: `pip install -U monk-pytorch-cuda102`
    - Keras backend: `pip install -U monk-keras-cuda102`
- cuda 10.1
    - All backends: `pip install -U monk-cuda101`
    - Gluon backend: `pip install -U monk-gluon-cuda101`
    - Pytorch backend: `pip install -U monk-pytorch-cuda101`
    - Keras backend: `pip install -U monk-keras-cuda101`
- cuda 10.0
    - All backends: `pip install -U monk-cuda100`
    - Gluon backend: `pip install -U monk-gluon-cuda100`
    - Pytorch backend: `pip install -U monk-pytorch-cuda100`
    - Keras backend: `pip install -U monk-keras-cuda100`
- cuda 9.2
    - All backends: `pip install -U monk-cuda92`
    - Gluon backend: `pip install -U monk-gluon-cuda92`
    - Pytorch backend: `pip install -U monk-pytorch-cuda92`
    - Keras backend: `pip install -U monk-keras-cuda92`
- cuda 9.0
    - All backends: `pip install -U monk-cuda90`
    - Gluon backend: `pip install -U monk-gluon-cuda90`
    - Pytorch backend: `pip install -U monk-pytorch-cuda90`
    - Keras backend: `pip install -U monk-keras-cuda90`
- cpu
    - All backends: `pip install -U monk-cpu`
    - Gluon backend: `pip install -U monk-gluon-cpu`
    - Pytorch backend: `pip install -U monk-pytorch-cpu`
    - Keras backend: `pip install -U monk-keras-cpu`

Install Monk Manually (Not recommended)

Step 1: Clone the library

- git clone https://github.com/Tessellate-Imaging/monk_v1.git

Step 2: Install requirements

- Linux
    - Cuda 9.0 - `cd monk_v1/installation/Linux && pip install -r requirements_cu90.txt`
    - Cuda 9.2 - `cd monk_v1/installation/Linux && pip install -r requirements_cu92.txt`
    - Cuda 10.0 - `cd monk_v1/installation/Linux && pip install -r requirements_cu100.txt`
    - Cuda 10.1 - `cd monk_v1/installation/Linux && pip install -r requirements_cu101.txt`
    - Cuda 10.2 - `cd monk_v1/installation/Linux && pip install -r requirements_cu102.txt`
    - CPU (Non gpu system) - `cd monk_v1/installation/Linux && pip install -r requirements_cpu.txt`
- Windows
    - Cuda 9.0 (Experimental support) - `cd monk_v1/installation/Windows && pip install -r requirements_cu90.txt`
    - Cuda 9.2 (Experimental support) - `cd monk_v1/installation/Windows && pip install -r requirements_cu92.txt`
    - Cuda 10.0 (Experimental support) - `cd monk_v1/installation/Windows && pip install -r requirements_cu100.txt`
    - Cuda 10.1 (Experimental support) - `cd monk_v1/installation/Windows && pip install -r requirements_cu101.txt`
    - Cuda 10.2 (Experimental support) - `cd monk_v1/installation/Windows && pip install -r requirements_cu102.txt`
    - CPU (Non gpu system) - `cd monk_v1/installation/Windows && pip install -r requirements_cpu.txt`
- Mac
    - CPU (Non gpu system) - `cd monk_v1/installation/Mac && pip install -r requirements_cpu.txt`
- Misc
    - Colab (GPU) - `cd monk_v1/installation/Misc && pip install -r requirements_colab.txt`
    - Kaggle (GPU) - `cd monk_v1/installation/Misc && pip install -r requirements_kaggle.txt`

Step 3: Add to system path (Required for every terminal or kernel run)

- `import sys`
- `sys.path.append("monk_v1/");`

Use trained classifier for demo
###Code
#Using mxnet-gluon backend
# When installed using pip
from monk.gluon_prototype import prototype
# When installed manually (Uncomment the following)
#import os
#import sys
#sys.path.append("monk_v1/");
#sys.path.append("monk_v1/monk/");
#from monk.gluon_prototype import prototype
# Download trained weights
! wget --load-cookies /tmp/cookies.txt "https://docs.google.com/uc?export=download&confirm=$(wget --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1E6WQFx9OVgbIQl2AkrjrLVZ--J7KYMRa' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')&id=1E6WQFx9OVgbIQl2AkrjrLVZ--J7KYMRa" -O cls_monkey_trained.zip && rm -rf /tmp/cookies.txt
! unzip -qq cls_monkey_trained.zip
ls workspace/Project-Monkey-Species/
# Load project in inference mode
gtf = prototype(verbose=1);
gtf.Prototype("Project-Monkey-Species", "Gluon-densenet161", eval_infer=True);
#Other trained models - uncomment
#gtf.Prototype("Project-Monkey-Species", "Gluon-densenet169", eval_infer=True);
#gtf.Prototype("Project-Monkey-Species", "Gluon-densenet201", eval_infer=True);
# Infer
img_name = "workspace/test/1.jpg"
predictions = gtf.Infer(img_name=img_name);
from IPython.display import Image
Image(filename=img_name)
img_name = "workspace/test/2.jpg"
predictions = gtf.Infer(img_name=img_name);
from IPython.display import Image
Image(filename=img_name)
###Output
Prediction
Image name: workspace/test/2.jpg
Predicted class: n1
Predicted score: 0.9997491836547852
###Markdown
Training custom classifier from scratch

Dataset

- Credits: https://www.kaggle.com/slothkong/10-monkey-species

Download
###Code
! wget --load-cookies /tmp/cookies.txt "https://docs.google.com/uc?export=download&confirm=$(wget --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1AgjZP8UGCabVgyw5GcFUF8ZhR9yADYjd' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')&id=1AgjZP8UGCabVgyw5GcFUF8ZhR9yADYjd" -O 10-monkey-species.zip && rm -rf /tmp/cookies.txt
! mkdir dataset
! mv 10-monkey-species.zip dataset
! cd dataset && unzip -qq 10-monkey-species.zip
###Output
_____no_output_____
###Markdown
Training
###Code
# Using mxnet-gluon backend
from monk.gluon_prototype import prototype
# For pytorch backend
#from monk.pytorch_prototype import prototype
# For Keras backend
#from monk.keras_prototype import prototype
# Create Project and Experiment
gtf = prototype(verbose=1);
gtf.Prototype("Project-Monkey-Species", "Gluon-densenet169");
gtf.Default(dataset_path="dataset/training/training",
model_name="densenet169",
freeze_base_network=False,
num_epochs=2);
###Output
_____no_output_____
###Markdown
How to change hyper parameters and models

- Docs - https://github.com/Tessellate-Imaging/monk_v1
- Examples - https://github.com/Tessellate-Imaging/monk_v1/tree/master/study_roadmaps/1_getting_started_roadmap
###Code
#Start Training
gtf.Train();
#Read the training summary generated once you run the cell and training is completed
###Output
_____no_output_____
###Markdown
Validating on the same dataset
###Code
# Using mxnet-gluon backend
from monk.gluon_prototype import prototype
# For pytorch backend
#from monk.pytorch_prototype import prototype
# For Keras backend
#from monk.keras_prototype import prototype
# Create Project and Experiment
gtf = prototype(verbose=1);
gtf.Prototype("Project-Monkey-Species", "Gluon-densenet169", eval_infer=True);
# Load dataset for validaion
gtf.Dataset_Params(dataset_path="dataset/validation/validation");
gtf.Dataset();
# Run validation
accuracy, class_based_accuracy = gtf.Evaluate();
###Output
_____no_output_____
###Markdown
Table of contents

- Install Monk
- Using pretrained model for classifying Monkey species types
- Training a classifier from scratch

Install Monk

- git clone https://github.com/Tessellate-Imaging/monk_v1.git
- cd monk_v1/installation/Linux && pip install -r requirements_cu9.txt (Select the requirements file as per OS and CUDA version)
###Code
! git clone https://github.com/Tessellate-Imaging/monk_v1.git
# If using Colab install using the commands below
! cd monk_v1/installation/Misc && pip install -r requirements_colab.txt
# If using Kaggle uncomment the following command
#! cd monk_v1/installation/Misc && pip install -r requirements_kaggle.txt
# Select the requirements file as per OS and CUDA version when using a local system or cloud
#! cd monk_v1/installation/Linux && pip install -r requirements_cu9.txt
###Output
_____no_output_____
###Markdown
Use trained classifier for demo
###Code
# Monk
import os
import sys
sys.path.append("monk_v1/monk/");
# Download trained weights
! wget --load-cookies /tmp/cookies.txt "https://docs.google.com/uc?export=download&confirm=$(wget --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1E6WQFx9OVgbIQl2AkrjrLVZ--J7KYMRa' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')&id=1E6WQFx9OVgbIQl2AkrjrLVZ--J7KYMRa" -O cls_monkey_trained.zip && rm -rf /tmp/cookies.txt
! unzip -qq cls_monkey_trained.zip
ls workspace/Project-Monkey-Species/
# Gluon project
from gluon_prototype import prototype
# Load project in inference mode
gtf = prototype(verbose=1);
gtf.Prototype("Project-Monkey-Species", "Gluon-densenet161", eval_infer=True);
#Other trained models - uncomment
#gtf.Prototype("Project-Monkey-Species", "Gluon-densenet169", eval_infer=True);
#gtf.Prototype("Project-Monkey-Species", "Gluon-densenet201", eval_infer=True);
# Infer
img_name = "workspace/test/1.jpg"
predictions = gtf.Infer(img_name=img_name);
from IPython.display import Image
Image(filename=img_name)
img_name = "workspace/test/2.jpg"
predictions = gtf.Infer(img_name=img_name);
from IPython.display import Image
Image(filename=img_name)
###Output
_____no_output_____
###Markdown
Training custom classifier from scratch

Dataset

- Credits: https://www.kaggle.com/slothkong/10-monkey-species

Download
###Code
! wget --load-cookies /tmp/cookies.txt "https://docs.google.com/uc?export=download&confirm=$(wget --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1AgjZP8UGCabVgyw5GcFUF8ZhR9yADYjd' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')&id=1AgjZP8UGCabVgyw5GcFUF8ZhR9yADYjd" -O 10-monkey-species.zip && rm -rf /tmp/cookies.txt
! mkdir dataset
! mv 10-monkey-species.zip dataset
! cd dataset && unzip -qq 10-monkey-species.zip
###Output
_____no_output_____
###Markdown
Training
###Code
# Monk
import os
import sys
sys.path.append("monk_v1/monk/");
# Using mxnet-gluon backend
from gluon_prototype import prototype
# For pytorch backend
#from pytorch_prototype import prototype
# For Keras backend
#from keras_prototype import prototype
# Create Project and Experiment
gtf = prototype(verbose=1);
gtf.Prototype("Project-Monkey-Species", "Gluon-densenet169");
gtf.Default(dataset_path="dataset/training/training",
model_name="densenet169",
freeze_base_network=False,
num_epochs=2);
###Output
_____no_output_____
###Markdown
How to change hyper parameters and models

- Docs - https://github.com/Tessellate-Imaging/monk_v1
- Examples - https://github.com/Tessellate-Imaging/monk_v1/tree/master/study_roadmaps/1_getting_started_roadmap
###Code
#Start Training
gtf.Train();
#Read the training summary generated once you run the cell and training is completed
###Output
_____no_output_____
###Markdown
Validating on the same dataset
###Code
# Import monk
import os
import sys
sys.path.append("monk_v1/monk/");
# Using mxnet-gluon backend
from gluon_prototype import prototype
# For pytorch backend
#from pytorch_prototype import prototype
# For Keras backend
#from keras_prototype import prototype
# Create Project and Experiment
gtf = prototype(verbose=1);
gtf.Prototype("Project-Monkey-Species", "Gluon-densenet169", eval_infer=True);
# Load dataset for validaion
gtf.Dataset_Params(dataset_path="dataset/validation/validation");
gtf.Dataset();
# Run validation
accuracy, class_based_accuracy = gtf.Evaluate();
###Output
_____no_output_____ |
notebooks/plot_OT_2D_samples.ipynb | ###Markdown
2D Optimal transport between empirical distributions

Illustration of 2D optimal transport between distributions that are weighted sums of Diracs. The OT matrix is plotted with the samples.
###Code
# Author: Remi Flamary <[email protected]>
# Kilian Fatras <[email protected]>
#
# License: MIT License
import numpy as np
import matplotlib.pylab as pl
import ot
import ot.plot
###Output
_____no_output_____
###Markdown
Generate data-------------
###Code
#%% parameters and data generation
n = 50 # nb samples
mu_s = np.array([0, 0])
cov_s = np.array([[1, 0], [0, 1]])
mu_t = np.array([4, 4])
cov_t = np.array([[1, -.8], [-.8, 1]])
xs = ot.datasets.make_2D_samples_gauss(n, mu_s, cov_s)
xt = ot.datasets.make_2D_samples_gauss(n, mu_t, cov_t)
a, b = np.ones((n,)) / n, np.ones((n,)) / n # uniform distribution on samples
# loss matrix
M = ot.dist(xs, xt)
M /= M.max()
###Output
_____no_output_____
###Markdown
Plot data---------
###Code
#%% plot samples
pl.figure(1)
pl.plot(xs[:, 0], xs[:, 1], '+b', label='Source samples')
pl.plot(xt[:, 0], xt[:, 1], 'xr', label='Target samples')
pl.legend(loc=0)
pl.title('Source and target distributions')
pl.figure(2)
pl.imshow(M, interpolation='nearest')
pl.title('Cost matrix M')
###Output
_____no_output_____
###Markdown
Compute EMD-----------
###Code
#%% EMD
G0 = ot.emd(a, b, M)
pl.figure(3)
pl.imshow(G0, interpolation='nearest')
pl.title('OT matrix G0')
pl.figure(4)
ot.plot.plot2D_samples_mat(xs, xt, G0, c=[.5, .5, 1])
pl.plot(xs[:, 0], xs[:, 1], '+b', label='Source samples')
pl.plot(xt[:, 0], xt[:, 1], 'xr', label='Target samples')
pl.legend(loc=0)
pl.title('OT matrix with samples')
###Output
_____no_output_____
###Markdown
Compute Sinkhorn----------------
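``ot.sinkhorn`` solves the entropy-regularised transport problem (standard formulation; the ``lambd`` value below plays the role of $\lambda$):

$$\gamma^* = \arg\min_{\gamma \geq 0} \; \langle \gamma, M \rangle_F + \lambda \sum_{i,j} \gamma_{i,j} \log \gamma_{i,j} \quad \text{s.t.}\; \gamma \mathbf{1} = a, \; \gamma^T \mathbf{1} = b$$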
###Code
#%% sinkhorn
# reg term
lambd = 1e-3
Gs = ot.sinkhorn(a, b, M, lambd)
pl.figure(5)
pl.imshow(Gs, interpolation='nearest')
pl.title('OT matrix sinkhorn')
pl.figure(6)
ot.plot.plot2D_samples_mat(xs, xt, Gs, color=[.5, .5, 1])
pl.plot(xs[:, 0], xs[:, 1], '+b', label='Source samples')
pl.plot(xt[:, 0], xt[:, 1], 'xr', label='Target samples')
pl.legend(loc=0)
pl.title('OT matrix Sinkhorn with samples')
pl.show()
###Output
_____no_output_____
###Markdown
Empirical Sinkhorn----------------
###Code
#%% sinkhorn
# reg term
lambd = 1e-3
Ges = ot.bregman.empirical_sinkhorn(xs, xt, lambd)
pl.figure(7)
pl.imshow(Ges, interpolation='nearest')
pl.title('OT matrix empirical sinkhorn')
pl.figure(8)
ot.plot.plot2D_samples_mat(xs, xt, Ges, color=[.5, .5, 1])
pl.plot(xs[:, 0], xs[:, 1], '+b', label='Source samples')
pl.plot(xt[:, 0], xt[:, 1], 'xr', label='Target samples')
pl.legend(loc=0)
pl.title('OT matrix Sinkhorn from samples')
pl.show()
###Output
Warning: numerical errors at iteration 0
###Markdown
2D Optimal transport between empirical distributions

Illustration of 2D optimal transport between distributions that are weighted sums of Diracs. The OT matrix is plotted with the samples.
###Code
# Author: Remi Flamary <[email protected]>
#
# License: MIT License
import numpy as np
import matplotlib.pylab as pl
import ot
import ot.plot
###Output
_____no_output_____
###Markdown
Generate data-------------
###Code
#%% parameters and data generation
n = 50 # nb samples
mu_s = np.array([0, 0])
cov_s = np.array([[1, 0], [0, 1]])
mu_t = np.array([4, 4])
cov_t = np.array([[1, -.8], [-.8, 1]])
xs = ot.datasets.get_2D_samples_gauss(n, mu_s, cov_s)
xt = ot.datasets.get_2D_samples_gauss(n, mu_t, cov_t)
a, b = np.ones((n,)) / n, np.ones((n,)) / n # uniform distribution on samples
# loss matrix
M = ot.dist(xs, xt)
M /= M.max()
###Output
_____no_output_____
###Markdown
Plot data---------
###Code
#%% plot samples
pl.figure(1)
pl.plot(xs[:, 0], xs[:, 1], '+b', label='Source samples')
pl.plot(xt[:, 0], xt[:, 1], 'xr', label='Target samples')
pl.legend(loc=0)
pl.title('Source and target distributions')
pl.figure(2)
pl.imshow(M, interpolation='nearest')
pl.title('Cost matrix M')
###Output
_____no_output_____
###Markdown
Compute EMD-----------
###Code
#%% EMD
G0 = ot.emd(a, b, M)
pl.figure(3)
pl.imshow(G0, interpolation='nearest')
pl.title('OT matrix G0')
pl.figure(4)
ot.plot.plot2D_samples_mat(xs, xt, G0, c=[.5, .5, 1])
pl.plot(xs[:, 0], xs[:, 1], '+b', label='Source samples')
pl.plot(xt[:, 0], xt[:, 1], 'xr', label='Target samples')
pl.legend(loc=0)
pl.title('OT matrix with samples')
###Output
_____no_output_____
###Markdown
Compute Sinkhorn----------------
###Code
#%% sinkhorn
# reg term
lambd = 1e-3
Gs = ot.sinkhorn(a, b, M, lambd)
pl.figure(5)
pl.imshow(Gs, interpolation='nearest')
pl.title('OT matrix sinkhorn')
pl.figure(6)
ot.plot.plot2D_samples_mat(xs, xt, Gs, color=[.5, .5, 1])
pl.plot(xs[:, 0], xs[:, 1], '+b', label='Source samples')
pl.plot(xt[:, 0], xt[:, 1], 'xr', label='Target samples')
pl.legend(loc=0)
pl.title('OT matrix Sinkhorn with samples')
pl.show()
###Output
_____no_output_____
###Markdown
2D Optimal transport between empirical distributions

Illustration of 2D optimal transport between distributions that are weighted sums of Diracs. The OT matrix is plotted with the samples.
###Code
# Author: Remi Flamary <[email protected]>
#
# License: MIT License
import numpy as np
import matplotlib.pylab as pl
import ot
import ot.plot
###Output
_____no_output_____
###Markdown
Generate data-------------
###Code
#%% parameters and data generation
n = 50 # nb samples
mu_s = np.array([0, 0])
cov_s = np.array([[1, 0], [0, 1]])
mu_t = np.array([4, 4])
cov_t = np.array([[1, -.8], [-.8, 1]])
xs = ot.datasets.make_2D_samples_gauss(n, mu_s, cov_s)
xt = ot.datasets.make_2D_samples_gauss(n, mu_t, cov_t)
a, b = np.ones((n,)) / n, np.ones((n,)) / n # uniform distribution on samples
# loss matrix
M = ot.dist(xs, xt)
M /= M.max()
###Output
_____no_output_____
###Markdown
Plot data---------
###Code
#%% plot samples
pl.figure(1)
pl.plot(xs[:, 0], xs[:, 1], '+b', label='Source samples')
pl.plot(xt[:, 0], xt[:, 1], 'xr', label='Target samples')
pl.legend(loc=0)
pl.title('Source and target distributions')
pl.figure(2)
pl.imshow(M, interpolation='nearest')
pl.title('Cost matrix M')
###Output
_____no_output_____
###Markdown
Compute EMD-----------
###Code
#%% EMD
G0 = ot.emd(a, b, M)
pl.figure(3)
pl.imshow(G0, interpolation='nearest')
pl.title('OT matrix G0')
pl.figure(4)
ot.plot.plot2D_samples_mat(xs, xt, G0, c=[.5, .5, 1])
pl.plot(xs[:, 0], xs[:, 1], '+b', label='Source samples')
pl.plot(xt[:, 0], xt[:, 1], 'xr', label='Target samples')
pl.legend(loc=0)
pl.title('OT matrix with samples')
###Output
_____no_output_____
###Markdown
Compute Sinkhorn----------------
###Code
#%% sinkhorn
# reg term
lambd = 1e-3
Gs = ot.sinkhorn(a, b, M, lambd)
pl.figure(5)
pl.imshow(Gs, interpolation='nearest')
pl.title('OT matrix sinkhorn')
pl.figure(6)
ot.plot.plot2D_samples_mat(xs, xt, Gs, color=[.5, .5, 1])
pl.plot(xs[:, 0], xs[:, 1], '+b', label='Source samples')
pl.plot(xt[:, 0], xt[:, 1], 'xr', label='Target samples')
pl.legend(loc=0)
pl.title('OT matrix Sinkhorn with samples')
pl.show()
###Output
_____no_output_____ |
3.4_notebook_quizz_objects.ipynb | ###Markdown
You will need the class Car for the next exercises. The class Car has four data attributes: make, model, colour and number of owners (owner_number). The method car_info() prints out the data attributes and the method sell() increments the number of owners.
###Code
class Car(object):
def __init__(self,make,model,color):
self.make=make;
self.model=model;
self.color=color;
self.owner_number=0
def car_info(self):
print("make: ",self.make)
print("model:", self.model)
print("color:",self.color)
print("number of owners:",self.owner_number)
def sell(self):
self.owner_number=self.owner_number+1
###Output
_____no_output_____
###Markdown
Create a Car object Create a Car object my_car with the given data attributes:
###Code
make="BMW"
model="M3"
color="red"
###Output
_____no_output_____
###Markdown
Data Attributes Use the method car_info() to print out the data attributes Methods Call the method sell() in the loop, then call the method car_info() again
###Code
for i in range(5):
###Output
_____no_output_____
###Markdown
You will need the class Car for the next exercises. The class Car has four data attributes: make, model, colour and number of owners (owner_number). The method car_info() prints out the data attributes and the method sell() increments the number of owners.
###Code
class Car(object):
def __init__(self,make,model,color):
self.make=make;
self.model=model;
self.color=color;
self.owner_number=0
def car_info(self):
print("make: ",self.make)
print("model:", self.model)
print("color:",self.color)
print("number of owners:",self.owner_number)
def sell(self):
self.owner_number=self.owner_number+1
###Output
_____no_output_____
###Markdown
Create a Car object

Create a Car object my_car with the given data attributes:
###Code
make="BMW"
model="M3"
color="red"
my_car = Car(make, model, color)
print(my_car.make)
###Output
BMW
###Markdown
Data Attributes

Use the method car_info() to print out the data attributes
###Code
print(my_car.car_info())
###Output
make: BMW
model: M3
color: red
number of owners: 0
None
###Markdown
Methods

Call the method sell() in the loop, then call the method car_info() again
###Code
for i in range(5):
my_car.sell()
print(my_car.car_info())
###Output
make: BMW
model: M3
color: red
number of owners: 5
None
|
02-Crystallography.ipynb | ###Markdown
_Reference textbook / figure credits: Charles Kittel, "Introduction to solid-state physics", Chapter 1_

Unit cells, fractional coordinates, lattices

A periodic structure is defined by a *lattice* and an a-periodic *repeat unit*. The lattice is a periodic set of points generated by all integer combinations of three *unit cell vectors* $\mathbf{a}_{1,2,3}$, i.e. $$\mathbf{T} = u_1 \mathbf{a}_1 + u_2 \mathbf{a}_2 + u_3 \mathbf{a}_3, \quad u_{1,2,3} \in \mathbb{Z}$$ The repeat unit, or *basis*, is defined by the coordinates $\{x_i, y_i, z_i\}$ of a (usually small) number of atoms that sit in arbitrary positions within the unit cell. These are often given in *fractional coordinates* $\{s_{i1}, s_{i2}, s_{i3}\}$, such that the set of all atoms in the crystal is generated by $$(u_1+s_{i1}) \mathbf{a}_1 + (u_2+s_{i2}) \mathbf{a}_2 + (u_3+s_{i3}) \mathbf{a}_3$$ with the $u_{1,2,3}$ ranging over all positive and negative integers, and $i$ ranging over the number of atoms in the basis.

There are 14 types of lattices known as _Bravais lattices_ in three dimensions, which are distinguished by the symmetry group of the lattice points. If you need to refresh your fundamentals of crystallography, and don't remember what a _face centered cubic_ or _body centered cubic_ lattice is, the [wikipedia page](https://en.wikipedia.org/wiki/Bravais_lattice) is excellent. Note also that the crystal structure (lattice+basis) can have more complicated symmetries, forming the 230 [space groups](https://en.wikipedia.org/wiki/Space_group).

Now, consider the structure below. This is just a finite set of atoms, and coordinates are listed individually. You can click on an atom in the viewer and see its coordinates in the info panel below the viewer.
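As a minimal sketch (with made-up cell and basis values), the set of atomic positions described by the formula above can be generated directly:

```python
import itertools
import numpy as np

cell = np.array([[5.0, 0, 0], [0, 5.0, 0], [0, 0, 5.0]])  # rows are a1, a2, a3
basis = np.array([[0.0, 0.0, 0.0], [0.5, 0.5, 0.5]])      # fractional coordinates s_i
atoms = np.array([(u + s) @ cell
                  for u in itertools.product(range(3), repeat=3)
                  for s in basis])
print(atoms.shape)  # (2 * 27, 3): two basis atoms in each of the 27 cells
```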
###Code
positions = np.array( [[x,y,z] for x,y,z in itertools.product([0, 5, 10, 15],[0, 5, 10, 15],[0, 5, 10, 15]) ])
ase_cube = ase.Atoms("Ga64", positions=positions)
properties = {}
properties["index"] = {
"target": "atom",
"values": list(range(1, len(ase_cube)+1)),
}
properties["coordinates"] = {
"target": "atom",
"values": positions,
}
cs = chemiscope.show([ase_cube], properties=properties, mode="structure",
environments=chemiscope.all_atomic_environments([ase_cube], cutoff=40),
settings={"structure":[{"spaceFilling": False, "environments": {"cutoff": 40}}]}
)
display(cs)
###Output
_____no_output_____
###Markdown
**01a** Write a function that returns the lattice vectors and a basis that generate the periodic structure extending the motif above to infinity. _NB: the validation code only tests for the primitive cell - you can come up with correct structures that will be marked as incorrect. Trust your own judgment (and look carefully at the visualization)_
###Code
ex01_wci = WidgetCodeInput(
function_name="sc_lattice_basis",
function_parameters="",
docstring="""
Returns the lattice vectors and the basis (in fractional coordinates) that generates a simple cubic structure.
:return: Three lattice vectors and a list of basis coordinates
""",
function_body="""
# Write your solution, then click on the button below to update the plotter
# and check against the reference value
a1 = []
a2 = []
a3 = []
basis = [[]]
return a1, a2, a3, basis
"""
)
def reciprocal_lattice_vectors(a1, a2):
# compute it in obfuscated way
reciprocal_lattice = 2*np.pi*ase.Atoms(cell=[[a1[0], a1[1], 0], [a2[0], a2[1], 0], [0,0,1]]).cell.reciprocal()
return reciprocal_lattice[0][:2], reciprocal_lattice[1][:2]
data_dump.register_field("ex01-function", ex01_wci, "function_body")
def ex01_updater():
a1, a2, a3, basis = ex01_wci.get_function_object()()
positions = np.asarray(basis)
h = np.asarray([a1,a2,a3])
structure = ase.Atoms('Ga'+str(len(basis)), positions @ h, cell=[a1, a2, a3])
display(chemiscope.show(frames = [structure], mode="structure",
settings={"structure":[{"unitCell":True,"supercell":{"0":3,"1":3,"2":3}}]}
))
def match_lattice(a, b):
a1, a2, a3, basis = b;
return np.allclose([a1,a2,a3], np.eye(3)*5) and np.asarray(basis).shape==(1,3)
"""ref_values = {
(): ase.Atoms("CNH6", positions=[[ 1., -0., -0.01],
[ 2.52, -0.01, 0. ],
[ 0.6 , 1.02, -0. ],
[ 0.59, -0.52, 0.88],
[ 0.6 , -0.51, -0.9 ],
[ 2.92, 0.5 , 0.89],
[ 2.93, 0.51, -0.88],
[ 2.92, -1.03, -0. ]])
}, ref_match = match_structure, """
ex01_wcc = WidgetCodeCheck(ex01_wci, ref_values = { (): ([5,0,0],[0,5,0],[0,0,5],[[0,0,0]]) },
ref_match = match_lattice,
demo=WidgetUpdater(updater=ex01_updater))
display(ex01_wcc)
###Output
_____no_output_____
###Markdown
**01b** Try to rigidly shift the basis atom(s). Does the resulting crystal change in a significant way?
###Code
ex01_txt = Textarea("enter any additional comment", layout=Layout(width="100%"))
data_dump.register_field("ex01-answer", ex01_txt, "value")
display(ex01_txt)
###Output
_____no_output_____
###Markdown
Primitive cell, supercells, conventional cells It is always possible to give different descriptions of the same crystal structure. For instance, for a given cell and basis one can always specify a _supercell_, i.e. a multiple of the repeat unit of the original cell, with a correspondingly larger basis. This basis contains "hidden" symmetries, meaning that it would be possible to describe the same structure with a smaller unit cell. The smallest possible cell is called a _primitive_ (or minimal) cell. For instance, the figure below shows three different choices of unit cell for a face-centered cubic lattice. All cells describe the same structure! The first is the primitive cell, and the third is the conventional cell, which reveals more clearly the origin of the name of this lattice. The following visualizer shows a collection of 9 crystal structures, containing either one or two chemical species. Look at them, and try to understand what type of lattice each belongs to. You can visualize the environment of each atom within an adjustable cutoff, which may help you appreciate the 3D nature of the structure.
###Code
ase_crystals = read('data/crystals.xyz',":")
properties = {}
properties["lattice vectors a1"] = {
"target": "structure",
"values": np.asarray([str(list(a.cell[0])) for a in ase_crystals]),
}
properties["lattice vectors a2"] = {
"target": "structure",
"values": np.asarray([str(list(a.cell[1])) for a in ase_crystals]),
}
properties["lattice vectors a3"] = {
"target": "structure",
"values": np.asarray([str(list(a.cell[2])) for a in ase_crystals]),
}
properties["basis"] = {
"target": "atom",
"values": np.vstack([a.positions for a in ase_crystals]),
}
cs_crystal = chemiscope.show(ase_crystals, properties=properties, mode="structure",
environments=chemiscope.all_atomic_environments(ase_crystals),
settings={"structure":[{"bonds":True, "unitCell":True,"supercell":{"0":3,"1":3,"2":3},
"environments": {"cutoff": 6}}]}
)
def update_co(change):
cs_crystal.settings={"structure": [{"environments": {"cutoff": pb_crystal.value['co']}}]}
pb_crystal = WidgetParbox(onchange=update_co, co=(6.,1,12,0.1, r"environment cutoff / Å"))
display(VBox([pb_crystal,cs_crystal]))
###Output
_____no_output_____
###Markdown
**02** Each of the structures above is either face-centered (fcc), body-centered (bcc) or simple cubic (sc). Write down in the box below the Bravais lattice for each structure, and the size of the basis used to describe the structure in each frame. _NB: pay attention: (1) you are asked about the symmetry of the **lattice**: if you just look at the cell, you may be misled; (2) the number of atoms included in the basis depends on the choice of cell._
###Code
ex02_txt = Textarea("structure 1: lattice: XXX, basis size: YYY\n ....", layout=Layout(width="100%"))
data_dump.register_field("ex02-answer", ex02_txt, "value")
display(ex02_txt)
###Output
_____no_output_____
###Markdown
Using a supercell is not only a way of obtaining a more convenient/intuitive view of a crystal structure. It can also be useful to represent real materials, in which the ideal periodicity of the crystal is broken, e.g. because there are defects, interfaces, or simply disorder. In these cases, one often starts from a small unit cell and explicitly repeats it many times. _This is different from just replicating the unit cell for visualization purposes: the cell is also enlarged, and the coordinates of all atoms inside the unit cell can be adjusted independently._ ASE provides convenient utilities to replicate a structure. If `structure` is an `ase.Atoms` object, then `structure.repeat((nx, ny, nz))` will replicate the structure the given number of times along each of the axes. You can learn more about the function using `help(ase.Atoms.repeat)`.
###Code
help(ase.Atoms.repeat)
###Output
_____no_output_____
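###Markdown
For instance, a minimal usage sketch (the element and lattice parameter are arbitrary illustrative choices, unrelated to the exercise below):
###Code
import ase
import numpy as np

# a one-atom cubic cell, replicated twice along x and y and once along z
atoms = ase.Atoms("Cu", positions=[[0, 0, 0]], cell=np.eye(3) * 3.6, pbc=True)
supercell = atoms.repeat((2, 2, 1))
print(len(supercell))   # 4 atoms
print(supercell.cell)   # cell edges doubled along x and y
###Output
_____no_output_____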
###Markdown
**03** Write code to generate a supercell for _fcc_ aluminum, using the number of repeat units as an argument, and a lattice parameter of 4 Å. You can use the `repeat` ASE command, but if you feel adventurous you may as well write a replicate function yourself. _NB: the self-test function can generate false errors because it expects a specific order of the basis atoms. Don't worry too much if it tells you one test has not passed!_
###Code
ex03_wci = WidgetCodeInput(
function_name="al_supercell",
function_parameters="nrep",
docstring="""
Returns an ASE object describing a supercell built by replicating `nrep` times along each direction a conventional (4-atoms)
fcc unit cell for aluminum. Use a lattice parameter of 4 Å.
:return: The replicated-cell ASE object
""",
function_body="""
# Write your solution, then click on the button below to update the chemiscope viewer
import ase
al4 = ase.Atoms("Al4", pbc=True)
al4.cell = [ ... ] # lattice parameters of the conventional unit cell
al4.positions = [[atom1x, atom1y, atom1z],
[atom2x, atom2y, atom2z],
[atom3x, atom3y, atom3z],
[atom4x, atom4y, atom4z]] # positions of the 4 atoms in the conventional fcc cell
# empty atom
al_replicated = ...
return al_replicated
"""
)
data_dump.register_field("ex03-function", ex03_wci, "function_body")
def ex03_updater():
al_multi = ex03_wci.get_function_object()(ex03_wp.value['nrep'])
display(VBox([ex03_wp,chemiscope.show(frames = [al_multi], mode="structure",
settings={"structure":[{"unitCell":True}]}
)]) )
def match_lattice(a, b):
return np.allclose(b.cell, a[0]) and (True if a[1] is None else np.allclose(b.positions-b.positions[0], a[1]))
ex03_wp = WidgetParbox(nrep=(2, 1, 4, 1, r"$n_{\mathrm{repeat}}$ (must click on update button!)"))
ex03_wcc = WidgetCodeCheck(ex03_wci, ref_values = {
(1,): ([[4,0,0],[0,4,0],[0,0,4]], [[0,0,0],[2,2,0],[2,0,2],[0,2,2]]),
(2,): ([[8,0,0],[0,8,0],[0,0,8]], None)
},
ref_match = match_lattice,
demo=WidgetUpdater(updater=ex03_updater))
display(ex03_wcc)
###Output
_____no_output_____
###Markdown
Lattice planes and surfaces The general notation used to define lattice planes is somewhat contrived (because it actually relates to the reciprocal lattice!). A plane is defined by three points, so one can define a lattice plane by picking three points that are multiples of the unit cell vectors, $(n_1 \mathbf{a_1}, n_2 \mathbf{a_2}, n_3 \mathbf{a_3})$. The plane is determined by the reciprocals of the $n_{1,2,3}$, multiplied by their least common multiple so as to obtain three integer indices $(h k l)$. For instance, a plane that intercepts the lattice axes with $n_{1,2,3} = (4,3,1)$ has Miller indices $(3,4,12)$. _Only in a cubic lattice_ the plane identified by (h k l) has $h \mathbf{a_1} + k \mathbf{a_2} + l \mathbf{a_3}$ as a plane normal. If you need a more detailed discussion, see Chapter 1 of _Kittel_, or the [Wikipedia page on Miller indices](https://en.wikipedia.org/wiki/Miller_index) that has some good figures. In the widget below, you can see a large supercell of _fcc_ aluminum. You can select different Miller indices, and see the atoms that belong to one of the lattice planes highlighted in a different color. Experiment a bit and note how e.g. `(2,2,4)` is equivalent to `(1,1,2)`, etc. Note also how the density of atoms on low-index planes is higher than on high-index planes.
###Code
fcc_al = ase.lattice.cubic.FaceCenteredCubic('Al')
al_supercell = fcc_al.repeat((4,4,4))
normal_plane = np.array([1,0,0])
origin = np.diagonal(al_supercell.cell)/2
distances = np.abs((al_supercell.positions-origin) @ normal_plane) / np.linalg.norm(normal_plane)
atoms_on_plane = np.where( distances < 1e-5 )[0]
al_supercell.numbers[atoms_on_plane] = 12
cs_fcc = chemiscope.show([al_supercell], mode="structure", settings={"structure":[{"spaceFilling": False, "unitCell":True}]}
)
def update_surface_cut(change):
al_supercell.numbers[:] = 13
normal_plane = np.array([pb_fcc.value['h'],pb_fcc.value['k'],pb_fcc.value['l']])
if np.linalg.norm(normal_plane) != 0:
origin = np.diagonal(al_supercell.cell)/2
distances = np.abs((al_supercell.positions-origin) @ normal_plane) / np.linalg.norm(normal_plane)
atoms_on_plane = np.where( distances < 1e-6 )[0]
al_supercell.numbers[atoms_on_plane] = 12
global cs_fcc
settings = cs_fcc.settings
cs_fcc.close()
cs_fcc = chemiscope.show([al_supercell], mode="structure", settings=cs_fcc.settings )
display(cs_fcc)
#cs.settings={"structure": [{"environments": {"cutoff": pb.value['co']}}]}
pb_fcc = WidgetParbox(onchange=update_surface_cut, h=(1,0,3,1,r"h"), k=(0,0,3,1,r"k"), l=(0,0,3,1,r"l"))
display(VBox([pb_fcc,cs_fcc]))
###Output
_____no_output_____
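###Markdown
As a small side computation (a sketch of the reciprocal-intercept rule described above, not part of the exercises), the Miller indices can be obtained from the intercept multiples $n_{1,2,3}$ with a few lines of Python:
###Code
from fractions import Fraction
from functools import reduce
from math import gcd

def miller_from_intercepts(n1, n2, n3):
    # take the reciprocals of the intercept multiples ...
    recips = [Fraction(1, n) for n in (n1, n2, n3)]
    # ... and clear the denominators with their least common multiple
    lcm = reduce(lambda a, b: a * b // gcd(a, b), [r.denominator for r in recips])
    return tuple(int(r * lcm) for r in recips)

print(miller_from_intercepts(4, 3, 1))  # (3, 4, 12), as in the example above
###Output
_____no_output_____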
###Markdown
**04** Activate the visualization of multiple replicas in the visualizer above (e.g. $2\times2\times2$). Are the planes continuous across the edges of the supercell, if you pick a `(1,0,0)` plane? And what happens if you pick `(1,1,1)`?
###Code
ex04_txt = Textarea("enter your answer", layout=Layout(width="100%"))
data_dump.register_field("ex04-answer", ex04_txt, "value")
display(ex04_txt)
###Output
_____no_output_____
###Markdown
Now let's create a supercell with an actual surface. While there are tools to do this for more complicated scenarios (see e.g. [here](https://wiki.fysik.dtu.dk/ase/ase/build/surface.html#create-specific-non-common-surfaces) for some examples using ASE), we will do this manually to understand better what is going on. If the surface is aligned with one of the lattice parameters, it is sufficient to first replicate the unit cell a number of times, and then artificially enlarge one of the edges of the unit cell. This will leave some empty space between the top and bottom layers of the structure, effectively leaving some atoms in contact with vacuum. Note that you always create _two_ surfaces, because you are still dealing with a periodic structure. This is usually referred to as a _slab geometry_. Note also that for complicated crystals with a basis, the unit cell can be cut at different positions, so that there can be multiple different surfaces corresponding to the same lattice direction, depending on the position of the cut. **05** Write a function that creates a supercell for _fcc_ aluminum with 4x4x2 replicas along the three directions. Use a conventional unit cell and a lattice parameter of 4 Å. Modify the `cell` of the resulting ASE structure to add 20 Å of vacuum along the $\mathbf{a}_3$ direction, creating a slab geometry with a surface along `(0,0,1)`. _NB: you can copy most of the code from exercise 03._
###Code
ex05_wci = WidgetCodeInput(
function_name="al_slab",
function_parameters="nrep, nslab, vacuum",
docstring="""
Returns an ASE object describing Al (100) surfaces using a slab geometry with nrep x nrep x nslab cells,
and a vacuum region along z. Use a lattice parameter of 4 Å.
:return: The ASE object describing the slab geometry
""",
function_body="""
# Write your solution, then click on the button below to update the chemiscope viewer
import ase
al4 = ase.Atoms("Al4", pbc=True)
al4.cell = [ ... ] # lattice parameters of the conventional unit cell
al4.positions = [[, ,], [, ,], [, ,], [, ,]] # positions of the 4 atoms in the conventional fcc cell
# empty atom
al_replicated = ...
# create gap
return al_replicated
"""
)
data_dump.register_field("ex05-function", ex05_wci, "function_body")
def ex05_updater():
al_multi = ex05_wci.get_function_object()(ex05_wp.value['nrep'], ex05_wp.value['nslab'], ex05_wp.value['vacuum'])
display(VBox([ex05_wp,chemiscope.show(frames = [al_multi], mode="structure",
settings={"structure":[{"unitCell":True}]}
)]) )
def match_lattice(a, b):
return np.allclose(b.cell, a[0]) and (True if a[1] is None else np.allclose(b.positions-b.positions[0], a[1]))
ex05_wp = WidgetParbox(
nrep=(4, 1, 6, 1, r"$n_{\mathrm{rep}}$ (click on button to update!)"),
nslab=(2, 1, 6, 1, r"$n_{\mathrm{slab}}$"),
vacuum=(20., 0, 40, 0.5, r"$L_{\mathrm{vacuum}}$")
)
ex05_wcc = WidgetCodeCheck(ex05_wci, ref_values = {
(1, 1, 10): ([[4,0,0],[0,4,0],[0,0,14]], [[0,0,0],[2,2,0],[2,0,2],[0,2,2]]),
(4, 2, 11): ([[16,0,0],[0,16,0],[0,0,19]], None)
},
ref_match = match_lattice,
demo=WidgetUpdater(updater=ex05_updater))
display(ex05_wcc)
###Output
_____no_output_____
###Markdown
Creating a `(1,1,1)` surface is somewhat more complicated, because the conventional unit cell is not aligned properly. One way to create a slab with the appropriate orientation is to create a _primitive_ face-centered cell and to create the slab by _extending_ one of the cell vectors. **06** Write a function that creates a supercell for _fcc_ aluminum, starting with the *primitive* cell replicated as 8x8x2 along the lattice vectors. Then _multiply_ $\mathbf{a}_3$ by a factor of four. Visualize the resulting structure
###Code
ex06_wci = WidgetCodeInput(
function_name="al_slab_111",
function_parameters="nrep, nslab, zscale",
docstring="""
Returns an ASE object describing Al (111) surfaces using a slab geometry with
nrep x nrep x nslab copies of the primitive cell. Vacuum must be created by
elongating one of the lattice vectors by a factor zscale. Use a lattice parameter of 4 Å.
:return: The ASE object describing the slab geometry
""",
function_body="""
# Write your solution, then click on the button below to update the chemiscope viewer
import ase
al = ase.Atoms("Al", pbc=True)
al.cell = [ ... ] # lattice parameters of the *primitive* unit cell
al.positions = [[0,0,0]] # just one-atom basis!
# empty atom
al_replicated = ...
# create gap
return al_replicated
"""
)
data_dump.register_field("ex06-function", ex06_wci, "function_body")
def ex06_updater():
al_multi = ex06_wci.get_function_object()(ex06_wp.value['nrep'], ex06_wp.value['nslab'], ex06_wp.value['zscale'])
display(VBox([ex06_wp,chemiscope.show(frames = [al_multi], mode="structure",
settings={"structure":[{"unitCell":True}]}
)]) )
def match_lattice(a, b):
return np.allclose(b.cell, a[0]) and (True if a[1] is None else np.allclose(b.positions-b.positions[0], a[1]))
ex06_wp = WidgetParbox(
nrep=(4, 1, 6, 1, r"$n_{\mathrm{rep}}$ (click on button to update!)"),
nslab=(2, 1, 6, 1, r"$n_{\mathrm{slab}}$"),
zscale=(4., 1., 10, 0.5, r"$z_{\mathrm{scale}}$")
)
ex06_wcc = WidgetCodeCheck(ex06_wci, ref_values = {
(1, 1, 1): ([[2,2,0],[2,0,2],[0,2,2]], [[0,0,0]]),
(1, 2, 2): ([[2,2,0],[2,0,2],[0,8,8]], None)
},
ref_match = match_lattice,
demo=WidgetUpdater(updater=ex06_updater))
display(ex06_wcc)
###Output
_____no_output_____
###Markdown
**07** What is the direction of the primitive lattice parameters relative to the conventional _fcc_ index system? Is the surface normal parallel to the $\mathbf{a}_3$ cell vector? In order to create a structure that is easier to visualize, one often builds a unit cell that is orthorhombic and has one axis parallel to the surface normal. Can you come up with three low-Miller-indices directions that are mutually orthogonal and could be used to define an appropriate unit cell?_NB: The last point is not entirely trivial, think about it but don't get too stressed if you don't manage to create the appropriate cell. If you are *really* into these geometric problems, you can also try to derive an actual unit cell, including also the basis._
###Code
ex07_txt = Textarea("write your answer", layout=Layout(width="100%"))
data_dump.register_field("ex07-answer", ex07_txt, "value")
display(ex07_txt)
###Output
_____no_output_____
###Markdown
Reciprocal lattice in 2D From here on, we work exclusively with 2D lattices,$$\mathbf{T} = u_1 \mathbf{a}_1 + u_2 \mathbf{a}_2, \quad \mathbf{a}_{1,2} \in \mathbb{R}^2, u_{1,2} \in \mathbb{Z}$$because they allow for simpler visualization of the core concepts. Most of the concepts we develop and demonstrate, however, apply equally well to actual 3D systems. The idea of the reciprocal lattice is deeply linked to the ideas of scattering and constructive interference. A plane wave $e^{\mathrm{i} \mathbf{k}\cdot \mathbf{x}}$ originating at the origin will be in phase with similar waves originating at all other lattice points if its wavevector $\mathbf{k}$ satisfies the phase relation $\mathbf{k}\cdot \mathbf{T} = 2\pi n$, where $n$ is an integer, for each and every lattice vector. We can build a _reciprocal lattice_ that consists of integer combinations of basis vectors $\mathbf{b}_{1,2}$$$\mathbf{G} = v_1 \mathbf{b}_1 + v_2 \mathbf{b}_2, \quad \mathbf{b}_{1,2} \in \mathbb{R}^2, v_{1,2} \in \mathbb{Z}$$such that every reciprocal lattice point is a wavevector that results in completely in-phase scattering from the direct lattice. To do this, the following relationships must hold between the lattice vectors: $\mathbf{b}_{1,2} \cdot \mathbf{a}_{1,2} = 2\pi$, $\mathbf{b}_{1,2}\cdot \mathbf{a}_{2,1} = 0$. Thus, $\mathbf{b}_1$ must be orthogonal to $\mathbf{a}_2$ and $\mathbf{b}_2$ must be orthogonal to $\mathbf{a}_1$. We can achieve this (including also the normalization that ensures the correct scaling $\mathbf{b}_{1,2} \cdot \mathbf{a}_{1,2} = 2\pi$) with the definition$$ \mathbf{b}_1 = 2\pi\frac{\mathbf{R}\mathbf{a}_2}{\mathbf{a}_1\cdot\mathbf{R}\mathbf{a}_2}, \quad \mathbf{b}_2 = 2\pi\frac{\mathbf{R}\mathbf{a}_1}{\mathbf{a}_2\cdot\mathbf{R}\mathbf{a}_1},\quad\quad\mathbf{R} = \begin{bmatrix}0 & -1\\ 1 & 0\end{bmatrix}$$where $\mathbf{R}$ is a $\pi/2$ rotation. **08** What are the reciprocal lattice vectors for a 2D rectangular Bravais lattice with lattice vectors $(1,0)$ and $(0,2)$?
###Code
ex08_txt = Textarea("The reciprocal primitive vectors are:\n b_1 = ... , b_2 = ...", layout=Layout(width="100%"))
data_dump.register_field("ex08-answer", ex08_txt, "value")
display(ex08_txt)
###Output
_____no_output_____
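###Markdown
Before implementing the construction yourself, it may help to see the duality conditions at work numerically. Below is a minimal sketch with a hand-written oblique lattice and its reciprocal (the vectors are illustrative values; this is deliberately not the construction asked for in the next exercise):
###Code
import numpy as np

a1, a2 = np.array([1.0, 0.0]), np.array([0.5, np.sqrt(3) / 2])
b1 = 2 * np.pi * np.array([1.0, -1 / np.sqrt(3)])
b2 = 2 * np.pi * np.array([0.0, 2 / np.sqrt(3)])

# b_i . a_j must equal 2*pi*delta_ij: expect [2pi, 0] and [0, 2pi]
print(np.round([b1 @ a1, b1 @ a2], 6))
print(np.round([b2 @ a1, b2 @ a2], 6))
###Output
_____no_output_____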
###Markdown
**09** Write a function that computes the reciprocal lattice vectors given $\mathbf{a}_{1,2}$. Use the demo widget to experiment and get an intuitive understanding of what's going on.
###Code
# TODO AG: make an exercise where they build b1, b2 starting from
# a1, a2, and have a demo box that shows two panels, one with the direct lattice
# and one with the reciprocal lattice
ex09_wci = WidgetCodeInput(
function_name="reciprocal_lattice",
function_parameters="a1, a2",
docstring="""
Return the 2D reciprocal unit cell vectors.
:param a1: unit cell vector a1
:param a2: unit cell vector a2
:return: reciprocal lattice unit cell vectors
""",
function_body="""
import numpy as np
from numpy import pi
# this is just to accept all sort of sequence inputs
a1 = np.asarray(a1)
a2 = np.asarray(a2)
# matrix-matrix and matrix-vector multiplication can be achieved with the @ operator
# For example `R @ a` multiplies the matrix R with the vector a
R = np.array([[0,-1],[1,0]]) # <- this converts a nested list to a numpy array
# change to the correct expression
b1 = 2*pi*a1
b2 = 2*pi*a2
return np.asarray(b1), np.asarray(b2) # this is just to return a standard format even if you define b1 and b2 as another type of iterable
"""
)
data_dump.register_field("ex09-function", ex09_wci, "function_body")
def plot_lattice(ax, a1, a2, basis=None, alphas=None, s=20, c='red',
lattice_size = 60, head_length = 0.5, head_width= 0.2, width=0.05):
if basis is None:
basis = np.array([[0,0]])
A = np.array([a1, a2])
# each atom in the basis gets a different basis alpha value when plotted
if alphas is None:
alphas = np.linspace(1, 0.3, len(basis))
for i in range(len(basis)):
lattice = (np.mgrid[:lattice_size,:lattice_size].T @ A + basis[i]).reshape(-1, 2)
lattice -= (np.array([lattice_size//2,lattice_size//2]) @ A).reshape(-1, 2)
ax.scatter(lattice[:,0], lattice[:,1], color=c, s=s, alpha=alphas[i])
ax.fill([0,a1[0],(a1+a2)[0],a2[0]], [0,a1[1],(a1+a2)[1],a2[1]], color=c, alpha=0.2)
ax.arrow(0,0, a1[0], a1[1],width=width,
length_includes_head=True,
fc=c, ec='black')
ax.arrow(0,0, a2[0], a2[1],width=width,
length_includes_head=True,
fc=c, ec='black')
def plot_reciprocal_and_real_lattice(axes, a11, a12, a21, a22, basis=None):
if basis is None:
basis = np.array([[0,0]])
a1 = np.array([a11, a12])
a2 = np.array([a21, a22])
b1, b2 = ex09_wci.get_function_object()(a1, a2)
plot_lattice(axes[0], a1, a2, basis, s=20, c='red')
plot_lattice(axes[1], b1/(2*np.pi), b2/(2*np.pi), None, s=20, c='blue')
axes[0].set_title('real space')
axes[0].set_xlim(-5,5)
axes[0].set_ylim(-5,5)
axes[0].set_xlabel("$x$ / Å")
axes[0].set_ylabel("$y$ / Å")
axes[1].set_title('reciprocal space')
axes[1].set_xlim(-5,5)
axes[1].set_ylim(-5,5)
axes[1].set_xlabel("$k_x/2\pi$ / Å$^{-1}$")
axes[1].set_ylabel("$k_y/2\pi$ / Å$^{-1}$")
fig_ax = plt.subplots(1, 2, figsize=(7.5,3.8), tight_layout=True)
ex09_wp = WidgetPlot(plot_reciprocal_and_real_lattice,
WidgetParbox(a11 = (1., -4, 4, 0.1, r'$a_{11} / Å$'),
a12 = (0., -4, 4, 0.1, r'$a_{12} / Å$'),
a21 = (0., -4, 4, 0.1, r'$a_{21} / Å$'),
a22 = (2., -4, 4, 0.1, r'$a_{22} / Å$')
),
fig_ax=fig_ax)
ex09_wcc = WidgetCodeCheck(ex09_wci, ref_values=
{
aa: reciprocal_lattice_vectors(np.asarray(aa[0]), np.asarray(aa[1]) )
for aa in [((0,1), (1,0)), ((1,1), (1,-1)), ((0,2), (2,1))]
}
, demo=ex09_wp)
display(ex09_wcc)
###Output
_____no_output_____
###Markdown
Diffraction from a lattice To understand diffraction, you need to consider that scattering is a process involving an incoming wave, $e^{\mathrm{i} \mathbf{k}_{\mathrm{in}} \cdot \mathbf{x}}$. Each lattice point will be hit by the wave with a different phase $e^{\mathrm{i} \mathbf{k}_{\mathrm{in}} \cdot \mathbf{T}}$ and act as a spherical scatterer. The detector will be in a given position $\mathbf{R}_d$, and if it is sufficiently far it picks up a plane wave in that direction, with wavevector $\mathbf{k}_{\mathrm{out}}$. Depending on the position of the scattering point, it will accumulate a further phase $e^{-\mathrm{i} \mathbf{k}_{\mathrm{out}} \cdot \mathbf{T}}$ (the minus sign coming from the fact that the distance traveled will be $\sim(\mathbf{R}_d - \mathbf{T})$). The scheme below, drawn for arbitrary positions of the scattering centers, clearly applies also to the case when scattering occurs from lattice points. The condition for constructive interference is therefore that $\mathbf{k}=\mathbf{k}_\mathrm{in}-\mathbf{k}_\mathrm{out}$ be a vector of the reciprocal lattice. For typical diffraction experiments, the scattering process is elastic, meaning that the modulus of the wavevector does not change during the interaction. Furthermore, the modulus of the wavevector depends on the source of radiation used in the experiment. **10** What are the smallest and largest reciprocal lattice vectors that can be observed with electromagnetic radiation with wavelength $\lambda$? Think of the geometry of the experiment, and take the modulus of the wavevector to be $2\pi/\lambda$.
###Code
ex10_txt = Textarea("Write the answer here", layout=Layout(width="100%"))
data_dump.register_field("ex10-answer", ex10_txt, "value")
display(ex10_txt)
###Output
_____no_output_____
###Markdown
The widget below allows you to experiment with the geometry of a scattering experiment. You can define a direct lattice, the wavelength of the incoming light, and the angle between incoming and outgoing beam. The scheme that is drawn around the origin is the construction for the Ewald circle (2D equivalent of the Ewald sphere). There may be scattering only if the scattering vector $\mathbf{k}$ coincides with a point of the reciprocal lattice.
###Code
def plot_braggs_reflection(axes, a11, a12, a21, a22, phi, wavelength, two_theta):
def rot2d(angle):
return np.array([[np.cos(angle), -np.sin(angle)], [np.sin(angle), np.cos(angle)]])
rot_phi = rot2d(phi)
a1 = np.array([a11, a12]) @ rot_phi
a2 = np.array([a21, a22]) @ rot_phi
# reuse
plot_reciprocal_and_real_lattice(axes, a1[0], a1[1], a2[0], a2[1])
head_length = 0.5
head_width = 0.2
width = 0.05
lw = 1
alpha_unit_cell_vectors = 0.8
k_in = 2*np.pi*np.array([-1,0])/wavelength /(2*np.pi) #<- scale by 2pi so it is consistent with reciprocal lattice plot
k_out = rot2d(two_theta)@k_in
k = k_in - k_out
scatter_origin = np.array([0,0])
axes[1].arrow(scatter_origin[0]+k_in[0], scatter_origin[1]+k_in[1], -k_in[0], -k_in[1],
lw=lw, head_width=head_width, head_length=head_length, width=0.05,
length_includes_head=True,
fc='red', ec='red', label='$k_{in}$')
axes[1].arrow(scatter_origin[0]+k_in[0], scatter_origin[1]+k_in[1], -k_out[0], -k_out[1],
lw=lw, head_width=head_width, head_length=head_length, width=0.05,
length_includes_head=True,
fc='orange', ec='orange', label='$k_{out}$')
axes[1].arrow(scatter_origin[0], scatter_origin[1], k[0], k[1],
lw=lw, head_width=head_width, head_length=head_length, width=0.05,
length_includes_head=True,
fc='black', ec='black', label='$k$')
circle_theta = np.linspace(-np.pi, np.pi, 200)
r = 2*np.pi/wavelength/(2*np.pi)
axes[1].plot(np.sin(circle_theta)*r + scatter_origin[0]+k_in[0],
np.cos(circle_theta)*r + scatter_origin[1]+k_in[1],
color='red', label="Ewald circle")
r = np.linalg.norm(k)
axes[1].plot(np.sin(circle_theta)*r + scatter_origin[0],
np.cos(circle_theta)*r + scatter_origin[1],
color='black', label="$|\mathbf{k}|$ circle")
axes[1].legend()
fig_ax = plt.subplots(1, 2, figsize=(7.5,3.8), tight_layout=True)
ex10_wp = WidgetPlot(plot_braggs_reflection,
WidgetParbox(a11 = (1., -4, 4, 0.1, r'$a_{11} / Å$'),
a12 = (0., -4, 4, 0.1, r'$a_{12} / Å$'),
a21 = (0., -4, 4, 0.1, r'$a_{21} / Å$'),
a22 = (1., -4, 4, 0.1, r'$a_{22} / Å$'),
phi = (0., 0., 2*np.pi, 0.05, r'$\phi$'),
wavelength = (1., 0.2, 2, 0.05, r'$\lambda$ / Å'),
two_theta = (1., 0.1, np.pi, 0.05, r'$2\theta$')
),
fig_ax=fig_ax)
# we need to reload the widget when the solution of ex09
# computing the reciprocal unit cell vectors has been updated
button = Button(description="Update")
button.on_click(ex10_wp.update)
display(button)
display(ex10_wp)
###Output
_____no_output_____
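###Markdown
To attach numbers to the construction (a minimal sketch, with arbitrary illustrative values for $|\mathbf{G}|$ and $\lambda$), the elastic-scattering condition implies $\sin\theta = G/2k$ with $k = 2\pi/\lambda$:
###Code
import numpy as np

wavelength = 1.0   # Å, illustrative
G = 2 * np.pi      # 1/Å, an illustrative reciprocal lattice vector modulus
k = 2 * np.pi / wavelength
two_theta = 2 * np.arcsin(G / (2 * k))
print(np.degrees(two_theta))  # ~60 degrees for these values
###Output
_____no_output_____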
###Markdown
You will notice that it is not easy, with a fixed position of the incoming light, to orient the detector so that $\mathbf{k}$ corresponds to a reciprocal lattice vector. The widget above allows you to change the orientation of the direct lattice. Note how, by changing the orientation of the crystal, you can bring any of the reciprocal lattice points that lie on the circle with radius $k$ into coincidence with the scattering wavevector $\mathbf{k}$. **11** What happens if you consider scattering from a collection of crystals with different, random orientations (a powder sample)? Does diffraction depend on the orientation of the sample?
###Code
ex11_txt = Textarea("Write the answer here", layout=Layout(width="100%"))
data_dump.register_field("ex11-answer", ex11_txt, "value")
display(ex11_txt)
###Output
_____no_output_____
###Markdown
Diffraction from an atomic structure An actual crystalline structure involves both a lattice and an a-periodic basis $\{\mathbf{s}_i\}$ of atoms. Each will scatter with a form factor $f_i$ (which depends on the modulus of $\mathbf{k}$, but which we will take to be a constant proportional to the atomic charge $Z_i$). The scattered amplitude can be written as $$F(\mathbf{k}) = \frac{1}{N} \sum^N_{j \textrm{ atom}} f_j\exp(-\mathrm{i}\mathbf{k}\cdot\mathbf{r}_j)\textrm{, with }\mathbf{r}_j\textrm{ position of atom }j$$by breaking the position of atoms into lattice vector and basis positions ($\mathbf{r}_j=\mathbf{T}+\mathbf{s}_m$), we can write $$F(\mathbf{k}) = \frac{1}{n_\mathrm{cell} n_\mathrm{basis}} \sum^{n_\mathrm{cell}}_{\mathbf{T}} \exp(-\mathrm{i}\mathbf{k}\cdot \mathbf{T}) \sum_m^{n_\mathrm{basis}} f_m \exp(-\mathrm{i}\mathbf{k}\cdot\mathbf{s}_m).$$One sees that the sum over lattice vectors selects the diffracted wavevectors that correspond to a reciprocal lattice vector $\mathbf{G}$, while the sum over the atomic basis modulates the amplitude of the peak through a _structure factor_$$F_\mathbf{G} = \sum_m^{n_\mathrm{basis}} f_m \exp(-\mathrm{i}\mathbf{G}\cdot\mathbf{s}_m).$$ In the scattering geometry above, one can determine the conditions for scattering in terms of the scattering angle $2\theta$, as follows. If $k$ is the modulus of both $\mathbf{k}_\mathrm{in}$ and $\mathbf{k}_\mathrm{out}$, and $\mathbf{G} = \mathbf{k}_\mathrm{in}-\mathbf{k}_\mathrm{out}$ must hold, a first necessary condition is$$G^2 = |\mathbf{k}_\mathrm{in}-\mathbf{k}_\mathrm{out}|^2 = 2k^2 (1-\cos 2\theta) = 4k^2 \sin^2 \theta$$hence, $\sin\theta=G/2k$. To determine the orientation of $\mathbf{G}$ a second condition is needed. We can consider the angle $\phi$ between $\mathbf{G}$ and the incoming wavevector, which can be set by changing the orientation of the crystal. Thus, by writing $|\mathbf{G}-\mathbf{k}_\mathrm{in}|^2 = |\mathbf{k}_\mathrm{out}|^2$ we can easily get $G = 2 k \cos\phi$, and hence $\cos\phi = \sin\theta$. If the sample is formed by grains with uniformly distributed orientations, the second condition is always satisfied by some crystals, and so the diffraction pattern is just a sequence of peaks at particular values of the angle $2\theta$. The intensity of each peak is given by the square modulus of the structure factor, $|F_\mathbf{G}|^2$. The widget below computes the powder (rotationally averaged) diffraction pattern for a crystal with a basis of two atoms. The function computes the list of diffraction peaks, with the corresponding structure factor. It is not entirely trivial, and so it is already implemented: you don't have to change it, but you can read it and understand what it does. Experiment with the widget, and then move to the exercises below, which will ask you to comment on what you observe in different scenarios. Parameters are as follows:* $a_{ij}$: components of the lattice vectors* $\phi$: rigid rotation of the lattice (does it have an impact on the diffraction?)* $s_{1,2}$: fractional coordinates of the second atom of the basis (first atom is in (0,0))* $f_{1,2}$: atomic form factors for the two atoms in the basis (roughly take it to be the atomic number)* $\lambda$: wavelength of the scattering radiation
###Code
# set upt the code widget window
ex12_wci = WidgetCodeInput(
function_name="diffraction_peaks",
function_parameters="basis, atomic_ff, reciprocal_b1, reciprocal_b2, wavelength",
docstring="""
Computes the list of peaks for a lattice with a given (real-space) basis
and reciprocal lattice, and for a given wavelength of the incoming radiation.
:param basis: list of N 2D vectors corresponding to the (real space!) position of the basis atoms
:param atomic_form_factors: atomic form factors of atoms in lattice, array of length N
:param reciprocal_b1, reciprocal_b2: reciprocal lattice vectors
:param wavelength: wavelength of the incoming radiation
:return: The list of diffraction peaks, as [h, l, theta, intensity]
""",
function_body="""
import numpy as np
def compute_absolute_structure_factor(sj, fj, G):
# sj: atomic basis (n_basis x 2)
# fj: form factors (n_basis)
# G: reciprocal lattice vectors (2D)
return np.abs(fj @ np.exp(-1j * sj @ G))
# wave number (modulus of the incoming wavevector)
k = np.pi*2/wavelength
# determine the range of reciprocal lattice vectors that could give rise to permissible reflections
n1 = int((k*2)/np.sqrt(reciprocal_b1@reciprocal_b1))+1
n2 = int((k*2)/np.sqrt(reciprocal_b2@reciprocal_b2))+1
# allocated space for the list of peaks
lpeaks = []
for v1 in range(-n1,n1+1):
for v2 in range(-n2,n2+1):
# reciprocal lattice vector
G = reciprocal_b1*v1 + reciprocal_b2*v2
# theta (from 2theta geometry)
sin_theta = np.sqrt(G@G)/(2*k)
if sin_theta > 1: # discards reflections that fall outside of the permissible range
continue
theta = np.arcsin(sin_theta)
# structure factor
absolute_structure_factor = compute_absolute_structure_factor(basis, atomic_ff, G)
lpeaks.append([v1, v2, theta, absolute_structure_factor**2])
return np.asarray(lpeaks)
"""
)
data_dump.register_field("ex12-function", ex12_wci, "function_body")
def plot_diffraction(axes, a11, a12, a21, a22, s1, s2, f1, f2, phi, wavelength):
def rot2d(angle):
return np.array([[np.cos(angle), -np.sin(angle)], [np.sin(angle), np.cos(angle)]])
rot_phi = rot2d(phi)
a1 = np.array([a11, a12]) @ rot_phi
a2 = np.array([a21, a22]) @ rot_phi
basis = np.asarray([0*a1,s1*a1+s2*a2])
plot_lattice(axes[0], a1, a2, basis=basis, alphas=[f1/40, f2/40], c='red')
axes[0].set_title('real space')
axes[0].set_xlim(-5,5)
axes[0].set_ylim(-5,5)
axes[0].set_xlabel("$x$ / Å")
axes[0].set_ylabel("$y$ / Å")
b1, b2 = reciprocal_lattice_vectors(a1, a2)
dpeaks = ex12_wci.get_function_object()( basis, np.asarray([f1, f2]), b1, b2, wavelength )
twotheta_grid = np.linspace(0, 180, 720)
dp_grid = np.zeros(len(twotheta_grid))
for _, _, t, f2 in dpeaks:
dp_grid += np.exp(-(twotheta_grid-2*t*180/np.pi)**2/0.5)*f2
axes[1].clear()
# we plot two theta
axes[1].plot(twotheta_grid, dp_grid, 'b-')
axes[1].set_xlim(0,180)
axes[1].set_xlabel("$2\\theta$ / degree")
axes[1].set_title('Diffraction pattern')
axes[1].set_ylabel("Intensity $|F(\mathbf{k}(\\theta))|$")
axes[0].set_aspect('equal')
axes[1].set_aspect(150/np.max(dp_grid))
header = """<tr>
<th>v<sub>1</sub> <span style="padding-left:150px"></th>
<th>v<sub>2</sub> <span style="padding-left:150px"></th>
<th>2θ / degree <span style="padding-left:150px"></th>
<th>|F<sub>G</sub>|<sup>2</sup> <span style="padding-left:150px"></th>
</tr>"""
# cleans up peak info for displaying
tpeaks = []
for d in dpeaks[np.argsort(dpeaks[:,2])]:
tpeaks.append( [ int(d[0]), int(d[1]), np.round(2*d[2]*180/np.pi,2), np.round(d[3],1) ])
reflection_table_html.value = array_to_html_table(tpeaks, header)
def array_to_html_table(numpy_array, header):
rows = ""
for i in range(len(numpy_array)):
rows += "<tr>" + functools.reduce(lambda x,y: x+y,
map(lambda x: "<td>" + str(x) + "</td>",
numpy_array[i])
) + "</tr>"
return "<table>" + header + rows + "</table>"
reflection_table_html = HTML(
value=f"dpeaks")
reflection_table = HBox(layout=Layout(height='250px', overflow_y='auto'))
reflection_table.children += (reflection_table_html,)
fig_ax = plt.subplots(1, 2, figsize=(9,4.2), tight_layout=True)
ex12_wp = WidgetPlot(plot_diffraction,
WidgetParbox(a11 = (1., -4, 4, 0.1, r'$a_{11} / Å$'),
a12 = (0., -4, 4, 0.1, r'$a_{12} / Å$'),
a21 = (0., -4, 4, 0.1, r'$a_{21} / Å$'),
a22 = (1.5, -4, 4, 0.1, r'$a_{22} / Å$'),
phi = (0., 0., 2*np.pi, 0.1, r'$\phi$'),
s1 = (0.25, 0.01, 0.99, 0.01, r'$s_1$'),
s2 = (0.75, 0.01, 0.99, 0.01, r'$s_2$'),
f1 = (10., 1., 40., 1, r'$f_{1}$'),
f2 = (30., 0, 40., 1, r'$f_{2}$'),
wavelength = (1.0, 0.1, 2, 0.05, r'$\lambda$')
),
fig_ax=fig_ax)
ex12_wcc = WidgetCodeCheck(ex12_wci, ref_values = {}, demo = (ex12_wp, reflection_table))
display(ex12_wcc)
###Output
_____no_output_____
###Markdown
**12a** Set $f_2$ to zero (so that effectively this becomes a lattice with a single atom) and set the unit cell to be a $1\times 1.2$ rectangle. Set the wavelength to 1. Observe how the position and intensity of the peaks change when you change the dimensions of the lattice. What happens if you set the off-diagonal term $a_{12}$ to be (slightly) different from zero? What happens if you set the two lattice vectors to be orthogonal and equal in length?
###Code
ex12a_txt = Textarea("Write the answer here", layout=Layout(width="100%"))
data_dump.register_field("ex12a-answer", ex12a_txt, "value")
display(ex12a_txt)
###Output
_____no_output_____
###Markdown
**12b** Set $f_1=30$, $f_2=10$, and set the unit cell to be a $1\times 1.2$ rectangle. Set the fractional coordinates of the second atom to be $(0.5,0.5)$. Set the wavelength to 1. Observe how the position and intensity of the peaks change as you vary the form factor of the second atom from $0$ up to $f_2$. Do the peak positions change? How do you explain the change in intensity?What happens if (keeping the form factors unchanged) you change one of the fractional coordinates to be different from $0.5$? And if you change both coordinates? How do you explain this observation?
###Code
ex12b_txt = Textarea("Write the answer here", layout=Layout(width="100%"))
data_dump.register_field("ex12b-answer", ex12b_txt, "value")
display(ex12b_txt)
###Output
_____no_output_____
###Markdown
**12c** Set $f_1=30$, $f_2=10$, and set the unit cell to be a $1\times 1$ square. Set the fractional coordinates of the second atom to be $(0.5,0.5)$. Set the wavelength to 1. Observe how the position and intensity of the peaks change when you increase the form factor of the second atom from $0$ to $f_2=f_1=30$. How many peaks are left? How do you explain this?What lattice parameters should you use to obtain the exact same diffraction pattern with $f_2=0$?
###Code
ex12c_txt = Textarea("Write the answer here", layout=Layout(width="100%"))
data_dump.register_field("ex12c-answer", ex12c_txt, "value")
display(ex12c_txt)
###Output
_____no_output_____ |
venv/3Keras/keras.ipynb | ###Markdown
Here we create 500 points and scatter them in the plane, so that the upper region contains 500 points normally distributed around (13, 12) with a standard deviation of 2; similarly, the bottom region contains 500 points randomly distributed around (8, 6) with a standard deviation of 2. We transpose the arrays so that each row is one (x, y) point. Then we stack the two sets of points and plot them.
###Code
n_pts = 500
np.random.seed(0)
Xa = np.array([np.random.normal(13,2, n_pts),
np.random.normal(12,2, n_pts)]).T
Xb = np.array([np.random.normal(8,2, n_pts),
np.random.normal(6,2, n_pts)]).T
X = np.vstack((Xa,Xb))
Y = np.matrix(np.append(np.zeros(n_pts), np.ones(n_pts))).T
plt.scatter(X[:n_pts,0], X[:n_pts,1])
plt.scatter(X[n_pts:, 0], X[n_pts:,1])
#IMPLEMENTING AND CREATING THE NEURAL NETWORK
model = Sequential()
model.add(Dense(units =1, input_shape=(2,), activation ='sigmoid' ))#by this we are adding the layers in neural network
"""units = 1 means that single output
input_shape means how many input layers we wanna take
activation function is what type of function is required"""
adam = Adam(lr = 0.1)
"""adam is a optimizer which is just mean adaptive learning method algorithm
combination of two extensions a stochastic gradient descent
read about in details
hERE WE SET ATOM IS EQUAL TO AN INSTANCE OF ATOM WHICH WILL
MINIMIZE THE ERROR WITH A LEARNING RATE LR """
model.compile(adam, loss='binary_crossentropy', metrics=['accuracy'])
""" here we need to find what type of optimizer we're going to use
what kind of error function and tend to == ADAM
loss == it calculate the cross entropy value for a binary classification
problems for miltiple we use multiple_crossentropy
metrices == metrice
"""
h = model.fit(x=X,y=Y, verbose=1, batch_size=50, epochs =500, shuffle='true')
"""fit function == that we use to start treating our perceptron to start
trainig and model that perfectly classify our data """
###Output
Epoch 1/500
450/1000 [============>.................] - ETA: 1:10 - loss: 1.8885 - acc: 0.5511
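###Markdown
As a quick check once training has finished (a minimal sketch; the test point is an arbitrary choice), we can ask the trained network to classify a new point. `predict` returns the sigmoid output, i.e. the estimated probability that the point belongs to the second (bottom) cluster.
###Code
point = np.array([[8.0, 6.0]])  # near the centre of the bottom cluster
print(model.predict(point))
###Output
_____no_output_____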
###Markdown
The Sequential class is a linear stack of layers; in a Dense layer every node is connected to the preceding layer, which is the case for a fully connected neural network.
###Code
plt.plot(h.history['acc'])
plt.title('accuracy')
plt.xlabel('epoch')
plt.legend(['accuracy'])
"""X refers to the data that we created earlias 500 above nd below
y refers to the matrix whuch contained datasets oone for the botton and above
the last model refers to the sequential model that refers to the sequential
model that contain all the data that we trained earlier containing all our neural network data """
def plot_decision_boundary(X, y, model):
x_span = np.linspace(min(X[:,0])-1, max(X[:,0]) + 1)
y_span = np.linspace(min(X[:,1])-1, max(X[:,1]) + 1)
xx, yy = np.meshgrid(x_span, y_span)
xx_,yy_= xx.ravel(), yy.ravel()
grid = np.c_[xx_, yy_]
pred_func = model.predict(grid)
z = pred_func.reshape(xx.shape)
plt.contourf(xx, yy, z)
plot_decision_boundary(X, Y, model)
plt.scatter(X[:n_pts,0], X[:n_pts,1])
plt.scatter(X[n_pts:,0], X[n_pts: ,1])
###Output
_____no_output_____ |
Untitled10.ipynb | ###Markdown
###Code
import pandas as pd
###Output
_____no_output_____
###Markdown
Consider a dataframe with 12 rows, just to understand the case
###Code
data = {'AGE': ['', '','','','','','','','','','',''],
'SEX': ['', '','','','','','','','','','',''],
'GI illness': ['Y', 'Y','N','N','Y','Y','N','Y','Y','Y','U','Y'],
'INCUBATION': ['', '','','','','','','','','','',''],
'LENGTH OF': ['', '','','','','','','','','','',''],
'SALAD': ['Y', 'Y','N','Y','Y','Y','Y','Y','N','Y','N','Y'],
'RIGATONI': ['Y', 'Y','Y','Y','Y','N','N','N','Y','Y','U','Y'],
'ROAST BEEF': ['Y','Y','N','Y','Y','Y','Y','N','Y','Y','Y','Y'],
'CHICKEN': ['Y','Y','Y','Y','Y','Y','N','N','Y','Y','U','Y'],
'POTATOES': ['Y','Y','N','Y','Y','Y','Y','Y','N','Y','U','Y'],
'GREEN BEANS': ['Y', 'Y','Y','Y','Y','Y','Y','Y','Y','Y','U','Y'],
'CAKE': ['Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','U','Y'],
'COFFEE': ['Y', 'N','Y','Y','Y','Y','Y','N','Y','Y','U','Y'],
'WATER': ['Y', 'N','N','N','N','Y','N','N','N','Y','U','Y'],
'SODA': ['Y', 'Y','N','N','N','N','N','Y','Y','Y','U','Y']
}
df = pd.DataFrame (data, columns = ['AGE','SEX','GI illness','INCUBATION',
'LENGTH OF','SALAD','RIGATONI','ROAST BEEF','CHICKEN','POTATOES',
'GREEN BEANS','CAKE','COFFEE','WATER','SODA'])
df.head(13)
# we don't need AGE, SEX, INCUBATION, LENGTH OF, so we select only the required columns from the dataframe
df_res = pd.DataFrame(df, columns = ['GI illness','SALAD','RIGATONI','ROAST BEEF','CHICKEN','POTATOES',
'GREEN BEANS','CAKE','COFFEE','WATER','SODA'])
df_res.head()
#We are to find out the guests who consumed every food and drink item at the picnic
df_eachfood=df_res.loc[(df['SALAD']=='Y') & (df['RIGATONI']=='Y') & (df['ROAST BEEF']=='Y')
& (df['CHICKEN']=='Y') & (df['POTATOES']=='Y')& (df['GREEN BEANS']=='Y')&
(df['CAKE']=='Y') & (df['COFFEE']=='Y') & (df['WATER']=='Y') & (df['SODA']=='Y')]
df_eachfood.head()
number_of_ill_each = df_eachfood['GI illness'][df_eachfood['GI illness']=='Y'].count()
total_numbereach= df_eachfood.shape[0]
print(number_of_ill_each)
print(total_numbereach)
Attack_Rate_EachFood= number_of_ill_each/total_numbereach
print(Attack_Rate_EachFood)
#We are to find out the guests who didn't consume every food and drink item at the picnic
df_noteach=df_res.drop(df_eachfood.index,axis=0)
df_noteach #This dataframe contains the guests who didn't consume every food and drink item at the picnic
number_of_ill_noteach = df_noteach['GI illness'][df_noteach['GI illness']=='Y'].count()
total_numbernoteach= df_noteach.shape[0]
Attack_Rate_NotEachFood= number_of_ill_noteach/total_numbernoteach
print(Attack_Rate_NotEachFood)
# compare the two attack rates (assumption: this comparison is what the truncated `if` intended)
if Attack_Rate_EachFood > Attack_Rate_NotEachFood:
    print("The attack rate is higher among guests who consumed every item")
else:
    print("The attack rate is higher among guests who did not consume every item")
###Output
_____no_output_____
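###Markdown
A natural follow-up (a sketch using the df_res dataframe defined above) is to compute the attack rate separately for each food and drink item, which is how an implicated item is usually identified:
###Code
items = ['SALAD', 'RIGATONI', 'ROAST BEEF', 'CHICKEN', 'POTATOES',
         'GREEN BEANS', 'CAKE', 'COFFEE', 'WATER', 'SODA']
for item in items:
    ate = df_res[df_res[item] == 'Y']  # guests who consumed this item
    rate = (ate['GI illness'] == 'Y').sum() / len(ate)
    print(item, round(rate, 2))
###Output
_____no_output_____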
###Markdown
###Code
### install necessary packages if in colab
def run_subprocess_command(cmd):
process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)
for line in process.stdout:
print(line.decode().strip())
import sys, subprocess
IN_COLAB = "google.colab" in sys.modules
colab_requirements = ["pip install tf-nightly-gpu-2.0-preview==2.0.0.dev20190513"]
if IN_COLAB:
for i in colab_requirements:
run_subprocess_command(i)
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from tqdm.autonotebook import tqdm
%matplotlib inline
from IPython import display
import pandas as pd
print(tf.__version__)
TRAIN_BUF=60000
BATCH_SIZE=512
TEST_BUF=10000
DIMS = (28,28,1)
N_TRAIN_BATCHES =int(TRAIN_BUF/BATCH_SIZE)
N_TEST_BATCHES = int(TEST_BUF/BATCH_SIZE)
# load dataset
(train_images, _), (test_images, _) = tf.keras.datasets.fashion_mnist.load_data()
# split dataset
train_images = train_images.reshape(train_images.shape[0], 28, 28, 1).astype(
"float32"
) / 255.0
test_images = test_images.reshape(test_images.shape[0], 28, 28, 1).astype("float32") / 255.0
# batch datasets
train_dataset = (
tf.data.Dataset.from_tensor_slices(train_images)
.shuffle(TRAIN_BUF)
.batch(BATCH_SIZE)
)
test_dataset = (
tf.data.Dataset.from_tensor_slices(test_images)
.shuffle(TEST_BUF)
.batch(BATCH_SIZE)
)
class GAN(tf.keras.Model):
""" a basic GAN class
Extends:
tf.keras.Model
"""
def __init__(self, **kwargs):
super(GAN, self).__init__()
self.__dict__.update(kwargs)
self.gen = tf.keras.Sequential(self.gen)
self.disc = tf.keras.Sequential(self.disc)
def generate(self, z):
return self.gen(z)
def discriminate(self, x):
return self.disc(x)
def compute_loss(self, x):
""" passes through the network and computes loss
"""
# generating noise from a normal distribution
z_samp = tf.random.normal([x.shape[0], 1, 1, self.n_Z])
# run noise through generator
x_gen = self.generate(z_samp)
# discriminate x and x_gen
logits_x = self.discriminate(x)
logits_x_gen = self.discriminate(x_gen)
### losses
# losses of real with label "1"
disc_real_loss = gan_loss(logits=logits_x, is_real=True)
# losses of fake with label "0"
disc_fake_loss = gan_loss(logits=logits_x_gen, is_real=False)
disc_loss = disc_fake_loss + disc_real_loss
# losses of fake with label "1"
gen_loss = gan_loss(logits=logits_x_gen, is_real=True)
return disc_loss, gen_loss
def compute_gradients(self, x):
""" passes through the network and computes loss
"""
### pass through network
with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
disc_loss, gen_loss = self.compute_loss(x)
# compute gradients
gen_gradients = gen_tape.gradient(gen_loss, self.gen.trainable_variables)
disc_gradients = disc_tape.gradient(disc_loss, self.disc.trainable_variables)
return gen_gradients, disc_gradients
def apply_gradients(self, gen_gradients, disc_gradients):
self.gen_optimizer.apply_gradients(
zip(gen_gradients, self.gen.trainable_variables)
)
self.disc_optimizer.apply_gradients(
zip(disc_gradients, self.disc.trainable_variables)
)
@tf.function
def train(self, train_x):
gen_gradients, disc_gradients = self.compute_gradients(train_x)
self.apply_gradients(gen_gradients, disc_gradients)
def gan_loss(logits, is_real=True):
"""Computes standard gan loss between logits and labels
"""
if is_real:
labels = tf.ones_like(logits)
else:
labels = tf.zeros_like(logits)
return tf.compat.v1.losses.sigmoid_cross_entropy(
multi_class_labels=labels, logits=logits
)
N_Z = 64
generator = [
tf.keras.layers.Dense(units=7 * 7 * 64, activation="relu"),
tf.keras.layers.Reshape(target_shape=(7, 7, 64)),
tf.keras.layers.Conv2DTranspose(
filters=64, kernel_size=3, strides=(2, 2), padding="SAME", activation="relu"
),
tf.keras.layers.Conv2DTranspose(
filters=32, kernel_size=3, strides=(2, 2), padding="SAME", activation="relu"
),
tf.keras.layers.Conv2DTranspose(
filters=1, kernel_size=3, strides=(1, 1), padding="SAME", activation="sigmoid"
),
]
discriminator = [
tf.keras.layers.InputLayer(input_shape=DIMS),
tf.keras.layers.Conv2D(
filters=32, kernel_size=3, strides=(2, 2), activation="relu"
),
tf.keras.layers.Conv2D(
filters=64, kernel_size=3, strides=(2, 2), activation="relu"
),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(units=1, activation=None),
]
# optimizers
gen_optimizer = tf.keras.optimizers.Adam(0.001, beta_1=0.5)
disc_optimizer = tf.keras.optimizers.RMSprop(0.005)
# model
model = GAN(
gen = generator,
disc = discriminator,
gen_optimizer = gen_optimizer,
disc_optimizer = disc_optimizer,
n_Z = N_Z
)
# exampled data for plotting results
def plot_reconstruction(model, nex=8, zm=2):
samples = model.generate(tf.random.normal(shape=(BATCH_SIZE, N_Z)))
fig, axs = plt.subplots(ncols=nex, nrows=1, figsize=(zm * nex, zm))
for axi in range(nex):
axs[axi].matshow(
samples.numpy()[axi].squeeze(), cmap=plt.cm.Greys, vmin=0, vmax=1
)
axs[axi].axis('off')
plt.show()
# a pandas dataframe to save the loss information to
losses = pd.DataFrame(columns = ['disc_loss', 'gen_loss'])
n_epochs = 50
for epoch in range(n_epochs):
# train
for batch, train_x in tqdm(
zip(range(N_TRAIN_BATCHES), train_dataset), total=N_TRAIN_BATCHES
):
model.train(train_x)
# test on holdout
loss = []
for batch, test_x in tqdm(
zip(range(N_TEST_BATCHES), test_dataset), total=N_TEST_BATCHES
):
loss.append(model.compute_loss(test_x))
losses.loc[len(losses)] = np.mean(loss, axis=0)
# plot results
display.clear_output()
print(
"Epoch: {} | disc_loss: {} | gen_loss: {}".format(
epoch, losses.disc_loss.values[-1], losses.gen_loss.values[-1]
)
)
plot_reconstruction(model)
###Output
_____no_output_____
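###Markdown
The losses dataframe filled during training can be inspected directly; a minimal sketch of plotting the two training curves:
###Code
losses.plot(y=["disc_loss", "gen_loss"])
plt.xlabel("epoch")
plt.ylabel("loss")
plt.show()
###Output
_____no_output_____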
###Markdown
###Code
from google.colab import files
uploaded = files.upload()
import io
import pandas as pd
df = pd.read_csv(io.StringIO(uploaded['์ค์์ _์ฃฝ๋ นํฐ๋(๋ธ๋ฉด์จ๋).csv'].decode('cp949')))
df.head()
# imports needed by the helper functions below
from numpy import asarray
from pandas import DataFrame, concat
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error

def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
    n_vars = 1 if type(data) is list else data.shape[1]
    df = DataFrame(data)
    cols = list()
    # input sequence (t-n, ... t-1)
    for i in range(n_in, 0, -1):
        cols.append(df.shift(i))
    # forecast sequence (t, t+1, ... t+n)
    for i in range(0, n_out):
        cols.append(df.shift(-i))
    # put it all together
    agg = concat(cols, axis=1)
    # drop rows with NaN values
    if dropnan:
        agg.dropna(inplace=True)
    return agg.values

def train_test_split(data, n_test):
    # temporal split implied by the usage below: keep the last n_test rows for testing
    return data[:-n_test, :], data[-n_test:, :]

def walk_forward_validation(data, n_test):
    predictions = list()
    # split dataset
    train, test = train_test_split(data, n_test)
    # seed history with training dataset
    history = [x for x in train]
    # step over each time-step in the test set
    for i in range(len(test)):
        # split test row into input and output columns
        testX, testy = test[i, :-1], test[i, -1]
        # fit model on history and make a prediction
        yhat = random_forest_forecast(history, testX)
        # store forecast in list of predictions
        predictions.append(yhat)
        # add actual observation to history for the next loop
        history.append(test[i])
        # summarize progress
        print('>expected=%.1f, predicted=%.1f' % (testy, yhat))
    # estimate prediction error
    error = mean_absolute_error(test[:, -1], predictions)
    return error, test[:, -1], predictions

def random_forest_forecast(train, testX):
    # transform list into array
    train = asarray(train)
    # split into input and output columns
    trainX, trainy = train[:, :-1], train[:, -1]
    # fit model
    model = RandomForestRegressor(n_estimators=1000)
    model.fit(trainX, trainy)
    # make a one-step prediction
    yhat = model.predict([testX])
    return yhat[0]
df.head()
df.์์ง์ผ์
index = df.index
columns = df.columns
data = df.values
index
columns
data
type(index)
type(columns)
type(data)
issubclass(pd.RangeIndex, pd.Index)
index.values
columns.values
df.dtypes
df.๋ธ์
type(df['๋ธ์ '])
director = df['๋ธ์ ']
director.name
director.to_frame()
s_attr_methods = set(dir(pd.Series))
len(s_attr_methods)
df_attr_methods = set(dir(pd.DataFrame))
len(df_attr_methods)
len(s_attr_methods & df_attr_methods)
###Output
_____no_output_____
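###Markdown
A quick way to see what series_to_supervised does (a sketch on a toy series of ten integers):
###Code
import numpy as np

toy = np.arange(10).reshape(-1, 1)
# with n_in=3, each row of the result is [t-3, t-2, t-1, t]
print(series_to_supervised(toy, n_in=3))
###Output
_____no_output_____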
###Markdown
###Code
#Import scikit-learn dataset library
from sklearn import datasets
#load datasets
wine=datasets.load_wine()
#print the names of the 13 features
print("Features: ", wine.feature_names)
#print the label type of wine(class_0, class_1, class_2)
print("Labels: ", wine.target_names)
#Import train_test_split
from sklearn.model_selection import train_test_split
X = wine.data
y = wine.target
X_train, X_test, y_train, y_test=train_test_split(X, y, test_size=0.3, random_state=109) # 70% training and 30% test
#Import Gaussian Naive Bayes model
from sklearn.naive_bayes import GaussianNB
#Create a Gaussian Classifier
gnb=GaussianNB()
#Train the model using the training sets
gnb.fit(X_train, y_train)
#Predict the response for test dataset
y_pred = gnb.predict(X_test)
#Import scikit-learn metrics module for accuracy calculation
from sklearn import metrics
#Model Accuracy, how often is the classifier correct?
print("Accuracy:", metrics.accuracy_score(y_test, y_pred))
###Output
Accuracy: 0.9074074074074074
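###Markdown
Beyond the raw accuracy, the confusion matrix shows which of the three wine classes get mixed up (a minimal sketch using the predictions computed above):
###Code
from sklearn.metrics import confusion_matrix
print(confusion_matrix(y_test, y_pred))
###Output
_____no_output_____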
###Markdown
###Code
!pip install python-docx
!pip install beautifulsoup4
!pip install mammoth
import mammoth
from bs4 import BeautifulSoup
with open("PwC Brief.docx", "rb") as docx_file:
result = mammoth.convert_to_html(docx_file,ignore_empty_paragraphs=False)
html = result.value
soup = BeautifulSoup(html, 'lxml')
html
import spacy, re
from spacy.matcher import Matcher
from spacy.tokenizer import Tokenizer
from spacy import displacy
def custom_tokenizer(nlp, infix_reg):
"""
Function to return a customized tokenizer based on the infix regex
PARAMETERS
----------
nlp : Language
A Spacy language object with loaded model
infix_reg : regular expression object
The infix regular expression object based on which the tokenization is to be
carried out.
RETURNS
-------
Tokenizer : Tokenizer object
The Spacy tokenizer obtained based on the infix regex.
"""
return Tokenizer(nlp.vocab, infix_finditer = infix_reg.finditer)
#!python -m spacy download de_core_news_md
#import de_core_news_md
import spacy
from spacy import displacy
from spacy.scorer import Scorer
from spacy.pipeline import EntityRuler
import re
spacy.prefer_gpu() # or spacy.require_gpu()
nlp = spacy.blank('de')
infix_re = re.compile(r'''[-/,.]''')
nlp.tokenizer = custom_tokenizer(nlp, infix_re)
#nlp = de_core_news_md.load()
#test doc
ruler = EntityRuler(nlp)
patternd = [{'IS_ALPHA': True},{"IS_SPACE": False, "OP": "?"},{"LOWER": 'platz'.lower()}]
patterns = [{"label": "DATE", "pattern": [{'LIKE_NUM': True},{'ORTH': '/' ,'OP': '?'}, {'ORTH': '.', 'OP': '?'},{'ORTH': '-', 'OP': '?'}, {'LIKE_NUM': True}, {'ORTH': '/' ,'OP': '?'}, {'ORTH': '.' ,'OP': '?'},{'ORTH': '-' ,'OP': '?'},{'LIKE_NUM': True}]},
{"label":"LOC", "pattern":[{"LOWER":{"REGEX": "straรe([a-zA-Z]*)"}}]},
{"label":"LOC", "pattern":[{"LOWER":{"REGEX": "platz([a-zA-Z]*)"}}]},
{"label":"LOC", "pattern":[{"LOWER":{"REGEX": "gasse([a-zA-Z]*)"}}]},
{"label":"LOC", "pattern":[{"LOWER":{"REGEX": "stadt([a-zA-Z]*)"}}]},
{"label":"LOC", "pattern":[{"LOWER":{"REGEX": "bundesland([a-zA-Z]*)"}}]},
{"label":"PER", "pattern":[{"LOWER":{"REGEX": "mag([a-zA-Z/.]*)"}},{'ORTH': '.', 'OP': '?'},{"TEXT":{"REGEX": "[A-Z]"}, 'OP': '?'},{"TEXT":{"REGEX": "[A-Z]"}, 'OP': '?'},{"TEXT":{"REGEX": "[A-Z]"}, 'OP': '?'}]},
{"label":"PER", "pattern":[{"LOWER":{"REGEX": "dipl([a-zA-Z/.]*)"}},{'ORTH': '.', 'OP': '?'},{"TEXT":{"REGEX": "[A-Z]"}, 'OP': '?'},{"TEXT":{"REGEX": "[A-Z]"}, 'OP': '?'},{"TEXT":{"REGEX": "[A-Z]"}, 'OP': '?'}]},
{"label":"PER", "pattern":[{"LOWER":{"REGEX": "dr([a-zA-Z/.]*)"}},{'ORTH': '.', 'OP': '?'},{"TEXT":{"REGEX": "[A-Z]"}, 'OP': '?'},{"TEXT":{"REGEX": "[A-Z]"}, 'OP': '?'},{"TEXT":{"REGEX": "[A-Z]"}, 'OP': '?'}]},
{"label":"PER", "pattern":[{"LOWER":{"REGEX": "ing([a-zA-Z/.]*)"}},{'ORTH': '.', 'OP': '?'},{"TEXT":{"REGEX": "[A-Z]"}, 'OP': '?'},{"TEXT":{"REGEX": "[A-Z]"}, 'OP': '?'},{"TEXT":{"REGEX": "[A-Z]"}, 'OP': '?'}]},
{"label":"PER", "pattern":[{"LOWER":{"REGEX": "habil([a-zA-Z/.]*)"}},{'ORTH': '.', 'OP': '?'},{"TEXT":{"REGEX": "[A-Z]"}, 'OP': '?'},{"TEXT":{"REGEX": "[A-Z]"}, 'OP': '?'},{"TEXT":{"REGEX": "[A-Z]"}, 'OP': '?'}]},
{"label":"PER", "pattern":[{"LOWER":{"REGEX": "frau([a-zA-Z/.]*)"}},{'ORTH': '.', 'OP': '?'},{"TEXT":{"REGEX": "[A-Z]"}, 'OP': '?'},{"TEXT":{"REGEX": "[A-Z]"}, 'OP': '?'},{"TEXT":{"REGEX": "[A-Z]"}, 'OP': '?'}]},
{"label":"PER", "pattern":[{"LOWER":{"REGEX": "herr([a-zA-Z/.]*)"}},{'ORTH': '.', 'OP': '?'},{"TEXT":{"REGEX": "[A-Z]"}, 'OP': '?'},{"TEXT":{"REGEX": "[A-Z]"}, 'OP': '?'},{"TEXT":{"REGEX": "[A-Z]"}, 'OP': '?'}]},
]
ruler.add_patterns(patterns)
nlp.add_pipe(ruler)
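# Quick sanity check of the custom tokenizer and the ruler (a sketch; the
# sample string is made up for illustration). The infix regex [-/,.] splits
# the date into separate number/punctuation tokens, which is exactly what
# the DATE pattern above relies on.
for tok in nlp("Wien, 01.02.2020"):
    print(tok.text, tok.ent_type_)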
!pip install docx2txt
html_in.replace("</br></br></br>","").replace("</br></br>","</br>")
import docx2txt
# extract text
text = docx2txt.process("PwC Brief.docx")
doc = nlp(text)
html_in = displacy.render(doc,style='ent')
import IPython
IPython.display.HTML(html_in.replace("</br></br>","</br>").replace("</br></br>","</br>"))
# Write HTML String to file.html
#with open("PwC Brief.html", "w") as file:
#file.write(html_in)
text
html_in
import docx
import mammoth
doc_name = 'PwC Brief.docx'
doc = docx.Document(doc_name)
indexes =[]
right_alig_ind =[]
bold_ind =[]
html_in = ''
for para_id, para in enumerate(doc.paragraphs):
# make a sentence
#if len(para.text)>0:
indexes.append(para_id)
if para.alignment:
print(para_id, para, para.alignment)
right_alig_ind.append(para)
html_in += para.text
#doc = nlp(para.text)
#html_in += displacy.render(doc,style='ent')
html_in += '\n'
else:
#for rn in para.runs:
#doc = nlp(para.text)
html_in += para.text
#html_in += displacy.render(doc,style='ent')
html_in += '\n'
print(para_id, para.style.name)
if para.style.name == "UNamec":
bold_ind.append(para)
#for para_id, para in enumerate(doc.sections):
# make a sentence
#if len(para.text)>0:
# print(para_id, para)
doc = nlp(html_in)
html_out = displacy.render(doc,style='ent',page= True)
#html_out = html_out.replace("</br></br>","</br>")
#result = HTML_WRAPPER.format(html_out)
bold_ind[5].text
import IPython
hto = html_out.replace("</br></br>","</br>").replace('</br>\t','</br> ')
IPython.display.HTML(hto)
import mammoth
from bs4 import BeautifulSoup
with open("PwC Brief.docx", "rb") as docx_file:
result = mammoth.convert_to_html(docx_file,ignore_empty_paragraphs=False)
html = result.value
soup = BeautifulSoup(html, 'lxml')
list_of_bold = soup.findAll('strong')
for pr in right_alig_ind:
hto = hto.replace(pr.text, '<div style="text-align: right">{}</div>'.format(pr.text),1)
for pr in list_of_bold:
hto = hto.replace(pr.text, '<strong>{}</strong>'.format(pr.text), 1)
import xml.etree.ElementTree as ET
import zipfile as zf
import re
z = zf.ZipFile("PwC Brief2.docx")
f = z.open("word/document.xml") # a file-like object
nsmap = {'w': 'http://schemas.openxmlformats.org/wordprocessingml/2006/main'}
def qn(tag):
"""
Stands for 'qualified name', a utility function to turn a namespace
prefixed tag name into a Clark-notation qualified tag name for lxml. For
example, ``qn('p:cSld')`` returns ``'{http://schemas.../main}cSld'``.
Source: https://github.com/python-openxml/python-docx/
"""
prefix, tagroot = tag.split(':')
uri = nsmap[prefix]
return '{{{}}}{}'.format(uri, tagroot)
def xml2text(xml):
"""
A string representing the textual content of this run, with content
child elements like ``<w:tab/>`` translated to their Python
equivalent.
Adapted from: https://github.com/python-openxml/python-docx/
"""
check_next = False
text = u''
root = ET.fromstring(xml)
for child in root.iter():
if child.tag == qn('w:b'):
check_next = True
print('check')
#for it in child.items():
print(''.join(child.iter().itertext()))
if child.tag == qn('w:t'):
#if check_next:
#print(child.text)
#check_next = False
t_text = child.text
text += t_text if t_text is not None else ''
elif child.tag == qn('w:tab'):
text += '\t'
elif child.tag in (qn('w:br'), qn('w:cr')):
text += '\n'
elif child.tag == qn("w:p"):
text += '\n\n'
return text
header = u''
# unzip the docx in memory
zipf = zf.ZipFile("PwC Brief5.docx")
filelist = zipf.namelist()
# get header text
# there can be 3 header files in the zip
header_xmls = 'word/header[0-9]*.xml'
for fname in filelist:
if re.match(header_xmls, fname):
header += xml2text(zipf.read(fname))
# get main text
text = u''
doc_xml = 'word/document.xml'
text += xml2text(zipf.read(doc_xml))
text
#header
qn('w:jc')
IPython.display.HTML(header.replace('\n\n\n\n\n\n\n\n\n\n','').replace('\n','</br>') + '</br>' + hto+ '</br>')
!pip install docx2python
import mammoth
from docx import Document
from docx2python import docx2python
from docx2python.iterators import iter_paragraphs
# one header one footer
doc = docx2python("PwC Brief.docx")
header = ''.join(iter_paragraphs(doc.header))
footer = ''.join(iter_paragraphs(doc.footer))
print("header: ", header)
print("footer: ", footer)
header.encode('utf-8')
header
# Repaired sketch: the original cell referenced an undefined `file` handle and
# an undefined `footer3`; assuming the intent was to dump the header, the body
# text, and the footer into a single UTF-8 file (output filename is assumed).
with open("PwC Brief.txt", "wb") as file:
    file.write(header.encode('utf-8'))
    file.write('\n'.encode('utf-8'))
    file.write(text.encode('utf-8'))
    # add footers
    file.write(footer.encode('utf-8'))
    file.write('\n'.encode('utf-8'))
from bs4 import BeautifulSoup
#html = document.value
soup = BeautifulSoup(hto,"html")
import IPython
IPython.display.HTML(str(soup))
str(soup)
!pip install docx2python
from docx2python import docx2python
# one header one footer
doc3 = docx2python("PwC Brief.docx",html=True)
doc3.header
from docx2python.iterators import get_html_map  # the name `docx2python` imported above is a function, not the package
get_html_map(doc3.document)
import IPython
IPython.display.HTML(soup.prettify())
import IPython
IPython.display.HTML(html_out.replace("</br></br>","</br>"))
print(result)
# Write HTML String to file.html
with open("PwC Brief.html", "w") as file:
file.write(result)
from bs4 import BeautifulSoup
#html = document.value
soup = BeautifulSoup(html_out, 'lxml')
print(html_out)
import IPython
IPython.display.HTML(text)
def label_par(par,html_out):
soup_S = BeautifulSoup(html_out)
p1 = soup_S.find('body')
s = p1.find_next('div')
labels = s.find_all('mark')
newcont = str(par)
print(labels)
if len(labels) >0:
for lb in labels:
for c in par.contents:
if lb.contents[0].strip() in c:
#c.(lb.contents[0].strip(),lb)
print(lb.contents[0].strip())
newcont = newcont.replace(lb.contents[0].strip(), str(lb))
newconthtml = BeautifulSoup(newcont).find('body').contents[0]
par.string = ''
par.append(newconthtml)
return par
#options = get_entity_options()
for par_i, par in enumerate(soup.findAll('p')):
print(par_i,par.getText())
doc = nlp(par.getText())
html_out = displacy.render(doc, style='ent')
par = label_par(par,html_out)
#print(html_out)
#par.replace_with()
#IPython.display.HTML(html_out)
#soup_S = BeautifulSoup(html_out)
#for match in soup_S.findAll('div'):
#match.replaceWithChildren()
###Output
_____no_output_____
###Markdown
###Code
import torch
###Output
_____no_output_____
###Markdown
Table of Contents
###Code
import geopandas
###Output
_____no_output_____
###Markdown
Data Exploration
###Code
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
import sklearn.metrics as metrics
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.datasets import make_classification
import itertools
from sklearn.feature_extraction.text import HashingVectorizer
from scipy.sparse import csr_matrix
# Import `fake_or_real_news.csv`
df = pd.read_csv("https://s3.amazonaws.com/assets.datacamp.com/blog_assets/fake_or_real_news.csv")
# Inspect shape of `df`
df.shape
# Print first lines of `df`
df.head()
# Set index
df = df.set_index("Unnamed: 0")
# Print first lines of `df`
df.head()
###Output
_____no_output_____
###Markdown
Extracting the training data
###Code
# Set `y`
y = df.label
# Drop the `label` column
df.drop("label", axis=1)
# Make training and test sets
X_train, X_test, y_train, y_test = train_test_split(df['text'], y, test_size=0.33, random_state=53)
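# Quick sanity check (sketch): confirm the FAKE/REAL classes stay roughly
# balanced across the train and test splits.
print(y_train.value_counts(normalize=True))
print(y_test.value_counts(normalize=True))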
###Output
_____no_output_____
###Markdown
Building Vectorizer **Classifiers** Now that you have your training and testing data, you can build your classifiers. To get a good idea of whether the words and tokens in the articles had a significant impact on whether the news was fake or real, you begin by using CountVectorizer and TfidfVectorizer. You'll see the example has a max threshold set at 0.7 for the TF-IDF vectorizer tfidf_vectorizer using the max_df argument. This removes words which appear in more than 70% of the articles. Also, the built-in stop_words parameter will remove English stop words from the data before making vectors. There are many more parameters available, and you can read all about them in the scikit-learn documentation for TfidfVectorizer and CountVectorizer.
###Code
# Initialize the `count_vectorizer`
count_vectorizer = CountVectorizer(stop_words='english')
# Fit and transform the training data
count_train = count_vectorizer.fit_transform(X_train)
# Transform the test set
count_test = count_vectorizer.transform(X_test)
# Initialize the `tfidf_vectorizer`
tfidf_vectorizer = TfidfVectorizer(stop_words='english', max_df=0.7)
# Fit and transform the training data
tfidf_train = tfidf_vectorizer.fit_transform(X_train)
# Transform the test set
tfidf_test = tfidf_vectorizer.transform(X_test)
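# Sketch: both vectorizers yield one sparse row per article; vocabulary sizes
# may differ if max_df prunes terms (here they turn out identical, as the
# comparison further below shows).
print(count_train.shape, tfidf_train.shape)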
###Output
_____no_output_____
###Markdown
Now that you have vectors, you can take a look at the vector features, stored in count_vectorizer and tfidf_vectorizer. Are there any noticeable issues? (Yes!) There are clearly comments, measurements, or other nonsensical words, as well as multilingual articles in the dataset that you have been using. Normally, you would want to spend more time preprocessing this and removing noise, but as this tutorial just showcases a small proof of concept, you will see if the model can overcome the noise and properly classify despite these issues.
###Code
# Get the feature names of `tfidf_vectorizer`
print(tfidf_vectorizer.get_feature_names()[-10:])
# Get the feature names of `count_vectorizer`
print(count_vectorizer.get_feature_names()[:10])
###Output
['ุญูุจ', 'ุนุฑุจู', 'ุนู', 'ูู', 'ูุง', 'ูุญุงููุงุช', 'ูู', 'ูุฐุง', 'ูุงููุฑุถู', 'เธขเธade']
['00', '000', '0000', '00000031', '000035', '00006', '0001', '0001pt', '000ft', '000km']
###Markdown
Intermezzo: Count versus TF-IDF Features I was curious if my count and TF-IDF vectorizers had extracted different tokens. To take a look and compare features, you can extract the vector information back into a DataFrame to use easy Python comparisons. As you can see by running the cells below, both vectorizers extracted the same tokens, but obviously have different weights. Likely, changing the max_df and min_df of the TF-IDF vectorizer could alter the result and lead to different features in each.
###Code
count_df = pd.DataFrame(count_train.A, columns=count_vectorizer.get_feature_names())
tfidf_df = pd.DataFrame(tfidf_train.A, columns=tfidf_vectorizer.get_feature_names())
difference = set(count_df.columns) - set(tfidf_df.columns)
difference
print(count_df.equals(tfidf_df))
count_df.head()
tfidf_df.head()
###Output
_____no_output_____
###Markdown
Comparing Models Now it's time to train and test your models. Here, you'll begin with an NLP favorite, MultinomialNB. You can use this to compare TF-IDF versus bag-of-words. My intuition was that bag-of-words (aka CountVectorizer) would perform better with this model. (For more reading on the multinomial distribution and why it works best with integers, check out this fairly succinct explanation from a UPenn statistics course.) I personally find confusion matrices easier to compare and read, so I used the scikit-learn documentation to build some easily readable confusion matrices (thanks, open source!). A confusion matrix shows the proper labels on the main diagonal (top left to bottom right). The other cells show the incorrect labels, often referred to as false positives or false negatives. Depending on your problem, one of these might be more significant. For example, for the fake news problem, is it more important that we don't label real news articles as fake news? If so, we might want to eventually weight our accuracy score to better reflect this concern.
###Code
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
See full source and example:
http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, cm[i, j],
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
clf = MultinomialNB()
clf.fit(tfidf_train, y_train)
pred = clf.predict(tfidf_test)
score = metrics.accuracy_score(y_test, pred)
print("accuracy: %0.3f" % score)
cm = metrics.confusion_matrix(y_test, pred, labels=['FAKE', 'REAL'])
plot_confusion_matrix(cm, classes=['FAKE', 'REAL'])
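# Sketch (assumption: treating 'FAKE' as the positive class): per-class
# precision and recall make the false-positive vs. false-negative trade-off
# discussed above explicit, complementing plain accuracy.
print(metrics.classification_report(y_test, pred, labels=['FAKE', 'REAL']))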
clf = MultinomialNB()
clf.fit(count_train, y_train)
pred = clf.predict(count_test)
score = metrics.accuracy_score(y_test, pred)
print("accuracy: %0.3f" % score)
cm = metrics.confusion_matrix(y_test, pred, labels=['FAKE', 'REAL'])
plot_confusion_matrix(cm, classes=['FAKE', 'REAL'])
linear_clf = PassiveAggressiveClassifier()
linear_clf.fit(tfidf_train, y_train)
pred = linear_clf.predict(tfidf_test)
score = metrics.accuracy_score(y_test, pred)
print("accuracy: %0.3f" % score)
cm = metrics.confusion_matrix(y_test, pred, labels=['FAKE', 'REAL'])
plot_confusion_matrix(cm, classes=['FAKE', 'REAL'])
clf = MultinomialNB(alpha=0.1)
last_score = 0
for alpha in np.arange(0,1,.1):
nb_classifier = MultinomialNB(alpha=alpha)
nb_classifier.fit(tfidf_train, y_train)
pred = nb_classifier.predict(tfidf_test)
score = metrics.accuracy_score(y_test, pred)
    if score > last_score:
        clf = nb_classifier
        last_score = score  # track the best score; otherwise any later model with score > 0 overwrites clf
print("Alpha: {:.2f} Score: {:.5f}".format(alpha, score))
def most_informative_feature_for_binary_classification(vectorizer, classifier, n=100):
"""
See: https://stackoverflow.com/a/26980472
Identify most important features if given a vectorizer and binary classifier. Set n to the number
of weighted features you would like to show. (Note: current implementation merely prints and does not
return top classes.)
"""
class_labels = classifier.classes_
feature_names = vectorizer.get_feature_names()
topn_class1 = sorted(zip(classifier.coef_[0], feature_names))[:n]
topn_class2 = sorted(zip(classifier.coef_[0], feature_names))[-n:]
for coef, feat in topn_class1:
print(class_labels[0], coef, feat)
print()
for coef, feat in reversed(topn_class2):
print(class_labels[1], coef, feat)
most_informative_feature_for_binary_classification(tfidf_vectorizer, linear_clf, n=30)
feature_names = tfidf_vectorizer.get_feature_names()
### Most real
sorted(zip(clf.coef_[0], feature_names), reverse=True)[:20]
### Most fake
sorted(zip(clf.coef_[0], feature_names))[:20]
tokens_with_weights = sorted(list(zip(feature_names, clf.coef_[0])))
hash_vectorizer = HashingVectorizer(stop_words='english')
hash_train = hash_vectorizer.fit_transform(X_train)
hash_test = hash_vectorizer.transform(X_test)
clf = PassiveAggressiveClassifier()
clf.fit(hash_train, y_train)
pred = clf.predict(hash_test)
score = metrics.accuracy_score(y_test, pred)
print("accuracy: %0.3f" % score)
cm = metrics.confusion_matrix(y_test, pred, labels=['FAKE', 'REAL'])
plot_confusion_matrix(cm, classes=['FAKE', 'REAL'])
###Output
_____no_output_____
###Markdown
###Code
class Usuario():
_nomeUsuario = ""
def settNomeUsuario(self, nomeUsuario):
self._nomeUsuario = nomeUsuario
def getNomeUsuario(self):
return self._nomeUsuario
class Admin(Usuario):
def escrevaNome(self):
return "ADM"
def digaOla(self):
return "Ola ADM, "+ self.getNomeUsuario()
admin1 = Admin()
admin1.settNomeUsuario("Baltazar")
print(admin1.digaOla())
###Output
Ola ADM, Baltazar
|
docker/65896179-get-exposed-port-from-within-docker-compose/DOCS.ipynb | ###Markdown
Question [[link](https://stackoverflow.com/questions/65896179/get-exposed-port-from-within-docker-compose65896179)] Get exposed port from within docker-compose Answer
###Code
!cat dockerfile
!cat docker-compose.yml
###Output
version: "3.9"
services:
server:
build: .
ports:
- "8086"
environment:
- PORT=8086
###Markdown
[!]No `HOST_PORT` has been provided in the `docker-compose.yml` file above.
###Code
!docker-compose up
###Output
Building with native build. Learn about native build in Compose here: https://docs.docker.com/go/compose-native-build/
Building server
Sending build context to Docker daemon 38.4kB
Step 1/10 : FROM python:3.8
---> 4d53664a7025
Step 2/10 : RUN python -m pip install --upgrade pip
---> Using cache
---> 9a549e8b7b16
Step 3/10 : RUN pip install pipenv
---> Using cache
---> a6144725fda4
Step 4/10 : COPY Pipfile* /app/
---> Using cache
---> dee26770a0c6
Step 5/10 : RUN cd /app && pipenv lock --keep-outdated --requirements > requirements.txt
---> Using cache
---> 6ec6eef050f8
Step 6/10 : RUN pip install -r /app/requirements.txt
---> Using cache
---> 91e654cb1802
Step 7/10 : COPY . /app/src
---> aa2a5a9da395
Step 8/10 : WORKDIR /app/src
---> Running in dab674cdaa8d
Removing intermediate container dab674cdaa8d
---> 9ca4edcc70cf
Step 9/10 : EXPOSE ${PORT}
---> Running in fe3270892dcf
Removing intermediate container fe3270892dcf
---> 1ccbbad053cf
Step 10/10 : CMD uwsgi --http :${PORT} --wsgi-file server.py --callable main
---> Running in a66c32531bdd
Removing intermediate container a66c32531bdd
---> 6914fdab68cb
Successfully built 6914fdab68cb
Successfully tagged 65896179-get-exposed-port-from-within-docker-compose_server:latest
WARNING: Image for service server was built because it did not already exist. To rebuild this image you must use `docker-compose build` or `docker-compose up --build`.
Creating 65896179-get-exposed-port-from-within-docker-compose_server_1 ... done
Attaching to 65896179-get-exposed-port-from-within-docker-compose_server_1
server_1 | *** Starting uWSGI 2.0.19.1 (64bit) on [Tue Jan 26 06:12:47 2021] ***
server_1 | compiled with version: 8.3.0 on 26 January 2021 06:08:33
server_1 | os: Linux-5.4.0-64-generic #72-Ubuntu SMP Fri Jan 15 10:27:54 UTC 2021
server_1 | nodename: de0120c61c1e
server_1 | machine: x86_64
server_1 | clock source: unix
server_1 | pcre jit disabled
server_1 | detected number of CPU cores: 16
server_1 | current working directory: /app/src
server_1 | detected binary path: /usr/local/bin/uwsgi
server_1 | uWSGI running as root, you can use --uid/--gid/--chroot options
server_1 | *** WARNING: you are running uWSGI as root !!! (use the --uid flag) ***
server_1 | *** WARNING: you are running uWSGI without its master process manager ***
server_1 | your processes number limit is 126513
server_1 | your memory page size is 4096 bytes
server_1 | detected max file descriptor number: 1048576
server_1 | lock engine: pthread robust mutexes
server_1 | thunder lock: disabled (you can enable it with --thunder-lock)
server_1 | uWSGI http bound on :8086 fd 4
server_1 | spawned uWSGI http 1 (pid: 9)
server_1 | uwsgi socket 0 bound to TCP address 127.0.0.1:33619 (port auto-assigned) fd 3
server_1 | uWSGI running as root, you can use --uid/--gid/--chroot options
server_1 | *** WARNING: you are running uWSGI as root !!! (use the --uid flag) ***
server_1 | Python version: 3.8.7 (default, Jan 12 2021, 17:06:28) [GCC 8.3.0]
server_1 | *** Python threads support is disabled. You can enable it with --enable-threads ***
server_1 | Python main interpreter initialized at 0x55a43c32d5c0
server_1 | uWSGI running as root, you can use --uid/--gid/--chroot options
server_1 | *** WARNING: you are running uWSGI as root !!! (use the --uid flag) ***
server_1 | your server socket listen backlog is limited to 100 connections
server_1 | your mercy for graceful operations on workers is 60 seconds
server_1 | mapped 72920 bytes (71 KB) for 1 cores
server_1 | *** Operational MODE: single process ***
server_1 | WSGI app 0 (mountpoint='') ready in 0 seconds on interpreter 0x55a43c32d5c0 pid: 8 (default app)
server_1 | uWSGI running as root, you can use --uid/--gid/--chroot options
server_1 | *** WARNING: you are running uWSGI as root !!! (use the --uid flag) ***
server_1 | *** uWSGI is running in multiple interpreter mode ***
server_1 | spawned uWSGI worker 1 (and the only) (pid: 8, cores: 1)
^C
Gracefully stopping... (press Ctrl+C again to force)
Stopping 65896179-get-exposed-port-from-within-docker-compose_server_1 ...
###Markdown
[!]As seen above, docker-compose can build the container just fine.But as shown below, the container is assigned a random `HOST_PORT`, in this case `49157`.
###Code
!docker ps
!cat docker-compose.yml
###Output
version: "3.9"
services:
server:
build: .
ports:
- "8086:8086"
environment:
- PORT=8086
###Markdown
[!]We can solve this by providing a `HOST_PORT` as described in https://docs.docker.com/compose/networking/
###Code
!docker-compose up
!docker ps
!curl localhost:8086
###Output
Hello World!
###Markdown
[!]Alternatively, if the `--scale` flag is to be used and multiple ports have to be chosen one can use a range of ports as `HOST_PORT` such as `8000-8004:8`
###Code
!docker-compose up --scale server=4
!docker ps
!curl localhost:8000
!curl localhost:8001
!curl localhost:8002
!curl localhost:8003
###Output
Hello World! |
diploma/Poissons on the original distrib.ipynb | ###Markdown
Outlier Factors for Device Profiling
###Code
import pandas as pd
import numpy as np
%matplotlib inline
time_epoch = 60
epochs_per_batch = 50
# hard coded nrows
df_all = pd.read_csv('../../../diploma/multi-source-syber-security-events/flows.txt', header=None, nrows=500000)
df_all.columns = ['time', 'duration', 'source computer', 'source port', 'destination computer',
'destination port', 'protocol', 'packet count', 'byte count']
df = df_all[df_all['time'] <= epochs_per_batch * time_epoch].copy()  # .copy() avoids pandas' SettingWithCopyWarning on the drop below
df.index = df['time']
df.drop(columns=['time'],inplace=True)
df.head()
# get all the host in the buckets we are interested in
hosts = np.array(list(set(df_all[df_all['time'] <= epochs_per_batch * time_epoch * 2]['source computer'].values)))
def group_scale_data(df, size_of_bin_seconds=60, addZeros=True, hosts=None, verbose=0):
"""
    :param size_of_bin_seconds: the time period of each bin;
    assumes the dataframe has columns named 'source computer' and 'byte count'
:param addZeros: add values (0, 0) where no data has been received for this bucket
:return: a dictionary containing for each host the features, the hosts
"""
if hosts is None:
hosts = np.array(list(set(df['source computer'].values)))
bins = np.arange(df.index.min(), df.index.max() + size_of_bin_seconds + 1, size_of_bin_seconds)
groups = df[['byte count','source computer']].groupby([np.digitize(df.index, bins),'source computer'])
data = groups.count()
data.columns = ['number of flows']
data['mean(byte count)'] = groups.mean().values
data_reset = data.reset_index()
if verbose > 0:
print('A total of', len(bins) - 1, 'time epochs have been encountered')
len_hosts = len(hosts)
intervals = int(len_hosts / 20)
i = 0
if addZeros:
for host in hosts:
if verbose > 0 and i % intervals == 0:
print('Done with', i, 'hosts out of', len_hosts)
i += 1
for bin_i in range(1,len(bins)):
if (bin_i, host) not in data.index:
new_row = [bin_i, host, 0.0, 0.0]
data_reset = data_reset.append(pd.DataFrame([new_row], columns=data_reset.columns), ignore_index=True )
groupped_data = pd.DataFrame(data_reset.values[:,2:], columns=['number of flows', 'mean(byte count)'])
groupped_data['epoch'] = data_reset['level_0']
groupped_data['source computer'] = data_reset['source computer']
# set parameters for next acquisition of data
parameters = {}
parameters['size_of_bin_seconds'] = size_of_bin_seconds
parameters['hosts'] = hosts
groupped_data = groupped_data.sample(frac=1)
groupped_data['mean(byte count)'] = groupped_data['mean(byte count)'].values.astype(int)
groupped_data['number of flows'] = groupped_data['number of flows'].values.astype(int)
return groupped_data.sort_values(by=['epoch']), hosts, parameters
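# Illustrative sketch of the binning above (values are made up): np.digitize
# maps each timestamp to the index of the bucket it falls into.
_demo_bins = np.arange(0, 181, 60)                 # [0, 60, 120, 180]
print(np.digitize([5, 59, 61, 125], _demo_bins))   # -> [1 1 2 3]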
groupped_data, hosts, parameters = group_scale_data(df, size_of_bin_seconds=time_epoch,
addZeros=True, verbose=1, hosts=hosts)
groupped_data.head(5)
print(np.max(groupped_data['number of flows'].values))
print(np.max(groupped_data['mean(byte count)'].values))
from emClustering import OnlineEM
from plots import plot_points, plot_results, plot_category, plot_all_categories
from kplusplus import KPlusPlus
import numpy as np
import joblib  # sklearn.externals.joblib was removed in newer scikit-learn releases; the standalone joblib package is the drop-in replacement
#joblib.dump(groupped_data,'groupped_data_60_50.pkl')
groupped_data = joblib.load('groupped_data_60_50.pkl')
print(np.sum(groupped_data['mean(byte count)'] == 0))
print(len(groupped_data['mean(byte count)']))
print(np.max(groupped_data['mean(byte count)']))
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler(feature_range=(0,1000))
scaler.fit([[x] for x in groupped_data['mean(byte count)'].values])
groupped_data['mean(byte count)'] = scaler.transform([[x] for x in groupped_data['mean(byte count)'].values]).astype(int)
np.sum(groupped_data['mean(byte count)'] == 0)
from pylab import rcParams
from matplotlib import pyplot as plt
import numpy as np
from collections import Counter
import matplotlib.patches as mpatches
colors = ['blue', 'red', 'green', 'yellow']
styles = ['-','--',':','-.']
rcParams['font.size'] = 16
def plot_points(data, em=None):
rcParams['figure.figsize'] = 16, 9
data_hashable = [tuple(x) for x in data]
total_points = len(data_hashable)
values = np.vstack([list(x) for x in list(Counter(data_hashable).keys())])
counts = np.array(list(Counter(data_hashable).values()))
for i in range(len(values)):
plt.scatter(values[i][0], values[i][1], s=20, color='blue')
if em:
for i, lambda_i in enumerate(em.lambdas):
plt.scatter(lambda_i[0], lambda_i[1], s=em.gammas[i]*1000, linewidth=4, color='red', marker='x')
blue_patch = mpatches.Patch(color='blue', label='Data points')
red_patch = mpatches.Patch(color='red', label='Centers of Distribution')
plt.legend(handles=[red_patch, blue_patch], fontsize=18)
else:
blue_patch = mpatches.Patch(color='blue', label='Data points')
plt.legend(handles=[blue_patch], fontsize=18)
plt.ylabel('average number of bytes')
plt.xlabel('number of flows')
plt.show()
plot_points(groupped_data.values[:, :2])
test = groupped_data[groupped_data['number of flows'] > 1].copy()
small_points = test[(test['mean(byte count)'] < 1000) & (test['number of flows'] < 30)].values[:, :2]
plot_points(small_points)
len(small_points)
import scipy.stats.distributions
from math import log
import sys
import numpy as np
def poisson(x, l):
return_value = 1
for x_i, l_i in zip(x, l):
return_value *= scipy.stats.distributions.poisson.pmf(x_i, l_i)
if return_value == 0:
return sys.float_info.epsilon
return return_value
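# Sketch: the pmf product above underflows for large counts (hence the epsilon
# clamp); summing log-pmfs is the numerically safe alternative.
def log_poisson(x, l):
    return sum(scipy.stats.distributions.poisson.logpmf(x_i, l_i)
               for x_i, l_i in zip(x, l))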
class EM:
def __init__(self, lambdas, gammas, x,
convergence_error=0.0001, verbose=0):
self.convergence_error = convergence_error
self.lambdas = np.vstack(lambdas)
self.gammas = np.array(gammas)
self.m = len(gammas)
        self.x = x
        self.probabilities = np.zeros(len(x))  # used by _calculate_probabilities, which otherwise hits an undefined attribute
def _calculate_probabilities(self):
for i, x_i in enumerate(self.x):
self.probabilities[i] = np.sum(self.gammas * np.array([poisson(x_i, lambda_i) for lambda_i in self.lambdas]))
def _calculate_likelihood(self):
# naive implementation for likelihood calculation
new_likelihood = 0
for x in self.x:
total_x = np.sum(self.gammas * np.array([poisson(x, lambda_i) for lambda_i in self.lambdas]))
new_likelihood = new_likelihood + log(total_x)
return new_likelihood
def _calculate_participation(self):
f = np.zeros(shape=(len(self.x), len(self.lambdas)))
for i, x_i in enumerate(self.x):
participation = self.gammas * np.array([poisson(x_i, lambda_i) for lambda_i in self.lambdas])
total_x = np.sum(participation)
if total_x == 0:
participation = np.array([1/self.m] * self.m)
total_x = 1
f[i] = participation / total_x
return f
def run(self):
previous_likelihood = self._calculate_likelihood()
while True:
f = self._calculate_participation()
temp_sum = f.sum(axis=0)
self.gammas = temp_sum / len(self.x)
temp = np.zeros(shape=(len(self.lambdas), len(self.lambdas[0])))
for i, x_i in enumerate(self.x):
temp = temp + np.vstack([x_i * f_i for f_i in f[i]])
self.lambdas = np.vstack([temp[i] / temp_i for i, temp_i in enumerate(temp_sum)])
new_likelihood = self._calculate_likelihood()
convergence = new_likelihood / previous_likelihood - 1
print('convergence', convergence)
if - self.convergence_error < convergence < self.convergence_error:
break
previous_likelihood = new_likelihood
em = EM([[0,0],[20, 500]], [0.6, 0.4] , small_points[:1000], convergence_error=0.1)
em.run()
print(em.gammas)
print(em.lambdas)
def real_poisson(x, l):
    # log-pmf values must be summed; multiplying them does not give a log-likelihood
    return_value = 0
    for x_i, l_i in zip(x, l):
        return_value += scipy.stats.distributions.poisson.logpmf(x_i, l_i)
    return return_value
print(real_poisson([100, 200], [0, 0]))
print(real_poisson([100, 200], [25, 2000]))
print(real_poisson([100, 200], [75, 2000]))
print(real_poisson([100, 200], [25, 2100000]))
print(real_poisson([100, 200], [75, 2100000]))
print(real_poisson([100, 2100000], [100, 2000000]))
scipy.stats.distributions.poisson.pmf(100, 1000)
mixtures = 3
kplusplus = KPlusPlus(mixtures, groupped_data.values[:, :2], stochastic=True, stochastic_n_samples=10000)
kplusplus.init_centers(verbose=1)
kplusplus.centers
test = int(len(set(groupped_data['source computer'].values)))
# random initialization
onlineEM = OnlineEM([1/mixtures]*mixtures, kplusplus.centers, test, n_clusters=8, verbose=1, update_power=0.5)
plot_points(groupped_data.values[:, :2], onlineEM)
data = groupped_data.values[:,[0,1,3]]
onlineEM.fit(data)
from pylab import rcParams
from matplotlib import pyplot as plt
import numpy as np
from collections import Counter
import matplotlib.patches as mpatches
colors = ['blue', 'red', 'green', 'yellow']
styles = ['-','--',':','-.']
rcParams['font.size'] = 16
def plot_points(data, em=None):
rcParams['figure.figsize'] = 16, 9
data_hashable = [tuple(x) for x in data]
total_points = len(data_hashable)
values = np.vstack([list(x) for x in list(Counter(data_hashable).keys())])
counts = np.array(list(Counter(data_hashable).values()))
for i in range(len(values)):
plt.scatter(values[i][0], values[i][1], s=20, color='blue')
if em:
for i, lambda_i in enumerate(em.lambdas):
plt.scatter(lambda_i[0], lambda_i[1], s=em.gammas[i]*1000, linewidth=4, color='red', marker='x')
blue_patch = mpatches.Patch(color='blue', label='Data points')
red_patch = mpatches.Patch(color='red', label='Centers of Distribution')
plt.legend(handles=[red_patch, blue_patch], fontsize=18)
else:
blue_patch = mpatches.Patch(color='blue', label='Data points')
plt.legend(handles=[blue_patch], fontsize=18)
plt.ylabel('average number of bytes')
plt.xlabel('number of flows')
plt.xlim([-5,35])
plt.ylim([-10000,1000000])
plt.show()
plot_points(groupped_data.values[:, :2], onlineEM)
print(onlineEM.lambdas)
print(onlineEM.gammas)
total = 0
for i in range(1):
total += np.sum(groupped_data['number of flows'] == i)
print(total)
np.sum(groupped_data['number of flows'] == 0)
###Output
_____no_output_____ |
mapping/zillow_data.ipynb | ###Markdown
OK, so the goal here is to do a couple of things with the map. Specifically:- Bring in the Zillow data- Map rents across NYC- Layer on a subway map?
###Code
import pandas as pd
import os
import numpy as np
import matplotlib.pyplot as plt # Helps plot
import numpy as np # Numerical operations
import pyarrow as pa
import pyarrow.parquet as pq
import matplotlib.colors as colors
#import fiona # Needed for geopandas to run
import geopandas as gpd # this is the main geopandas
#from shapely.geometry import Point, Polygon # also needed
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
os.getcwd() + "\\zillow\\zillow_med_price"
df = pd.read_csv(os.getcwd() + "\\zillow\\Zip_ZriPerSqft_AllHomes.csv",encoding = "ISO-8859-1")
df.head()
###Output
_____no_output_____
###Markdown
One thing about the Zillow dataset is that it is what I would call "wide" in the sense that there are many columns, and most of these reflect the same observation unit, just at different time periods. For a lot of reasons, it makes more sense to make this dataframe "long" in the sense that each row reflects one observation (place, time, value). Below is one way to do this by "melting" it.
###Code
df = df.melt(id_vars = ["RegionID","RegionName","City","State","Metro","CountyName","SizeRank"
])
df.head()
###Output
_____no_output_____
###Markdown
Now what we have is a "long data set" where each row is a place, time, value. This facilitaties our ability to select on stuff
###Code
nyc_price = df[(df.City == "New York") & (df.variable == "2019-01")].copy()
nyc_price.shape
nyc_price.head()
nyc_price["ZIPCODE"] = nyc_price.RegionName.astype(int)
new_name_dict = {"value": "price"}
nyc_price.rename(columns= new_name_dict, inplace=True)
nyc_price["log_price"] = np.log(nyc_price.price)
cwd = os.getcwd()
regions_shape = cwd + "\\shapefile\\ZIP_CODE_040114.shx"
regions_shape
nyc_map = gpd.read_file(regions_shape)
nyc_map.ZIPCODE = nyc_map.ZIPCODE.astype(int) # we want these to look like numbers
nyc_map = nyc_map.merge(nyc_price, on='ZIPCODE', how = "left", indicator = True)
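# Sketch: with indicator=True the merge adds a `_merge` column; its counts show
# how many zip-code polygons found a Zillow price ('both') vs. not ('left_only').
print(nyc_map['_merge'].value_counts())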
nyc_map.price.replace(np.nan,0.0, inplace = True)
nyc_map.tail()
fig, ax = plt.subplots(figsize = (10,8))
plt.tight_layout()
# First create the map for the urban share
nyc_map.plot(ax = ax, edgecolor='tab:grey',
column='price', # THIS IS NEW, it says color it based on this column
cmap='RdBu_r', # This is the color map scheme https://matplotlib.org
#/examples/color/colormaps_reference.html
alpha = 0.75, vmin=0, vmax=1.1*nyc_price.price.max(), legend=True)
#subway.plot(ax = ax, color = 'k', alpha = 0.5)
#ax.get_xaxis().set_visible(False)
#ax.get_yaxis().set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
ax.spines["left"].set_visible(False)
ax.spines["bottom"].set_visible(False)
fig.suptitle("Rental Price Per Square Foot", fontsize = 15)
########################################################################################
axins = zoomed_inset_axes(ax, # The original ax
4, # zoom level
loc=2, # location
borderpad=2) # space around it relative to figure
nyc_map.plot(ax = axins, column='price', cmap='RdBu_r',
vmin=0, vmax=1.1*nyc_price.price.max())
# Then create the map in the "insice ax" or axins. Note, that you do not
# need to keep the colering or the income, you could have the inset
# be population or what ever else.
# then the stuff below picks the box for the inset to cover. I
# I kind of just eyballed this untill I zoomed into what I wanted
# Note the "axins" object really just works like the ax
x1, x2, y1, y2 = 975000, 987000, 190000, 210000
axins.set_xlim(x1, x2)
axins.set_ylim(y1, y2)
axins.set_title("Downtown NYC")
# Make a title.
mark_inset(ax, axins, loc1=3, loc2=1, alpha = 0.15)
# This then creates the lines that marks where the inset comes from
# Make it look nice
axins.spines["right"].set_visible(False)
axins.spines["top"].set_visible(False)
axins.spines["left"].set_visible(False)
axins.spines["bottom"].set_visible(False)
#axins.Tick.remove()
axins.get_xaxis().set_visible(False)
axins.get_yaxis().set_visible(False)
plt.show()
cwd = os.getcwd()
subway_shape = cwd + "\\shapefile\\geo_export_ff76e398-04a7-40d7-b073-109c7324632d.shx"
regions_shape
subway = gpd.read_file(subway_shape)
subway.head()
fig, ax = plt.subplots(figsize = (10,8))
plt.tight_layout()
# First create the map for the urban share
subway.plot(ax = ax, color = 'k', alpha = 0.5)
subway.crs
nyc_map.crs
test = nyc_map.to_crs({'init': 'epsg:4326'})
test.COUNTY
test_nyc_map = nyc_map.to_crs({'init': 'epsg:3395'})
test_subway = subway.to_crs({'init': 'epsg:3395'})
# This converts the geometry to a Mercator projection (EPSG:3395). We had two problems: the zip-code map and the subway lines were stored in different coordinate reference systems, so they could not be drawn on the same axes until both were re-projected to a common CRS.
just_man = test_nyc_map[test_nyc_map.COUNTY == "New York"]
fig, ax = plt.subplots(figsize = (10,8))
plt.tight_layout()
# First create the map for the urban share
just_man.plot(ax = ax, edgecolor='tab:grey',
column='price', # THIS IS NEW, it says color it based on this column
cmap='RdBu_r', # This is the color map scheme https://matplotlib.org
#/examples/color/colormaps_reference.html
alpha = 0.75, vmin=0, vmax=1.1*test.price.max(), legend=True)
test_subway.plot(ax = ax, color = 'k', alpha = 0.5)
x1, x2, y1, y2 = -8245000, -8225000, 4939000, 4965000
ax.set_xlim(x1, x2)
ax.set_ylim(y1, y2)
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
ax.spines["left"].set_visible(False)
ax.spines["bottom"].set_visible(False)
#fig.suptitle("Rental Price Per Square Foot", fontsize = 15)
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
########################################################################################
plt.show()
###Output
_____no_output_____ |
assessment/spm_sr15_figure_3a_global_emissions_pathways.ipynb | ###Markdown
*IPCC SR15 scenario assessment* Global emission pathway characteristics Figure 3a of the *Summary for Policymakers*This notebook extracts the emissions pathways for Figure 3a in the Summary for Policymakersof the IPCC's _"Special Report on Global Warming of 1.5ยฐC"_.The scenario data used in this analysis can be accessed and downloaded at [https://data.ene.iiasa.ac.at/iamc-1.5c-explorer](https://data.ene.iiasa.ac.at/iamc-1.5c-explorer). *Disclaimer**The figures shown in this notebook are NOT the same figures as used in Figure 3a of the SPM.They are simplified figures included here only for reference.* Load `pyam` package and other dependencies
###Code
import pandas as pd
import numpy as np
import io
import itertools
import yaml
import math
import matplotlib.pyplot as plt
plt.style.use('style_sr15.mplstyle')
%matplotlib inline
import pyam
###Output
_____no_output_____
###Markdown
Import scenario data, categorization and specifications filesThe metadata file with scenario categorisation and quantitative indicators can be downloaded at [https://data.ene.iiasa.ac.at/iamc-1.5c-explorer](https://data.ene.iiasa.ac.at/iamc-1.5c-explorer). Alternatively, it can be re-created using the notebook `sr15_2.0_categories_indicators`.The last cell of this section loads and assigns a number of auxiliary lists as defined in the categorization notebook.
###Code
sr1p5 = pyam.IamDataFrame(data='../data/iamc15_scenario_data_world_r2.0.xlsx')
sr1p5.load_meta('sr15_metadata_indicators.xlsx')
with open("sr15_specs.yaml", 'r') as stream:
specs = yaml.load(stream, Loader=yaml.FullLoader)
rc = pyam.run_control()
for item in specs.pop('run_control').items():
rc.update({item[0]: item[1]})
cats = specs.pop('cats')
cats_15 = specs.pop('cats_15')
cats_15_no_lo = specs.pop('cats_15_no_lo')
marker= specs.pop('marker')
###Output
_____no_output_____
###Markdown
Downselect scenario ensemble to categories of interest for this assessmentThis figure only includes scenarios where the 2010 Kyoto GHG emissions are in line with the valid range as determined by the Second Assessment Report.
###Code
sr1p5.meta.rename(columns={'Kyoto-GHG|2010 (SAR)': 'kyoto_ghg_2010'}, inplace=True)
df = sr1p5.filter(category=cats_15, kyoto_ghg_2010='in range')
df.set_meta(meta='1.5C limited overshoot', name='supercategory', index=df.filter(category=cats_15_no_lo))
rc.update({'color': {'supercategory': {'1.5C limited overshoot': 'xkcd:bluish'}}})
###Output
_____no_output_____
###Markdown
Set specifications for filters and initialize data list
###Code
filter_args = dict(df=df, category=cats, marker=None, join_meta=True)
data = []
###Output
_____no_output_____
###Markdown
Plot different emissions pathways by category Net carbon dioxide emissions for all pathways limiting global warming to 1.5ยฐC by the end of the century
###Code
co2 = (
df.filter(variable='Emissions|CO2')
.convert_unit('Mt CO2/yr', 'Gt CO2/yr')
)
data.append(('Net carbon dioxide', co2))
_co2 = co2.filter(category=cats_15, year=range(2010, 2101, 5))
fig, ax = plt.subplots()
_co2.filter(year=[2010]).line_plot(ax=ax, color='category', linewidth=2)
_co2.line_plot(ax=ax, color='category', linewidth=0.1, fill_between=True, final_ranges=True)
_co2.filter(marker=marker).line_plot(ax=ax, color='category')
###Output
_____no_output_____
###Markdown
Emissions of methane, black carbon and nitrous oxide for 1.5ยฐC pathways with limited overshootThe figures below are shown as reduction relative to 2010.
###Code
def plot_relative(data, baseyear=2010):
_data = data.timeseries()
_data_rel = pd.DataFrame()
for y in range(2010, 2101, 5):
_data_rel[y] = _data[y] / _data[2010]
_data_rel.reset_index(inplace=True)
_data_rel['unit'] = 'relative to {}'.format(baseyear)
_df = pyam.IamDataFrame(_data_rel)
_df.set_meta(meta='1.5C limited overshoot', name='supercategory')
_df.filter(supercategory='1.5C limited overshoot', year=range(2010, 2101, 5))\
.line_plot(color='supercategory', linewidth=0.1, fill_between=True, legend=False)
ch4 = df.filter(variable='Emissions|CH4')
data.append(('Methane', ch4))
plot_relative(ch4)
bc = df.filter(variable='Emissions|BC')
data.append(('Black carbon', bc))
plot_relative(bc)
n2o = df.filter(variable='Emissions|N2O')
n2o.convert_unit('kt N2O/yr', 'Mt N2O/yr', factor=1/1000, inplace=True)
data.append(('Nitrous oxide', n2o))
plot_relative(n2o)
###Output
_____no_output_____
###Markdown
Save timeseries data to `xlsx`
###Code
writer = pd.ExcelWriter('output/spm_sr15_figure3a_data_table.xlsx')
for (name, _df) in data:
pyam.utils.write_sheet(writer, name,
pyam.filter_by_meta(_df.timeseries(), **filter_args), index=True)
writer.save()
###Output
_____no_output_____
###Markdown
*IPCC SR15 scenario assessment* Global emission pathway characteristics Figure 3a of the *Summary for Policymakers*This notebook extracts the emissions pathways for Figure 3a in the Summary for Policymakersof the IPCC's _"Special Report on Global Warming of 1.5ยฐC"_.The scenario data used in this analysis can be accessed and downloaded at [https://data.ene.iiasa.ac.at/iamc-1.5c-explorer](https://data.ene.iiasa.ac.at/iamc-1.5c-explorer). *Disclaimer**The figures shown in this notebook are NOT the same figures as used in Figure 3a of the SPM.They are simplified figures included here only for reference.* Load `pyam` package and other dependencies
###Code
import pandas as pd
import numpy as np
import warnings
import io
import itertools
import yaml
import math
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
plt.style.use('style_sr15.mplstyle')
%matplotlib inline
import pyam
###Output
_____no_output_____
###Markdown
Import scenario data, categorization and specifications filesThe metadata file must be generated from the notebook `sr15_2.0_categories_indicators` included in this repository. If the snapshot file has been updated, make sure that you rerun the categorization notebook.The last cell of this section loads and assigns a number of auxiliary lists as defined in the categorization notebook.
###Code
sr1p5 = pyam.IamDataFrame(data='../data/iamc15_scenario_data_world_r1.1.xlsx')
#sr1p5 = pyam.IamDataFrame(data='../data/iamc15_scenario_data_world_r1.1.xlsx')
sr1p5.load_metadata('../data/sr15_metadata_indicators.xlsx')
with open("sr15_specs.yaml", 'r') as stream:
specs = yaml.load(stream, Loader=yaml.FullLoader)
rc = pyam.run_control()
for item in specs.pop('run_control').items():
rc.update({item[0]: item[1]})
cats = specs.pop('cats')
cats_15 = specs.pop('cats_15')
cats_15_no_lo = specs.pop('cats_15_no_lo')
marker= specs.pop('marker')
###Output
_____no_output_____
###Markdown
Downselect scenario ensemble to categories of interest for this assessmentThis figure only includes scenarios where the 2010 Kyoto GHG emissions are in line with the valid range as determined by the Second Assessment Report.
###Code
sr1p5.meta.rename(columns={'Kyoto-GHG|2010 (SAR)': 'kyoto_ghg_2010'}, inplace=True)
df = sr1p5.filter(category=cats_15, kyoto_ghg_2010='in range')
df.set_meta(meta='1.5C limited overshoot', name='supercategory', index=df.filter(category=cats_15_no_lo))
rc.update({'color': {'supercategory': {'1.5C limited overshoot': 'xkcd:bluish'}}})
###Output
_____no_output_____
###Markdown
Set specifications for filters and initialize data list
###Code
filter_args = dict(df=df, category=cats, marker=None, join_meta=True)
data = []
###Output
_____no_output_____
###Markdown
Plot different emissions pathways by category Net carbon dioxide emissions for all pathways limiting global warming to 1.5ยฐC by the end of the century
###Code
co2 = (
df.filter(variable='Emissions|CO2')
.convert_unit({'Mt CO2/yr': ('Gt CO2/yr', 0.001)})
)
data.append(('Net carbon dioxide', co2))
_co2 = co2.filter(category=cats_15, year=range(2010, 2101, 5))
fig, ax = plt.subplots()
_co2.filter(year=[2010]).line_plot(ax=ax, color='category', linewidth=2)
_co2.line_plot(ax=ax, color='category', linewidth=0.1, fill_between=True, final_ranges=True)
_co2.filter(marker=marker).line_plot(ax=ax, color='category')
type(_co2)
###Output
_____no_output_____
###Markdown
Emissions of methane, black carbon and nitrous oxide for 1.5ยฐC pathways with limited overshootThe figures below are shown as reduction relative to 2010.
###Code
def plot_relative(data, baseyear=2010):
_data = data.timeseries()
_data_rel = pd.DataFrame()
for y in range(2010, 2101, 5):
_data_rel[y] = _data[y] / _data[2010]
_data_rel.reset_index(inplace=True)
_data_rel['unit'] = 'relative to {}'.format(baseyear)
_df = pyam.IamDataFrame(_data_rel)
_df.set_meta(meta='1.5C limited overshoot', name='supercategory')
_df.filter(supercategory='1.5C limited overshoot', year=range(2010, 2101, 5))\
.line_plot(color='supercategory', linewidth=0.1, fill_between=True, legend=False)
ch4 = df.filter(variable='Emissions|CH4')
data.append(('Methane', ch4))
plot_relative(ch4)
bc = df.filter(variable='Emissions|BC')
data.append(('Black carbon', bc))
plot_relative(bc)
n2o = df.filter(variable='Emissions|N2O')
n2o.convert_unit({'kt N2O/yr': ('Mt N2O/yr', 0.001)}, inplace=True)
data.append(('Nitrous oxide', n2o))
plot_relative(n2o)
###Output
_____no_output_____
###Markdown
Save timeseries data to `xlsx`
###Code
writer = pd.ExcelWriter('output/spm_sr15_figure3a_data_table.xlsx')
for (name, _df) in data:
pyam.utils.write_sheet(writer, name,
pyam.filter_by_meta(_df.timeseries(), **filter_args), index=True)
writer.save()
type(data)
len(data)
data
###Output
_____no_output_____
###Markdown
*IPCC SR15 scenario assessment* Global emission pathway characteristics Figure 3a of the *Summary for Policymakers*This notebook extracts the emissions pathways for Figure 3a in the Summary for Policymakersof the IPCC's _"Special Report on Global Warming of 1.5ยฐC"_.The scenario data used in this analysis can be accessed and downloaded at [https://data.ene.iiasa.ac.at/iamc-1.5c-explorer](https://data.ene.iiasa.ac.at/iamc-1.5c-explorer). *Disclaimer**The figures shown in this notebook are NOT the same figures as used in Figure 3a of the SPM.They are simplified figures included here only for reference.* Load `pyam` package and other dependencies
###Code
import pandas as pd
import numpy as np
import warnings
import io
import itertools
import yaml
import math
import matplotlib.pyplot as plt
plt.style.use('style_sr15.mplstyle')
%matplotlib inline
import pyam
###Output
_____no_output_____
###Markdown
Import scenario data, categorization and specifications filesThe metadata file must be generated from the notebook `sr15_2.0_categories_indicators` included in this repository. If the snapshot file has been updated, make sure that you rerun the categorization notebook.The last cell of this section loads and assigns a number of auxiliary lists as defined in the categorization notebook.
###Code
sr1p5 = pyam.IamDataFrame(data='../data/iamc15_scenario_data_world_r1.1.xlsx')
sr1p5.load_metadata('sr15_metadata_indicators.xlsx')
with open("sr15_specs.yaml", 'r') as stream:
specs = yaml.load(stream, Loader=yaml.FullLoader)
rc = pyam.run_control()
for item in specs.pop('run_control').items():
rc.update({item[0]: item[1]})
cats = specs.pop('cats')
cats_15 = specs.pop('cats_15')
cats_15_no_lo = specs.pop('cats_15_no_lo')
marker= specs.pop('marker')
###Output
_____no_output_____
###Markdown
Downselect scenario ensemble to categories of interest for this assessmentThis figure only includes scenarios where the 2010 Kyoto GHG emissions are in line with the valid range as determined by the Second Assessment Report.
###Code
sr1p5.meta.rename(columns={'Kyoto-GHG|2010 (SAR)': 'kyoto_ghg_2010'}, inplace=True)
df = sr1p5.filter(category=cats_15, kyoto_ghg_2010='in range')
df.set_meta(meta='1.5C limited overshoot', name='supercategory', index=df.filter(category=cats_15_no_lo))
rc.update({'color': {'supercategory': {'1.5C limited overshoot': 'xkcd:bluish'}}})
###Output
_____no_output_____
###Markdown
Set specifications for filters and initialize data list
###Code
filter_args = dict(df=df, category=cats, marker=None, join_meta=True)
data = []
###Output
_____no_output_____
###Markdown
Plot different emissions pathways by category Net carbon dioxide emissions for all pathways limiting global warming to 1.5ยฐC by the end of the century
###Code
co2 = (
df.filter(variable='Emissions|CO2')
.convert_unit({'Mt CO2/yr': ('Gt CO2/yr', 0.001)})
)
data.append(('Net carbon dioxide', co2))
_co2 = co2.filter(category=cats_15, year=range(2010, 2101, 5))
fig, ax = plt.subplots()
_co2.filter(year=[2010]).line_plot(ax=ax, color='category', linewidth=2)
_co2.line_plot(ax=ax, color='category', linewidth=0.1, fill_between=True, final_ranges=True)
_co2.filter(marker=marker).line_plot(ax=ax, color='category')
###Output
_____no_output_____
###Markdown
Emissions of methane, black carbon and nitrous oxide for 1.5ยฐC pathways with limited overshootThe figures below are shown as reduction relative to 2010.
###Code
def plot_relative(data, baseyear=2010):
_data = data.timeseries()
_data_rel = pd.DataFrame()
for y in range(2010, 2101, 5):
_data_rel[y] = _data[y] / _data[2010]
_data_rel.reset_index(inplace=True)
_data_rel['unit'] = 'relative to {}'.format(baseyear)
_df = pyam.IamDataFrame(_data_rel)
_df.set_meta(meta='1.5C limited overshoot', name='supercategory')
_df.filter(supercategory='1.5C limited overshoot', year=range(2010, 2101, 5))\
.line_plot(color='supercategory', linewidth=0.1, fill_between=True, legend=False)
ch4 = df.filter(variable='Emissions|CH4')
data.append(('Methane', ch4))
plot_relative(ch4)
bc = df.filter(variable='Emissions|BC')
data.append(('Black carbon', bc))
plot_relative(bc)
n2o = df.filter(variable='Emissions|N2O')
n2o.convert_unit({'kt N2O/yr': ('Mt N2O/yr', 0.001)}, inplace=True)
data.append(('Nitrous oxide', n2o))
plot_relative(n2o)
###Output
_____no_output_____
###Markdown
Save timeseries data to `xlsx`
###Code
writer = pd.ExcelWriter('output/spm_sr15_figure3a_data_table.xlsx')
for (name, _df) in data:
pyam.utils.write_sheet(writer, name,
pyam.filter_by_meta(_df.timeseries(), **filter_args), index=True)
writer.save()
###Output
_____no_output_____ |
Assignment3_Panganiban.ipynb | ###Markdown
**Linear Algebra for ChE** Assignment 3: Matrices *Objectives* At the end of this activity you will be able to: 1. Be familiar with matrices and their relation to linear equations. 2. Perform basic matrix operations. 3. Program and translate matrix equations and operations using Python.
###Code
import numpy as np
import matplotlib.pyplot as plt
import scipy.linalg as la
%matplotlib inline
###Output
_____no_output_____
###Markdown
**MATRICES** The notation and use of matrices is probably one of the fundamentals of modern computing. Matrices are also handy representations of complex equations or multiple inter-related equations from 2-dimensional equations to even hundreds and thousands of them. Let's say for example you have $A$ and $B$ as system of equation. $$A = \left\{ \begin{array}\ x + y \\ 4x - 10y \end{array}\right. \\B = \left\{ \begin{array}\ x+y+z \\ 3x -2y -z \\ -x + 4y +2z \end{array}\right. $$ We could see that $A$ is a system of 2 equations with 2 parameters. While $B$ is a system of 3 equations with 3 parameters. We can represent them as matrices: $$A=\begin{bmatrix} 1 & 1 \\ 4 & {-10}\end{bmatrix} \\B=\begin{bmatrix} 1 & 1 & 1 \\ 3 & -2 & -1 \\ -1 & 4 & 2\end{bmatrix}$$ *Declaring Matrices* Just like our previous laboratory activity, we'll represent system of linear equations as a matrix. The entities or numbers in matrices are called the elements of a matrix. These elements are arranged and ordered in rows and columns which form the list/array-like structure of matrices. And just like arrays, these elements are indexed according to their position with respect to their rows and columns. This can be represent just like the equation below. Whereas, $A$ is a matrix consisting of elements denoted by Aij. Denoted by i is the number of rows in the matrix while $j$ stands for the number of columns. Do note that the $size$ of a matrix is $i$ x $j$ $$A=\begin{bmatrix}a_{(0,0)}&a_{(0,1)}&\dots&a_{(0,j-1)}\\a_{(1,0)}&a_{(1,1)}&\dots&a_{(1,j-1)}\\\vdots&\vdots&\ddots&\vdots&\\a_{(i-1,0)}&a_{(i-1,1)}&\dots&a_{(i-1,j-1)}\end{bmatrix}$$ We already gone over some of the types of matrices but we'll further discuss them in this laboratory activity. Since you already know how to describe vectors using **shape**, **dimension**, and **size** attributes of the matrices, we will do it in this platform.
###Code
## Since we'll keep on describing matrices. Let's make a function.
def describe_mat(matrix):
print(f'Matrix:\n{matrix}\n\nShape:\t{matrix.shape}\nRank:\t{matrix.ndim}\n')
## Declaring a 2 x 2 matrix
A = np.array([
[1, 2],
[3,1]
])
describe_mat(A)
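# Sketch: NumPy indexes matrix elements by (row, column), zero-based;
# A[0, 1] is the element a_(0,1) described above.
print(A[0, 1])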
G = np.array([
[4,1],
[2,2]
])
describe_mat(G)
## Declaring a 3 x 2 matrix
B = np.array([
[8,2],
[5,4],
[1,1]
])
describe_mat (B)
H = np.array([1, 2, 3, 4, 5,])
describe_mat(H)
###Output
Matrix:
[1 2 3 4 5]
Shape: (5,)
Rank: 1
###Markdown
*Categorizing Matrices* There are several ways of classifying matrices. One could be according to their **shape** and another is according to their **element values**. We'll try to go through them. *ROW AND COLUMN MATRICES* Row and column matrices are common in vector and matrix computations. They can also represent row and column spaces of a bigger vector space. Row and column matrices are represented by a single row or a single column. So with that being said, the shape of row matrices would be $1$ x $j$ and the shape of column matrices would be $i$ x $1$.
###Code
## Declaring a Row Matrix
row_mat_1D = np.array([
1, 3, 2
]) ## this is a 1-D Matrix with a shape of (3,), it's not really considered as a row matrix
row_mat_2D = np.array([
[1,2,3]
]) ## this is a 2-D Matrix with a shape of (1,3)
describe_mat(row_mat_1D)
describe_mat(row_mat_2D)
## Declaring a Column Matrix
col_mat = np.array([
[1],
[2],
[5]
]) ## this is a 2-D matrix with a shape of (3,1)
describe_mat(col_mat)
###Output
Matrix:
[[1]
[2]
[5]]
Shape: (3, 1)
Rank: 2
###Markdown
*SQUARE MATRICES* Square matrices are matrices that have the same row and column sizes. We could say a matrix is square if $i=j$. We can tweak our matrix descriptor function to determine square matrices.
###Code
def describe_mat(matrix):
is_square = True if matrix.shape[0] == matrix.shape[1] else False
print (f'Matrix:\n{matrix}\n\nShape:\t{matrix.shape}\nRank:\t{matrix.ndim}\nIs Square: {is_square}\n')
square_mat = np.array([
[1,2,5],
[3,3,8],
[6,1,2]
])
non_square_mat = np.array([
[1,2,5],
[3,3,8]
])
describe_mat(square_mat)
describe_mat(non_square_mat)
###Output
Matrix:
[[1 2 5]
[3 3 8]
[6 1 2]]
Shape: (3, 3)
Rank: 2
Is Square: True
Matrix:
[[1 2 5]
[3 3 8]]
Shape: (2, 3)
Rank: 2
Is Square: False
###Markdown
***According to element values*** A ***Null Matrix*** is a matrix that has no elements. It is always a subspace of any vector or matrix.
###Code
def describe_mat(matrix):
if matrix.size > 0:
is_square = True if matrix.shape[0] == matrix.shape[1] else False
print (f'Matrix:\n{matrix}\n\nShape:\t{matrix.shape}\nRank:\t{matrix.ndim}\nIs Square: {is_square}\n')
else:
print('Matrix is Null')
null_mat = np.array([])
describe_mat(null_mat)
###Output
Matrix is Null
###Markdown
A ***zero matrix*** can be any rectangular matrix but with all elements having a value of 0.
###Code
zero_mat_row = np.zeros((1,2))
zero_mat_sqr = np.zeros((2,2))
zero_mat_rct = np.zeros((3,2))
zero_4x4 = np.zeros((4,4))
print(f'Zero Row Matrix: \n{zero_mat_row}')
print(f'Zero Square Matrix: \n{zero_mat_sqr}')
print(f'Zero Rectangular Matrix: \n{zero_mat_rct}')
print(f'Zero Matrix 4x4: \n{zero_4x4}')
###Output
Zero Row Matrix:
[[0. 0.]]
Zero Square Matrix:
[[0. 0.]
[0. 0.]]
Zero Rectangular Matrix:
[[0. 0.]
[0. 0.]
[0. 0.]]
Zero Matrix 4x4:
[[0. 0. 0. 0.]
[0. 0. 0. 0.]
[0. 0. 0. 0.]
[0. 0. 0. 0.]]
###Markdown
A ***ones matrix***, just like the zero matrix, can be any rectangular matrix but all of its elements are 1s instead of 0s.
###Code
ones_mat_row = np.ones((1,2))
ones_mat_sqr = np.ones((2,2))
ones_mat_rct = np.ones((3,2))
print(f'Ones Row Matrix: \n{ones_mat_row}')
print(f'Ones square Matrix: \n{ones_mat_sqr}')
print(f'Ones Rectangular Matrix: \n{ones_mat_rct}')
###Output
Ones Row Matrix:
[[1. 1.]]
Ones square Matrix:
[[1. 1.]
[1. 1.]]
Ones Rectangular Matrix:
[[1. 1.]
[1. 1.]
[1. 1.]]
###Markdown
A ***diagonal matrix*** is a square matrix that has values only at the diagonal of the matrix.
###Code
np.array([
[2,0,0],
[0,3,0],
[0,0,5]
])
# a[1,1], a[2,2], a[3,3], ... a[n-1,n-1]
d = np.diag([2,3,5,7])
d
###Output
_____no_output_____
###Markdown
An ***identity matrix*** is a special diagonal matrix in which the values at the diagonal are ones.
###Code
np.eye(5)
np.identity(5)
###Output
_____no_output_____
###Markdown
An ***upper triangular matrix*** is a square matrix with no nonzero values below the diagonal.
###Code
np.array([
[1,2,3],
[0,3,1],
[0,0,5]
])
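## NumPy can also extract the upper triangle of an arbitrary matrix (quick sketch; uncomment to try):
# np.triu(np.arange(1, 10).reshape(3, 3)) # zeroes everything below the diagonal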
###Output
_____no_output_____
###Markdown
A ***lower triangular matrix*** is a square matrix with no nonzero values above the diagonal.
###Code
np.array([
[1,0,0],
[5,3,0],
[7,8,5]
])
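## Likewise, np.tril() zeroes everything above the diagonal (sketch; uncomment to try):
# np.tril(np.arange(1, 10).reshape(3, 3))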
###Output
_____no_output_____
###Markdown
**PRACTICE** **1.)** $$\theta = 5x + 3y - z$$
###Code
theta = np.array([
[5,3,-1]
])
describe_mat(theta)
###Output
Matrix:
[[ 5 3 -1]]
Shape: (1, 3)
Rank: 2
Is Square: False
###Markdown
**2.)** $$A = \left\{\begin{array}{l} 5x_1 + 2x_2 + x_3 \\ 4x_2 - x_3 \\ 10x_3 \end{array}\right.$$
###Code
A = np.array([
[5,2,1],
[0,4,-1],
[0,0,10]
])
describe_mat(A)
###Output
Matrix:
[[ 5 2 1]
[ 0 4 -1]
[ 0 0 10]]
Shape: (3, 3)
Rank: 2
Is Square: True
###Markdown
**3.)** Given the matrix below, express it as a linear combination in a markdown.
###Code
G = np.array([
[1,7,8],
[2,2,2],
[4,6,7]
])
###Output
_____no_output_____
###Markdown
$$G = \begin{bmatrix} 1 & 7 & 8 \\ 2 & 2 & 2 \\ 4 & 6 & 7 \end{bmatrix}$$ $$G = \left\{ \begin{array}{l} x_1 + 7x_2 + 8x_3 \\ 2x_1 + 2x_2 + 2x_3 \\ 4x_1 + 6x_2 + 7x_3 \end{array}\right.$$ **4.)** Given the matrix below, display the output as LaTeX in markdown and also express it as a system of linear combinations.
###Code
H = np.tril(G)
H
###Output
_____no_output_____
###Markdown
**MATRIX ALGEBRA** Addition
###Code
A = np.array([
[1,2],
[2,3],
[4,1]
])
B = np.array([
[2,2],
[0,0],
[1,1]
])
A+B
2+A ##Broadcasting
# 2*np.ones(A.shape)+A
###Output
_____no_output_____
###Markdown
Subtraction
###Code
A-B
3-B == 3*np.ones(B.shape)-B
###Output
_____no_output_____
###Markdown
Element-wise Multiplication
###Code
A*B
np.multiply(A,B) # equivalent element-wise product
2*A
##A@B # note: @ is matrix multiplication, not element-wise
alpha=10**-10
A/(alpha+B) # element-wise division; alpha avoids dividing by the zeros in B
np.add(A,B)
###Output
_____no_output_____
###Markdown
**ACTIVITY** *Task 1*Create a function named `mat_desc()` that thoroughly describes a matrix; it should: 1. Display the shape, size, and rank of the matrix. 2. Display whether the matrix is square or non-square. 3. Display whether the matrix is an empty matrix. 4. Display whether the matrix is an identity, ones, or zeros matrix. Use 5 sample matrices whose shapes are not smaller than $(3,3)$. In your methodology, create a flowchart and discuss the functions and methods you used. Present your results in the results section showing the description of each matrix you have declared.
###Code
def mat_desc(matrix):
print(f'Matrix:\n{matrix}\n\nShape:\t{matrix.shape}\nRank:\t{matrix.ndim}\n')
###Output
_____no_output_____
###Markdown
**SHAPE, SIZE, & RANK**
###Code
X = np.array([
[7,3,6,2],
[2,1,4,3],
[1,9,2,5]
])
mat_desc(X)
Y = np.array([
[4,7,8,2,9],
[9,3,1,1,3],
[7,6,2,5,4],
[1,3,6,5,8]
])
mat_desc(Y)
###Output
Matrix:
[[4 7 8 2 9]
[9 3 1 1 3]
[7 6 2 5 4]
[1 3 6 5 8]]
Shape: (4, 5)
Rank: 2
###Markdown
***FLOWCHART*** **SQUARE & NON-SQUARE MATRICES**
###Code
def mat_desc(matrix):
is_square = True if matrix.shape[0] == matrix.shape[1] else False
print (f'Matrix:\n{matrix}\n\nShape:\t{matrix.shape}\nRank:\t{matrix.ndim}\nIs Square: {is_square}\n')
square_mat = np.array([
[6,8,2,9],
[3,7,4,6],
[7,2,6,3],
[8,9,1,1]
])
non_square_mat = np.array([
[7,3,8],
[9,1,2],
[4,4,3],
[6,5,6]
])
mat_desc(square_mat)
mat_desc(non_square_mat)
###Output
Matrix:
[[6 8 2 9]
[3 7 4 6]
[7 2 6 3]
[8 9 1 1]]
Shape: (4, 4)
Rank: 2
Is Square: True
Matrix:
[[7 3 8]
[9 1 2]
[4 4 3]
[6 5 6]]
Shape: (4, 3)
Rank: 2
Is Square: False
###Markdown
***FLOWCHART*** **EMPTY MATRIX**
###Code
def mat_desc(matrix):
if matrix.size > 0:
is_square = True if matrix.shape[0] == matrix.shape[1] else False
print (f'Matrix:\n{matrix}\n\nShape:\t{matrix.shape}\nRank:\t{matrix.ndim}\nIs Square: {is_square}\n')
else:
print('Matrix is Null')
null_mat = np.array([])
mat_desc(null_mat)
###Output
Matrix is Null
###Markdown
***FLOWCHART*** **IDENTITY, ONES, AND ZEROS**
###Code
##IDENTITY MATRIX
np.eye(12)
np.identity(8)
###Output
_____no_output_____
###Markdown
***FLOWCHART***
###Code
##ONES MATRIX
ones_mat_row = np.ones((5,4))
ones_mat_sqr = np.ones((4,6))
ones_mat_rct = np.ones((9,5))
print(f'Ones Row Matrix: \n{ones_mat_row}')
print(f'Ones square Matrix: \n{ones_mat_sqr}')
print(f'Ones Rectangular Matrix: \n{ones_mat_rct}')
###Output
Ones Row Matrix:
[[1. 1. 1. 1.]
[1. 1. 1. 1.]
[1. 1. 1. 1.]
[1. 1. 1. 1.]
[1. 1. 1. 1.]]
Ones square Matrix:
[[1. 1. 1. 1. 1. 1.]
[1. 1. 1. 1. 1. 1.]
[1. 1. 1. 1. 1. 1.]
[1. 1. 1. 1. 1. 1.]]
Ones Rectangular Matrix:
[[1. 1. 1. 1. 1.]
[1. 1. 1. 1. 1.]
[1. 1. 1. 1. 1.]
[1. 1. 1. 1. 1.]
[1. 1. 1. 1. 1.]
[1. 1. 1. 1. 1.]
[1. 1. 1. 1. 1.]
[1. 1. 1. 1. 1.]
[1. 1. 1. 1. 1.]]
###Markdown
***FLOWCHART*** 
###Code
##ZEROS MATRIX
zero_mat_row = np.zeros((4,8))
zero_mat_sqr = np.zeros((6,6))
zero_mat_rct = np.zeros((5,9))
print(f'Zero Row Matrix: \n{zero_mat_row}')
print(f'Zero Square Matrix: \n{zero_mat_sqr}')
print(f'Zero Rectangular Matrix: \n{zero_mat_rct}')
###Output
Zero Row Matrix:
[[0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0.]]
Zero Square Matrix:
[[0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0.]]
Zero Rectangular Matrix:
[[0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0.]]
###Markdown
***FLOWCHART*** *Task 2*Create a function named `mat_operations()` that takes in two matrices as input parameters; it should: 1. Determine if the matrices are viable for operation and return your own error message if they are not viable. 2. Return the sum of the matrices. 3. Return the difference of the matrices. 4. Return the element-wise multiplication of the matrices. 5. Return the element-wise division of the matrices. Use 5 sample matrices whose shapes are not smaller than $(3,3)$. In your methodology, create a flowchart and discuss the functions and methods you used. Present your results in the results section showing the description of each matrix you have declared.
###Code
def mat_operations(mat1, mat2):
## 1. Determine if the matrices are viable for operation (element-wise ops need matching shapes)
if mat1.shape != mat2.shape:
print('Error: the matrices have different shapes and are not viable.')
return
print ("Yes, the matrices are viable.")
return mat1 + mat2, mat1 - mat2, np.multiply(mat1, mat2), np.divide(mat1, mat2)
###Output
Yes, the matrices are viable.
###Markdown
2. **ADDITION**
###Code
A = np.array([
[8,2,3],
[3,9,6],
[7,5,9],
[6,6,1],
[1,4,5]
])
B = np.array([
[2,4,4],
[5,8,1],
[9,3,7],
[3,5,9],
[1,2,3]
])
A + B
15+A
###Output
_____no_output_____
###Markdown
***FLOWCHART*** 3. **SUBTRACTION**
###Code
A-B
9-A
###Output
_____no_output_____ |
EficienciaCรณdigos.ipynb | ###Markdown
###Code
from time import time
def ejemplo2( n ): # two nested loops over n: O(n^2)
contador = 0
start_time = time()
for i in range( n ) :
for j in range( n ) :
contador += 1
elapsed_time = time() - start_time
print("Tiempo transcurrido: %0.10f segundos." % elapsed_time)
return contador
def ejemplo3( n ): # fixed 100 iterations regardless of n: O(1)
start_time = time()
x = n * 2
y = 0
for m in range (100):
y = x - n
elapsed_time = time() - start_time
print("Tiempo transcurrido: %0.10f segundos." % elapsed_time)
return y
def ejemplo4( n ): # a handful of arithmetic operations: O(1)
start_time = time()
x = 3 * 3.1416 + n
y = x + 3 * 3 - n
z = x + y
elapsed_time = time() - start_time
print("Tiempo transcurrido: %0.10f segundos." % elapsed_time)
return z
def ejemplo5( x ): # a single loop over x: O(x)
start_time = time()
n = 10
for j in range( 0 , x , 1 ):
n = j + n
elapsed_time = time() - start_time
print("Tiempo transcurrido: %0.10f segundos." % elapsed_time)
return n
def ejemplo6( n ): # builds and sums an n*n*n cube: O(n^3)
start_time = time()
data=[[[1 for x in range(n)] for x in range(n)]
for x in range(n)]
suma = 0
for d in range(n):
for r in range(n):
for c in range(n):
suma += data[d][r][c]
elapsed_time = time() - start_time
print("Tiempo transcurrido: %0.10f segundos." % elapsed_time)
return suma
def main():
vuelta = 1
for h in range(100,1100,100):
print(vuelta)
print("Ejemplo 2: ",ejemplo2(h))
print("Ejemplo 3: ",ejemplo3(h))
print("Ejemplo 4: ",ejemplo4(h))
print("Ejemplo 5: ",ejemplo5(h))
print("Ejemplo 6: ",ejemplo6(h))
vuelta += 1
main()
###Output
1
Tiempo transcurrido: 0.0004897118 segundos.
Ejemplo 2: 10000
Tiempo transcurrido: 0.0000069141 segundos.
Ejemplo 3: 100
Tiempo transcurrido: 0.0000009537 segundos.
Ejemplo 4: 127.84960000000001
Tiempo transcurrido: 0.0000097752 segundos.
Ejemplo 5: 4960
Tiempo transcurrido: 0.1384265423 segundos.
Ejemplo 6: 1000000
2
Tiempo transcurrido: 0.0019879341 segundos.
Ejemplo 2: 40000
Tiempo transcurrido: 0.0000078678 segundos.
Ejemplo 3: 200
Tiempo transcurrido: 0.0000014305 segundos.
Ejemplo 4: 227.8496
Tiempo transcurrido: 0.0000226498 segundos.
Ejemplo 5: 19910
Tiempo transcurrido: 1.1131577492 segundos.
Ejemplo 6: 8000000
3
Tiempo transcurrido: 0.0041046143 segundos.
Ejemplo 2: 90000
Tiempo transcurrido: 0.0000097752 segundos.
Ejemplo 3: 300
Tiempo transcurrido: 0.0000016689 segundos.
Ejemplo 4: 327.8496
Tiempo transcurrido: 0.0000259876 segundos.
Ejemplo 5: 44860
Tiempo transcurrido: 3.6521036625 segundos.
Ejemplo 6: 27000000
4
Tiempo transcurrido: 0.0084984303 segundos.
Ejemplo 2: 160000
Tiempo transcurrido: 0.0000102520 segundos.
Ejemplo 3: 400
Tiempo transcurrido: 0.0000011921 segundos.
Ejemplo 4: 427.8496
Tiempo transcurrido: 0.0000369549 segundos.
Ejemplo 5: 79810
Tiempo transcurrido: 8.7559375763 segundos.
Ejemplo 6: 64000000
5
Tiempo transcurrido: 0.0135233402 segundos.
Ejemplo 2: 250000
Tiempo transcurrido: 0.0000107288 segundos.
Ejemplo 3: 500
Tiempo transcurrido: 0.0000014305 segundos.
Ejemplo 4: 527.8496
Tiempo transcurrido: 0.0000417233 segundos.
Ejemplo 5: 124760
Tiempo transcurrido: 17.6952991486 segundos.
Ejemplo 6: 125000000
6
Tiempo transcurrido: 0.0185880661 segundos.
Ejemplo 2: 360000
Tiempo transcurrido: 0.0000095367 segundos.
Ejemplo 3: 600
Tiempo transcurrido: 0.0000014305 segundos.
Ejemplo 4: 627.8496
Tiempo transcurrido: 0.0000524521 segundos.
Ejemplo 5: 179710
Tiempo transcurrido: 32.6443607807 segundos.
Ejemplo 6: 216000000
7
Tiempo transcurrido: 0.0260484219 segundos.
Ejemplo 2: 490000
Tiempo transcurrido: 0.0000107288 segundos.
Ejemplo 3: 700
Tiempo transcurrido: 0.0000011921 segundos.
Ejemplo 4: 727.8496
Tiempo transcurrido: 0.0000498295 segundos.
Ejemplo 5: 244660
Tiempo transcurrido: 53.7953050137 segundos.
Ejemplo 6: 343000000
8
Tiempo transcurrido: 0.0394110680 segundos.
Ejemplo 2: 640000
Tiempo transcurrido: 0.0000271797 segundos.
Ejemplo 3: 800
Tiempo transcurrido: 0.0000016689 segundos.
Ejemplo 4: 827.8496
Tiempo transcurrido: 0.0000572205 segundos.
Ejemplo 5: 319610
Tiempo transcurrido: 78.4677960873 segundos.
Ejemplo 6: 512000000
9
Tiempo transcurrido: 0.0429062843 segundos.
Ejemplo 2: 810000
Tiempo transcurrido: 0.0000107288 segundos.
Ejemplo 3: 900
Tiempo transcurrido: 0.0000016689 segundos.
Ejemplo 4: 927.8496
Tiempo transcurrido: 0.0000705719 segundos.
Ejemplo 5: 404560
Tiempo transcurrido: 109.4635345936 segundos.
Ejemplo 6: 729000000
10
Tiempo transcurrido: 0.0521612167 segundos.
Ejemplo 2: 1000000
Tiempo transcurrido: 0.0000112057 segundos.
Ejemplo 3: 1000
Tiempo transcurrido: 0.0000014305 segundos.
Ejemplo 4: 1027.8496
Tiempo transcurrido: 0.0000815392 segundos.
Ejemplo 5: 499510
Tiempo transcurrido: 152.6276922226 segundos.
Ejemplo 6: 1000000000
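###Markdown
The timings above make each function's growth rate visible: `ejemplo2` (two nested loops) scales as $O(n^2)$, `ejemplo3` and `ejemplo4` stay essentially constant in $n$, `ejemplo5` grows linearly, and `ejemplo6` (a triple loop over an $n \times n \times n$ cube) is $O(n^3)$, which is why its runtime climbs from about 0.14 s at $n=100$ to about 153 s at $n=1000$.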
|
stock_data.ipynb | ###Markdown
**Getting Microsoft historical data from IEX**
###Code
import pandas as pd
pd.core.common.is_list_like=pd.api.types.is_list_like
from pandas_datareader import data as web
import datetime as dt
web.DataReader("MSFT","iex","2014-1-1","2018-9-24")
###Output
5y
###Markdown
**DOWNLOADING MICROSOFT HISTORICAL DATA (CLEAN) FROM YAHOO**
###Code
import pandas as pd
pd.core.common.is_list_like=pd.api.types.is_list_like
from pandas_datareader import data as web
import fix_yahoo_finance as yf
import datetime as dt
start=dt.datetime(2017,9,1)
end=dt.datetime.today().strftime('%Y-%m-%d')
web=yf.download("MSFT",start,end)
display (web)
#web=yf.download("MSFT","2014-1-1","2018-10-17")
#web=yf.download("AAPL","2014-1-1","2018-10-17")# downloading APPL historical stock data
web
###Output
_____no_output_____
###Markdown
LINE PLOT OF ADJ CLOSE OF MICROSOFT STOCK FOR ALL YEARS
###Code
import matplotlib.pyplot as plt
%matplotlib inline
%pylab inline
pylab.rcParams['figure.figsize'] = (20, 9)
web["Adj Close"].plot(grid = True)
###Output
Populating the interactive namespace from numpy and matplotlib
###Markdown
https://www.pivotaltracker.com/story/show/160775317
###Code
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
%pylab inline
pylab.rcParams['figure.figsize'] = (20, 9)
web["Adj Close"]["2018-01-01":].plot(grid = True)
###Output
Populating the interactive namespace from numpy and matplotlib
###Markdown
https://www.pivotaltracker.com/story/show/160800230 SIMPLE MOVING AVERAGE OF CLOSE PRICE (20 days and 50 days)
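For reference, the $n$-day simple moving average of a close-price series $P$ is $$\mathrm{SMA}_n(t) = \frac{1}{n}\sum_{i=0}^{n-1} P_{t-i},$$ which is exactly what `Close.rolling(n).mean()` computes below.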
###Code
import pandas as pd
pd.core.common.is_list_like=pd.api.types.is_list_like
from pandas_datareader import data as web
import fix_yahoo_finance as yf
import datetime as dt
import matplotlib.pyplot as plt
%matplotlib inline
%pylab inline
start=dt.datetime(2017,9,1)
end=dt.datetime.today().strftime('%Y-%m-%d')
web=yf.download("MSFT",start,end)
display (web)
web_close=pd.DataFrame(web.Close)
web_close["MA_20"]=web_close.Close.rolling(20).mean()
web_close["MA_50"]=web_close.Close.rolling(50).mean()
pylab.rcParams['figure.figsize'] = (20, 9)
plt.grid(True)
plt.plot(web_close['Close'],label='Close')
plt.plot(web_close["MA_20"],label="MA 20 days")
plt.plot(web_close["MA_50"],label="MA 50 days")
plt.legend(loc=2)
###Output
Populating the interactive namespace from numpy and matplotlib
[*********************100%***********************] 1 of 1 downloaded
###Markdown
https://www.pivotaltracker.com/story/show/160695607 BOLLINGER BAND
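The bands plotted below follow the standard definition $$\text{Upper}, \text{Lower} = \mathrm{SMA}_{20} \pm 2\,\sigma_{20},$$ where $\sigma_{20}$ is the 20-day rolling standard deviation of the close.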
###Code
import pandas as pd
pd.core.common.is_list_like=pd.api.types.is_list_like
from pandas_datareader import data as web
import fix_yahoo_finance as yf
import datetime as dt
import matplotlib.pyplot as plt
%matplotlib inline
%pylab inline
start=dt.datetime(2017,9,1)
end=dt.datetime.today().strftime('%Y-%m-%d')
web=yf.download("AAPL",start,end)
web_close=pd.DataFrame(web.Close) # assigning close prices to web_ close
web_close["MA_20"]=web_close.Close.rolling(20).mean() # moving average of close prices of 20 days
web["20 Day STD"] = web.Close.rolling(20).std()
web["Upper Band"] = web_close["MA_20"] + (web['20 Day STD'] * 2) # adding 20 day moving average to 20 day standard deviation
web["Lower Band"] = web_close["MA_20"] - (web['20 Day STD'] * 2) # for lower band subtracting 20 day moving average and 20 day standard deviation
pylab.rcParams['figure.figsize'] = (20, 9)
plt.plot(web_close["Close"],label='Close')
plt.plot(web_close["MA_20"],label="MA 20 days")
plt.plot(web["Upper Band"], label="Upperband")
plt.plot(web["Lower Band"], label="Lowerband")
plt.legend(loc=2)
###Output
Populating the interactive namespace from numpy and matplotlib
[*********************100%***********************] 1 of 1 downloaded
###Markdown
https://www.pivotaltracker.com/story/show/160696264 CCI https://www.pivotaltracker.com/story/show/160699188
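The Commodity Channel Index measures how far the typical price $TP = (H + L + C)/3$ has drifted from its moving average, scaled by $0.015$ times the rolling standard deviation: $$\mathrm{CCI} = \frac{TP - \mathrm{SMA}_{20}}{0.015\,\sigma_{20}}.$$ (The cell below uses the 20-day mean and standard deviation of the close rather than of $TP$, a simplification of the textbook definition.)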
###Code
import pandas as pd
pd.core.common.is_list_like=pd.api.types.is_list_like
from pandas_datareader import data as web
import fix_yahoo_finance as yf
import datetime as dt
import matplotlib.pyplot as plt
%matplotlib inline
%pylab inline
start=dt.datetime(2017,9,1)
end=dt.datetime.today().strftime('%Y-%m-%d')
web=yf.download("MSFT",start,end)
web_close=pd.DataFrame(web.Close)
web_high=pd.DataFrame(web.High)
web_low=pd.DataFrame(web.Low)
web_close["MA_20"]=web.Close.rolling(20).mean()
web["20 Day STD"] = web.Close.rolling(20).std()
TP=(web.High+web.Low+web.Close)/3
CCI = pd.Series((TP -web_close["MA_20"]) / (0.015*web["20 Day STD"]))
pylab.rcParams['figure.figsize'] = (20, 9)
print (CCI)
plt.plot(CCI,label='CCI')
plt.legend(loc=2)
###Output
Populating the interactive namespace from numpy and matplotlib
[*********************100%***********************] 1 of 1 downloaded
Date
2017-09-01 NaN
2017-09-05 NaN
2017-09-06 NaN
2017-09-07 NaN
2017-09-08 NaN
2017-09-11 NaN
2017-09-12 NaN
2017-09-13 NaN
2017-09-14 NaN
2017-09-15 NaN
2017-09-18 NaN
2017-09-19 NaN
2017-09-20 NaN
2017-09-21 NaN
2017-09-22 NaN
2017-09-25 NaN
2017-09-26 NaN
2017-09-27 NaN
2017-09-28 NaN
2017-09-29 -3.996477
2017-10-02 25.599563
2017-10-03 3.322109
2017-10-04 -10.993418
2017-10-05 107.340255
2017-10-06 105.774882
2017-10-09 119.063751
2017-10-10 112.930350
2017-10-11 95.931636
2017-10-12 117.760421
2017-10-13 132.861846
...
2018-10-02 74.006028
2018-10-03 77.358163
2018-10-04 -1.897045
2018-10-05 -56.343274
2018-10-08 -125.291161
2018-10-09 -68.977777
2018-10-10 -170.732654
2018-10-11 -163.012137
2018-10-12 -80.635641
2018-10-15 -98.799314
2018-10-16 -41.079862
2018-10-17 -34.432299
2018-10-18 -65.505431
2018-10-19 -52.545248
2018-10-22 -42.982536
2018-10-23 -81.224092
2018-10-24 -119.974506
2018-10-25 -44.757032
2018-10-26 -59.621239
2018-10-29 -89.901345
2018-10-30 -120.707458
2018-10-31 -34.211038
2018-11-01 -40.414796
2018-11-02 -37.867501
2018-11-05 -11.432448
2018-11-06 10.524711
2018-11-07 98.340627
2018-11-08 96.521523
2018-11-09 53.258507
2018-11-12 -4.019879
Length: 302, dtype: float64
###Markdown
RSI INDICATOR https://www.pivotaltracker.com/story/show/160699122
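RSI compares the smoothed average gain to the smoothed average loss over a lookback window ($n=14$ by default in the code below): $$\mathrm{RS} = \frac{\text{average gain}}{\text{average loss}}, \qquad \mathrm{RSI} = 100 - \frac{100}{1 + \mathrm{RS}}.$$ Readings above 70 are conventionally treated as overbought and below 30 as oversold, which is why the plot marks those two levels.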
###Code
import pandas as pd
pd.core.common.is_list_like=pd.api.types.is_list_like # Used as a patch for pandas data reader to avoid error
from pandas_datareader import data as web
import fix_yahoo_finance as yf
import datetime as dt
start=dt.datetime(2017,9,1)
end=dt.datetime.today().strftime('%Y-%m-%d')
web=yf.download("MSFT",start,end)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import matplotlib.dates as mdates
import matplotlib.font_manager as font_manager
%matplotlib inline
%pylab inline
web_close=pd.DataFrame(web.Close)
print (len(web_close))
matplotlib.rcParams.update({'font.size': 9})
def rsiFunc(prices, n=14):
deltas = np.diff(prices,axis=0)
seed = deltas[:n+1]
up = seed[seed>=0].sum()/n # average gain over the first n price changes
down = -seed[seed<0].sum()/n # average loss over the first n price changes (as a positive number)
rs = (up/down)
rsi= np.zeros_like(prices)
rsi[:n] = 100. - (100./(1.+rs))
for i in range(n, len(prices)):
delta = deltas[i-1] # because np.diff output is one element shorter than prices
if delta>0:
upval = delta
downval = 0.
else:
upval = 0.
downval = -delta
up = (up*(n-1) + upval)/n
down = (down*(n-1) + downval)/n
rs = abs(up/down)
rsi[i] = 100. -(100./(1.+rs))
return rsi
#Reset the index to remove Date column from index
web_ohlc = web.reset_index()
#Naming columns
web_ohlc.columns = ["Date","Open","High",'Low',"Close","Adj Close","Volume"]
#Converting dates column to float values
web_ohlc['Date'] = web_ohlc['Date'].map(mdates.date2num)
prices=web_close
rsi=rsiFunc(prices)
print (rsi)# PRINTING THR RSI VALUES
#plotting RSI INDICATOR
pylab.rcParams['figure.figsize'] = (20, 9)#this function makes the graph bigger and presentable
ax= plt.gca()
ax.xaxis_date()
plt.xlabel("Date")
plt.plot(web_ohlc['Date'],rsi)
plt.ylim(0,100)
plt.yticks([30, 70])
plt.show()
###Output
[*********************100%***********************] 1 of 1 downloaded
Populating the interactive namespace from numpy and matplotlib
302
[[45.29085351]
[45.29085351]
[45.29085351]
[45.29085351]
[45.29085351]
[45.29085351]
[45.29085351]
[45.29085351]
[45.29085351]
[45.29085351]
[45.29085351]
[45.29085351]
[45.29085351]
[45.29085351]
[46.87568171]
[39.74616755]
[39.74616755]
[44.74642706]
[44.91335474]
[49.95912221]
[50.89664254]
[48.06804218]
[51.62508435]
[60.33456731]
[60.51399817]
[62.28990422]
[62.28990422]
[63.15149278]
[67.46250188]
[69.49403192]
[70.35600846]
[69.56216975]
[69.68497518]
[71.53944178]
[76.23599659]
[76.32949526]
[76.47895254]
[72.68935016]
[73.48897734]
[88.08395307]
[88.19482924]
[80.99212126]
[80.99212126]
[82.96877643]
[83.16381837]
[83.89226009]
[81.58824359]
[82.34536902]
[76.83102173]
[74.32221381]
[74.56613444]
[75.07611537]
[62.95486975]
[64.23346779]
[56.58514107]
[57.47131258]
[64.59552134]
[59.12772126]
[60.02382938]
[63.52599065]
[68.45381018]
[56.02475427]
[60.21729809]
[60.6553468 ]
[42.74617832]
[45.52418142]
[51.44436966]
[50.01780167]
[57.35205565]
[61.27311308]
[62.48802053]
[61.13081976]
[57.28591337]
[65.03653219]
[62.38395121]
[59.33422274]
[57.62425368]
[57.50912049]
[57.55480095]
[56.83117862]
[58.41784946]
[58.47088699]
[57.0603605 ]
[59.4593277 ]
[61.70709022]
[65.60872545]
[70.24788837]
[70.60374245]
[70.00262147]
[65.96998093]
[67.28905328]
[73.7058229 ]
[62.79621691]
[69.71024282]
[69.39985496]
[68.57780754]
[73.93170909]
[74.76574067]
[74.06177724]
[75.63663197]
[80.05941873]
[78.81254734]
[69.05101596]
[75.37079949]
[70.26554332]
[56.61065322]
[42.91986842]
[53.57222193]
[48.53401802]
[38.18980898]
[46.63137446]
[48.88439098]
[50.54124827]
[52.84566505]
[56.92574928]
[55.09422901]
[56.72977711]
[53.16721204]
[53.77717993]
[59.31692871]
[62.16710553]
[58.2263151 ]
[56.85824271]
[53.93819617]
[54.48544497]
[56.14082326]
[54.97299076]
[56.61316642]
[58.33828371]
[64.0385044 ]
[64.6069576 ]
[54.9998324 ]
[52.98636351]
[54.05379925]
[55.44047147]
[48.96141867]
[49.84729965]
[47.44538714]
[39.05757394]
[32.96760702]
[52.94708327]
[43.77193653]
[43.62083342]
[48.15047313]
[42.7410585 ]
[45.58965461]
[51.3305872 ]
[51.4358866 ]
[46.75142188]
[48.03159729]
[52.80607398]
[50.39572035]
[54.19276638]
[52.92450289]
[55.37624007]
[59.35043145]
[60.09574079]
[59.05572838]
[55.57200477]
[56.44447731]
[49.74191905]
[47.533957 ]
[52.94864782]
[56.7905582 ]
[50.27281334]
[53.9363336 ]
[49.94681682]
[51.40175628]
[54.19284014]
[56.79168791]
[55.48052953]
[58.33557728]
[60.66738907]
[59.8859433 ]
[60.74168497]
[57.88076188]
[57.18624676]
[53.25928818]
[53.89203167]
[58.10003806]
[57.64315661]
[61.43200339]
[59.69685899]
[59.8712411 ]
[57.98021181]
[61.49753261]
[60.85556343]
[67.36025892]
[69.79922189]
[71.17013909]
[71.96090962]
[62.11414013]
[64.54780124]
[61.27003481]
[62.19676164]
[59.48477247]
[61.71258155]
[54.41945499]
[57.48167283]
[57.48167283]
[61.61916639]
[57.28050563]
[53.24327149]
[44.00138407]
[47.36243396]
[41.39124442]
[46.52965081]
[46.44919692]
[52.62554043]
[48.49515198]
[51.52541331]
[56.90902685]
[59.30771183]
[60.24040761]
[59.47914668]
[66.64482882]
[69.86489808]
[66.94618391]
[69.67485113]
[65.05886326]
[61.2669557 ]
[66.69617533]
[70.7149685 ]
[69.07802501]
[75.35945847]
[69.55168213]
[61.38349298]
[53.34932714]
[55.28645933]
[55.84268931]
[59.35485694]
[60.58492295]
[60.82937259]
[62.89468556]
[64.53266293]
[65.02331882]
[61.60695685]
[57.75382129]
[62.11444557]
[53.71149275]
[53.62922837]
[53.36524364]
[50.21514014]
[46.50901231]
[51.21438396]
[53.26411432]
[56.56577933]
[60.82362492]
[62.97354099]
[68.0143675 ]
[67.62005876]
[68.68153946]
[64.94091906]
[49.77786238]
[50.73955296]
[48.61423764]
[53.26782122]
[59.54081161]
[60.96663227]
[64.41478351]
[65.66683625]
[59.62567234]
[62.82926719]
[56.06803226]
[61.58182266]
[63.40682097]
[64.48635157]
[63.40543783]
[61.0510985 ]
[62.4257564 ]
[62.20581021]
[66.18337863]
[63.51298237]
[63.58177098]
[51.20726883]
[48.39442702]
[43.41374627]
[49.57077329]
[32.89504951]
[32.4137851 ]
[45.08147331]
[40.66357663]
[49.80609119]
[49.11102163]
[44.06429373]
[44.50890215]
[47.24645583]
[43.5933147 ]
[33.16163262]
[47.23092689]
[44.94772872]
[40.10236641]
[39.92353869]
[46.51617127]
[44.98014016]
[45.50276172]
[48.46787702]
[48.93332745]
[57.31617941]
[56.81869242]
[51.79296194]
[46.32737144]]
###Markdown
https://www.pivotaltracker.com/story/show/160780916 Candlestick Depiction of Stock Price
###Code
import pandas as pd
pd.core.common.is_list_like=pd.api.types.is_list_like
from pandas_datareader import data as web
import fix_yahoo_finance as yf
import datetime as dt
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
from mpl_finance import candlestick_ohlc
import matplotlib.dates as mdates
import datetime as dt
%matplotlib inline
%pylab inline
start=dt.datetime(2017,9,1)
end=dt.datetime.today().strftime('%Y-%m-%d')# retrieves data on a daily basis
web=yf.download("MSFT",start,end)
#Reset the index to remove Date column from index
web_ohlc = web.reset_index()
#Naming columns
web_ohlc.columns = ["Date","Open","High",'Low',"Close","Adj Close","Volume"]
#Converting dates column to float values
web_ohlc['Date'] = web_ohlc['Date'].map(mdates.date2num)
pylab.rcParams['figure.figsize'] = (20, 9)
ax1 = plt.subplot2grid((8,1), (0,0), rowspan=8, colspan=1)
plt.tight_layout()
ax1.xaxis_date()
plt.xlabel("Date")
print(web_ohlc)
candlestick_ohlc(ax1,web_ohlc.values,width=1.5, colorup='g', colordown='r',alpha=0.75)
plt.ylabel("Price")
plt.legend(loc=2)# shows the label in the leftmost position of the graph
plt.show()
###Output
Populating the interactive namespace from numpy and matplotlib
[*********************100%***********************] 1 of 1 downloaded
Date Open High Low Close Adj Close \
0 736573.0 74.709999 74.739998 73.639999 73.940002 72.631020
1 736577.0 73.339996 73.889999 72.980003 73.610001 72.306847
2 736578.0 73.739998 74.040001 73.349998 73.400002 72.100578
3 736579.0 73.680000 74.599998 73.599998 74.339996 73.023933
4 736580.0 74.330002 74.440002 73.839996 73.980003 72.670311
5 736583.0 74.309998 74.940002 74.309998 74.760002 73.436501
6 736584.0 74.760002 75.239998 74.370003 74.680000 73.357918
7 736585.0 74.930000 75.230003 74.550003 75.209999 73.878525
8 736586.0 75.000000 75.489998 74.519997 74.769997 73.446312
9 736587.0 74.830002 75.389999 74.070000 75.309998 73.976761
10 736590.0 75.230003 75.970001 75.040001 75.160004 73.829422
11 736591.0 75.209999 75.709999 75.010002 75.440002 74.104462
12 736592.0 75.349998 75.550003 74.309998 74.940002 73.613312
13 736593.0 75.110001 75.239998 74.110001 74.209999 72.896225
14 736594.0 73.989998 74.510002 73.849998 74.410004 73.092697
15 736597.0 74.089996 74.250000 72.919998 73.260002 71.963051
16 736598.0 73.669998 73.809998 72.989998 73.260002 71.963051
17 736599.0 73.550003 74.169998 73.169998 73.849998 72.542610
18 736600.0 73.540001 73.970001 73.309998 73.870003 72.562256
19 736601.0 73.940002 74.540001 73.879997 74.489998 73.171272
20 736604.0 74.709999 75.010002 74.300003 74.610001 73.289154
21 736605.0 74.669998 74.879997 74.190002 74.260002 72.945351
22 736606.0 74.089996 74.720001 73.709999 74.690002 73.367737
23 736607.0 75.220001 76.120003 74.959999 75.970001 74.625076
24 736608.0 75.669998 76.029999 75.540001 76.000000 74.654549
25 736611.0 75.970001 76.550003 75.860001 76.290001 74.939415
26 736612.0 76.330002 76.629997 76.139999 76.290001 74.939415
27 736613.0 76.360001 76.459999 75.949997 76.419998 75.067108
28 736614.0 76.489998 77.290001 76.370003 77.120003 75.754723
29 736615.0 77.589996 77.870003 77.290001 77.489998 76.118172
.. ... ... ... ... ... ...
272 736969.0 115.300003 115.839996 114.440002 115.150002 115.150002
273 736970.0 115.419998 116.180000 114.930000 115.169998 115.169998
274 736971.0 114.610001 114.760002 111.629997 112.790001 112.790001
275 736972.0 112.629997 113.169998 110.639999 112.129997 112.129997
276 736975.0 111.660004 112.029999 109.339996 110.849998 110.849998
277 736976.0 111.139999 113.080002 110.800003 112.260002 112.260002
278 736977.0 111.239998 111.500000 105.790001 106.160004 106.160004
279 736978.0 105.349998 108.930000 104.199997 105.910004 105.910004
280 736979.0 109.010002 111.239998 107.120003 109.570000 109.570000
281 736982.0 108.910004 109.480003 106.949997 107.599998 107.599998
282 736983.0 109.540001 111.410004 108.949997 111.000000 111.000000
283 736984.0 111.680000 111.809998 109.550003 110.709999 110.709999
284 736985.0 110.099998 110.529999 107.830002 108.500000 108.500000
285 736986.0 108.930000 110.860001 108.209999 108.660004 108.660004
286 736989.0 109.320000 110.540001 108.239998 109.629997 109.629997
287 736990.0 107.769997 108.970001 105.110001 108.099998 108.099998
288 736991.0 108.410004 108.489998 101.589996 102.320000 102.320000
289 736992.0 106.550003 109.269997 106.150002 108.300003 108.300003
290 736993.0 105.690002 108.750000 104.760002 106.959999 106.959999
291 736996.0 108.110001 108.699997 101.629997 103.849998 103.849998
292 736997.0 103.660004 104.379997 100.110001 103.730003 103.730003
293 736998.0 105.440002 108.139999 105.389999 106.809998 106.809998
294 736999.0 107.050003 107.320000 105.529999 105.919998 105.919998
295 737000.0 106.480003 107.320000 104.980003 106.160004 106.160004
296 737003.0 106.370003 107.739998 105.900002 107.510002 107.510002
297 737004.0 107.379997 108.839996 106.279999 107.720001 107.720001
298 737005.0 109.440002 112.239998 109.400002 111.959999 111.959999
299 737006.0 111.800003 112.209999 110.910004 111.750000 111.750000
300 737007.0 110.849998 111.449997 108.760002 109.570000 109.570000
301 737010.0 109.419998 109.959999 106.099998 106.870003 106.870003
Volume
0 21736200
1 21556000
2 16535800
3 17471200
4 14703800
5 17910400
6 14394900
7 13380800
8 15733900
9 38578400
10 23307000
11 16093300
12 21587900
13 19186100
14 14111400
15 24149200
16 18019600
17 19565100
18 10883800
19 17079100
20 15304800
21 12190400
22 13317700
23 21195300
24 13959800
25 11386500
26 13944500
27 15388900
28 16876500
29 15335700
.. ...
272 20787200
273 16648000
274 34821700
275 29068900
276 29640600
277 26198600
278 61376300
279 63904300
280 47742100
281 32068100
282 31610200
283 26548200
284 32506200
285 32785500
286 26545600
287 43770400
288 63897800
289 61646800
290 55523100
291 55162000
292 65350900
293 51062400
294 33384200
295 37680200
296 27922100
297 24340200
298 37901700
299 25644100
300 32039200
301 33598300
[302 rows x 7 columns]
###Markdown
https://www.pivotaltracker.com/story/show/161293103 OHLC GRAPH WITH SIMPLE LINE PLOTTING
###Code
import pandas as pd
pd.core.common.is_list_like=pd.api.types.is_list_like
from pandas_datareader import data as web
import fix_yahoo_finance as yf
import datetime as dt
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
from mpl_finance import candlestick_ohlc
import matplotlib.dates as mdates
import datetime as dt
%matplotlib inline
%pylab inline
start=dt.datetime(2017,9,1)
end=dt.datetime.today().strftime('%Y-%m-%d')
web=yf.download("MSFT",start,end)
web_ohlc = web.reset_index()
#Naming columns
web_ohlc.columns = ["Date","Open","High",'Low',"Close","Adj Close","Volume"]
#Converting dates column to float values
web_ohlc['Date'] = web_ohlc['Date'].map(mdates.date2num)
pylab.rcParams['figure.figsize'] = (20, 9)
ax= plt.gca() # "get current axes": returns a handle to the current axes, creating one if none exists; with subplots, the one created last is current
ax.xaxis_date()
plt.plot(web_ohlc['Date'],web_ohlc.Close)
plt.plot(web_ohlc['Date'],web_ohlc.Open)
plt.plot(web_ohlc['Date'],web_ohlc.High)
plt.plot(web_ohlc['Date'],web_ohlc.Low)
plt.ylabel("Price")
plt.xlabel("Dates")
plt.legend(loc=2)
###Output
Populating the interactive namespace from numpy and matplotlib
[*********************100%***********************] 1 of 1 downloaded
###Markdown
MACD https://www.pivotaltracker.com/story/show/160699005
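MACD is the difference between a fast and a slow exponential moving average of the close: $$\mathrm{MACD} = \mathrm{EMA}_{12} - \mathrm{EMA}_{26}.$$ One pandas caveat: `ewm(com=n)` parameterizes the decay by center of mass, while the conventional $n$-day EMA corresponds to `ewm(span=n)`; the two are related by $\alpha = 1/(1+\mathrm{com}) = 2/(\mathrm{span}+1)$, so the cell below uses `span`.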
###Code
import pandas as pd
pd.core.common.is_list_like=pd.api.types.is_list_like
from pandas_datareader import data as web
import fix_yahoo_finance as yf
import datetime as dt
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
from mpl_finance import candlestick_ohlc
import matplotlib.dates as mdates
import datetime as dt
%matplotlib inline
%pylab inline
#start=dt.datetime(2017,9,1)
#end=dt.datetime.today().strftime('%Y-%m-%d')
#web=yf.download("MSFT",start,end)
#web_close=pd.DataFrame(web.Close)
display(web_close)
web['26 ema'] = web_close.ewm(span=26).mean() # span=26 gives the conventional 26-day EMA
web['12 ema'] = web_close.ewm(span=12).mean() # span=12 gives the fast 12-day EMA
#print (web['26 ema'],web['12 ema'])
web['MACD'] = (web['12 ema'] - web['26 ema'])
print(web['MACD'])
pylab.rcParams['figure.figsize'] = (20, 9)
plt.plot(web['MACD'],label='MACD')
plt.legend(loc=2)
###Output
Populating the interactive namespace from numpy and matplotlib
|
Lessons/Lesson06b_Matplotlib_supplement.ipynb | ###Markdown
"Geo Data Science with Python" Notebook Lesson 6b Matplotlib - Supplement--- Import Modules and Data
###Code
import matplotlib.pyplot as plt
import numpy as np
# Temperature converter
def tempF2C(temp):
return (temp - 32.0) / 1.8
# read datasets from file to numpy arrays
fp = './Kumpula-June-2016-w-metadata.txt'
data = np.genfromtxt(fp, skip_header=9, delimiter=',')
dates = data[:, 0]
temp = data[:, 1]
temp_max = data[:, 2]
temp_min = data[:, 3]
# Convert temperature into celcius
temp_celsius = tempF2C(temp)
temp_max_celsius = tempF2C(temp_max)
temp_min_celsius = tempF2C(temp_min)
###Output
_____no_output_____
###Markdown
CODE EXAMPLES 1: Add dates to your plot with datetime objects
###Code
# convert dates into integer numbers, then strings
dtStr = (dates.astype(int)).astype(str)
# convert dates from strings to datetime date objects
from datetime import datetime
datesDt = [ datetime.strptime(d, '%Y%m%d') for d in dtStr ]
# plot the dataset
plt.plot(datesDt, temp_celsius)
# add x-label
plt.xlabel('Time') # x-label
# use the plt.ticks() function to rotate the dates
locs = plt.xticks(rotation=30, ha='right')
###Output
_____no_output_____
###Markdown
--- CODE EXAMPLES 2: Plot the OOP way!
###Code
# create figure and axes objects
fig, ax = plt.subplots()
# plot the dataset
ax.plot(datesDt, temp_celsius, 'r')
# add x-label
ax.set_xlabel('Time')
# use the autofmt_xdate() method to rotate the dates
fig.autofmt_xdate()
###Output
_____no_output_____
###Markdown
--- CODE EXAMPLES 3: Create subplots
###Code
# Create the figure and subplots
fig, axes = plt.subplots(nrows=2, ncols=1, figsize=(6,8))
# Rename the axes for ease of use
ax1 = axes[0]
ax2 = axes[1]
# Set the plotted line width
line_width = 1.5
# Plot data
ax1.plot(dates, temp_celsius, c='red', lw=line_width)
ax2.plot(dates, temp_max_celsius, c='blue', lw=line_width)
# Set y-axis limits
min_temp = temp_celsius.min() - 2
max_temp = temp_max_celsius.max() + 2
ax1.set_ylim(min_temp, max_temp)
ax2.set_ylim(min_temp, max_temp)
# Turn plot grids on, for each subplot
ax1.grid()
ax2.grid()
# Figure title
fig.suptitle('Temperature observations - Kumpula June 2016')
# Axis labels
ax1.set_ylabel('Temperature [C]')
ax2.set_xlabel('Date')
###Output
_____no_output_____ |
notebooks/birdsong/cassins_vireo_example/.ipynb_checkpoints/2.0-CAVI-UMAP-HDBSCAN-Clustering-checkpoint.ipynb | ###Markdown
Reduce the dimensionality of the dataset using UMAP, then cluster it using HDBSCAN - Compare the HDBSCAN clustering of the dataset to the hand-labels Import packages
###Code
from IPython.display import clear_output
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
%matplotlib inline
import os
from tqdm import tqdm_notebook as tqdm
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import hdbscan
import seaborn as sns
import umap
from avgn.network_analysis.network_analysis import *
import avgn.network.convnet_model as conv
from avgn.network.training import *
###Output
_____no_output_____
###Markdown
Define data parameters
###Code
dims = [128, 128, 1] # dimensions of the input data (height, width, channels)
batch_size = 16 # size of batches to use (per GPU)
###Output
_____no_output_____
###Markdown
Load the dataset
###Code
from glob import glob
bird_name = 'CAVI'
hdf_locs = glob('../../../data/CAVI_wavs/*_'+str(dims[0])+'.hdf5')
hdf_locs[:3]
# What information is stored in the HDF5 file
to_load = ['spectrograms', 'lengths', 'start', 'wav_file', 'syll_start_rel_wav', 'symbols']
all_content = load_from_hdf5(hdf_locs, to_load, min_ex=2500, verbose=True)
num_examples = len(all_content['name'])
###Output
../../../data/CAVI_wavs/GRA_128.hdf5 3045
../../../data/CAVI_wavs/YAW_128.hdf5 3149
../../../data/CAVI_wavs/AGBk_128.hdf5 8185
../../../data/CAVI_wavs/AGO_128.hdf5 3869
../../../data/CAVI_wavs/WABk_128.hdf5 6430
../../../data/CAVI_wavs/BuRA_128.hdf5 5527
../../../data/CAVI_wavs/AOBu_128.hdf5 7638
../../../data/CAVI_wavs/ORA_128.hdf5 2805
../../../data/CAVI_wavs/RYA_128.hdf5 3565
###Markdown
Plot a few examples
###Code
nex=32
fig, ax = plt.subplots(nrows=1,ncols=nex, figsize=(nex,1))
for i in range(nex):
ax[i].matshow(all_content['spectrograms'][i].reshape((dims[0],dims[1])),
cmap=plt.cm.viridis, interpolation='nearest', origin='lower')
ax[i].axis('off')
###Output
_____no_output_____
###Markdown
Reduce image size for clustering
###Code
from skimage.transform import resize
x = all_content['spectrograms']
x_small = [resize(i, [32,32]) for i in tqdm(x)]
x_small = np.array(x_small).reshape((len(x_small), np.prod(np.shape(x_small)[1:])))
###Output
_____no_output_____
###Markdown
UMAP embedding
###Code
breakme ## undefined name: raises a NameError, halting "Run All" before the slow cells below
x_small[0]
x_small = [(i*255).astype('uint8') for i in x_small]
fig, ax = plt.subplots(ncols=2, figsize=(8,4))
ax[0].matshow(x[0].reshape([128,128]))
ax[1].matshow(x_small[0].reshape([32,32]))
clusterable_embedding = umap.UMAP(
n_neighbors=30,
#min_dist=0.0,
n_components=2,
random_state=42,
).fit_transform(x_small)
np.unique(all_content['symbols'])
plt.plot(clusterable_embedding[:,0], clusterable_embedding[:,1])
unique_sylls = np.unique(all_content['symbols'])
len(unique_sylls)
###Output
_____no_output_____
###Markdown
Cluster UMAP representations
###Code
# we set the minimum cluster size at 0.25% of the dataset
cluster_pct = 0.0025
min_cluster_size = int(len(clusterable_embedding)*cluster_pct)
min_cluster_size
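## min_cluster_size is the smallest group HDBSCAN will report as a cluster;
## min_samples=1 makes the clustering as permissive as possible, so fewer points
## end up labelled as noise (-1).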
clustered_labels = cluster_data(np.array(list(clusterable_embedding)),
hdbscan.HDBSCAN,
(),
{'min_cluster_size':min_cluster_size, 'min_samples':1},
verbose = True)
# check how many syllables were labelled
pct_unlabelled = np.sum(clustered_labels == -1)/len(clustered_labels)
print(str(round(pct_unlabelled*100,1))+ '% of syllables went unlabelled')
###Output
1.1% of syllables went unlabelled
###Markdown
Compare hand labels to UMAP embeddings
###Code
def plot_with_labels(data, labels, title = '', ax = None, figsize = (9,9)):
palette = sns.color_palette('husl', len(np.unique(labels)))
labs_to_numbers_dict = {l:i for i,l in enumerate(np.unique(labels))}
np.random.shuffle(palette)
colors = [palette[labs_to_numbers_dict[x]] if x >= 0 else (0.75, 0.75, 0.75) for x in np.array(labels)]
show_fig = ax is None # remember whether this function created the figure
if show_fig: fig, ax = plt.subplots(nrows=1,ncols=1,figsize=figsize)
ax.scatter(data.T[0], data.T[1],
color=colors, alpha = 1, linewidth= 0, s=1)
ax.axis('off')
ax.set_title(title)
if show_fig: plt.show() # only show if this function created the figure
def compareLabellingSchemes(lab1, lab2, z, figsize=(24,24)):
# get palette for first labelling
palette = sns.color_palette('husl', len(np.unique(lab1)))
labs_to_numbers_dict = {l:i for i,l in enumerate(np.unique(lab1))}
np.random.shuffle(palette)
colors = [palette[labs_to_numbers_dict[x]] if int(x) >= 0 else (0.75, 0.75, 0.75) for x in np.array(lab1)]
# plot first labelling
fig, ax= plt.subplots(nrows=1,ncols=1,figsize=figsize)
ax.scatter(z.T[0], z.T[1],
color=colors, alpha = 1, linewidth= 0, s=10)
ax.axis('off')
plt.show()
# compare labels
used_labs = []
unchanged_labs = []
for lab in np.unique(lab2):
lab1_labs = lab1[lab2 == lab]
closest_lab = np.unique(lab1_labs)[np.argmax([list(lab1_labs).count(i) for i in np.unique(lab1_labs)])]
if closest_lab not in used_labs:
used_labs.append(closest_lab)
lab2[lab2 == lab] = closest_lab
else:
unchanged_labs.append(lab)
# get palette for second labelling
palette = palette + sns.hls_palette(len(np.unique(unchanged_labs)), l=.5, s=.8)
ul = len(np.unique(lab1))
for ilab, lab in enumerate(unchanged_labs):
labs_to_numbers_dict[lab] = ilab + ul
colors = [palette[labs_to_numbers_dict[x]] for x in np.array(lab2)]
fig, ax= plt.subplots(nrows=1,ncols=1,figsize=figsize)
ax.scatter(z.T[0], z.T[1],
color=colors, alpha = 1, linewidth= 0, s=10)
ax.axis('off')
plt.show()
lab1 = clustered_labels.astype('str')
lab2 = all_content['symbols'].astype('str')
compareLabellingSchemes(lab1, lab2, clusterable_embedding) # compare the two labellings in the 2-D UMAP embedding
plot_with_labels(clusterable_embedding, clustered_labels, figsize=(16,16))
###Output
_____no_output_____ |
14 Text und Machine Learning/2.4 Classifying Text.ipynb | ###Markdown
Document classifier Data - First we need data to train our model
###Code
from textblob.classifiers import NaiveBayesClassifier
train = [
('I love this sandwich.', 'pos'),
('This is an amazing place!', 'pos'),
('I feel very good about these beers.', 'pos'),
('This is my best work.', 'pos'),
("What an awesome view", 'pos'),
('I do not like this restaurant', 'neg'),
('I am tired of this stuff.', 'neg'),
("I can't deal with this", 'neg'),
('He is my sworn enemy!', 'neg'),
('My boss is horrible.', 'neg')
]
test = [
('The beer was good.', 'pos'),
('I do not enjoy my job', 'neg'),
("I ain't feeling dandy today.", 'neg'),
("I feel amazing!", 'pos'),
('Gary is a friend of mine.', 'pos'),
("I can't believe I'm doing this.", 'neg')
]
###Output
_____no_output_____
###Markdown
Training
###Code
cl = NaiveBayesClassifier(train)
###Output
_____no_output_____
###Markdown
Test - How well does our model perform on data it has never seen?
###Code
cl.accuracy(test)
###Output
_____no_output_____
###Markdown
- 80% correct, good enough for me :) Features - Which words contribute the most to something being classified as positive or negative?
###Code
cl.show_informative_features(5)
###Output
Most Informative Features
contains(this) = True neg : pos = 2.3 : 1.0
contains(this) = False pos : neg = 1.8 : 1.0
contains(This) = False neg : pos = 1.6 : 1.0
contains(an) = False neg : pos = 1.6 : 1.0
contains(I) = True neg : pos = 1.4 : 1.0
###Markdown
The classifier believes that if "this" occurs the text is more likely positive, which is of course nonsense, but that is simply what it learned; this is why you need good training data. Classification
###Code
cl.classify("Their burgers are amazing") # "pos"
cl.classify("I don't like their pizza.") # "neg"
###Output
_____no_output_____
###Markdown
Classification by sentence
###Code
from textblob import TextBlob
blob = TextBlob("The beer was amazing. "
"But the hangover was horrible. My boss was not happy.",
classifier=cl)
for sentence in blob.sentences:
print(("%s (%s)") % (sentence,sentence.classify()))
###Output
The beer was amazing. (pos)
But the hangover was horrible. (neg)
My boss was not happy. (neg)
###Markdown
Classifying comments with Swiss song lyrics
###Code
import os,glob
from nltk.tokenize import sent_tokenize
from nltk.tokenize import word_tokenize
from io import open
train = []
countries = ["schweiz", "deutschland"]
for country in countries:
out = []
folder_path = 'songtexte/%s' % country
for filename in glob.glob(os.path.join(folder_path, '*.txt')):
with open(filename, 'r') as f:
text = f.read()
words = word_tokenize(text)
words=[word.lower() for word in words if word.isalpha()]
for word in words:
out.append(word)
out = set(out)
for word in out:
train.append((word,country))
#print (filename)
#print (len(text))
train
from textblob.classifiers import NaiveBayesClassifier
c2 = NaiveBayesClassifier(train)
c2.classify("Ich gehe durch den Wald") # "deutsch"
c2.classify("Hรคsch es guet") # "deutsch"
###Output
_____no_output_____
###Markdown
Hardcore example with movie-review data using NLTK - https://www.nltk.org/book/ch06.html - We now use only the 2,000 most frequent words in the texts and check whether they occur in positive or negative reviews
###Code
review = (" ").join(train[0][0])
print(review)
import random
import nltk
nltk.download('movie_reviews')
from nltk.corpus import movie_reviews
documents = [(list(movie_reviews.words(fileid)), category)
for category in movie_reviews.categories()
for fileid in movie_reviews.fileids(category)]
random.shuffle(documents)
(" ").join(documents[0][0])
all_words = nltk.FreqDist(w.lower() for w in movie_reviews.words())
word_features[0:10]
all_words = nltk.FreqDist(w.lower() for w in movie_reviews.words())
word_features = list(all_words)[:2000]
def document_features(document):
document_words = set(document)
features = {}
for word in word_features:
features['contains({})'.format(word)] = (word in document_words)
return features
print(document_features(movie_reviews.words('pos/cv957_8737.txt')))
featuresets = [(document_features(d), c) for (d,c) in documents]
train_set, test_set = featuresets[100:], featuresets[:100]
classifier = nltk.NaiveBayesClassifier.train(train_set)
classifier.classify(document_features("a movie with bad actors".split(" ")))
classifier.classify(document_features("an uplifting movie with russel crowe".split(" ")))
classifier.show_most_informative_features(10)
###Output
Most Informative Features
contains(unimaginative) = True neg : pos = 8.2 : 1.0
contains(shoddy) = True neg : pos = 6.9 : 1.0
contains(turkey) = True neg : pos = 6.7 : 1.0
contains(singers) = True pos : neg = 6.4 : 1.0
contains(suvari) = True neg : pos = 6.2 : 1.0
contains(mena) = True neg : pos = 6.2 : 1.0
contains(atrocious) = True neg : pos = 6.1 : 1.0
contains(stretched) = True neg : pos = 6.1 : 1.0
contains(schumacher) = True neg : pos = 6.1 : 1.0
contains(unravel) = True pos : neg = 5.8 : 1.0
###Markdown
Document classifier Data - First we need data to train our model
###Code
from textblob.classifiers import NaiveBayesClassifier
train = [
('I love this sandwich.', 'pos'),
('This is an amazing place!', 'pos'),
('I feel very good about these beers.', 'pos'),
('This is my best work.', 'pos'),
("What an awesome view", 'pos'),
('I do not like this restaurant', 'neg'),
('I am tired of this stuff.', 'neg'),
("I can't deal with this", 'neg'),
('He is my sworn enemy!', 'neg'),
('My boss is horrible.', 'neg')
]
test = [
('The beer was good.', 'pos'),
('I do not enjoy my job', 'neg'),
("I ain't feeling dandy today.", 'neg'),
("I feel amazing!", 'pos'),
('Gary is a friend of mine.', 'pos'),
("I can't believe I'm doing this.", 'neg')
]
###Output
_____no_output_____
###Markdown
Training
###Code
a = NaiveBayesClassifier(train)
###Output
_____no_output_____
###Markdown
Test - How well does our model perform on data it has never seen?
###Code
a.accuracy(test)
###Output
_____no_output_____
###Markdown
- 80% correct, good enough for me :) Features - Which words contribute the most to something being classified as positive or negative?
###Code
cl.show_informative_features(5)
###Output
Most Informative Features
contains(this) = True neg : pos = 2.3 : 1.0
contains(this) = False pos : neg = 1.8 : 1.0
contains(This) = False neg : pos = 1.6 : 1.0
contains(an) = False neg : pos = 1.6 : 1.0
contains(I) = False pos : neg = 1.4 : 1.0
###Markdown
The classifier believes that if "this" occurs the text is more likely positive, which is of course nonsense, but that is simply what it learned; this is why you need good training data. Classification
###Code
cl.classify("Their burgers are amazing") # "pos"
cl.classify("I don't like their pizza.") # "neg"
a.classify("I love my job.")
###Output
_____no_output_____
###Markdown
Classification by sentence
###Code
from textblob import TextBlob
blob = TextBlob("The beer was amazing. "
"But the hangover was horrible. My boss was not happy.",
classifier=a)
for sentence in blob.sentences:
print(("%s (%s)") % (sentence,sentence.classify()))
###Output
The beer was amazing. (pos)
But the hangover was horrible. (neg)
My boss was not happy. (neg)
###Markdown
Classifying comments with Swiss song lyrics - http://www.falleri.ch
###Code
import os,glob
from nltk.tokenize import sent_tokenize
from nltk.tokenize import word_tokenize
from io import open
train = []
countries = ["schweiz", "deutschland"]
for country in countries:
out = []
folder_path = 'songtexte/%s' % country
for filename in glob.glob(os.path.join(folder_path, '*.txt')):
with open(filename, 'r') as f:
text = f.read()
words = word_tokenize(text)
words=[word.lower() for word in words if word.isalpha()]
for word in words:
out.append(word)
out = set(out)
for word in out:
train.append((word,country))
#print (filename)
#print (len(text))
train
from textblob.classifiers import NaiveBayesClassifier
c2 = NaiveBayesClassifier(train)
c2.classify("Ich gehe durch den Wald") # "deutsch"
c2.classify("Hรคsch es guet") # "deutsch"
c2.classify("Ich fahre mit meinem Porsche auf der Autobahn.")
c2.show_informative_features(5)
###Output
Most Informative Features
contains(ich) = True schwei : deutsc = 1.2 : 1.0
contains(im) = True schwei : deutsc = 1.2 : 1.0
contains(dir) = True schwei : deutsc = 1.2 : 1.0
contains(zur) = True schwei : deutsc = 1.2 : 1.0
contains(wer) = True schwei : deutsc = 1.2 : 1.0
###Markdown
Orakel Ihr kรถnnt natรผrlich jetzt Euer eigenes Orakel bauen wie hier: - http://home.datacomm.ch/cgi-heeb/dialect/chochi.pl?Hand=Hand&nicht=net&heute=hit&Fenster=Feischter&gestern=gescht&Abend=Abend&gehorchen=folge&Mond=Manat&jeweils=abe&Holzsplitter=Schepfa&Senden=Jetzt+analysieren%21 Hardcore Beispiel mit Film-review daten mit NLTK- https://www.nltk.org/book/ch06.html- Wir nutzen nur noch die 100 hรคufigsten Wรถrter in den Texten und schauen ob sie bei positiv oder negativ vorkommen
###Code
import random
import nltk
nltk.download("movie_reviews")
from nltk.corpus import movie_reviews
documents = [(list(movie_reviews.words(fileid)), category)
for category in movie_reviews.categories()
for fileid in movie_reviews.fileids(category)]
random.shuffle(documents)
(" ").join(documents[1][0])
all_words = nltk.FreqDist(w.lower() for w in movie_reviews.words())
word_features = list(all_words)[:2000]
word_features[0:10]
all_words = nltk.FreqDist(w.lower() for w in movie_reviews.words())
word_features = list(all_words)[:2000]
def document_features(document):
document_words = set(document)
features = {}
for word in word_features:
features['contains({})'.format(word)] = (word in document_words)
return features
print(document_features(movie_reviews.words('pos/cv957_8737.txt')))
featuresets = [(document_features(d), c) for (d,c) in documents]
train_set, test_set = featuresets[100:], featuresets[:100]
classifier = nltk.NaiveBayesClassifier.train(train_set)
classifier.classify(document_features("a movie with bad actors".split(" ")))
classifier.classify(document_features("an uplifting movie with russel crowe".split(" ")))
classifier.show_most_informative_features(10)
###Output
Most Informative Features
contains(sans) = True neg : pos = 8.9 : 1.0
contains(uplifting) = True pos : neg = 8.7 : 1.0
contains(mediocrity) = True neg : pos = 7.6 : 1.0
contains(fabric) = True pos : neg = 6.4 : 1.0
contains(overwhelmed) = True pos : neg = 6.4 : 1.0
contains(topping) = True pos : neg = 5.8 : 1.0
contains(sunny) = True pos : neg = 5.8 : 1.0
contains(wits) = True pos : neg = 5.8 : 1.0
contains(lang) = True pos : neg = 5.8 : 1.0
contains(ugh) = True neg : pos = 5.7 : 1.0
###Markdown
Document classifier Data - First we need data to train our model
###Code
from textblob.classifiers import NaiveBayesClassifier
train = [
('I love this sandwich.', 'pos'),
('This is an amazing place!', 'pos'),
('I feel very good about these beers.', 'pos'),
('This is my best work.', 'pos'),
("What an awesome view", 'pos'),
('I do not like this restaurant', 'neg'),
('I am tired of this stuff.', 'neg'),
("I can't deal with this", 'neg'),
('He is my sworn enemy!', 'neg'),
('My boss is horrible.', 'neg')
]
test = [
('The beer was good.', 'pos'),
('I do not enjoy my job', 'neg'),
("I ain't feeling dandy today.", 'neg'),
("I feel amazing!", 'pos'),
('Gary is a friend of mine.', 'pos'),
("I can't believe I'm doing this.", 'neg')
]
###Output
_____no_output_____
###Markdown
Training
###Code
cl = NaiveBayesClassifier(train)
###Output
_____no_output_____
###Markdown
Test - How well does our model perform on data it has never seen?
###Code
cl.accuracy(test)
###Output
_____no_output_____
###Markdown
- 80% correct, good enough for me :) Features - Which words contribute the most to something being classified as positive or negative?
###Code
cl.show_informative_features(5)
###Output
Most Informative Features
contains(this) = True neg : pos = 2.3 : 1.0
contains(this) = False pos : neg = 1.8 : 1.0
contains(This) = False neg : pos = 1.6 : 1.0
contains(an) = False neg : pos = 1.6 : 1.0
contains(I) = False pos : neg = 1.4 : 1.0
###Markdown
The classifier believes that if "this" occurs the text is more likely positive, which is of course nonsense, but that is simply what it learned; this is why you need good training data. Classification
###Code
cl.classify("Their burgers are amazing") # "pos"
cl.classify("I don't like their pizza.") # "neg"
cl.classify("I hate cars.")
cl.classify("Zurich is beautiful.")
cl.classify("Zurich")
###Output
_____no_output_____
###Markdown
Classification by sentence
###Code
from textblob import TextBlob
blob = TextBlob("The beer was amazing. "
"But the hangover was horrible. My boss was not happy.",
classifier=cl)
for sentence in blob.sentences:
print(("%s (%s)") % (sentence,sentence.classify()))
###Output
The beer was amazing. (pos)
But the hangover was horrible. (neg)
My boss was not happy. (neg)
###Markdown
Classifying comments with Swiss song lyrics
###Code
import os,glob
from nltk.tokenize import sent_tokenize
from nltk.tokenize import word_tokenize
from io import open
train = []
countries = ["schweiz", "deutschland"]
for country in countries:
out = []
folder_path = 'songtexte/%s' % country
for filename in glob.glob(os.path.join(folder_path, '*.txt')):# read in all the files
with open(filename, 'r') as f:
text = f.read()
words = word_tokenize(text)
words=[word.lower() for word in words if word.isalpha()]
for word in words:
out.append(word)
out = set(out)
for word in out:
train.append((word,country))
#print (filename)
#print (len(text))
train
from textblob.classifiers import NaiveBayesClassifier
c2 = NaiveBayesClassifier(train)
c2.classify("Ich gehe durch den Wald") # "deutsch"
c2.classify("Hรคsch es guet") # "deutsch"
c2.classify("Wรถtsch da?")
c2.show_informative_features(5)
###Output
Most Informative Features
contains(zur) = True schwei : deutsc = 1.3 : 1.0
contains(froh) = True schwei : deutsc = 1.3 : 1.0
contains(wer) = True schwei : deutsc = 1.3 : 1.0
contains(das) = True schwei : deutsc = 1.3 : 1.0
contains(macht) = True schwei : deutsc = 1.3 : 1.0
###Markdown
Hardcore example with movie-review data using NLTK - https://www.nltk.org/book/ch06.html - We now use only the 2,000 most frequent words in the texts and check whether they occur in positive or negative reviews
###Code
import random
import nltk
nltk.download('movie_reviews')
review = (" ").join(train[0][0])
print(review)
from nltk.corpus import movie_reviews
documents = [(list(movie_reviews.words(fileid)), category)
for category in movie_reviews.categories()
for fileid in movie_reviews.fileids(category)]
random.shuffle(documents)
(" ").join(documents[0][0])
(" ").join(documents[1][1])
# this is an intermediate step
all_words = nltk.FreqDist(w.lower() for w in movie_reviews.words())
word_features = list(all_words)[:2000] # we take the 2000 most frequent words
word_features
all_words = nltk.FreqDist(w.lower() for w in movie_reviews.words())
word_features = list(all_words)[:2000] # we take the 2000 most frequent words
def document_features(document):
document_words = set(document)
features = {}
for word in word_features:
features['contains({})'.format(word)] = (word in document_words)
return features
print(document_features(movie_reviews.words('pos/cv957_8737.txt')))
featuresets = [(document_features(d), c) for (d,c) in documents]
train_set, test_set = featuresets[100:], featuresets[:100]
classifier = nltk.NaiveBayesClassifier.train(train_set)
classifier.classify(document_features("a movie with bad actors".split(" ")))
classifier.classify(document_features("an uplifting movie with russel crowe".split(" "))) #split: er nimmt nur Wรถrtelisten
classifier.show_most_informative_features(10)
###Output
_____no_output_____ |
Clusterer_interface.ipynb | ###Markdown
Example
###Code
filename = "data/airbnb.csv"
df = pd.read_csv(filename).dropna()
df = df.groupby(['neighbourhood','city']).mean()
df = df[df.columns.difference(['id','longitude','latitude'])]
df = df._get_numeric_data()
print(df.shape)
df.head(5)
###Output
(610, 8)
###Markdown
K-means Elbow method
###Code
c.elbow_k_means(df)
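## For reference, a hand-rolled elbow curve (a hypothetical sketch of what the helper
## above likely wraps; assumes scikit-learn is available):
# from sklearn.cluster import KMeans
# inertias = [KMeans(n_clusters=k, random_state=0).fit(df).inertia_ for k in range(2, 11)]
# plt.plot(range(2, 11), inertias, marker='o') # pick k at the bend (the 'elbow')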
###Output
_____no_output_____
###Markdown
Silhouette plot
###Code
c.silhouette_clusters_K_Means(df)
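## Silhouette scores lie in [-1, 1]: values near 1 indicate well-separated clusters,
## values near 0 indicate overlapping clusters, and negative values suggest misassignment.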
###Output
For n_clusters = 2 The average silhouette_score is : 0.546679819262802
###Markdown
Clustering
###Code
n_clusters = 4
clusters = c.k_means(df, n_clusters = n_clusters)
###Output
Reduced inertia: 2738.632031981707
Clusters centers:
###Markdown
Visualization
###Code
c.plot_cluster_2D(df, clusters)
c.plot_cluster_3D(df, clusters, False)
###Output
_____no_output_____
###Markdown
Hierarchical clustering Clustering
###Code
clusters = c.hierarchical_clusters(df, k=4)
###Output
_____no_output_____
###Markdown
Visualization
###Code
c.plot_cluster_2D(df, clusters)
c.plot_cluster_3D(df, clusters)
###Output
_____no_output_____
###Markdown
Gaussian Mixture Models Clustering with BIC
###Code
clusters = c.best_GMM_clusters(df, criterion='bic')
c.plot_cluster_2D(df, clusters)
c.plot_cluster_3D(df, clusters)
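# For reference, a minimal BIC model-selection sketch with scikit-learn's
# GaussianMixture (assumed installed; best_GMM_clusters presumably wraps a
# similar search over n_components):
# from sklearn.mixture import GaussianMixture
# bics = {k: GaussianMixture(n_components=k, random_state=0).fit(df).bic(df)
#         for k in range(2, 8)}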
###Output
_____no_output_____ |
11-2-Time-Series.ipynb | ###Markdown
Chapter 11 - Time Series 11.2 - Time Series Basics
###Code
from datetime import datetime as dt
from dateutil.parser import parse
import pandas as pd
import numpy as np
###Output
_____no_output_____
###Markdown
The basic time series object in `pandas` is a `Series` indexed by timestamps, i.e. a `Series` with a `DatetimeIndex`.
###Code
qs1 = []
# Build a list of datetimes: the first day of each quarter of 2019.
for i in range(1, 13, 3):
t = dt(2019, i, 1)
qs1.append(t)
display(qs1)
# Use the time series as the index to a Series
s1 = pd.Series([10,20,30,40], index=qs1)
display(s1)
###Output
_____no_output_____
###Markdown
Under the hood, the `datetime` objects are stored as a `DatetimeIndex`.
###Code
display(s1.index)
print(type(s1.index))
###Output
_____no_output_____
###Markdown
As with other `Series`, arithmetic operations between two `Series` objects are aligned on the dates.
###Code
# Instantiate a new Series with Q1 - Q3 dates
qs2 = [parse('2019-01-01'), parse('2019-04-01'), parse('2019-07-01')]
s2 = pd.Series([25,45,65], index=qs2)
display(s2)
# Add s1 and s2 Series objects
display(s1 + s2) # s2 has no value for 2019-10-01, so the sum there is NaN
###Output
_____no_output_____
###Markdown
Indexing, Selection, Subsetting You can use the date as the index to pull out its value. Different date formats are permitted.
###Code
display(s2)
print(s2['20190101'])
print(s2['07/01/2019'])
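# A datetime object works as well:
print(s2[dt(2019, 4, 1)])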
###Output
_____no_output_____
###Markdown
Use `pd.date_range` to automatically generate dates with a start date and number of periods.
###Code
d1 = pd.Series(range(0,20), pd.date_range('2018/07/01', periods=20))
display(d1.head(10))
d2 = pd.Series(range(0,400), pd.date_range('20190701', periods=400))
display(d2.head(5))
print('...')
display(d2.tail(3))
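# As a side note, date_range also takes a freq argument; for example, the
# quarter-start index built by hand earlier could be generated directly:
display(pd.date_range('2019-01-01', periods=4, freq='QS'))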
###Output
_____no_output_____
###Markdown
Now, filtering can be performed on the `DatetimeIndex`.
###Code
display(d2['2019'].iloc[:5]) # Filter by year
display(d2['2019-09'].iloc[:5]) # Filter by year & month
display(d2[dt(2019,10, 3):].iloc[:5]) # Filter by datetime
###Output
_____no_output_____
###Markdown
Filtering can also be done by truncating.
###Code
# Truncate (remove) all values before 1 Aug 2019
d2.truncate(before='2019-08-01').iloc[:10]
# Truncate (remove) all values after 5 Jul 2019
d2.truncate(after='2019-07-05')
###Output
_____no_output_____
###Markdown
Time Series with Duplicate Indices Sometimes, there may be multiple data observations falling on the same timestamp.
###Code
dup_dates = pd.DatetimeIndex(['2020-07-28', '2020-07-29',
'2020-07-29', '2020-07-30',
'2020-07-31'])
s3 = pd.Series(np.arange(5), index=dup_dates)
display(s3)
# Checking index.is_unique can validate that the index contains duplicates
print(s3.index.is_unique)
# Indexing into this time series produces a slice if there are duplicates
print(s3['20200729'])
# If there are no duplicates, this produces a scalar
print(s3['20200728'])
print(s3['20200731'])
# To aggregate over the non-unique timestamps, group on level=0 of the index
print(s3.groupby(level=0).sum())
print(s3.groupby(level=0).mean())
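# A related check: index.duplicated() flags the repeated timestamps directly.
print(s3.index.duplicated())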
###Output
2020-07-28 0
2020-07-29 3
2020-07-30 3
2020-07-31 4
dtype: int64
2020-07-28 0.0
2020-07-29 1.5
2020-07-30 3.0
2020-07-31 4.0
dtype: float64
|
ImageCollection/metadata.ipynb | ###Markdown
Install Earth Engine API Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geehydro](https://github.com/giswqs/geehydro). The **geehydro** Python package builds on the [folium](https://github.com/python-visualization/folium) package and implements several methods for displaying Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, `Map.centerObject()`, and `Map.setOptions()`. The following script checks whether the geehydro package is installed; if not, it installs geehydro, which automatically installs its dependencies, including earthengine-api and folium.
###Code
import subprocess
try:
import geehydro
except ImportError:
print('geehydro package not installed. Installing ...')
subprocess.check_call(["python", '-m', 'pip', 'install', 'geehydro'])
###Output
_____no_output_____
###Markdown
Import libraries
###Code
import ee
import folium
import geehydro
###Output
_____no_output_____
###Markdown
Authenticate and initialize Earth Engine API. You only need to authenticate the Earth Engine API once.
###Code
try:
ee.Initialize()
except Exception as e:
ee.Authenticate()
ee.Initialize()
###Output
_____no_output_____
###Markdown
Create an interactive map This step creates an interactive map using [folium](https://github.com/python-visualization/folium). The default basemap is OpenStreetMap. Additional basemaps can be added using the `Map.setOptions()` function; the options are `ROADMAP`, `SATELLITE`, `HYBRID`, `TERRAIN`, or `ESRI`.
###Code
Map = folium.Map(location=[40, -100], zoom_start=4)
Map.setOptions('HYBRID')
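# In a notebook, evaluating the Map object as the last expression of a cell
# renders it inline (folium rich display):
Map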
###Output
_____no_output_____
###Markdown
Add Earth Engine Python script
###Code
# Load a Landsat 8 ImageCollection for a single path-row.
collection = ee.ImageCollection('LANDSAT/LC08/C01/T1_TOA') \
.filter(ee.Filter.eq('WRS_PATH', 44)) \
.filter(ee.Filter.eq('WRS_ROW', 34)) \
.filterDate('2014-03-01', '2014-08-01')
print('Collection: ', collection.getInfo())
# Get the number of images.
count = collection.size()
print('Count: ', count.getInfo())
# Get the date range of images in the collection (use a name that does not
# shadow Python's built-in `range`).
date_range = collection.reduceColumns(ee.Reducer.minMax(), ["system:time_start"])
print('Date range: ', ee.Date(date_range.get('min')).getInfo(), ee.Date(date_range.get('max')).getInfo())
# Get statistics for a property of the images in the collection.
sunStats = collection.aggregate_stats('SUN_ELEVATION')
print('Sun elevation statistics: ', sunStats.getInfo())
# Sort by a cloud cover property, get the least cloudy image.
image = ee.Image(collection.sort('CLOUD_COVER').first())
print('Least cloudy image: ', image.getInfo())
# Limit the collection to the 10 most recent images.
recent = collection.sort('system:time_start', False).limit(10)
print('Recent images: ', recent.getInfo())
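# A further sketch using the same metadata: keep only scenes under 20% cloud
# cover with a simple property filter (ee.Filter.lt is standard EE API).
clear = collection.filter(ee.Filter.lt('CLOUD_COVER', 20))
print('Clear scenes: ', clear.size().getInfo())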
###Output
Collection: {'type': 'ImageCollection', 'id': 'LANDSAT/LC08/C01/T1_TOA', 'bands': B1-B11 plus BQA, 'properties': dataset description and visualization defaults, 'features': per-scene metadata (band geometries, radiance/reflectance calibration coefficients, footprints, cloud cover, sun angles) for LC08_044034_20140318, LC08_044034_20140403, LC08_044034_20140419, LC08_044034_20140505, LC08_044034_20140521, ...}
'system:time_start': 1400697934830, 'RADIANCE_ADD_BAND_5': -29.839229583740234, 'REFLECTANCE_MULT_BAND_6': 1.9999999494757503e-05, 'RADIANCE_ADD_BAND_6': -7.420740127563477, 'REFLECTANCE_MULT_BAND_9': 1.9999999494757503e-05, 'PROCESSING_SOFTWARE_VERSION': 'LPGS_2.7.0', 'RADIANCE_ADD_BAND_7': -2.501189947128296, 'REFLECTANCE_MULT_BAND_8': 1.9999999494757503e-05, 'RADIANCE_ADD_BAND_1': -61.279541015625, 'RADIANCE_ADD_BAND_2': -62.75099182128906, 'RADIANCE_ADD_BAND_3': -57.824501037597656, 'REFLECTANCE_MULT_BAND_1': 1.9999999494757503e-05, 'RADIANCE_ADD_BAND_8': -55.18389892578125, 'REFLECTANCE_MULT_BAND_3': 1.9999999494757503e-05, 'RADIANCE_ADD_BAND_9': -11.661849975585938, 'REFLECTANCE_MULT_BAND_2': 1.9999999494757503e-05, 'REFLECTANCE_MULT_BAND_5': 1.9999999494757503e-05, 'REFLECTANCE_MULT_BAND_4': 1.9999999494757503e-05, 'THERMAL_LINES': 7801, 'TIRS_SSM_POSITION_STATUS': 'NOMINAL', 'GRID_CELL_SIZE_THERMAL': 30, 'NADIR_OFFNADIR': 'NADIR', 'RADIANCE_ADD_BAND_11': 0.10000000149011612, 'REQUEST_ID': '0501703064217_00034', 'EARTH_SUN_DISTANCE': 1.0121588706970215, 'TIRS_SSM_MODEL': 'ACTUAL', 'FILE_DATE': 1488873846000, 'SCENE_CENTER_TIME': '18:45:34.8277940Z', 'SUN_ELEVATION': 65.65296173095703, 'BPF_NAME_OLI': 'LO8BPF20140521183116_20140521190315.02', 'RADIANCE_ADD_BAND_10': 0.10000000149011612, 'ROLL_ANGLE': -0.0010000000474974513, 'K1_CONSTANT_BAND_10': 774.8853149414062, 'SATURATION_BAND_1': 'Y', 'SATURATION_BAND_2': 'Y', 'SATURATION_BAND_3': 'Y', 'SATURATION_BAND_4': 'Y', 'SATURATION_BAND_5': 'Y', 'MAP_PROJECTION': 'UTM', 'SATURATION_BAND_6': 'Y', 'SENSOR_ID': 'OLI_TIRS', 'SATURATION_BAND_7': 'Y', 'K1_CONSTANT_BAND_11': 480.8883056640625, 'SATURATION_BAND_8': 'N', 'SATURATION_BAND_9': 'N', 'TARGET_WRS_PATH': 44, 'RADIANCE_MULT_BAND_11': 0.00033420001273043454, 'RADIANCE_MULT_BAND_10': 0.00033420001273043454, 'GROUND_CONTROL_POINTS_MODEL': 404, 'SPACECRAFT_ID': 'LANDSAT_8', 'ELEVATION_SOURCE': 'GLS2000', 'THERMAL_SAMPLES': 7671, 'GROUND_CONTROL_POINTS_VERIFY': 150}}, {'type': 'Image', 'bands': [{'id': 'B1', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 463785, 0, -30, 4264515]}, {'id': 'B2', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 463785, 0, -30, 4264515]}, {'id': 'B3', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 463785, 0, -30, 4264515]}, {'id': 'B4', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 463785, 0, -30, 4264515]}, {'id': 'B5', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 463785, 0, -30, 4264515]}, {'id': 'B6', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 463785, 0, -30, 4264515]}, {'id': 'B7', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 463785, 0, -30, 4264515]}, {'id': 'B8', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [15341, 15601], 'crs': 'EPSG:32610', 'crs_transform': [15, 0, 463792.5, 0, -15, 4264507.5]}, {'id': 'B9', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 
0, 463785, 0, -30, 4264515]}, {'id': 'B10', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 463785, 0, -30, 4264515]}, {'id': 'B11', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 463785, 0, -30, 4264515]}, {'id': 'BQA', 'data_type': {'type': 'PixelType', 'precision': 'int', 'min': 0, 'max': 65535}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 463785, 0, -30, 4264515]}], 'version': 1581772792877035, 'id': 'LANDSAT/LC08/C01/T1_TOA/LC08_044034_20140606', 'properties': {'RADIANCE_MULT_BAND_5': 0.005937200039625168, 'RADIANCE_MULT_BAND_6': 0.0014764999505132437, 'RADIANCE_MULT_BAND_3': 0.011505999602377415, 'RADIANCE_MULT_BAND_4': 0.009702100418508053, 'RADIANCE_MULT_BAND_1': 0.012192999944090843, 'RADIANCE_MULT_BAND_2': 0.01248599961400032, 'K2_CONSTANT_BAND_11': 1201.1441650390625, 'K2_CONSTANT_BAND_10': 1321.078857421875, 'system:footprint': {'type': 'LinearRing', 'coordinates': [[-120.79200539048736, 38.12706906512293], [-120.79323597868374, 38.12758439698958], [-120.82683301978153, 38.13425518072935], [-122.57369124774934, 38.465867462644404], [-122.91132538951987, 38.52663370240754], [-122.91414613702007, 38.526635850439405], [-122.9189327723941, 38.510718361283075], [-123.40419439796977, 36.80678576741027], [-121.36227701906473, 36.41476296352091], [-121.32989516455781, 36.40824848906167], [-121.20432618246714, 36.815494543804164], [-121.07428782575109, 37.232255532839595], [-120.95966651326353, 37.59672218968956], [-120.90596782826022, 37.76651090203559], [-120.86494805861443, 37.895947164272634], [-120.83393920808882, 37.993514542680224], [-120.82433446488996, 38.02375043851124], [-120.79204501354904, 38.125755061557996], [-120.79200539048736, 38.12706906512293]]}, 'REFLECTIVE_SAMPLES': 7671, 'SUN_AZIMUTH': 124.43635559082031, 'CPF_NAME': 'LC08CPF_20140401_20140630_01.01', 'DATE_ACQUIRED': '2014-06-06', 'ELLIPSOID': 'WGS84', 'google:registration_offset_x': 0, 'google:registration_offset_y': 0, 'STATION_ID': 'LGN', 'RESAMPLING_OPTION': 'CUBIC_CONVOLUTION', 'ORIENTATION': 'NORTH_UP', 'WRS_ROW': 34, 'RADIANCE_MULT_BAND_9': 0.002320399973541498, 'TARGET_WRS_ROW': 34, 'RADIANCE_MULT_BAND_7': 0.0004976699710823596, 'RADIANCE_MULT_BAND_8': 0.010979999788105488, 'IMAGE_QUALITY_TIRS': 9, 'TRUNCATION_OLI': 'UPPER', 'CLOUD_COVER': 35.709999084472656, 'GEOMETRIC_RMSE_VERIFY': 2.7200000286102295, 'COLLECTION_CATEGORY': 'T1', 'GRID_CELL_SIZE_REFLECTIVE': 30, 'CLOUD_COVER_LAND': 3.930000066757202, 'GEOMETRIC_RMSE_MODEL': 5.419000148773193, 'COLLECTION_NUMBER': 1, 'IMAGE_QUALITY_OLI': 9, 'LANDSAT_SCENE_ID': 'LC80440342014157LGN01', 'WRS_PATH': 44, 'google:registration_count': 0, 'PANCHROMATIC_SAMPLES': 15341, 'PANCHROMATIC_LINES': 15601, 'GEOMETRIC_RMSE_MODEL_Y': 3.4519999027252197, 'REFLECTIVE_LINES': 7801, 'TIRS_STRAY_LIGHT_CORRECTION_SOURCE': 'TIRS', 'GEOMETRIC_RMSE_MODEL_X': 4.177000045776367, 'system:asset_size': 1264461529, 'system:index': 'LC08_044034_20140606', 'REFLECTANCE_ADD_BAND_1': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_2': -0.10000000149011612, 'DATUM': 'WGS84', 'REFLECTANCE_ADD_BAND_3': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_4': -0.10000000149011612, 'RLUT_FILE_NAME': 'LC08RLUT_20130211_20150302_01_11.h5', 'REFLECTANCE_ADD_BAND_5': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_6': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_7': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_8': 
-0.10000000149011612, 'BPF_NAME_TIRS': 'LT8BPF20140606181212_20140606190417.01', 'GROUND_CONTROL_POINTS_VERSION': 4, 'DATA_TYPE': 'L1TP', 'UTM_ZONE': 10, 'LANDSAT_PRODUCT_ID': 'LC08_L1TP_044034_20140606_20170305_01_T1', 'REFLECTANCE_ADD_BAND_9': -0.10000000149011612, 'google:registration_ratio': 0, 'GRID_CELL_SIZE_PANCHROMATIC': 15, 'RADIANCE_ADD_BAND_4': -48.51054000854492, 'REFLECTANCE_MULT_BAND_7': 1.9999999494757503e-05, 'system:time_start': 1402080344240, 'RADIANCE_ADD_BAND_5': -29.6860294342041, 'REFLECTANCE_MULT_BAND_6': 1.9999999494757503e-05, 'RADIANCE_ADD_BAND_6': -7.382649898529053, 'REFLECTANCE_MULT_BAND_9': 1.9999999494757503e-05, 'PROCESSING_SOFTWARE_VERSION': 'LPGS_2.7.0', 'RADIANCE_ADD_BAND_7': -2.4883499145507812, 'REFLECTANCE_MULT_BAND_8': 1.9999999494757503e-05, 'RADIANCE_ADD_BAND_1': -60.96493148803711, 'RADIANCE_ADD_BAND_2': -62.428829193115234, 'RADIANCE_ADD_BAND_3': -57.52762985229492, 'REFLECTANCE_MULT_BAND_1': 1.9999999494757503e-05, 'RADIANCE_ADD_BAND_8': -54.90058135986328, 'REFLECTANCE_MULT_BAND_3': 1.9999999494757503e-05, 'RADIANCE_ADD_BAND_9': -11.601969718933105, 'REFLECTANCE_MULT_BAND_2': 1.9999999494757503e-05, 'REFLECTANCE_MULT_BAND_5': 1.9999999494757503e-05, 'REFLECTANCE_MULT_BAND_4': 1.9999999494757503e-05, 'THERMAL_LINES': 7801, 'TIRS_SSM_POSITION_STATUS': 'NOMINAL', 'GRID_CELL_SIZE_THERMAL': 30, 'NADIR_OFFNADIR': 'NADIR', 'RADIANCE_ADD_BAND_11': 0.10000000149011612, 'REQUEST_ID': '0501703043447_00036', 'EARTH_SUN_DISTANCE': 1.014767050743103, 'TIRS_SSM_MODEL': 'ACTUAL', 'FILE_DATE': 1488689158000, 'SCENE_CENTER_TIME': '18:45:44.2439160Z', 'SUN_ELEVATION': 67.10252380371094, 'BPF_NAME_OLI': 'LO8BPF20140606171321_20140606190324.01', 'RADIANCE_ADD_BAND_10': 0.10000000149011612, 'ROLL_ANGLE': -0.0010000000474974513, 'K1_CONSTANT_BAND_10': 774.8853149414062, 'SATURATION_BAND_1': 'Y', 'SATURATION_BAND_2': 'Y', 'SATURATION_BAND_3': 'Y', 'SATURATION_BAND_4': 'Y', 'SATURATION_BAND_5': 'Y', 'MAP_PROJECTION': 'UTM', 'SATURATION_BAND_6': 'Y', 'SENSOR_ID': 'OLI_TIRS', 'SATURATION_BAND_7': 'Y', 'K1_CONSTANT_BAND_11': 480.8883056640625, 'SATURATION_BAND_8': 'N', 'SATURATION_BAND_9': 'N', 'TARGET_WRS_PATH': 44, 'RADIANCE_MULT_BAND_11': 0.00033420001273043454, 'RADIANCE_MULT_BAND_10': 0.00033420001273043454, 'GROUND_CONTROL_POINTS_MODEL': 549, 'SPACECRAFT_ID': 'LANDSAT_8', 'ELEVATION_SOURCE': 'GLS2000', 'THERMAL_SAMPLES': 7671, 'GROUND_CONTROL_POINTS_VERIFY': 192}}, {'type': 'Image', 'bands': [{'id': 'B1', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 464985, 0, -30, 4264515]}, {'id': 'B2', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 464985, 0, -30, 4264515]}, {'id': 'B3', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 464985, 0, -30, 4264515]}, {'id': 'B4', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 464985, 0, -30, 4264515]}, {'id': 'B5', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 464985, 0, -30, 4264515]}, {'id': 'B6', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 464985, 0, -30, 4264515]}, {'id': 'B7', 'data_type': {'type': 'PixelType', 'precision': 
'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 464985, 0, -30, 4264515]}, {'id': 'B8', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [15341, 15601], 'crs': 'EPSG:32610', 'crs_transform': [15, 0, 464992.5, 0, -15, 4264507.5]}, {'id': 'B9', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 464985, 0, -30, 4264515]}, {'id': 'B10', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 464985, 0, -30, 4264515]}, {'id': 'B11', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 464985, 0, -30, 4264515]}, {'id': 'BQA', 'data_type': {'type': 'PixelType', 'precision': 'int', 'min': 0, 'max': 65535}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 464985, 0, -30, 4264515]}], 'version': 1581772792877035, 'id': 'LANDSAT/LC08/C01/T1_TOA/LC08_044034_20140622', 'properties': {'RADIANCE_MULT_BAND_5': 0.005919000133872032, 'RADIANCE_MULT_BAND_6': 0.0014720000326633453, 'RADIANCE_MULT_BAND_3': 0.011470000259578228, 'RADIANCE_MULT_BAND_4': 0.00967239961028099, 'RADIANCE_MULT_BAND_1': 0.01215600036084652, 'RADIANCE_MULT_BAND_2': 0.01244799979031086, 'K2_CONSTANT_BAND_11': 1201.1441650390625, 'K2_CONSTANT_BAND_10': 1321.078857421875, 'system:footprint': {'type': 'LinearRing', 'coordinates': [[-121.31788298539182, 36.408586408656575], [-121.31606048880933, 36.40856066998137], [-121.31430578209141, 36.41384029313054], [-121.19200158675721, 36.81044051106826], [-121.0698899591177, 37.20193823329732], [-120.93870690267133, 37.61909130033321], [-120.89293182605338, 37.76384529883042], [-120.83512328469709, 37.946118996073274], [-120.81773649437956, 38.00098066904156], [-120.7804031777974, 38.11877040222991], [-120.77836404766627, 38.12549776014683], [-120.77830846404605, 38.127328891154846], [-120.8524461141277, 38.14202547398031], [-122.7997909930455, 38.50911447061385], [-122.89773302105861, 38.526622656657345], [-122.90027762321128, 38.526624804291615], [-122.90672528283095, 38.50462571143406], [-123.39027158134067, 36.80670618253543], [-121.39230131401504, 36.42355089690084], [-121.31788298539182, 36.408586408656575]]}, 'REFLECTIVE_SAMPLES': 7671, 'SUN_AZIMUTH': 121.76666259765625, 'CPF_NAME': 'LC08CPF_20140401_20140630_01.01', 'DATE_ACQUIRED': '2014-06-22', 'ELLIPSOID': 'WGS84', 'google:registration_offset_x': -153.57119750976562, 'google:registration_offset_y': -44.11845779418945, 'STATION_ID': 'LGN', 'RESAMPLING_OPTION': 'CUBIC_CONVOLUTION', 'ORIENTATION': 'NORTH_UP', 'WRS_ROW': 34, 'RADIANCE_MULT_BAND_9': 0.002313300035893917, 'TARGET_WRS_ROW': 34, 'RADIANCE_MULT_BAND_7': 0.0004961499944329262, 'RADIANCE_MULT_BAND_8': 0.010947000235319138, 'IMAGE_QUALITY_TIRS': 9, 'TRUNCATION_OLI': 'UPPER', 'CLOUD_COVER': 33.029998779296875, 'GEOMETRIC_RMSE_VERIFY': 2.681999921798706, 'COLLECTION_CATEGORY': 'T1', 'GRID_CELL_SIZE_REFLECTIVE': 30, 'CLOUD_COVER_LAND': 3.390000104904175, 'GEOMETRIC_RMSE_MODEL': 5.414999961853027, 'COLLECTION_NUMBER': 1, 'IMAGE_QUALITY_OLI': 9, 'LANDSAT_SCENE_ID': 'LC80440342014173LGN01', 'WRS_PATH': 44, 'google:registration_count': 57, 'PANCHROMATIC_SAMPLES': 15341, 'PANCHROMATIC_LINES': 15601, 'GEOMETRIC_RMSE_MODEL_Y': 3.4539999961853027, 'REFLECTIVE_LINES': 7801, 'TIRS_STRAY_LIGHT_CORRECTION_SOURCE': 'TIRS', 'GEOMETRIC_RMSE_MODEL_X': 4.170000076293945, 
'system:asset_size': 1269718296, 'system:index': 'LC08_044034_20140622', 'REFLECTANCE_ADD_BAND_1': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_2': -0.10000000149011612, 'DATUM': 'WGS84', 'REFLECTANCE_ADD_BAND_3': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_4': -0.10000000149011612, 'RLUT_FILE_NAME': 'LC08RLUT_20130211_20150302_01_11.h5', 'REFLECTANCE_ADD_BAND_5': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_6': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_7': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_8': -0.10000000149011612, 'BPF_NAME_TIRS': 'LT8BPF20140622181215_20140622190420.01', 'GROUND_CONTROL_POINTS_VERSION': 4, 'DATA_TYPE': 'L1TP', 'UTM_ZONE': 10, 'LANDSAT_PRODUCT_ID': 'LC08_L1TP_044034_20140622_20170304_01_T1', 'REFLECTANCE_ADD_BAND_9': -0.10000000149011612, 'google:registration_ratio': 0.36538460850715637, 'GRID_CELL_SIZE_PANCHROMATIC': 15, 'RADIANCE_ADD_BAND_4': -48.362091064453125, 'REFLECTANCE_MULT_BAND_7': 1.9999999494757503e-05, 'system:time_start': 1403462747540, 'RADIANCE_ADD_BAND_5': -29.595190048217773, 'REFLECTANCE_MULT_BAND_6': 1.9999999494757503e-05, 'RADIANCE_ADD_BAND_6': -7.360050201416016, 'REFLECTANCE_MULT_BAND_9': 1.9999999494757503e-05, 'PROCESSING_SOFTWARE_VERSION': 'LPGS_2.7.0', 'RADIANCE_ADD_BAND_7': -2.4807300567626953, 'REFLECTANCE_MULT_BAND_8': 1.9999999494757503e-05, 'RADIANCE_ADD_BAND_1': -60.778358459472656, 'RADIANCE_ADD_BAND_2': -62.2377815246582, 'RADIANCE_ADD_BAND_3': -57.35158157348633, 'REFLECTANCE_MULT_BAND_1': 1.9999999494757503e-05, 'RADIANCE_ADD_BAND_8': -54.73257064819336, 'REFLECTANCE_MULT_BAND_3': 1.9999999494757503e-05, 'RADIANCE_ADD_BAND_9': -11.5664701461792, 'REFLECTANCE_MULT_BAND_2': 1.9999999494757503e-05, 'REFLECTANCE_MULT_BAND_5': 1.9999999494757503e-05, 'REFLECTANCE_MULT_BAND_4': 1.9999999494757503e-05, 'THERMAL_LINES': 7801, 'TIRS_SSM_POSITION_STATUS': 'NOMINAL', 'GRID_CELL_SIZE_THERMAL': 30, 'NADIR_OFFNADIR': 'NADIR', 'RADIANCE_ADD_BAND_11': 0.10000000149011612, 'REQUEST_ID': '0501703043315_00059', 'EARTH_SUN_DISTANCE': 1.016323447227478, 'TIRS_SSM_MODEL': 'ACTUAL', 'FILE_DATE': 1488670339000, 'SCENE_CENTER_TIME': '18:45:47.5389440Z', 'SUN_ELEVATION': 67.07411193847656, 'BPF_NAME_OLI': 'LO8BPF20140622182144_20140622190327.01', 'RADIANCE_ADD_BAND_10': 0.10000000149011612, 'ROLL_ANGLE': 0, 'K1_CONSTANT_BAND_10': 774.8853149414062, 'SATURATION_BAND_1': 'N', 'SATURATION_BAND_2': 'Y', 'SATURATION_BAND_3': 'Y', 'SATURATION_BAND_4': 'Y', 'SATURATION_BAND_5': 'Y', 'MAP_PROJECTION': 'UTM', 'SATURATION_BAND_6': 'Y', 'SENSOR_ID': 'OLI_TIRS', 'SATURATION_BAND_7': 'Y', 'K1_CONSTANT_BAND_11': 480.8883056640625, 'SATURATION_BAND_8': 'N', 'SATURATION_BAND_9': 'N', 'TARGET_WRS_PATH': 44, 'RADIANCE_MULT_BAND_11': 0.00033420001273043454, 'RADIANCE_MULT_BAND_10': 0.00033420001273043454, 'GROUND_CONTROL_POINTS_MODEL': 558, 'SPACECRAFT_ID': 'LANDSAT_8', 'ELEVATION_SOURCE': 'GLS2000', 'THERMAL_SAMPLES': 7671, 'GROUND_CONTROL_POINTS_VERIFY': 215}}, {'type': 'Image', 'bands': [{'id': 'B1', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 464385, 0, -30, 4264515]}, {'id': 'B2', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 464385, 0, -30, 4264515]}, {'id': 'B3', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 464385, 0, -30, 4264515]}, {'id': 'B4', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 
'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 464385, 0, -30, 4264515]}, {'id': 'B5', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 464385, 0, -30, 4264515]}, {'id': 'B6', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 464385, 0, -30, 4264515]}, {'id': 'B7', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 464385, 0, -30, 4264515]}, {'id': 'B8', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [15341, 15601], 'crs': 'EPSG:32610', 'crs_transform': [15, 0, 464392.5, 0, -15, 4264507.5]}, {'id': 'B9', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 464385, 0, -30, 4264515]}, {'id': 'B10', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 464385, 0, -30, 4264515]}, {'id': 'B11', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 464385, 0, -30, 4264515]}, {'id': 'BQA', 'data_type': {'type': 'PixelType', 'precision': 'int', 'min': 0, 'max': 65535}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 464385, 0, -30, 4264515]}], 'version': 1581772792877035, 'id': 'LANDSAT/LC08/C01/T1_TOA/LC08_044034_20140708', 'properties': {'RADIANCE_MULT_BAND_5': 0.005915500223636627, 'RADIANCE_MULT_BAND_6': 0.0014711000258103013, 'RADIANCE_MULT_BAND_3': 0.011463000439107418, 'RADIANCE_MULT_BAND_4': 0.00966660026460886, 'RADIANCE_MULT_BAND_1': 0.012148000299930573, 'RADIANCE_MULT_BAND_2': 0.012439999729394913, 'K2_CONSTANT_BAND_11': 1201.1441650390625, 'K2_CONSTANT_BAND_10': 1321.078857421875, 'system:footprint': {'type': 'LinearRing', 'coordinates': [[-121.05901267311019, 38.1813072066471], [-122.67195531509144, 38.48483894042248], [-122.90730152252529, 38.52707032726991], [-122.90792970602998, 38.52608585671175], [-122.91360364873563, 38.50706451546646], [-123.39734192537979, 36.8083407130841], [-123.39733405223458, 36.80681601889492], [-123.39114513279036, 36.8055936364345], [-123.34317991176952, 36.79681686843965], [-122.28073257380132, 36.59717466111698], [-121.36957092975639, 36.417575938065966], [-121.32540815303872, 36.40869214276654], [-121.32304292059108, 36.40865900248354], [-121.19650818732099, 36.81902664136925], [-121.07109421952906, 37.221019713169355], [-120.94367715094019, 37.62606705102397], [-120.90082928429048, 37.761553141330744], [-120.84740670701625, 37.93009641124127], [-120.82257019700445, 38.00830842766878], [-120.78499155821282, 38.12676852456719], [-120.78581606001764, 38.12745169022067], [-121.05901267311019, 38.1813072066471]]}, 'REFLECTIVE_SAMPLES': 7671, 'SUN_AZIMUTH': 122.4483642578125, 'CPF_NAME': 'LC08CPF_20140701_20140930_01.01', 'DATE_ACQUIRED': '2014-07-08', 'ELLIPSOID': 'WGS84', 'google:registration_offset_x': 537.4625854492188, 'google:registration_offset_y': 10.817861557006836, 'STATION_ID': 'LGN', 'RESAMPLING_OPTION': 'CUBIC_CONVOLUTION', 'ORIENTATION': 'NORTH_UP', 'WRS_ROW': 34, 'RADIANCE_MULT_BAND_9': 0.0023119000252336264, 'TARGET_WRS_ROW': 34, 'RADIANCE_MULT_BAND_7': 0.0004958499921485782, 'RADIANCE_MULT_BAND_8': 0.010940000414848328, 'IMAGE_QUALITY_TIRS': 9, 'TRUNCATION_OLI': 'UPPER', 'CLOUD_COVER': 
39.97999954223633, 'GEOMETRIC_RMSE_VERIFY': 2.5929999351501465, 'COLLECTION_CATEGORY': 'T1', 'GRID_CELL_SIZE_REFLECTIVE': 30, 'CLOUD_COVER_LAND': 12.0600004196167, 'GEOMETRIC_RMSE_MODEL': 5.275000095367432, 'COLLECTION_NUMBER': 1, 'IMAGE_QUALITY_OLI': 9, 'LANDSAT_SCENE_ID': 'LC80440342014189LGN01', 'WRS_PATH': 44, 'google:registration_count': 96, 'PANCHROMATIC_SAMPLES': 15341, 'PANCHROMATIC_LINES': 15601, 'GEOMETRIC_RMSE_MODEL_Y': 3.4619998931884766, 'REFLECTIVE_LINES': 7801, 'TIRS_STRAY_LIGHT_CORRECTION_SOURCE': 'TIRS', 'GEOMETRIC_RMSE_MODEL_X': 3.9800000190734863, 'system:asset_size': 1303038285, 'system:index': 'LC08_044034_20140708', 'REFLECTANCE_ADD_BAND_1': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_2': -0.10000000149011612, 'DATUM': 'WGS84', 'REFLECTANCE_ADD_BAND_3': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_4': -0.10000000149011612, 'RLUT_FILE_NAME': 'LC08RLUT_20130211_20150302_01_11.h5', 'REFLECTANCE_ADD_BAND_5': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_6': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_7': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_8': -0.10000000149011612, 'BPF_NAME_TIRS': 'LT8BPF20140708181845_20140708190428.01', 'GROUND_CONTROL_POINTS_VERSION': 4, 'DATA_TYPE': 'L1TP', 'UTM_ZONE': 10, 'LANDSAT_PRODUCT_ID': 'LC08_L1TP_044034_20140708_20170304_01_T1', 'REFLECTANCE_ADD_BAND_9': -0.10000000149011612, 'google:registration_ratio': 0.6486486196517944, 'GRID_CELL_SIZE_PANCHROMATIC': 15, 'RADIANCE_ADD_BAND_4': -48.33314895629883, 'REFLECTANCE_MULT_BAND_7': 1.9999999494757503e-05, 'system:time_start': 1404845155330, 'RADIANCE_ADD_BAND_5': -29.57748031616211, 'REFLECTANCE_MULT_BAND_6': 1.9999999494757503e-05, 'RADIANCE_ADD_BAND_6': -7.355649948120117, 'REFLECTANCE_MULT_BAND_9': 1.9999999494757503e-05, 'PROCESSING_SOFTWARE_VERSION': 'LPGS_2.7.0', 'RADIANCE_ADD_BAND_7': -2.479249954223633, 'REFLECTANCE_MULT_BAND_8': 1.9999999494757503e-05, 'RADIANCE_ADD_BAND_1': -60.74198913574219, 'RADIANCE_ADD_BAND_2': -62.200538635253906, 'RADIANCE_ADD_BAND_3': -57.31726837158203, 'REFLECTANCE_MULT_BAND_1': 1.9999999494757503e-05, 'RADIANCE_ADD_BAND_8': -54.69982147216797, 'REFLECTANCE_MULT_BAND_3': 1.9999999494757503e-05, 'RADIANCE_ADD_BAND_9': -11.559550285339355, 'REFLECTANCE_MULT_BAND_2': 1.9999999494757503e-05, 'REFLECTANCE_MULT_BAND_5': 1.9999999494757503e-05, 'REFLECTANCE_MULT_BAND_4': 1.9999999494757503e-05, 'THERMAL_LINES': 7801, 'TIRS_SSM_POSITION_STATUS': 'NOMINAL', 'GRID_CELL_SIZE_THERMAL': 30, 'NADIR_OFFNADIR': 'NADIR', 'RADIANCE_ADD_BAND_11': 0.10000000149011612, 'REQUEST_ID': '0501703043141_00057', 'EARTH_SUN_DISTANCE': 1.016627550125122, 'TIRS_SSM_MODEL': 'ACTUAL', 'FILE_DATE': 1488642210000, 'SCENE_CENTER_TIME': '18:45:55.3336140Z', 'SUN_ELEVATION': 65.8777847290039, 'BPF_NAME_OLI': 'LO8BPF20140708182239_20140708190335.01', 'RADIANCE_ADD_BAND_10': 0.10000000149011612, 'ROLL_ANGLE': 0, 'K1_CONSTANT_BAND_10': 774.8853149414062, 'SATURATION_BAND_1': 'N', 'SATURATION_BAND_2': 'Y', 'SATURATION_BAND_3': 'Y', 'SATURATION_BAND_4': 'Y', 'SATURATION_BAND_5': 'Y', 'MAP_PROJECTION': 'UTM', 'SATURATION_BAND_6': 'Y', 'SENSOR_ID': 'OLI_TIRS', 'SATURATION_BAND_7': 'Y', 'K1_CONSTANT_BAND_11': 480.8883056640625, 'SATURATION_BAND_8': 'N', 'SATURATION_BAND_9': 'N', 'TARGET_WRS_PATH': 44, 'RADIANCE_MULT_BAND_11': 0.00033420001273043454, 'RADIANCE_MULT_BAND_10': 0.00033420001273043454, 'GROUND_CONTROL_POINTS_MODEL': 506, 'SPACECRAFT_ID': 'LANDSAT_8', 'ELEVATION_SOURCE': 'GLS2000', 'THERMAL_SAMPLES': 7671, 'GROUND_CONTROL_POINTS_VERIFY': 187}}, {'type': 'Image', 'bands': [{'id': 'B1', 
'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 465585, 0, -30, 4264515]}, {'id': 'B2', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 465585, 0, -30, 4264515]}, {'id': 'B3', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 465585, 0, -30, 4264515]}, {'id': 'B4', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 465585, 0, -30, 4264515]}, {'id': 'B5', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 465585, 0, -30, 4264515]}, {'id': 'B6', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 465585, 0, -30, 4264515]}, {'id': 'B7', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 465585, 0, -30, 4264515]}, {'id': 'B8', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [15341, 15601], 'crs': 'EPSG:32610', 'crs_transform': [15, 0, 465592.5, 0, -15, 4264507.5]}, {'id': 'B9', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 465585, 0, -30, 4264515]}, {'id': 'B10', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 465585, 0, -30, 4264515]}, {'id': 'B11', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 465585, 0, -30, 4264515]}, {'id': 'BQA', 'data_type': {'type': 'PixelType', 'precision': 'int', 'min': 0, 'max': 65535}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 465585, 0, -30, 4264515]}], 'version': 1581772792877035, 'id': 'LANDSAT/LC08/C01/T1_TOA/LC08_044034_20140724', 'properties': {'RADIANCE_MULT_BAND_5': 0.005925099831074476, 'RADIANCE_MULT_BAND_6': 0.0014735000440850854, 'RADIANCE_MULT_BAND_3': 0.011482000350952148, 'RADIANCE_MULT_BAND_4': 0.00968219991773367, 'RADIANCE_MULT_BAND_1': 0.01216800045222044, 'RADIANCE_MULT_BAND_2': 0.01245999988168478, 'K2_CONSTANT_BAND_11': 1201.1441650390625, 'K2_CONSTANT_BAND_10': 1321.078857421875, 'system:footprint': {'type': 'LinearRing', 'coordinates': [[-120.76979738859893, 38.12703313441971], [-120.77158274982695, 38.12754115630432], [-121.42134145512932, 38.25446242506864], [-122.22152328015193, 38.40525302827857], [-122.89018639673915, 38.5266157865438], [-122.89242341654155, 38.526617857573015], [-122.89466073063308, 38.519621963160894], [-123.38187286142927, 36.80872337128997], [-123.38186259045791, 36.806647917217006], [-123.35901116184863, 36.80244946066433], [-122.88546161915531, 36.71490670011608], [-121.3092309147788, 36.40846418437395], [-121.30782254819886, 36.40844420946354], [-121.08696039686296, 37.12150541970936], [-121.06667332030511, 37.186300761679455], [-120.9265815780102, 37.63183285571133], [-120.88231915679422, 37.77176559071555], [-120.83617669320071, 37.917319414649754], [-120.8201155519523, 37.96798246241547], [-120.7756360373179, 38.10840000147115], [-120.76979738859893, 38.12703313441971]]}, 'REFLECTIVE_SAMPLES': 7671, 'SUN_AZIMUTH': 126.32495880126953, 
'CPF_NAME': 'LC08CPF_20140701_20140930_01.01', 'DATE_ACQUIRED': '2014-07-24', 'ELLIPSOID': 'WGS84', 'google:registration_offset_x': 407.9683837890625, 'google:registration_offset_y': -124.7548828125, 'STATION_ID': 'LGN', 'RESAMPLING_OPTION': 'CUBIC_CONVOLUTION', 'ORIENTATION': 'NORTH_UP', 'WRS_ROW': 34, 'RADIANCE_MULT_BAND_9': 0.00231559993699193, 'TARGET_WRS_ROW': 34, 'RADIANCE_MULT_BAND_7': 0.0004966499982401729, 'RADIANCE_MULT_BAND_8': 0.010958000086247921, 'IMAGE_QUALITY_TIRS': 9, 'TRUNCATION_OLI': 'UPPER', 'CLOUD_COVER': 0.3199999928474426, 'GEOMETRIC_RMSE_VERIFY': 2.700000047683716, 'COLLECTION_CATEGORY': 'T1', 'GRID_CELL_SIZE_REFLECTIVE': 30, 'CLOUD_COVER_LAND': 0.23000000417232513, 'GEOMETRIC_RMSE_MODEL': 5.454999923706055, 'COLLECTION_NUMBER': 1, 'IMAGE_QUALITY_OLI': 9, 'LANDSAT_SCENE_ID': 'LC80440342014205LGN01', 'WRS_PATH': 44, 'google:registration_count': 10, 'PANCHROMATIC_SAMPLES': 15341, 'PANCHROMATIC_LINES': 15601, 'GEOMETRIC_RMSE_MODEL_Y': 3.703000068664551, 'REFLECTIVE_LINES': 7801, 'TIRS_STRAY_LIGHT_CORRECTION_SOURCE': 'TIRS', 'GEOMETRIC_RMSE_MODEL_X': 4.00600004196167, 'system:asset_size': 1201420225, 'system:index': 'LC08_044034_20140724', 'REFLECTANCE_ADD_BAND_1': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_2': -0.10000000149011612, 'DATUM': 'WGS84', 'REFLECTANCE_ADD_BAND_3': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_4': -0.10000000149011612, 'RLUT_FILE_NAME': 'LC08RLUT_20130211_20150302_01_11.h5', 'REFLECTANCE_ADD_BAND_5': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_6': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_7': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_8': -0.10000000149011612, 'BPF_NAME_TIRS': 'LT8BPF20140724181847_20140724190430.01', 'GROUND_CONTROL_POINTS_VERSION': 4, 'DATA_TYPE': 'L1TP', 'UTM_ZONE': 10, 'LANDSAT_PRODUCT_ID': 'LC08_L1TP_044034_20140724_20170304_01_T1', 'REFLECTANCE_ADD_BAND_9': -0.10000000149011612, 'google:registration_ratio': 0.3333333432674408, 'GRID_CELL_SIZE_PANCHROMATIC': 15, 'RADIANCE_ADD_BAND_4': -48.41123962402344, 'REFLECTANCE_MULT_BAND_7': 1.9999999494757503e-05, 'system:time_start': 1406227557220, 'RADIANCE_ADD_BAND_5': -29.625259399414062, 'REFLECTANCE_MULT_BAND_6': 1.9999999494757503e-05, 'RADIANCE_ADD_BAND_6': -7.36752986907959, 'REFLECTANCE_MULT_BAND_9': 1.9999999494757503e-05, 'PROCESSING_SOFTWARE_VERSION': 'LPGS_2.7.0', 'RADIANCE_ADD_BAND_7': -2.4832499027252197, 'REFLECTANCE_MULT_BAND_8': 1.9999999494757503e-05, 'RADIANCE_ADD_BAND_1': -60.84012985229492, 'RADIANCE_ADD_BAND_2': -62.301029205322266, 'RADIANCE_ADD_BAND_3': -57.40987014770508, 'REFLECTANCE_MULT_BAND_1': 1.9999999494757503e-05, 'RADIANCE_ADD_BAND_8': -54.78820037841797, 'REFLECTANCE_MULT_BAND_3': 1.9999999494757503e-05, 'RADIANCE_ADD_BAND_9': -11.57822036743164, 'REFLECTANCE_MULT_BAND_2': 1.9999999494757503e-05, 'REFLECTANCE_MULT_BAND_5': 1.9999999494757503e-05, 'REFLECTANCE_MULT_BAND_4': 1.9999999494757503e-05, 'THERMAL_LINES': 7801, 'TIRS_SSM_POSITION_STATUS': 'NOMINAL', 'GRID_CELL_SIZE_THERMAL': 30, 'NADIR_OFFNADIR': 'NADIR', 'RADIANCE_ADD_BAND_11': 0.10000000149011612, 'REQUEST_ID': '0501703043005_00057', 'EARTH_SUN_DISTANCE': 1.0158073902130127, 'TIRS_SSM_MODEL': 'ACTUAL', 'FILE_DATE': 1488621091000, 'SCENE_CENTER_TIME': '18:45:57.2197370Z', 'SUN_ELEVATION': 63.77280807495117, 'BPF_NAME_OLI': 'LO8BPF20140724182241_20140724190337.01', 'RADIANCE_ADD_BAND_10': 0.10000000149011612, 'ROLL_ANGLE': 0, 'K1_CONSTANT_BAND_10': 774.8853149414062, 'SATURATION_BAND_1': 'Y', 'SATURATION_BAND_2': 'Y', 'SATURATION_BAND_3': 'Y', 'SATURATION_BAND_4': 'Y', 
'SATURATION_BAND_5': 'Y', 'MAP_PROJECTION': 'UTM', 'SATURATION_BAND_6': 'Y', 'SENSOR_ID': 'OLI_TIRS', 'SATURATION_BAND_7': 'Y', 'K1_CONSTANT_BAND_11': 480.8883056640625, 'SATURATION_BAND_8': 'N', 'SATURATION_BAND_9': 'N', 'TARGET_WRS_PATH': 44, 'RADIANCE_MULT_BAND_11': 0.00033420001273043454, 'RADIANCE_MULT_BAND_10': 0.00033420001273043454, 'GROUND_CONTROL_POINTS_MODEL': 568, 'SPACECRAFT_ID': 'LANDSAT_8', 'ELEVATION_SOURCE': 'GLS2000', 'THERMAL_SAMPLES': 7671, 'GROUND_CONTROL_POINTS_VERIFY': 213}}]}
Count: 9
Date range: {'type': 'Date', 'value': 1395168392050} {'type': 'Date', 'value': 1406227557220} (epoch milliseconds, i.e. 2014-03-18T18:46:32Z to 2014-07-24T18:45:57Z)
Sun elevation statistics: {'type': 'DataDictionary', 'values': {'max': 67.10252380371094, 'mean': 61.01998392740885, 'min': 46.471065521240234, 'sample_sd': 7.251804209519804, 'sample_var': 52.58866429320915, 'sum': 549.1798553466797, 'sum_sq': 33931.65526085679, 'total_count': 9, 'total_sd': 6.837066576518139, 'total_var': 46.74547937174147, 'valid_count': 9, 'weight_sum': 9, 'weighted_sum': 549.1798553466797}}
Least cloudy image: LANDSAT/LC08/C01/T1_TOA/LC08_044034_20140318 (DATE_ACQUIRED 2014-03-18, CLOUD_COVER 0.06, CLOUD_COVER_LAND 0.1, SUN_ELEVATION 46.47) [... full band list (B1-B11 and BQA in EPSG:32610) and scene-property dump truncated for readability ...]
Recent images: ImageCollection LANDSAT/LC08/C01/T1_TOA with bands B1-B11 (float) and BQA (int, 0-65535) [... the printout goes on to embed the full Earth Engine catalog description of the collection -- band wavelengths and resolutions, the BQA bitmask definition (designated fill, terrain occlusion, radiometric saturation, and cloud / cloud-shadow / snow-ice / cirrus confidence bits) and the glossary of per-scene properties -- and is cut off mid-stream in the source ...]
= month</li>\n<li>dd = day</li>\n</ul></td>\n</tr>\n<tr>\n<td>RESAMPLING_OPTION</td>\n<td>STRING</td>\n<td><p>Resampling option used in creating the image.</p></td>\n</tr>\n<tr>\n<td>RLUT_FILE_NAME</td>\n<td>STRING</td>\n<td><p>The file name for the Response Linearization Lookup Table (RLUT) used to generate the product, if applicable.</p></td>\n</tr>\n<tr>\n<td>ROLL_ANGLE</td>\n<td>DOUBLE</td>\n<td><p>The amount of spacecraft roll angle at the scene center.</p></td>\n</tr>\n<tr>\n<td>SATURATION_BAND_1</td>\n<td>STRING</td>\n<td><p>Flag indicating saturated pixels for band 1 ('Y'/'N')</p></td>\n</tr>\n<tr>\n<td>SATURATION_BAND_10</td>\n<td>STRING</td>\n<td><p>Flag indicating saturated pixels for band 10 ('Y'/'N')</p></td>\n</tr>\n<tr>\n<td>SATURATION_BAND_11</td>\n<td>STRING</td>\n<td><p>Flag indicating saturated pixels for band 11 ('Y'/'N')</p></td>\n</tr>\n<tr>\n<td>SATURATION_BAND_2</td>\n<td>STRING</td>\n<td><p>Flag indicating saturated pixels for band 2 ('Y'/'N')</p></td>\n</tr>\n<tr>\n<td>SATURATION_BAND_3</td>\n<td>STRING</td>\n<td><p>Flag indicating saturated pixels for band 3 ('Y'/'N')</p></td>\n</tr>\n<tr>\n<td>SATURATION_BAND_4</td>\n<td>STRING</td>\n<td><p>Flag indicating saturated pixels for band 4 ('Y'/'N')</p></td>\n</tr>\n<tr>\n<td>SATURATION_BAND_5</td>\n<td>STRING</td>\n<td><p>Flag indicating saturated pixels for band 5 ('Y'/'N')</p></td>\n</tr>\n<tr>\n<td>SATURATION_BAND_6</td>\n<td>STRING</td>\n<td><p>Flag indicating saturated pixels for band 6 ('Y'/'N')</p></td>\n</tr>\n<tr>\n<td>SATURATION_BAND_7</td>\n<td>STRING</td>\n<td><p>Flag indicating saturated pixels for band 7 ('Y'/'N')</p></td>\n</tr>\n<tr>\n<td>SATURATION_BAND_8</td>\n<td>STRING</td>\n<td><p>Flag indicating saturated pixels for band 8 ('Y'/'N')</p></td>\n</tr>\n<tr>\n<td>SATURATION_BAND_9</td>\n<td>STRING</td>\n<td><p>Flag indicating saturated pixels for band 9 ('Y'/'N')</p></td>\n</tr>\n<tr>\n<td>SCENE_CENTER_TIME</td>\n<td>STRING</td>\n<td><p>Scene center time of acquired image. HH:MM:SS.SSSSSSSZ</p>\n<ul>\n<li>HH = Hour (00-23)</li>\n<li>MM = Minutes</li>\n<li>SS.SSSSSSS = Fractional seconds</li>\n<li>Z = "Zulu" time (same as GMT)</li>\n</ul></td>\n</tr>\n<tr>\n<td>SENSOR_ID</td>\n<td>STRING</td>\n<td><p>Sensor used to capture data.</p></td>\n</tr>\n<tr>\n<td>SPACECRAFT_ID</td>\n<td>STRING</td>\n<td><p>Spacecraft identification.</p></td>\n</tr>\n<tr>\n<td>STATION_ID</td>\n<td>STRING</td>\n<td><p>Ground Station/Organisation that received the data.</p></td>\n</tr>\n<tr>\n<td>SUN_AZIMUTH</td>\n<td>DOUBLE</td>\n<td><p>Sun azimuth angle in degrees for the image center location at the image centre acquisition time.</p></td>\n</tr>\n<tr>\n<td>SUN_ELEVATION</td>\n<td>DOUBLE</td>\n<td><p>Sun elevation angle in degrees for the image center location at the image centre acquisition time.</p></td>\n</tr>\n<tr>\n<td>TARGET_WRS_PATH</td>\n<td>DOUBLE</td>\n<td><p>Nearest WRS-2 path to the line-of-sight scene center of the image.</p></td>\n</tr>\n<tr>\n<td>TARGET_WRS_ROW</td>\n<td>DOUBLE</td>\n<td><p>Nearest WRS-2 row to the line-of-sight scene center of the image. 
Rows 880-889 and 990-999 are reserved for the polar regions where it is undefined in the WRS-2.</p></td>\n</tr>\n<tr>\n<td>THERMAL_LINES</td>\n<td>DOUBLE</td>\n<td><p>Number of product lines for the thermal band.</p></td>\n</tr>\n<tr>\n<td>THERMAL_SAMPLES</td>\n<td>DOUBLE</td>\n<td><p>Number of product samples for the thermal band.</p></td>\n</tr>\n<tr>\n<td>TIRS_SSM_MODEL</td>\n<td>STRING</td>\n<td><p>Due to an anomalous condition on the Thermal Infrared\nSensor (TIRS) Scene Select Mirror (SSM) encoder electronics,\nthis field has been added to indicate which model was used to process the data.\n(Actual, Preliminary, Final)</p></td>\n</tr>\n<tr>\n<td>TIRS_SSM_POSITION_STATUS</td>\n<td>STRING</td>\n<td><p>TIRS SSM position status.</p></td>\n</tr>\n<tr>\n<td>TIRS_STRAY_LIGHT_CORRECTION_SOURCE</td>\n<td>STRING</td>\n<td><p>TIRS stray light correction source.</p></td>\n</tr>\n<tr>\n<td>TRUNCATION_OLI</td>\n<td>STRING</td>\n<td><p>Region of OLCI truncated.</p></td>\n</tr>\n<tr>\n<td>UTM_ZONE</td>\n<td>DOUBLE</td>\n<td><p>UTM zone number used in product map projection.</p></td>\n</tr>\n<tr>\n<td>WRS_PATH</td>\n<td>DOUBLE</td>\n<td><p>The WRS orbital path number (001 - 251).</p></td>\n</tr>\n<tr>\n<td>WRS_ROW</td>\n<td>DOUBLE</td>\n<td><p>Landsat satellite WRS row (001-248).</p></td>\n</tr>\n</table>\n<style>\n table.eecat {\n border: 1px solid black;\n border-collapse: collapse;\n font-size: 13px;\n }\n table.eecat td, tr, th {\n text-align: left; vertical-align: top;\n border: 1px solid gray; padding: 3px;\n }\n td.nobreak { white-space: nowrap; }\n</style>', 'source_tags': ['landsat', 'usgs'], 'visualization_1_name': 'Near Infrared (543)', 'visualization_0_max': '30000.0', 'title': 'USGS Landsat 8 Collection 1 Tier 1 TOA Reflectance', 'visualization_0_gain': '500.0', 'system:visualization_2_max': '30000.0', 'product_tags': ['global', 'toa', 'oli_tirs', 'lc8', 'c1', 't1', 'l8', 'tier1', 'radiance'], 'visualization_1_gain': '500.0', 'provider': 'USGS/Google', 'visualization_1_min': '0.0', 'system:visualization_2_name': 'Shortwave Infrared (753)', 'visualization_0_min': '0.0', 'system:visualization_1_bands': 'B5,B4,B3', 'system:visualization_1_max': '30000.0', 'visualization_0_name': 'True Color (432)', 'date_range': [1365638400000, 1581206400000], 'visualization_2_bands': 'B7,B5,B3', 'visualization_2_name': 'Shortwave Infrared (753)', 'period': 0, 'system:visualization_2_min': '0.0', 'system:visualization_0_bands': 'B4,B3,B2', 'visualization_2_min': '0.0', 'visualization_2_gain': '500.0', 'provider_url': 'http://landsat.usgs.gov/', 'sample': 'https://mw1.google.com/ges/dd/images/LANDSAT_TOA_sample.png', 'system:visualization_1_name': 'Near Infrared (543)', 'tags': ['landsat', 'usgs', 'global', 'toa', 'oli_tirs', 'lc8', 'c1', 't1', 'l8', 'tier1', 'radiance'], 'system:visualization_0_max': '30000.0', 'visualization_2_max': '30000.0', 'system:visualization_2_bands': 'B7,B5,B3', 'system:visualization_1_min': '0.0', 'system:visualization_0_name': 'True Color (432)', 'visualization_0_bands': 'B4,B3,B2'}, 'features': [{'type': 'Image', 'bands': [{'id': 'B1', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 465585, 0, -30, 4264515]}, {'id': 'B2', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 465585, 0, -30, 4264515]}, {'id': 'B3', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 
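The RADIANCE_*, REFLECTANCE_* and K1/K2 constants in the table implement the standard USGS rescaling from quantized DN to physical units. A minimal NumPy sketch of those conversions follows; the DN arrays are hypothetical stand-ins for real Level-1 pixels, while the constant values are taken from the first scene in the dump (LC08_044034_20140724).

```python
import numpy as np

# Hypothetical DN blocks standing in for real Level-1 pixels.
dn_b10 = np.array([[21500.0, 21780.0], [22010.0, 21340.0]])  # thermal Band 10
dn_b4 = np.array([[9500.0, 10120.0], [8760.0, 9930.0]])      # red Band 4

# Scene metadata from LC08_044034_20140724 (see the dump above).
RADIANCE_MULT_BAND_10 = 0.00033420001273043454  # gain   M_L
RADIANCE_ADD_BAND_10 = 0.10000000149011612      # offset A_L
K1_CONSTANT_BAND_10 = 774.8853149414062
K2_CONSTANT_BAND_10 = 1321.078857421875
REFLECTANCE_MULT_BAND_4 = 1.9999999494757503e-05
REFLECTANCE_ADD_BAND_4 = -0.10000000149011612
SUN_ELEVATION = 63.77280807495117               # degrees

# DN -> TOA spectral radiance: L = M_L * DN + A_L   [W m^-2 sr^-1 um^-1]
radiance_b10 = RADIANCE_MULT_BAND_10 * dn_b10 + RADIANCE_ADD_BAND_10

# Radiance -> at-sensor brightness temperature: T = K2 / ln(K1 / L + 1)   [K]
bt_b10 = K2_CONSTANT_BAND_10 / np.log(K1_CONSTANT_BAND_10 / radiance_b10 + 1.0)

# DN -> TOA reflectance, corrected for sun elevation:
# rho = (M_rho * DN + A_rho) / sin(theta_SE)
toa_b4 = (REFLECTANCE_MULT_BAND_4 * dn_b4 + REFLECTANCE_ADD_BAND_4) \
    / np.sin(np.deg2rad(SUN_ELEVATION))

print(bt_b10)
print(toa_b4)
```

Note that in this particular asset ('T1_TOA') the reflective bands are already stored as floating-point TOA reflectance and B10/B11 as brightness temperature in Kelvin, so these conversions matter mainly when starting from the raw Level-1 DN products.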
The remainder of the dump is the collection's `features` list: one `Image` record per scene. Each record carries the same band layout, namely the 30 m bands B1–B7, B9–B11 plus the integer BQA quality band (roughly 7671 × 7800 pixels in EPSG:32610, i.e. UTM zone 10) and the 15 m panchromatic band B8, together with a `properties` dictionary holding the per-scene metadata documented above (calibration constants, the scene footprint as a `LinearRing`, acquisition and processing parameters). All scenes in this excerpt are WRS-2 path 44, row 34, with `SPACECRAFT_ID` LANDSAT_8, `SENSOR_ID` OLI_TIRS, `DATA_TYPE` L1TP, `COLLECTION_CATEGORY` T1, processed with LPGS_2.7.0. In dump order:

| system:index | DATE_ACQUIRED | CLOUD_COVER (%) | CLOUD_COVER_LAND (%) | SUN_ELEVATION (°) | GEOMETRIC_RMSE_MODEL (m) | GCPs (model / verify) |
|---|---|---|---|---|---|---|
| LC08_044034_20140724 | 2014-07-24 | 0.32 | 0.23 | 63.77 | 5.455 | 568 / 213 |
| LC08_044034_20140708 | 2014-07-08 | 39.98 | 12.06 | 65.88 | 5.275 | 506 / 187 |
| LC08_044034_20140622 | 2014-06-22 | 33.03 | 3.39 | 67.07 | 5.415 | 558 / 215 |
| LC08_044034_20140606 | 2014-06-06 | 35.71 | 3.93 | 67.10 | 5.419 | 549 / 192 |
| LC08_044034_20140521 | 2014-05-21 | 35.44 | 14.02 | 65.65 | 5.670 | 404 / 150 |
| LC08_044034_20140505 | 2014-05-05 | 24.25 | 30.09 | 62.58 | 7.320 | 289 / 62 |
| LC08_044034_20140419 | 2014-04-19 | 12.92 | 0.75 | 58.09 | 6.548 | 509 / 169 |

An eighth `Image` record begins after these, but the dump is truncated mid-record.
'dimensions': [7671, 7791], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 460785, 0, -30, 4264215]}, {'id': 'B4', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7791], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 460785, 0, -30, 4264215]}, {'id': 'B5', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7791], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 460785, 0, -30, 4264215]}, {'id': 'B6', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7791], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 460785, 0, -30, 4264215]}, {'id': 'B7', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7791], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 460785, 0, -30, 4264215]}, {'id': 'B8', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [15341, 15581], 'crs': 'EPSG:32610', 'crs_transform': [15, 0, 460792.5, 0, -15, 4264207.5]}, {'id': 'B9', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7791], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 460785, 0, -30, 4264215]}, {'id': 'B10', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7791], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 460785, 0, -30, 4264215]}, {'id': 'B11', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7791], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 460785, 0, -30, 4264215]}, {'id': 'BQA', 'data_type': {'type': 'PixelType', 'precision': 'int', 'min': 0, 'max': 65535}, 'dimensions': [7671, 7791], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 460785, 0, -30, 4264215]}], 'version': 1581772792877035, 'id': 'LANDSAT/LC08/C01/T1_TOA/LC08_044034_20140403', 'properties': {'RADIANCE_MULT_BAND_5': 0.00611429987475276, 'RADIANCE_MULT_BAND_6': 0.0015206000534817576, 'RADIANCE_MULT_BAND_3': 0.011849000118672848, 'RADIANCE_MULT_BAND_4': 0.009991499595344067, 'RADIANCE_MULT_BAND_1': 0.012556999921798706, 'RADIANCE_MULT_BAND_2': 0.01285799965262413, 'K2_CONSTANT_BAND_11': 1201.1441650390625, 'K2_CONSTANT_BAND_10': 1321.078857421875, 'system:footprint': {'type': 'LinearRing', 'coordinates': [[-120.8473141778081, 38.05593855929062], [-120.8399593728871, 38.079323071287384], [-120.82522434534502, 38.126298845124154], [-120.82517062317932, 38.12810935862697], [-120.8677905264658, 38.13653674526281], [-121.37735830917396, 38.23574890955089], [-122.92397603591857, 38.5218201625494], [-122.94540185152168, 38.52557313562304], [-122.94781508421401, 38.52557420469068], [-122.9538620955667, 38.50519466790785], [-123.43541566635548, 36.80572425461524], [-123.43388775775958, 36.8051169737102], [-121.36103157158686, 36.408726677230895], [-121.3601864919046, 36.410036730606365], [-121.3547960201613, 36.42754948797928], [-121.22805212441246, 36.84032220234662], [-121.10161450053057, 37.247264521511426], [-120.99043851266156, 37.60225211028372], [-120.94687053372499, 37.7406010941523], [-120.88475337745422, 37.93745112674764], [-120.8473141778081, 38.05593855929062]]}, 'REFLECTIVE_SAMPLES': 7671, 'SUN_AZIMUTH': 143.3709716796875, 'CPF_NAME': 'LC08CPF_20140401_20140630_01.01', 'DATE_ACQUIRED': '2014-04-03', 'ELLIPSOID': 'WGS84', 'google:registration_offset_x': 0, 'google:registration_offset_y': 0, 'STATION_ID': 'LGN', 'RESAMPLING_OPTION': 'CUBIC_CONVOLUTION', 'ORIENTATION': 'NORTH_UP', 'WRS_ROW': 34, 'RADIANCE_MULT_BAND_9': 0.002389600034803152, 'TARGET_WRS_ROW': 34, 'RADIANCE_MULT_BAND_7': 0.0005125200259499252, 'RADIANCE_MULT_BAND_8': 
0.011308000423014164, 'IMAGE_QUALITY_TIRS': 9, 'TRUNCATION_OLI': 'UPPER', 'CLOUD_COVER': 28.1200008392334, 'GEOMETRIC_RMSE_VERIFY': 3.2160000801086426, 'COLLECTION_CATEGORY': 'T1', 'GRID_CELL_SIZE_REFLECTIVE': 30, 'CLOUD_COVER_LAND': 31.59000015258789, 'GEOMETRIC_RMSE_MODEL': 6.959000110626221, 'COLLECTION_NUMBER': 1, 'IMAGE_QUALITY_OLI': 9, 'LANDSAT_SCENE_ID': 'LC80440342014093LGN01', 'WRS_PATH': 44, 'google:registration_count': 0, 'PANCHROMATIC_SAMPLES': 15341, 'PANCHROMATIC_LINES': 15581, 'GEOMETRIC_RMSE_MODEL_Y': 4.63700008392334, 'REFLECTIVE_LINES': 7791, 'TIRS_STRAY_LIGHT_CORRECTION_SOURCE': 'TIRS', 'GEOMETRIC_RMSE_MODEL_X': 5.188000202178955, 'system:asset_size': 1208697743, 'system:index': 'LC08_044034_20140403', 'REFLECTANCE_ADD_BAND_1': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_2': -0.10000000149011612, 'DATUM': 'WGS84', 'REFLECTANCE_ADD_BAND_3': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_4': -0.10000000149011612, 'RLUT_FILE_NAME': 'LC08RLUT_20130211_20150302_01_11.h5', 'REFLECTANCE_ADD_BAND_5': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_6': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_7': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_8': -0.10000000149011612, 'BPF_NAME_TIRS': 'LT8BPF20140403182815_20140403190449.01', 'GROUND_CONTROL_POINTS_VERSION': 4, 'DATA_TYPE': 'L1TP', 'UTM_ZONE': 10, 'LANDSAT_PRODUCT_ID': 'LC08_L1TP_044034_20140403_20170306_01_T1', 'REFLECTANCE_ADD_BAND_9': -0.10000000149011612, 'google:registration_ratio': 0, 'GRID_CELL_SIZE_PANCHROMATIC': 15, 'RADIANCE_ADD_BAND_4': -49.95764923095703, 'REFLECTANCE_MULT_BAND_7': 1.9999999494757503e-05, 'system:time_start': 1396550776290, 'RADIANCE_ADD_BAND_5': -30.571590423583984, 'REFLECTANCE_MULT_BAND_6': 1.9999999494757503e-05, 'RADIANCE_ADD_BAND_6': -7.602880001068115, 'REFLECTANCE_MULT_BAND_9': 1.9999999494757503e-05, 'PROCESSING_SOFTWARE_VERSION': 'LPGS_2.7.0', 'RADIANCE_ADD_BAND_7': -2.562580108642578, 'REFLECTANCE_MULT_BAND_8': 1.9999999494757503e-05, 'RADIANCE_ADD_BAND_1': -62.78356170654297, 'RADIANCE_ADD_BAND_2': -64.29113006591797, 'RADIANCE_ADD_BAND_3': -59.24372863769531, 'REFLECTANCE_MULT_BAND_1': 1.9999999494757503e-05, 'RADIANCE_ADD_BAND_8': -56.53831100463867, 'REFLECTANCE_MULT_BAND_3': 1.9999999494757503e-05, 'RADIANCE_ADD_BAND_9': -11.94806957244873, 'REFLECTANCE_MULT_BAND_2': 1.9999999494757503e-05, 'REFLECTANCE_MULT_BAND_5': 1.9999999494757503e-05, 'REFLECTANCE_MULT_BAND_4': 1.9999999494757503e-05, 'THERMAL_LINES': 7791, 'TIRS_SSM_POSITION_STATUS': 'NOMINAL', 'GRID_CELL_SIZE_THERMAL': 30, 'NADIR_OFFNADIR': 'NADIR', 'RADIANCE_ADD_BAND_11': 0.10000000149011612, 'REQUEST_ID': '0501703063782_00025', 'EARTH_SUN_DISTANCE': 0.9999619126319885, 'TIRS_SSM_MODEL': 'ACTUAL', 'FILE_DATE': 1488829355000, 'SCENE_CENTER_TIME': '18:46:16.2881730Z', 'SUN_ELEVATION': 52.549800872802734, 'BPF_NAME_OLI': 'LO8BPF20140403183209_20140403190356.01', 'RADIANCE_ADD_BAND_10': 0.10000000149011612, 'ROLL_ANGLE': -0.0010000000474974513, 'K1_CONSTANT_BAND_10': 774.8853149414062, 'SATURATION_BAND_1': 'N', 'SATURATION_BAND_2': 'N', 'SATURATION_BAND_3': 'N', 'SATURATION_BAND_4': 'N', 'SATURATION_BAND_5': 'Y', 'MAP_PROJECTION': 'UTM', 'SATURATION_BAND_6': 'Y', 'SENSOR_ID': 'OLI_TIRS', 'SATURATION_BAND_7': 'Y', 'K1_CONSTANT_BAND_11': 480.8883056640625, 'SATURATION_BAND_8': 'N', 'SATURATION_BAND_9': 'N', 'TARGET_WRS_PATH': 44, 'RADIANCE_MULT_BAND_11': 0.00033420001273043454, 'RADIANCE_MULT_BAND_10': 0.00033420001273043454, 'GROUND_CONTROL_POINTS_MODEL': 385, 'SPACECRAFT_ID': 'LANDSAT_8', 'ELEVATION_SOURCE': 'GLS2000', 'THERMAL_SAMPLES': 
7671, 'GROUND_CONTROL_POINTS_VERIFY': 98}}, {'type': 'Image', 'bands': [{'id': 'B1', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7661, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 460785, 0, -30, 4264215]}, {'id': 'B2', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7661, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 460785, 0, -30, 4264215]}, {'id': 'B3', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7661, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 460785, 0, -30, 4264215]}, {'id': 'B4', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7661, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 460785, 0, -30, 4264215]}, {'id': 'B5', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7661, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 460785, 0, -30, 4264215]}, {'id': 'B6', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7661, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 460785, 0, -30, 4264215]}, {'id': 'B7', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7661, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 460785, 0, -30, 4264215]}, {'id': 'B8', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [15321, 15601], 'crs': 'EPSG:32610', 'crs_transform': [15, 0, 460792.5, 0, -15, 4264207.5]}, {'id': 'B9', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7661, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 460785, 0, -30, 4264215]}, {'id': 'B10', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7661, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 460785, 0, -30, 4264215]}, {'id': 'B11', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7661, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 460785, 0, -30, 4264215]}, {'id': 'BQA', 'data_type': {'type': 'PixelType', 'precision': 'int', 'min': 0, 'max': 65535}, 'dimensions': [7661, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 460785, 0, -30, 4264215]}], 'version': 1581772792877035, 'id': 'LANDSAT/LC08/C01/T1_TOA/LC08_044034_20140318', 'properties': {'RADIANCE_MULT_BAND_5': 0.006170900072902441, 'RADIANCE_MULT_BAND_6': 0.001534600043669343, 'RADIANCE_MULT_BAND_3': 0.011958000250160694, 'RADIANCE_MULT_BAND_4': 0.010084000416100025, 'RADIANCE_MULT_BAND_1': 0.012672999873757362, 'RADIANCE_MULT_BAND_2': 0.012977000325918198, 'K2_CONSTANT_BAND_11': 1201.1441650390625, 'K2_CONSTANT_BAND_10': 1321.078857421875, 'system:footprint': {'type': 'LinearRing', 'coordinates': [[-121.3637119499993, 36.41016684133052], [-121.35905784815819, 36.42528989660049], [-121.2315833015866, 36.840374852891664], [-121.09978718573184, 37.26438246506325], [-121.00571062336425, 37.564795515259384], [-120.98453376062118, 37.632161601008896], [-120.95100979452299, 37.73864548098522], [-120.90277241165228, 37.89149086576169], [-120.8836409072059, 37.951976016520376], [-120.85713152433351, 38.03584247073611], [-120.82804345546616, 38.12789513604401], [-122.38148159443172, 38.42337450676813], [-122.9500220192271, 38.525813632077686], [-122.95103687833704, 38.52422133103557], [-122.9569591344694, 38.504384836247866], [-123.43853932998316, 36.805122381748035], [-123.18722447462653, 36.759167415189125], [-121.5105534682754, 36.43765126135182], [-121.36447385999617, 36.408418528930035], [-121.3637119499993, 36.41016684133052]]}, 'REFLECTIVE_SAMPLES': 
7661, 'SUN_AZIMUTH': 146.2395782470703, 'CPF_NAME': 'LC08CPF_20140101_20140331_01.01', 'DATE_ACQUIRED': '2014-03-18', 'ELLIPSOID': 'WGS84', 'google:registration_offset_x': 0, 'google:registration_offset_y': 0, 'STATION_ID': 'LGN', 'RESAMPLING_OPTION': 'CUBIC_CONVOLUTION', 'ORIENTATION': 'NORTH_UP', 'WRS_ROW': 34, 'RADIANCE_MULT_BAND_9': 0.0024117000866681337, 'TARGET_WRS_ROW': 34, 'RADIANCE_MULT_BAND_7': 0.0005172499804757535, 'RADIANCE_MULT_BAND_8': 0.0114120002835989, 'IMAGE_QUALITY_TIRS': 9, 'TRUNCATION_OLI': 'UPPER', 'CLOUD_COVER': 0.05999999865889549, 'GEOMETRIC_RMSE_VERIFY': 3.249000072479248, 'COLLECTION_CATEGORY': 'T1', 'GRID_CELL_SIZE_REFLECTIVE': 30, 'CLOUD_COVER_LAND': 0.10000000149011612, 'GEOMETRIC_RMSE_MODEL': 6.78000020980835, 'COLLECTION_NUMBER': 1, 'IMAGE_QUALITY_OLI': 9, 'LANDSAT_SCENE_ID': 'LC80440342014077LGN01', 'WRS_PATH': 44, 'google:registration_count': 0, 'PANCHROMATIC_SAMPLES': 15321, 'PANCHROMATIC_LINES': 15601, 'GEOMETRIC_RMSE_MODEL_Y': 4.747000217437744, 'REFLECTIVE_LINES': 7801, 'TIRS_STRAY_LIGHT_CORRECTION_SOURCE': 'TIRS', 'GEOMETRIC_RMSE_MODEL_X': 4.841000080108643, 'system:asset_size': 1105511852, 'system:index': 'LC08_044034_20140318', 'REFLECTANCE_ADD_BAND_1': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_2': -0.10000000149011612, 'DATUM': 'WGS84', 'REFLECTANCE_ADD_BAND_3': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_4': -0.10000000149011612, 'RLUT_FILE_NAME': 'LC08RLUT_20130211_20150302_01_11.h5', 'REFLECTANCE_ADD_BAND_5': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_6': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_7': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_8': -0.10000000149011612, 'BPF_NAME_TIRS': 'LT8BPF20140318182855_20140318190505.01', 'GROUND_CONTROL_POINTS_VERSION': 4, 'DATA_TYPE': 'L1TP', 'UTM_ZONE': 10, 'LANDSAT_PRODUCT_ID': 'LC08_L1TP_044034_20140318_20170307_01_T1', 'REFLECTANCE_ADD_BAND_9': -0.10000000149011612, 'google:registration_ratio': 0, 'GRID_CELL_SIZE_PANCHROMATIC': 15, 'RADIANCE_ADD_BAND_4': -50.419559478759766, 'REFLECTANCE_MULT_BAND_7': 1.9999999494757503e-05, 'system:time_start': 1395168392050, 'RADIANCE_ADD_BAND_5': -30.854249954223633, 'REFLECTANCE_MULT_BAND_6': 1.9999999494757503e-05, 'RADIANCE_ADD_BAND_6': -7.67317008972168, 'REFLECTANCE_MULT_BAND_9': 1.9999999494757503e-05, 'PROCESSING_SOFTWARE_VERSION': 'LPGS_2.7.0', 'RADIANCE_ADD_BAND_7': -2.5862700939178467, 'REFLECTANCE_MULT_BAND_8': 1.9999999494757503e-05, 'RADIANCE_ADD_BAND_1': -63.364051818847656, 'RADIANCE_ADD_BAND_2': -64.88555908203125, 'RADIANCE_ADD_BAND_3': -59.79148864746094, 'REFLECTANCE_MULT_BAND_1': 1.9999999494757503e-05, 'RADIANCE_ADD_BAND_8': -57.06106185913086, 'REFLECTANCE_MULT_BAND_3': 1.9999999494757503e-05, 'RADIANCE_ADD_BAND_9': -12.058540344238281, 'REFLECTANCE_MULT_BAND_2': 1.9999999494757503e-05, 'REFLECTANCE_MULT_BAND_5': 1.9999999494757503e-05, 'REFLECTANCE_MULT_BAND_4': 1.9999999494757503e-05, 'THERMAL_LINES': 7801, 'TIRS_SSM_POSITION_STATUS': 'NOMINAL', 'GRID_CELL_SIZE_THERMAL': 30, 'NADIR_OFFNADIR': 'NADIR', 'RADIANCE_ADD_BAND_11': 0.10000000149011612, 'REQUEST_ID': '0501703063989_00019', 'EARTH_SUN_DISTANCE': 0.9953709244728088, 'TIRS_SSM_MODEL': 'ACTUAL', 'FILE_DATE': 1488849349000, 'SCENE_CENTER_TIME': '18:46:32.0535800Z', 'SUN_ELEVATION': 46.471065521240234, 'BPF_NAME_OLI': 'LO8BPF20140318183249_20140318190412.01', 'RADIANCE_ADD_BAND_10': 0.10000000149011612, 'ROLL_ANGLE': -0.0010000000474974513, 'K1_CONSTANT_BAND_10': 774.8853149414062, 'SATURATION_BAND_1': 'Y', 'SATURATION_BAND_2': 'Y', 'SATURATION_BAND_3': 'Y', 'SATURATION_BAND_4': 
'Y', 'SATURATION_BAND_5': 'Y', 'MAP_PROJECTION': 'UTM', 'SATURATION_BAND_6': 'Y', 'SENSOR_ID': 'OLI_TIRS', 'SATURATION_BAND_7': 'Y', 'K1_CONSTANT_BAND_11': 480.8883056640625, 'SATURATION_BAND_8': 'N', 'SATURATION_BAND_9': 'N', 'TARGET_WRS_PATH': 44, 'RADIANCE_MULT_BAND_11': 0.00033420001273043454, 'RADIANCE_MULT_BAND_10': 0.00033420001273043454, 'GROUND_CONTROL_POINTS_MODEL': 527, 'SPACECRAFT_ID': 'LANDSAT_8', 'ELEVATION_SOURCE': 'GLS2000', 'THERMAL_SAMPLES': 7661, 'GROUND_CONTROL_POINTS_VERIFY': 164}}]}
###Markdown
Display Earth Engine data layers
###Code
Map.setControlVisibility(layerControl=True, fullscreenControl=True, latLngPopup=True)
Map
###Output
_____no_output_____
###Markdown
Install Earth Engine API and geemap. Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geemap](https://github.com/giswqs/geemap). The **geemap** Python package is built upon the [ipyleaflet](https://github.com/jupyter-widgets/ipyleaflet) and [folium](https://github.com/python-visualization/folium) packages and implements several methods for interacting with Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, and `Map.centerObject()`. The following script checks whether the geemap package has been installed. If not, it installs geemap, which automatically installs its [dependencies](https://github.com/giswqs/geemap#dependencies), including earthengine-api, folium, and ipyleaflet. **Important note**: A key difference between folium and ipyleaflet is that ipyleaflet is built upon ipywidgets and allows bidirectional communication between the front end and the back end, enabling the map to capture user input, while folium is meant for displaying static data only ([source](https://blog.jupyter.org/interactive-gis-in-jupyter-with-ipyleaflet-52f9657fa7a)). Note that [Google Colab](https://colab.research.google.com/) currently does not support ipyleaflet ([source](https://github.com/googlecolab/colabtools/issues/60#issuecomment-596225619)). Therefore, if you are using geemap with Google Colab, you should use [`import geemap.eefolium`](https://github.com/giswqs/geemap/blob/master/geemap/eefolium.py). If you are using geemap with [binder](https://mybinder.org/) or a local Jupyter notebook server, you can use [`import geemap`](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py), which provides more functionalities for capturing user input (e.g., mouse-clicking and moving).
###Code
# Installs geemap package
import subprocess
try:
import geemap
except ImportError:
print('geemap package not installed. Installing ...')
subprocess.check_call(["python", '-m', 'pip', 'install', 'geemap'])
# Checks whether this notebook is running on Google Colab
try:
import google.colab
import geemap.eefolium as emap
except ImportError:
import geemap as emap
# Authenticates and initializes Earth Engine
import ee
try:
ee.Initialize()
except Exception as e:
ee.Authenticate()
ee.Initialize()
###Output
_____no_output_____
###Markdown
Create an interactive map The default basemap is `Google Satellite`. [Additional basemaps](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py#L13) can be added using the `Map.add_basemap()` function.
###Code
Map = emap.Map(center=[40,-100], zoom=4)
Map.add_basemap('ROADMAP') # Add Google Map
Map
###Output
_____no_output_____
###Markdown
Add Earth Engine Python script
###Code
# Add Earth Engine dataset
# Load a Landsat 8 ImageCollection for a single path-row.
collection = ee.ImageCollection('LANDSAT/LC08/C01/T1_TOA') \
.filter(ee.Filter.eq('WRS_PATH', 44)) \
.filter(ee.Filter.eq('WRS_ROW', 34)) \
.filterDate('2014-03-01', '2014-08-01')
print('Collection: ', collection.getInfo())
# Get the number of images.
count = collection.size()
print('Count: ', count.getInfo())
# Get the date range of images in the collection.
date_range = collection.reduceColumns(ee.Reducer.minMax(), ["system:time_start"])
print('Date range: ', ee.Date(date_range.get('min')).getInfo(), ee.Date(date_range.get('max')).getInfo())
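# (Illustrative addition) the min/max values above are epoch milliseconds;
# ee.Date.format() renders them as human-readable dates server-side.
print('Date range (formatted): ',
      ee.Date(date_range.get('min')).format('YYYY-MM-dd').getInfo(),
      ee.Date(date_range.get('max')).format('YYYY-MM-dd').getInfo())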
# Get statistics for a property of the images in the collection.
sunStats = collection.aggregate_stats('SUN_ELEVATION')
print('Sun elevation statistics: ', sunStats.getInfo())
# Sort by a cloud cover property, get the least cloudy image.
image = ee.Image(collection.sort('CLOUD_COVER').first())
print('Least cloudy image: ', image.getInfo())
# Limit the collection to the 10 most recent images.
recent = collection.sort('system:time_start', False).limit(10)
print('Recent images: ', recent.getInfo())
###Output
_____no_output_____
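###Markdown
As a quick sanity check (an illustrative addition, not part of the original script), the least cloudy image selected above can be drawn on the map with `Map.addLayer()` before the layer control is enabled below; the band combination and stretch used here are typical true-color settings for Landsat 8 TOA reflectance.
###Code
# Show the least cloudy image as a true-color composite.
# B4/B3/B2 are Landsat 8's red/green/blue bands; TOA reflectance is
# roughly 0-1, so max=0.3 gives a reasonable contrast stretch.
vis_params = {'bands': ['B4', 'B3', 'B2'], 'min': 0, 'max': 0.3}
Map.addLayer(image, vis_params, 'Least cloudy image')
Map.centerObject(image, zoom=8)
###Output
_____no_output_____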
###Markdown
Display Earth Engine data layers
###Code
Map.addLayerControl() # This line is not needed for ipyleaflet-based Map.
Map
###Output
_____no_output_____
###Markdown
Install Earth Engine API and geemap. Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geemap](https://github.com/giswqs/geemap). The **geemap** Python package is built upon the [ipyleaflet](https://github.com/jupyter-widgets/ipyleaflet) and [folium](https://github.com/python-visualization/folium) packages and implements several methods for interacting with Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, and `Map.centerObject()`. The following script checks whether the geemap package has been installed. If not, it installs geemap, which automatically installs its [dependencies](https://github.com/giswqs/geemap#dependencies), including earthengine-api, folium, and ipyleaflet. **Important note**: A key difference between folium and ipyleaflet is that ipyleaflet is built upon ipywidgets and allows bidirectional communication between the front end and the back end, enabling the map to capture user input, while folium is meant for displaying static data only ([source](https://blog.jupyter.org/interactive-gis-in-jupyter-with-ipyleaflet-52f9657fa7a)). Note that [Google Colab](https://colab.research.google.com/) currently does not support ipyleaflet ([source](https://github.com/googlecolab/colabtools/issues/60#issuecomment-596225619)). Therefore, if you are using geemap with Google Colab, you should use [`import geemap.eefolium`](https://github.com/giswqs/geemap/blob/master/geemap/eefolium.py). If you are using geemap with [binder](https://mybinder.org/) or a local Jupyter notebook server, you can use [`import geemap`](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py), which provides more functionalities for capturing user input (e.g., mouse-clicking and moving).
###Code
# Installs geemap package
import subprocess
try:
import geemap
except ImportError:
print('geemap package not installed. Installing ...')
subprocess.check_call(["python", '-m', 'pip', 'install', 'geemap'])
# Checks whether this notebook is running on Google Colab
try:
import google.colab
import geemap.eefolium as emap
except ImportError:
import geemap as emap
# Authenticates and initializes Earth Engine
import ee
try:
ee.Initialize()
except Exception as e:
ee.Authenticate()
ee.Initialize()
###Output
_____no_output_____
###Markdown
Create an interactive map The default basemap is `Google Satellite`. [Additional basemaps](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py#L13) can be added using the `Map.add_basemap()` function.
###Code
Map = emap.Map(center=[40,-100], zoom=4)
Map.add_basemap('ROADMAP') # Add Google Map
Map
###Output
_____no_output_____
###Markdown
Add Earth Engine Python script
###Code
# Add Earth Engine dataset
# Load a Landsat 8 ImageCollection for a single path-row.
collection = ee.ImageCollection('LANDSAT/LC08/C01/T1_TOA') \
.filter(ee.Filter.eq('WRS_PATH', 44)) \
.filter(ee.Filter.eq('WRS_ROW', 34)) \
.filterDate('2014-03-01', '2014-08-01')
print('Collection: ', collection.getInfo())
# Get the number of images.
count = collection.size()
print('Count: ', count.getInfo())
# Get the date range of images in the collection.
date_range = collection.reduceColumns(ee.Reducer.minMax(), ["system:time_start"])
print('Date range: ', ee.Date(date_range.get('min')).getInfo(), ee.Date(date_range.get('max')).getInfo())
# Get statistics for a property of the images in the collection.
sunStats = collection.aggregate_stats('SUN_ELEVATION')
print('Sun elevation statistics: ', sunStats.getInfo())
# Sort by a cloud cover property, get the least cloudy image.
image = ee.Image(collection.sort('CLOUD_COVER').first())
print('Least cloudy image: ', image.getInfo())
# Limit the collection to the 10 most recent images.
recent = collection.sort('system:time_start', False).limit(10)
print('Recent images: ', recent.getInfo())
###Output
_____no_output_____
###Markdown
Display Earth Engine data layers
###Code
Map.addLayerControl() # This line is not needed for ipyleaflet-based Map.
Map
###Output
_____no_output_____
###Markdown
Install Earth Engine API and geemap. Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geemap](https://geemap.org). The **geemap** Python package is built upon the [ipyleaflet](https://github.com/jupyter-widgets/ipyleaflet) and [folium](https://github.com/python-visualization/folium) packages and implements several methods for interacting with Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, and `Map.centerObject()`. The following script checks whether the geemap package has been installed. If not, it installs geemap, which automatically installs its [dependencies](https://github.com/giswqs/geemap#dependencies), including earthengine-api, folium, and ipyleaflet.
###Code
# Installs geemap package
import subprocess
try:
import geemap
except ImportError:
print('Installing geemap ...')
subprocess.check_call(["python", '-m', 'pip', 'install', 'geemap'])
import ee
import geemap
###Output
_____no_output_____
###Markdown
Create an interactive map The default basemap is `Google Maps`. [Additional basemaps](https://github.com/giswqs/geemap/blob/master/geemap/basemaps.py) can be added using the `Map.add_basemap()` function.
###Code
Map = geemap.Map(center=[40,-100], zoom=4)
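# (Illustrative addition) add one of the extra basemaps mentioned above;
# 'HYBRID' is among the basemap keys shipped with geemap.
Map.add_basemap('HYBRID')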
Map
###Output
_____no_output_____
###Markdown
Add Earth Engine Python script
###Code
# Add Earth Engine dataset
# Load a Landsat 8 ImageCollection for a single path-row.
collection = ee.ImageCollection('LANDSAT/LC08/C01/T1_TOA') \
.filter(ee.Filter.eq('WRS_PATH', 44)) \
.filter(ee.Filter.eq('WRS_ROW', 34)) \
.filterDate('2014-03-01', '2014-08-01')
print('Collection: ', collection.getInfo())
# Get the number of images.
count = collection.size()
print('Count: ', count.getInfo())
# Get the date range of images in the collection.
date_range = collection.reduceColumns(ee.Reducer.minMax(), ["system:time_start"])
print('Date range: ', ee.Date(date_range.get('min')).getInfo(), ee.Date(date_range.get('max')).getInfo())
# Get statistics for a property of the images in the collection.
sunStats = collection.aggregate_stats('SUN_ELEVATION')
print('Sun elevation statistics: ', sunStats.getInfo())
# Sort by a cloud cover property, get the least cloudy image.
image = ee.Image(collection.sort('CLOUD_COVER').first())
print('Least cloudy image: ', image.getInfo())
# Limit the collection to the 10 most recent images.
recent = collection.sort('system:time_start', False).limit(10)
print('Recent images: ', recent.getInfo())
###Output
_____no_output_____
###Markdown
Display Earth Engine data layers
###Code
Map.addLayerControl() # This line is not needed for ipyleaflet-based Map.
Map
###Output
_____no_output_____
###Markdown
Pydeck Earth Engine Introduction. This is an introduction to using [Pydeck](https://pydeck.gl) and [Deck.gl](https://deck.gl) with [Google Earth Engine](https://earthengine.google.com/) in Jupyter Notebooks. If you wish to run this locally, you'll need to install some dependencies. Installing into a new Conda environment is recommended. To create and enter the environment, run:
```
conda create -n pydeck-ee -c conda-forge python jupyter notebook pydeck earthengine-api requests -y
source activate pydeck-ee
jupyter nbextension install --sys-prefix --symlink --overwrite --py pydeck
jupyter nbextension enable --sys-prefix --py pydeck
```
then open Jupyter Notebook with `jupyter notebook`. Now in a Python Jupyter Notebook, let's first import the required packages:
###Code
from pydeck_earthengine_layers import EarthEngineLayer
import pydeck as pdk
import requests
import ee
###Output
_____no_output_____
###Markdown
Authentication. Using Earth Engine requires authentication. If you don't have a Google account approved for use with Earth Engine, you'll need to request access. For more information and to sign up, go to https://signup.earthengine.google.com/. If you haven't used Earth Engine in Python before, you'll need to run the following authentication command. If you've previously authenticated in Python or the command line, you can skip the next line. Note that this creates a prompt which waits for user input. If you don't see a prompt, you may need to authenticate on the command line with `earthengine authenticate` and then return here, skipping the Python authentication.
###Code
try:
ee.Initialize()
except Exception as e:
ee.Authenticate()
ee.Initialize()
###Output
_____no_output_____
###Markdown
Create Map. Next it's time to create a map. Here we initialize the layer list and view state, then create an `ee.Image` object to display.
###Code
# Initialize objects
ee_layers = []
view_state = pdk.ViewState(latitude=37.7749295, longitude=-122.4194155, zoom=10, bearing=0, pitch=45)
# %%
# Add Earth Engine dataset
# Load a Landsat 8 ImageCollection for a single path-row.
collection = ee.ImageCollection('LANDSAT/LC08/C01/T1_TOA') \
.filter(ee.Filter.eq('WRS_PATH', 44)) \
.filter(ee.Filter.eq('WRS_ROW', 34)) \
.filterDate('2014-03-01', '2014-08-01')
print('Collection: ', collection.getInfo())
# Get the number of images.
count = collection.size()
print('Count: ', count.getInfo())
# Get the date range of images in the collection.
date_range = collection.reduceColumns(ee.Reducer.minMax(), ["system:time_start"])
print('Date range: ', ee.Date(date_range.get('min')).getInfo(), ee.Date(date_range.get('max')).getInfo())
# Get statistics for a property of the images in the collection.
sunStats = collection.aggregate_stats('SUN_ELEVATION')
print('Sun elevation statistics: ', sunStats.getInfo())
# Sort by a cloud cover property, get the least cloudy image.
image = ee.Image(collection.sort('CLOUD_COVER').first())
print('Least cloudy image: ', image.getInfo())
# Limit the collection to the 10 most recent images.
recent = collection.sort('system:time_start', False).limit(10)
print('Recent images: ', recent.getInfo())
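# (Illustrative addition, following the pydeck-earthengine-layers examples,
# which construct layers as EarthEngineLayer(ee_object=..., vis_params=...).)
# Without appending a layer here, ee_layers stays empty and the Deck below
# would show only the basemap.
vis_params = {'bands': ['B4', 'B3', 'B2'], 'min': 0, 'max': 0.3}
ee_layers.append(EarthEngineLayer(ee_object=image, vis_params=vis_params))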
###Output
_____no_output_____
###Markdown
Then just pass these layers to a `pydeck.Deck` instance, and call `.show()` to create a map:
###Code
r = pdk.Deck(layers=ee_layers, initial_view_state=view_state)
r.show()
###Output
_____no_output_____
###Markdown
Install Earth Engine API. Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geehydro](https://github.com/giswqs/geehydro). The **geehydro** Python package builds on the [folium](https://github.com/python-visualization/folium) package and implements several methods for displaying Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, `Map.centerObject()`, and `Map.setOptions()`. The magic command `%%capture` can be used to hide the output from a specific cell. Uncomment these lines if you are running this notebook for the first time.
###Code
# %%capture
# !pip install earthengine-api
# !pip install geehydro
###Output
_____no_output_____
###Markdown
Import libraries
###Code
import ee
import folium
import geehydro
###Output
_____no_output_____
###Markdown
Authenticate and initialize the Earth Engine API. You only need to authenticate the Earth Engine API once. Uncomment the line `ee.Authenticate()` if you are running this notebook for the first time or if you are getting an authentication error.
###Code
# ee.Authenticate()
ee.Initialize()
###Output
_____no_output_____
###Markdown
Create an interactive map This step creates an interactive map using [folium](https://github.com/python-visualization/folium). The default basemap is OpenStreetMap. Additional basemaps can be added using the `Map.setOptions()` function. The optional basemaps can be `ROADMAP`, `SATELLITE`, `HYBRID`, `TERRAIN`, or `ESRI`.
###Code
Map = folium.Map(location=[40, -100], zoom_start=4)
Map.setOptions('HYBRID')
###Output
_____no_output_____
###Markdown
Add Earth Engine Python script
###Code
# Load a Landsat 8 ImageCollection for a single path-row.
collection = ee.ImageCollection('LANDSAT/LC08/C01/T1_TOA') \
.filter(ee.Filter.eq('WRS_PATH', 44)) \
.filter(ee.Filter.eq('WRS_ROW', 34)) \
.filterDate('2014-03-01', '2014-08-01')
print('Collection: ', collection.getInfo())
# Get the number of images.
count = collection.size()
print('Count: ', count.getInfo())
# Get the date range of images in the collection.
date_range = collection.reduceColumns(ee.Reducer.minMax(), ["system:time_start"])
print('Date range: ', ee.Date(date_range.get('min')).getInfo(), ee.Date(date_range.get('max')).getInfo())
# Get statistics for a property of the images in the collection.
sunStats = collection.aggregate_stats('SUN_ELEVATION')
print('Sun elevation statistics: ', sunStats.getInfo())
# Sort by a cloud cover property, get the least cloudy image.
image = ee.Image(collection.sort('CLOUD_COVER').first())
print('Least cloudy image: ', image.getInfo())
# Limit the collection to the 10 most recent images.
recent = collection.sort('system:time_start', False).limit(10)
print('Recent images: ', recent.getInfo())
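# (Illustrative addition) geehydro patches folium.Map with Earth Engine
# helpers such as Map.addLayer(); draw the least cloudy image in true color
# and enable the layer control so the layer can be toggled.
Map.addLayer(image, {'bands': ['B4', 'B3', 'B2'], 'min': 0, 'max': 0.3}, 'Least cloudy image')
Map.setControlVisibility(layerControl=True)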
###Output
Collection: {'type': 'ImageCollection', 'bands': [{'id': 'B1', 'data_type': {'type': 'PixelType', 'precision': 'float'}}, {'id': 'B2', 'data_type': {'type': 'PixelType', 'precision': 'float'}}, {'id': 'B3', 'data_type': {'type': 'PixelType', 'precision': 'float'}}, {'id': 'B4', 'data_type': {'type': 'PixelType', 'precision': 'float'}}, {'id': 'B5', 'data_type': {'type': 'PixelType', 'precision': 'float'}}, {'id': 'B6', 'data_type': {'type': 'PixelType', 'precision': 'float'}}, {'id': 'B7', 'data_type': {'type': 'PixelType', 'precision': 'float'}}, {'id': 'B8', 'data_type': {'type': 'PixelType', 'precision': 'float'}}, {'id': 'B9', 'data_type': {'type': 'PixelType', 'precision': 'float'}}, {'id': 'B10', 'data_type': {'type': 'PixelType', 'precision': 'float'}}, {'id': 'B11', 'data_type': {'type': 'PixelType', 'precision': 'float'}}, {'id': 'BQA', 'data_type': {'type': 'PixelType', 'precision': 'int', 'min': 0, 'max': 65535}}], 'id': 'LANDSAT/LC08/C01/T1_TOA', 'version': 1580563126058134, 'properties': {'system:visualization_0_min': '0.0', 'type_name': 'ImageCollection', 'visualization_1_bands': 'B5,B4,B3', 'thumb': 'https://mw1.google.com/ges/dd/images/LANDSAT_TOA_thumb.png', 'visualization_1_max': '30000.0', 'description': '<p>Landsat 8 Collection 1 Tier 1\n calibrated top-of-atmosphere (TOA) reflectance.\n Calibration coefficients are extracted from the image metadata. See<a href="http://www.sciencedirect.com/science/article/pii/S0034425709000169">\n Chander et al. (2009)</a> for details on the TOA computation.</p></p>\n<p><b>Revisit Interval</b>\n<br>\n 16 days\n</p>\n<p><b>Bands</b>\n<table class="eecat">\n<tr>\n<th scope="col">Name</th>\n<th scope="col">Resolution</th>\n<th scope="col">Wavelength</th>\n<th scope="col">Description</th>\n</tr>\n<tr>\n<td>B1</td>\n<td>\n 30 meters\n</td>\n<td>0.43 - 0.45 ยตm</td>\n<td><p>Coastal aerosol</p></td>\n</tr>\n<tr>\n<td>B2</td>\n<td>\n 30 meters\n</td>\n<td>0.45 - 0.51 ยตm</td>\n<td><p>Blue</p></td>\n</tr>\n<tr>\n<td>B3</td>\n<td>\n 30 meters\n</td>\n<td>0.53 - 0.59 ยตm</td>\n<td><p>Green</p></td>\n</tr>\n<tr>\n<td>B4</td>\n<td>\n 30 meters\n</td>\n<td>0.64 - 0.67 ยตm</td>\n<td><p>Red</p></td>\n</tr>\n<tr>\n<td>B5</td>\n<td>\n 30 meters\n</td>\n<td>0.85 - 0.88 ยตm</td>\n<td><p>Near infrared</p></td>\n</tr>\n<tr>\n<td>B6</td>\n<td>\n 30 meters\n</td>\n<td>1.57 - 1.65 ยตm</td>\n<td><p>Shortwave infrared 1</p></td>\n</tr>\n<tr>\n<td>B7</td>\n<td>\n 30 meters\n</td>\n<td>2.11 - 2.29 ยตm</td>\n<td><p>Shortwave infrared 2</p></td>\n</tr>\n<tr>\n<td>B8</td>\n<td>\n 15 meters\n</td>\n<td>0.52 - 0.90 ยตm</td>\n<td><p>Band 8 Panchromatic</p></td>\n</tr>\n<tr>\n<td>B9</td>\n<td>\n 15 meters\n</td>\n<td>1.36 - 1.38 ยตm</td>\n<td><p>Cirrus</p></td>\n</tr>\n<tr>\n<td>B10</td>\n<td>\n 30 meters\n</td>\n<td>10.60 - 11.19 ยตm</td>\n<td><p>Thermal infrared 1, resampled from 100m to 30m</p></td>\n</tr>\n<tr>\n<td>B11</td>\n<td>\n 30 meters\n</td>\n<td>11.50 - 12.51 ยตm</td>\n<td><p>Thermal infrared 2, resampled from 100m to 30m</p></td>\n</tr>\n<tr>\n<td>BQA</td>\n<td>\n</td>\n<td></td>\n<td><p>Landsat Collection 1 QA Bitmask (<a href="https://www.usgs.gov/land-resources/nli/landsat/landsat-collection-1-level-1-quality-assessment-band">See Landsat QA page</a>)</p></td>\n</tr>\n<tr>\n<td colspan=100>\n Bitmask for BQA\n<ul>\n<li>\n Bit 0: Designated Fill\n<ul>\n<li>0: No</li>\n<li>1: Yes</li>\n</ul>\n</li>\n<li>\n Bit 1: Terrain Occlusion\n<ul>\n<li>0: No</li>\n<li>1: Yes</li>\n</ul>\n</li>\n<li>\n Bits 2-3: Radiometric Saturation\n<ul>\n<li>0: No bands contain 
saturation</li>\n<li>1: 1-2 bands contain saturation</li>\n<li>2: 3-4 bands contain saturation</li>\n<li>3: 5 or more bands contain saturation</li>\n</ul>\n</li>\n<li>\n Bit 4: Cloud\n<ul>\n<li>0: No</li>\n<li>1: Yes</li>\n</ul>\n</li>\n<li>\n Bits 5-6: Cloud Confidence\n<ul>\n<li>0: Not Determined / Condition does not exist.</li>\n<li>1: Low, (0-33 percent confidence)</li>\n<li>2: Medium, (34-66 percent confidence)</li>\n<li>3: High, (67-100 percent confidence)</li>\n</ul>\n</li>\n<li>\n Bits 7-8: Cloud Shadow Confidence\n<ul>\n<li>0: Not Determined / Condition does not exist.</li>\n<li>1: Low, (0-33 percent confidence)</li>\n<li>2: Medium, (34-66 percent confidence)</li>\n<li>3: High, (67-100 percent confidence)</li>\n</ul>\n</li>\n<li>\n Bits 9-10: Snow / Ice Confidence\n<ul>\n<li>0: Not Determined / Condition does not exist.</li>\n<li>1: Low, (0-33 percent confidence)</li>\n<li>2: Medium, (34-66 percent confidence)</li>\n<li>3: High, (67-100 percent confidence)</li>\n</ul>\n</li>\n<li>\n Bits 11-12: Cirrus Confidence\n<ul>\n<li>0: Not Determined / Condition does not exist.</li>\n<li>1: Low, (0-33 percent confidence)</li>\n<li>2: Medium, (34-66 percent confidence)</li>\n<li>3: High, (67-100 percent confidence)</li>\n</ul>\n</li>\n</ul>\n</td>\n</tr>\n</table>\n<p><b>Image Properties</b>\n<table class="eecat">\n<tr>\n<th scope="col">Name</th>\n<th scope="col">Type</th>\n<th scope="col">Description</th>\n</tr>\n<tr>\n<td>BPF_NAME_OLI</td>\n<td>STRING</td>\n<td><p>The file name for the Bias Parameter File (BPF) used to generate the product, if applicable. This only applies to products that contain OLI bands.</p></td>\n</tr>\n<tr>\n<td>BPF_NAME_TIRS</td>\n<td>STRING</td>\n<td><p>The file name for the Bias Parameter File (BPF) used to generate the product, if applicable. This only applies to products that contain TIRS bands.</p></td>\n</tr>\n<tr>\n<td>CLOUD_COVER</td>\n<td>DOUBLE</td>\n<td><p>Percentage cloud cover, -1 = not calculated.</p></td>\n</tr>\n<tr>\n<td>CLOUD_COVER_LAND</td>\n<td>DOUBLE</td>\n<td><p>Percentage cloud cover over land, -1 = not calculated.</p></td>\n</tr>\n<tr>\n<td>COLLECTION_CATEGORY</td>\n<td>STRING</td>\n<td><p>Tier of scene. (T1 or T2)</p></td>\n</tr>\n<tr>\n<td>COLLECTION_NUMBER</td>\n<td>DOUBLE</td>\n<td><p>Number of collection.</p></td>\n</tr>\n<tr>\n<td>CPF_NAME</td>\n<td>STRING</td>\n<td><p>Calibration parameter file name.</p></td>\n</tr>\n<tr>\n<td>DATA_TYPE</td>\n<td>STRING</td>\n<td><p>Data type identifier. (L1T or L1G)</p></td>\n</tr>\n<tr>\n<td>DATE_ACQUIRED</td>\n<td>STRING</td>\n<td><p>Image acquisition date. "YYYY-MM-DD"</p></td>\n</tr>\n<tr>\n<td>DATUM</td>\n<td>STRING</td>\n<td><p>Datum used in image creation.</p></td>\n</tr>\n<tr>\n<td>EARTH_SUN_DISTANCE</td>\n<td>DOUBLE</td>\n<td><p>Earth sun distance in astronomical units (AU).</p></td>\n</tr>\n<tr>\n<td>ELEVATION_SOURCE</td>\n<td>STRING</td>\n<td><p>Elevation model source used for standard terrain corrected (L1T) products.</p></td>\n</tr>\n<tr>\n<td>ELLIPSOID</td>\n<td>STRING</td>\n<td><p>Ellipsoid used in image creation.</p></td>\n</tr>\n<tr>\n<td>EPHEMERIS_TYPE</td>\n<td>STRING</td>\n<td><p>Ephemeris data type used to perform geometric correction. 
(Definitive or Predictive)</p></td>\n</tr>\n<tr>\n<td>FILE_DATE</td>\n<td>DOUBLE</td>\n<td><p>File date in milliseconds since epoch.</p></td>\n</tr>\n<tr>\n<td>GEOMETRIC_RMSE_MODEL</td>\n<td>DOUBLE</td>\n<td><p>Combined Root Mean Square Error (RMSE) of the geometric residuals\n(metres) in both across-track and along-track directions\nmeasured on the GCPs used in geometric precision correction.\nNot present in L1G products.</p></td>\n</tr>\n<tr>\n<td>GEOMETRIC_RMSE_MODEL_X</td>\n<td>DOUBLE</td>\n<td><p>RMSE of the X direction geometric residuals (in metres) measured\non the GCPs used in geometric precision correction. Not present in\nL1G products.</p></td>\n</tr>\n<tr>\n<td>GEOMETRIC_RMSE_MODEL_Y</td>\n<td>DOUBLE</td>\n<td><p>RMSE of the Y direction geometric residuals (in metres) measured\non the GCPs used in geometric precision correction. Not present in\nL1G products.</p></td>\n</tr>\n<tr>\n<td>GRID_CELL_SIZE_PANCHROMATIC</td>\n<td>DOUBLE</td>\n<td><p>Grid cell size used in creating the image for the panchromatic band.</p></td>\n</tr>\n<tr>\n<td>GRID_CELL_SIZE_REFLECTIVE</td>\n<td>DOUBLE</td>\n<td><p>Grid cell size used in creating the image for the reflective band.</p></td>\n</tr>\n<tr>\n<td>GRID_CELL_SIZE_THERMAL</td>\n<td>DOUBLE</td>\n<td><p>Grid cell size used in creating the image for the thermal band.</p></td>\n</tr>\n<tr>\n<td>GROUND_CONTROL_POINTS_MODEL</td>\n<td>DOUBLE</td>\n<td><p>The number of ground control points used. Not used in L1GT products.\nValues: 0 - 999 (0 is used for L1T products that have used\nMulti-scene refinement).</p></td>\n</tr>\n<tr>\n<td>GROUND_CONTROL_POINTS_VERSION</td>\n<td>DOUBLE</td>\n<td><p>The number of ground control points used in the verification of\nthe terrain corrected product. Values: -1 to 1615 (-1 = not available)</p></td>\n</tr>\n<tr>\n<td>IMAGE_QUALITY</td>\n<td>DOUBLE</td>\n<td><p>Image quality, 0 = worst, 9 = best, -1 = quality not calculated</p></td>\n</tr>\n<tr>\n<td>IMAGE_QUALITY_OLI</td>\n<td>DOUBLE</td>\n<td><p>The composite image quality for the OLI bands. Values: 9 = Best. 1 = Worst. 0 = Image quality not calculated. This parameter is only present if OLI bands are present in the product.</p></td>\n</tr>\n<tr>\n<td>IMAGE_QUALITY_TIRS</td>\n<td>DOUBLE</td>\n<td><p>The composite image quality for the TIRS bands. Values: 9 = Best. 1 = Worst. 0 = Image quality not calculated. 
This parameter is only present if OLI bands are present in the product.</p></td>\n</tr>\n<tr>\n<td>K1_CONSTANT_BAND_10</td>\n<td>DOUBLE</td>\n<td><p>Calibration K1 constant for Band 10 radiance to temperature conversion.</p></td>\n</tr>\n<tr>\n<td>K1_CONSTANT_BAND_11</td>\n<td>DOUBLE</td>\n<td><p>Calibration K1 constant for Band 11 radiance to temperature conversion.</p></td>\n</tr>\n<tr>\n<td>K2_CONSTANT_BAND_10</td>\n<td>DOUBLE</td>\n<td><p>Calibration K2 constant for Band 10 radiance to temperature conversion.</p></td>\n</tr>\n<tr>\n<td>K2_CONSTANT_BAND_11</td>\n<td>DOUBLE</td>\n<td><p>Calibration K2 constant for Band 11 radiance to temperature conversion.</p></td>\n</tr>\n<tr>\n<td>LANDSAT_PRODUCT_ID</td>\n<td>STRING</td>\n<td><p>The naming convention of each Landsat Collection 1 Level-1 image based\non acquisition parameters and processing parameters.</p>\n<p>Format: LXSS_LLLL_PPPRRR_YYYYMMDD_yyyymmdd_CC_TX</p>\n<ul>\n<li>L = Landsat</li>\n<li>X = Sensor (O = Operational Land Imager,\nT = Thermal Infrared Sensor, C = Combined OLI/TIRS)</li>\n<li>SS = Satellite (08 = Landsat 8)</li>\n<li>LLLL = Processing Correction Level (L1TP = precision and terrain,\nL1GT = systematic terrain, L1GS = systematic)</li>\n<li>PPP = WRS Path</li>\n<li>RRR = WRS Row</li>\n<li>YYYYMMDD = Acquisition Date expressed in Year, Month, Day</li>\n<li>yyyymmdd = Processing Date expressed in Year, Month, Day</li>\n<li>CC = Collection Number (01)</li>\n<li>TX = Collection Category (RT = Real Time, T1 = Tier 1, T2 = Tier 2)</li>\n</ul></td>\n</tr>\n<tr>\n<td>LANDSAT_SCENE_ID</td>\n<td>STRING</td>\n<td><p>The Pre-Collection naming convention of each image is based on acquisition\nparameters. This was the naming convention used prior to Collection 1.</p>\n<p>Format: LXSPPPRRRYYYYDDDGSIVV</p>\n<ul>\n<li>L = Landsat</li>\n<li>X = Sensor (O = Operational Land Imager, T = Thermal Infrared Sensor, C = Combined OLI/TIRS)</li>\n<li>S = Satellite (08 = Landsat 8)</li>\n<li>PPP = WRS Path</li>\n<li>RRR = WRS Row</li>\n<li>YYYY = Year of Acquisition</li>\n<li>DDD = Julian Day of Acquisition</li>\n<li>GSI = Ground Station Identifier</li>\n<li>VV = Version</li>\n</ul></td>\n</tr>\n<tr>\n<td>MAP_PROJECTION</td>\n<td>STRING</td>\n<td><p>Projection used to represent the 3-dimensional surface of the earth for the Level-1 product.</p></td>\n</tr>\n<tr>\n<td>NADIR_OFFNADIR</td>\n<td>STRING</td>\n<td><p>Nadir or Off-Nadir condition of the scene.</p></td>\n</tr>\n<tr>\n<td>ORIENTATION</td>\n<td>STRING</td>\n<td><p>Orientation used in creating the image. 
Values: NOMINAL = Nominal Path, NORTH_UP = North Up, TRUE_NORTH = True North, USER = User</p></td>\n</tr>\n<tr>\n<td>PANCHROMATIC_LINES</td>\n<td>DOUBLE</td>\n<td><p>Number of product lines for the panchromatic band.</p></td>\n</tr>\n<tr>\n<td>PANCHROMATIC_SAMPLES</td>\n<td>DOUBLE</td>\n<td><p>Number of product samples for the panchromatic bands.</p></td>\n</tr>\n<tr>\n<td>PROCESSING_SOFTWARE_VERSION</td>\n<td>STRING</td>\n<td><p>Name and version of the processing software used to generate the L1 product.</p></td>\n</tr>\n<tr>\n<td>RADIANCE_ADD_BAND_1</td>\n<td>DOUBLE</td>\n<td><p>Additive rescaling factor used to convert calibrated DN to radiance for Band 1.</p></td>\n</tr>\n<tr>\n<td>RADIANCE_ADD_BAND_10</td>\n<td>DOUBLE</td>\n<td><p>Additive rescaling factor used to convert calibrated DN to radiance for Band 10.</p></td>\n</tr>\n<tr>\n<td>RADIANCE_ADD_BAND_11</td>\n<td>DOUBLE</td>\n<td><p>Additive rescaling factor used to convert calibrated DN to radiance for Band 11.</p></td>\n</tr>\n<tr>\n<td>RADIANCE_ADD_BAND_2</td>\n<td>DOUBLE</td>\n<td><p>Additive rescaling factor used to convert calibrated DN to radiance for Band 2.</p></td>\n</tr>\n<tr>\n<td>RADIANCE_ADD_BAND_3</td>\n<td>DOUBLE</td>\n<td><p>Additive rescaling factor used to convert calibrated DN to radiance for Band 3.</p></td>\n</tr>\n<tr>\n<td>RADIANCE_ADD_BAND_4</td>\n<td>DOUBLE</td>\n<td><p>Additive rescaling factor used to convert calibrated DN to radiance for Band 4.</p></td>\n</tr>\n<tr>\n<td>RADIANCE_ADD_BAND_5</td>\n<td>DOUBLE</td>\n<td><p>Additive rescaling factor used to convert calibrated DN to radiance for Band 5.</p></td>\n</tr>\n<tr>\n<td>RADIANCE_ADD_BAND_6</td>\n<td>DOUBLE</td>\n<td><p>Additive rescaling factor used to convert calibrated DN to radiance for Band 6.</p></td>\n</tr>\n<tr>\n<td>RADIANCE_ADD_BAND_7</td>\n<td>DOUBLE</td>\n<td><p>Additive rescaling factor used to convert calibrated DN to radiance for Band 7.</p></td>\n</tr>\n<tr>\n<td>RADIANCE_ADD_BAND_8</td>\n<td>DOUBLE</td>\n<td><p>Additive rescaling factor used to convert calibrated DN to radiance for Band 8.</p></td>\n</tr>\n<tr>\n<td>RADIANCE_ADD_BAND_9</td>\n<td>DOUBLE</td>\n<td><p>Additive rescaling factor used to convert calibrated DN to radiance for Band 9.</p></td>\n</tr>\n<tr>\n<td>RADIANCE_MULT_BAND_1</td>\n<td>DOUBLE</td>\n<td><p>Multiplicative rescaling factor used to convert calibrated Band 1 DN to radiance.</p></td>\n</tr>\n<tr>\n<td>RADIANCE_MULT_BAND_10</td>\n<td>DOUBLE</td>\n<td><p>Multiplicative rescaling factor used to convert calibrated Band 10 DN to radiance.</p></td>\n</tr>\n<tr>\n<td>RADIANCE_MULT_BAND_11</td>\n<td>DOUBLE</td>\n<td><p>Multiplicative rescaling factor used to convert calibrated Band 11 DN to radiance.</p></td>\n</tr>\n<tr>\n<td>RADIANCE_MULT_BAND_2</td>\n<td>DOUBLE</td>\n<td><p>Multiplicative rescaling factor used to convert calibrated Band 2 DN to radiance.</p></td>\n</tr>\n<tr>\n<td>RADIANCE_MULT_BAND_3</td>\n<td>DOUBLE</td>\n<td><p>Multiplicative rescaling factor used to convert calibrated Band 3 DN to radiance.</p></td>\n</tr>\n<tr>\n<td>RADIANCE_MULT_BAND_4</td>\n<td>DOUBLE</td>\n<td><p>Multiplicative rescaling factor used to convert calibrated Band 4 DN to radiance.</p></td>\n</tr>\n<tr>\n<td>RADIANCE_MULT_BAND_5</td>\n<td>DOUBLE</td>\n<td><p>Multiplicative rescaling factor used to convert calibrated Band 5 DN to radiance.</p></td>\n</tr>\n<tr>\n<td>RADIANCE_MULT_BAND_6</td>\n<td>DOUBLE</td>\n<td><p>Multiplicative rescaling factor used to convert calibrated Band 6 DN to 
[Output truncated for readability. The cell printed the full `getInfo()` dump of the
'LANDSAT/LC08/C01/T1_TOA' collection ("USGS Landsat 8 Collection 1 Tier 1 TOA Reflectance",
provider USGS/Google). The dump contains:

- the HTML catalog description, ending in a table of per-scene metadata fields:
  RADIANCE_MULT/ADD_BAND_n and REFLECTANCE_MULT/ADD_BAND_n (the multiplicative and additive
  factors used to rescale calibrated Band n DN to radiance and reflectance),
  SATURATION_BAND_1..11 flags ('Y'/'N'), acquisition identifiers (REQUEST_ID,
  SCENE_CENTER_TIME, SENSOR_ID, SPACECRAFT_ID, STATION_ID), solar geometry (SUN_AZIMUTH,
  SUN_ELEVATION), grid and geometry fields (UTM_ZONE, WRS_PATH 001-251, WRS_ROW 001-248,
  TARGET_WRS_PATH/ROW, REFLECTIVE/THERMAL_LINES and _SAMPLES, RESAMPLING_OPTION, ROLL_ANGLE,
  RLUT_FILE_NAME), and the TIRS SSM status fields (TIRS_SSM_MODEL: Actual/Preliminary/Final,
  TIRS_SSM_POSITION_STATUS, TIRS_STRAY_LIGHT_CORRECTION_SOURCE);
- collection-level properties, including the default visualizations True Color (B4,B3,B2),
  Near Infrared (B5,B4,B3) and Shortwave Infrared (B7,B5,B3), each with min 0 / max 30000;
- a 'features' list with one Image record per scene of WRS path 44, row 34: bands B1-B11 as
  30 m floats (the panchromatic B8 at 15 m) plus the uint16 BQA mask, all in EPSG:32610, and
  per-scene values of the metadata fields above (rescaling coefficients, CLOUD_COVER,
  GEOMETRIC_RMSE_MODEL, sun angles, timestamps). The scenes shown run from
  LC08_044034_20140318 through LC08_044034_20140708. The rescaling formulas and a
  metadata-based filter are sketched below.]
'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 465585, 0, -30, 4264515]}, {'id': 'B2', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 465585, 0, -30, 4264515]}, {'id': 'B3', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 465585, 0, -30, 4264515]}, {'id': 'B4', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 465585, 0, -30, 4264515]}, {'id': 'B5', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 465585, 0, -30, 4264515]}, {'id': 'B6', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 465585, 0, -30, 4264515]}, {'id': 'B7', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 465585, 0, -30, 4264515]}, {'id': 'B8', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [15341, 15601], 'crs': 'EPSG:32610', 'crs_transform': [15, 0, 465592.5, 0, -15, 4264507.5]}, {'id': 'B9', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 465585, 0, -30, 4264515]}, {'id': 'B10', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 465585, 0, -30, 4264515]}, {'id': 'B11', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 465585, 0, -30, 4264515]}, {'id': 'BQA', 'data_type': {'type': 'PixelType', 'precision': 'int', 'min': 0, 'max': 65535}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 465585, 0, -30, 4264515]}], 'version': 1580563126058134, 'id': 'LANDSAT/LC08/C01/T1_TOA/LC08_044034_20140724', 'properties': {'RADIANCE_MULT_BAND_5': 0.005925099831074476, 'RADIANCE_MULT_BAND_6': 0.0014735000440850854, 'RADIANCE_MULT_BAND_3': 0.011482000350952148, 'RADIANCE_MULT_BAND_4': 0.00968219991773367, 'RADIANCE_MULT_BAND_1': 0.01216800045222044, 'RADIANCE_MULT_BAND_2': 0.01245999988168478, 'K2_CONSTANT_BAND_11': 1201.1441650390625, 'K2_CONSTANT_BAND_10': 1321.078857421875, 'system:footprint': {'type': 'LinearRing', 'coordinates': [[-120.76979738859893, 38.12703313441971], [-120.77158274982695, 38.12754115630432], [-121.42134145512932, 38.25446242506864], [-122.22152328015193, 38.40525302827857], [-122.89018639673915, 38.5266157865438], [-122.89242341654155, 38.526617857573015], [-122.89466073063308, 38.519621963160894], [-123.38187286142927, 36.80872337128997], [-123.38186259045791, 36.806647917217006], [-123.35901116184863, 36.80244946066433], [-122.88546161915531, 36.71490670011608], [-121.3092309147788, 36.40846418437395], [-121.30782254819886, 36.40844420946354], [-121.08696039686296, 37.12150541970936], [-121.06667332030511, 37.186300761679455], [-120.9265815780102, 37.63183285571133], [-120.88231915679422, 37.77176559071555], [-120.83617669320071, 37.917319414649754], [-120.8201155519523, 37.96798246241547], [-120.7756360373179, 38.10840000147115], [-120.76979738859893, 38.12703313441971]]}, 'REFLECTIVE_SAMPLES': 7671, 'SUN_AZIMUTH': 126.32495880126953, 
'CPF_NAME': 'LC08CPF_20140701_20140930_01.01', 'DATE_ACQUIRED': '2014-07-24', 'ELLIPSOID': 'WGS84', 'google:registration_offset_x': 407.9683837890625, 'google:registration_offset_y': -124.7548828125, 'STATION_ID': 'LGN', 'RESAMPLING_OPTION': 'CUBIC_CONVOLUTION', 'ORIENTATION': 'NORTH_UP', 'WRS_ROW': 34, 'RADIANCE_MULT_BAND_9': 0.00231559993699193, 'TARGET_WRS_ROW': 34, 'RADIANCE_MULT_BAND_7': 0.0004966499982401729, 'RADIANCE_MULT_BAND_8': 0.010958000086247921, 'IMAGE_QUALITY_TIRS': 9, 'TRUNCATION_OLI': 'UPPER', 'CLOUD_COVER': 0.3199999928474426, 'GEOMETRIC_RMSE_VERIFY': 2.700000047683716, 'COLLECTION_CATEGORY': 'T1', 'GRID_CELL_SIZE_REFLECTIVE': 30, 'CLOUD_COVER_LAND': 0.23000000417232513, 'GEOMETRIC_RMSE_MODEL': 5.454999923706055, 'COLLECTION_NUMBER': 1, 'IMAGE_QUALITY_OLI': 9, 'LANDSAT_SCENE_ID': 'LC80440342014205LGN01', 'WRS_PATH': 44, 'google:registration_count': 10, 'PANCHROMATIC_SAMPLES': 15341, 'PANCHROMATIC_LINES': 15601, 'GEOMETRIC_RMSE_MODEL_Y': 3.703000068664551, 'REFLECTIVE_LINES': 7801, 'TIRS_STRAY_LIGHT_CORRECTION_SOURCE': 'TIRS', 'GEOMETRIC_RMSE_MODEL_X': 4.00600004196167, 'system:asset_size': 1201420225, 'system:index': 'LC08_044034_20140724', 'REFLECTANCE_ADD_BAND_1': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_2': -0.10000000149011612, 'DATUM': 'WGS84', 'REFLECTANCE_ADD_BAND_3': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_4': -0.10000000149011612, 'RLUT_FILE_NAME': 'LC08RLUT_20130211_20150302_01_11.h5', 'REFLECTANCE_ADD_BAND_5': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_6': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_7': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_8': -0.10000000149011612, 'BPF_NAME_TIRS': 'LT8BPF20140724181847_20140724190430.01', 'GROUND_CONTROL_POINTS_VERSION': 4, 'DATA_TYPE': 'L1TP', 'UTM_ZONE': 10, 'LANDSAT_PRODUCT_ID': 'LC08_L1TP_044034_20140724_20170304_01_T1', 'REFLECTANCE_ADD_BAND_9': -0.10000000149011612, 'google:registration_ratio': 0.3333333432674408, 'GRID_CELL_SIZE_PANCHROMATIC': 15, 'RADIANCE_ADD_BAND_4': -48.41123962402344, 'REFLECTANCE_MULT_BAND_7': 1.9999999494757503e-05, 'system:time_start': 1406227557220, 'RADIANCE_ADD_BAND_5': -29.625259399414062, 'REFLECTANCE_MULT_BAND_6': 1.9999999494757503e-05, 'RADIANCE_ADD_BAND_6': -7.36752986907959, 'REFLECTANCE_MULT_BAND_9': 1.9999999494757503e-05, 'PROCESSING_SOFTWARE_VERSION': 'LPGS_2.7.0', 'RADIANCE_ADD_BAND_7': -2.4832499027252197, 'REFLECTANCE_MULT_BAND_8': 1.9999999494757503e-05, 'RADIANCE_ADD_BAND_1': -60.84012985229492, 'RADIANCE_ADD_BAND_2': -62.301029205322266, 'RADIANCE_ADD_BAND_3': -57.40987014770508, 'REFLECTANCE_MULT_BAND_1': 1.9999999494757503e-05, 'RADIANCE_ADD_BAND_8': -54.78820037841797, 'REFLECTANCE_MULT_BAND_3': 1.9999999494757503e-05, 'RADIANCE_ADD_BAND_9': -11.57822036743164, 'REFLECTANCE_MULT_BAND_2': 1.9999999494757503e-05, 'REFLECTANCE_MULT_BAND_5': 1.9999999494757503e-05, 'REFLECTANCE_MULT_BAND_4': 1.9999999494757503e-05, 'THERMAL_LINES': 7801, 'TIRS_SSM_POSITION_STATUS': 'NOMINAL', 'GRID_CELL_SIZE_THERMAL': 30, 'NADIR_OFFNADIR': 'NADIR', 'RADIANCE_ADD_BAND_11': 0.10000000149011612, 'REQUEST_ID': '0501703043005_00057', 'EARTH_SUN_DISTANCE': 1.0158073902130127, 'TIRS_SSM_MODEL': 'ACTUAL', 'FILE_DATE': 1488621091000, 'SCENE_CENTER_TIME': '18:45:57.2197370Z', 'SUN_ELEVATION': 63.77280807495117, 'BPF_NAME_OLI': 'LO8BPF20140724182241_20140724190337.01', 'RADIANCE_ADD_BAND_10': 0.10000000149011612, 'ROLL_ANGLE': 0, 'K1_CONSTANT_BAND_10': 774.8853149414062, 'SATURATION_BAND_1': 'Y', 'SATURATION_BAND_2': 'Y', 'SATURATION_BAND_3': 'Y', 'SATURATION_BAND_4': 'Y', 
'SATURATION_BAND_5': 'Y', 'MAP_PROJECTION': 'UTM', 'SATURATION_BAND_6': 'Y', 'SENSOR_ID': 'OLI_TIRS', 'SATURATION_BAND_7': 'Y', 'K1_CONSTANT_BAND_11': 480.8883056640625, 'SATURATION_BAND_8': 'N', 'SATURATION_BAND_9': 'N', 'TARGET_WRS_PATH': 44, 'RADIANCE_MULT_BAND_11': 0.00033420001273043454, 'RADIANCE_MULT_BAND_10': 0.00033420001273043454, 'GROUND_CONTROL_POINTS_MODEL': 568, 'SPACECRAFT_ID': 'LANDSAT_8', 'ELEVATION_SOURCE': 'GLS2000', 'THERMAL_SAMPLES': 7671, 'GROUND_CONTROL_POINTS_VERIFY': 213}}]}
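###Markdown
The code cells behind the outputs in this part of the notebook are not shown, so the cells below are a plausible minimal reconstruction using the Earth Engine Python API. The variable names (`point`, `collection`), the query location, and the filter window are assumptions chosen to match the nine scenes listed above (WRS-2 path 44, row 34, acquired 2014-03-18 through 2014-07-24); the listing itself is the kind of dump produced by `print(collection.getInfo())`.
###Code
import ee
ee.Initialize()
# Assumed area of interest: any point inside WRS-2 path 44, row 34
# (the San Francisco Bay area) selects the scenes listed above.
point = ee.Geometry.Point(-122.26, 37.87)
collection = (ee.ImageCollection('LANDSAT/LC08/C01/T1_TOA')
              .filterBounds(point)
              .filterDate('2014-03-01', '2014-08-01'))
# Number of images in the filtered collection.
print('Count:', collection.size().getInfo())
###Output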
Count: 9
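###Markdown
The acquisition-time span can be computed server-side by reducing the `system:time_start` column with a min/max reducer; the shape of the printed values (`{'type': 'Date', 'value': ...}`) matches `ee.Date(...).getInfo()`. A sketch, continuing from the cell above:
###Code
# Min and max of system:time_start (epoch milliseconds) across the collection.
date_range = collection.reduceColumns(ee.Reducer.minMax(), ['system:time_start'])
print('Date range:', ee.Date(date_range.get('min')).getInfo(),
      ee.Date(date_range.get('max')).getInfo())
###Output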
Date range: 2014-03-18 18:46:32 UTC to 2014-07-24 18:45:57 UTC (raw values: {'type': 'Date', 'value': 1395168392050} and {'type': 'Date', 'value': 1406227557220})
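###Markdown
`aggregate_stats` returns a DataDictionary of summary statistics for a property across the collection; the keys printed below (max, mean, min, sample_sd, and so on) are its standard output. Continuing from the cells above:
###Code
# Summary statistics of the solar elevation angle over all nine scenes.
print('Sun elevation statistics:', collection.aggregate_stats('SUN_ELEVATION').getInfo())
###Output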
Sun elevation statistics (SUN_ELEVATION, degrees, 9 valid scenes): min 46.47, max 67.10, mean 61.02, sample SD 7.25 (sample variance 52.59).
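###Markdown
Sorting on the CLOUD_COVER property (ascending by default) and taking the first element gives the least cloudy scene. A sketch, continuing from the cells above:
###Code
# Sort ascending by cloud cover; first() returns the clearest ee.Image.
least_cloudy = collection.sort('CLOUD_COVER').first()
print('Least cloudy image:', least_cloudy.getInfo())
###Output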
Least cloudy image: LANDSAT/LC08/C01/T1_TOA/LC08_044034_20140318, acquired 2014-03-18 (CLOUD_COVER 0.06%, CLOUD_COVER_LAND 0.10%, SUN_ELEVATION 46.47°, SUN_AZIMUTH 146.24°). [Full getInfo() metadata omitted: the same B1-B11 + BQA band definitions and per-scene calibration properties as in the listing above.]
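###Markdown
The most recent acquisitions come from sorting `system:time_start` in descending order and limiting the collection; the limit of 3 below is an assumption based on the scenes visible in the output. A sketch, continuing from the cells above:
###Code
# Newest first: sort descending on acquisition time, keep the top three.
recent = collection.sort('system:time_start', False).limit(3)
print('Recent images:', recent.getInfo())
###Output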
Recent images: an ee.ImageCollection (LANDSAT/LC08/C01/T1_TOA, 'USGS Landsat 8 Collection 1 Tier 1 TOA Reflectance', 16-day revisit) holding the three most recent scenes: LC08_044034_20140724, LC08_044034_20140708, and LC08_044034_20140622. [Full getInfo() output omitted: it repeats the per-scene metadata shown earlier and embeds the dataset's HTML description, whose band table reads B1 coastal aerosol 0.43-0.45 µm, B2 blue 0.45-0.51 µm, B3 green 0.53-0.59 µm, B4 red 0.64-0.67 µm, B5 near infrared 0.85-0.88 µm, B6 shortwave infrared 1 1.57-1.65 µm, B7 shortwave infrared 2 2.11-2.29 µm, B8 panchromatic 0.52-0.90 µm at 15 m, B9 cirrus 1.36-1.38 µm, B10 thermal infrared 1 10.60-11.19 µm, B11 thermal infrared 2 11.50-12.51 µm (thermal bands resampled from 100 m to 30 m), plus the BQA quality bitmask.]
37.76384529883042], [-120.83512328469709, 37.946118996073274], [-120.81773649437956, 38.00098066904156], [-120.7804031777974, 38.11877040222991], [-120.77836404766627, 38.12549776014683], [-120.77830846404605, 38.127328891154846], [-120.8524461141277, 38.14202547398031], [-122.7997909930455, 38.50911447061385], [-122.89773302105861, 38.526622656657345], [-122.90027762321128, 38.526624804291615], [-122.90672528283095, 38.50462571143406], [-123.39027158134067, 36.80670618253543], [-121.39230131401504, 36.42355089690084], [-121.31788298539182, 36.408586408656575]]}, 'REFLECTIVE_SAMPLES': 7671, 'SUN_AZIMUTH': 121.76666259765625, 'CPF_NAME': 'LC08CPF_20140401_20140630_01.01', 'DATE_ACQUIRED': '2014-06-22', 'ELLIPSOID': 'WGS84', 'google:registration_offset_x': -153.57119750976562, 'google:registration_offset_y': -44.11845779418945, 'STATION_ID': 'LGN', 'RESAMPLING_OPTION': 'CUBIC_CONVOLUTION', 'ORIENTATION': 'NORTH_UP', 'WRS_ROW': 34, 'RADIANCE_MULT_BAND_9': 0.002313300035893917, 'TARGET_WRS_ROW': 34, 'RADIANCE_MULT_BAND_7': 0.0004961499944329262, 'RADIANCE_MULT_BAND_8': 0.010947000235319138, 'IMAGE_QUALITY_TIRS': 9, 'TRUNCATION_OLI': 'UPPER', 'CLOUD_COVER': 33.029998779296875, 'GEOMETRIC_RMSE_VERIFY': 2.681999921798706, 'COLLECTION_CATEGORY': 'T1', 'GRID_CELL_SIZE_REFLECTIVE': 30, 'CLOUD_COVER_LAND': 3.390000104904175, 'GEOMETRIC_RMSE_MODEL': 5.414999961853027, 'COLLECTION_NUMBER': 1, 'IMAGE_QUALITY_OLI': 9, 'LANDSAT_SCENE_ID': 'LC80440342014173LGN01', 'WRS_PATH': 44, 'google:registration_count': 57, 'PANCHROMATIC_SAMPLES': 15341, 'PANCHROMATIC_LINES': 15601, 'GEOMETRIC_RMSE_MODEL_Y': 3.4539999961853027, 'REFLECTIVE_LINES': 7801, 'TIRS_STRAY_LIGHT_CORRECTION_SOURCE': 'TIRS', 'GEOMETRIC_RMSE_MODEL_X': 4.170000076293945, 'system:asset_size': 1269718296, 'system:index': 'LC08_044034_20140622', 'REFLECTANCE_ADD_BAND_1': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_2': -0.10000000149011612, 'DATUM': 'WGS84', 'REFLECTANCE_ADD_BAND_3': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_4': -0.10000000149011612, 'RLUT_FILE_NAME': 'LC08RLUT_20130211_20150302_01_11.h5', 'REFLECTANCE_ADD_BAND_5': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_6': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_7': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_8': -0.10000000149011612, 'BPF_NAME_TIRS': 'LT8BPF20140622181215_20140622190420.01', 'GROUND_CONTROL_POINTS_VERSION': 4, 'DATA_TYPE': 'L1TP', 'UTM_ZONE': 10, 'LANDSAT_PRODUCT_ID': 'LC08_L1TP_044034_20140622_20170304_01_T1', 'REFLECTANCE_ADD_BAND_9': -0.10000000149011612, 'google:registration_ratio': 0.36538460850715637, 'GRID_CELL_SIZE_PANCHROMATIC': 15, 'RADIANCE_ADD_BAND_4': -48.362091064453125, 'REFLECTANCE_MULT_BAND_7': 1.9999999494757503e-05, 'system:time_start': 1403462747540, 'RADIANCE_ADD_BAND_5': -29.595190048217773, 'REFLECTANCE_MULT_BAND_6': 1.9999999494757503e-05, 'RADIANCE_ADD_BAND_6': -7.360050201416016, 'REFLECTANCE_MULT_BAND_9': 1.9999999494757503e-05, 'PROCESSING_SOFTWARE_VERSION': 'LPGS_2.7.0', 'RADIANCE_ADD_BAND_7': -2.4807300567626953, 'REFLECTANCE_MULT_BAND_8': 1.9999999494757503e-05, 'RADIANCE_ADD_BAND_1': -60.778358459472656, 'RADIANCE_ADD_BAND_2': -62.2377815246582, 'RADIANCE_ADD_BAND_3': -57.35158157348633, 'REFLECTANCE_MULT_BAND_1': 1.9999999494757503e-05, 'RADIANCE_ADD_BAND_8': -54.73257064819336, 'REFLECTANCE_MULT_BAND_3': 1.9999999494757503e-05, 'RADIANCE_ADD_BAND_9': -11.5664701461792, 'REFLECTANCE_MULT_BAND_2': 1.9999999494757503e-05, 'REFLECTANCE_MULT_BAND_5': 1.9999999494757503e-05, 'REFLECTANCE_MULT_BAND_4': 1.9999999494757503e-05, 'THERMAL_LINES': 
7801, 'TIRS_SSM_POSITION_STATUS': 'NOMINAL', 'GRID_CELL_SIZE_THERMAL': 30, 'NADIR_OFFNADIR': 'NADIR', 'RADIANCE_ADD_BAND_11': 0.10000000149011612, 'REQUEST_ID': '0501703043315_00059', 'EARTH_SUN_DISTANCE': 1.016323447227478, 'TIRS_SSM_MODEL': 'ACTUAL', 'FILE_DATE': 1488670339000, 'SCENE_CENTER_TIME': '18:45:47.5389440Z', 'SUN_ELEVATION': 67.07411193847656, 'BPF_NAME_OLI': 'LO8BPF20140622182144_20140622190327.01', 'RADIANCE_ADD_BAND_10': 0.10000000149011612, 'ROLL_ANGLE': 0, 'K1_CONSTANT_BAND_10': 774.8853149414062, 'SATURATION_BAND_1': 'N', 'SATURATION_BAND_2': 'Y', 'SATURATION_BAND_3': 'Y', 'SATURATION_BAND_4': 'Y', 'SATURATION_BAND_5': 'Y', 'MAP_PROJECTION': 'UTM', 'SATURATION_BAND_6': 'Y', 'SENSOR_ID': 'OLI_TIRS', 'SATURATION_BAND_7': 'Y', 'K1_CONSTANT_BAND_11': 480.8883056640625, 'SATURATION_BAND_8': 'N', 'SATURATION_BAND_9': 'N', 'TARGET_WRS_PATH': 44, 'RADIANCE_MULT_BAND_11': 0.00033420001273043454, 'RADIANCE_MULT_BAND_10': 0.00033420001273043454, 'GROUND_CONTROL_POINTS_MODEL': 558, 'SPACECRAFT_ID': 'LANDSAT_8', 'ELEVATION_SOURCE': 'GLS2000', 'THERMAL_SAMPLES': 7671, 'GROUND_CONTROL_POINTS_VERIFY': 215}}, {'type': 'Image', 'bands': [{'id': 'B1', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 463785, 0, -30, 4264515]}, {'id': 'B2', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 463785, 0, -30, 4264515]}, {'id': 'B3', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 463785, 0, -30, 4264515]}, {'id': 'B4', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 463785, 0, -30, 4264515]}, {'id': 'B5', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 463785, 0, -30, 4264515]}, {'id': 'B6', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 463785, 0, -30, 4264515]}, {'id': 'B7', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 463785, 0, -30, 4264515]}, {'id': 'B8', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [15341, 15601], 'crs': 'EPSG:32610', 'crs_transform': [15, 0, 463792.5, 0, -15, 4264507.5]}, {'id': 'B9', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 463785, 0, -30, 4264515]}, {'id': 'B10', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 463785, 0, -30, 4264515]}, {'id': 'B11', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 463785, 0, -30, 4264515]}, {'id': 'BQA', 'data_type': {'type': 'PixelType', 'precision': 'int', 'min': 0, 'max': 65535}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 463785, 0, -30, 4264515]}], 'version': 1580563126058134, 'id': 'LANDSAT/LC08/C01/T1_TOA/LC08_044034_20140606', 'properties': {'RADIANCE_MULT_BAND_5': 0.005937200039625168, 'RADIANCE_MULT_BAND_6': 0.0014764999505132437, 'RADIANCE_MULT_BAND_3': 0.011505999602377415, 'RADIANCE_MULT_BAND_4': 0.009702100418508053, 
'RADIANCE_MULT_BAND_1': 0.012192999944090843, 'RADIANCE_MULT_BAND_2': 0.01248599961400032, 'K2_CONSTANT_BAND_11': 1201.1441650390625, 'K2_CONSTANT_BAND_10': 1321.078857421875, 'system:footprint': {'type': 'LinearRing', 'coordinates': [[-120.79200539048736, 38.12706906512293], [-120.79323597868374, 38.12758439698958], [-120.82683301978153, 38.13425518072935], [-122.57369124774934, 38.465867462644404], [-122.91132538951987, 38.52663370240754], [-122.91414613702007, 38.526635850439405], [-122.9189327723941, 38.510718361283075], [-123.40419439796977, 36.80678576741027], [-121.36227701906473, 36.41476296352091], [-121.32989516455781, 36.40824848906167], [-121.20432618246714, 36.815494543804164], [-121.07428782575109, 37.232255532839595], [-120.95966651326353, 37.59672218968956], [-120.90596782826022, 37.76651090203559], [-120.86494805861443, 37.895947164272634], [-120.83393920808882, 37.993514542680224], [-120.82433446488996, 38.02375043851124], [-120.79204501354904, 38.125755061557996], [-120.79200539048736, 38.12706906512293]]}, 'REFLECTIVE_SAMPLES': 7671, 'SUN_AZIMUTH': 124.43635559082031, 'CPF_NAME': 'LC08CPF_20140401_20140630_01.01', 'DATE_ACQUIRED': '2014-06-06', 'ELLIPSOID': 'WGS84', 'google:registration_offset_x': 0, 'google:registration_offset_y': 0, 'STATION_ID': 'LGN', 'RESAMPLING_OPTION': 'CUBIC_CONVOLUTION', 'ORIENTATION': 'NORTH_UP', 'WRS_ROW': 34, 'RADIANCE_MULT_BAND_9': 0.002320399973541498, 'TARGET_WRS_ROW': 34, 'RADIANCE_MULT_BAND_7': 0.0004976699710823596, 'RADIANCE_MULT_BAND_8': 0.010979999788105488, 'IMAGE_QUALITY_TIRS': 9, 'TRUNCATION_OLI': 'UPPER', 'CLOUD_COVER': 35.709999084472656, 'GEOMETRIC_RMSE_VERIFY': 2.7200000286102295, 'COLLECTION_CATEGORY': 'T1', 'GRID_CELL_SIZE_REFLECTIVE': 30, 'CLOUD_COVER_LAND': 3.930000066757202, 'GEOMETRIC_RMSE_MODEL': 5.419000148773193, 'COLLECTION_NUMBER': 1, 'IMAGE_QUALITY_OLI': 9, 'LANDSAT_SCENE_ID': 'LC80440342014157LGN01', 'WRS_PATH': 44, 'google:registration_count': 0, 'PANCHROMATIC_SAMPLES': 15341, 'PANCHROMATIC_LINES': 15601, 'GEOMETRIC_RMSE_MODEL_Y': 3.4519999027252197, 'REFLECTIVE_LINES': 7801, 'TIRS_STRAY_LIGHT_CORRECTION_SOURCE': 'TIRS', 'GEOMETRIC_RMSE_MODEL_X': 4.177000045776367, 'system:asset_size': 1264461529, 'system:index': 'LC08_044034_20140606', 'REFLECTANCE_ADD_BAND_1': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_2': -0.10000000149011612, 'DATUM': 'WGS84', 'REFLECTANCE_ADD_BAND_3': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_4': -0.10000000149011612, 'RLUT_FILE_NAME': 'LC08RLUT_20130211_20150302_01_11.h5', 'REFLECTANCE_ADD_BAND_5': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_6': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_7': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_8': -0.10000000149011612, 'BPF_NAME_TIRS': 'LT8BPF20140606181212_20140606190417.01', 'GROUND_CONTROL_POINTS_VERSION': 4, 'DATA_TYPE': 'L1TP', 'UTM_ZONE': 10, 'LANDSAT_PRODUCT_ID': 'LC08_L1TP_044034_20140606_20170305_01_T1', 'REFLECTANCE_ADD_BAND_9': -0.10000000149011612, 'google:registration_ratio': 0, 'GRID_CELL_SIZE_PANCHROMATIC': 15, 'RADIANCE_ADD_BAND_4': -48.51054000854492, 'REFLECTANCE_MULT_BAND_7': 1.9999999494757503e-05, 'system:time_start': 1402080344240, 'RADIANCE_ADD_BAND_5': -29.6860294342041, 'REFLECTANCE_MULT_BAND_6': 1.9999999494757503e-05, 'RADIANCE_ADD_BAND_6': -7.382649898529053, 'REFLECTANCE_MULT_BAND_9': 1.9999999494757503e-05, 'PROCESSING_SOFTWARE_VERSION': 'LPGS_2.7.0', 'RADIANCE_ADD_BAND_7': -2.4883499145507812, 'REFLECTANCE_MULT_BAND_8': 1.9999999494757503e-05, 'RADIANCE_ADD_BAND_1': -60.96493148803711, 'RADIANCE_ADD_BAND_2': 
-62.428829193115234, 'RADIANCE_ADD_BAND_3': -57.52762985229492, 'REFLECTANCE_MULT_BAND_1': 1.9999999494757503e-05, 'RADIANCE_ADD_BAND_8': -54.90058135986328, 'REFLECTANCE_MULT_BAND_3': 1.9999999494757503e-05, 'RADIANCE_ADD_BAND_9': -11.601969718933105, 'REFLECTANCE_MULT_BAND_2': 1.9999999494757503e-05, 'REFLECTANCE_MULT_BAND_5': 1.9999999494757503e-05, 'REFLECTANCE_MULT_BAND_4': 1.9999999494757503e-05, 'THERMAL_LINES': 7801, 'TIRS_SSM_POSITION_STATUS': 'NOMINAL', 'GRID_CELL_SIZE_THERMAL': 30, 'NADIR_OFFNADIR': 'NADIR', 'RADIANCE_ADD_BAND_11': 0.10000000149011612, 'REQUEST_ID': '0501703043447_00036', 'EARTH_SUN_DISTANCE': 1.014767050743103, 'TIRS_SSM_MODEL': 'ACTUAL', 'FILE_DATE': 1488689158000, 'SCENE_CENTER_TIME': '18:45:44.2439160Z', 'SUN_ELEVATION': 67.10252380371094, 'BPF_NAME_OLI': 'LO8BPF20140606171321_20140606190324.01', 'RADIANCE_ADD_BAND_10': 0.10000000149011612, 'ROLL_ANGLE': -0.0010000000474974513, 'K1_CONSTANT_BAND_10': 774.8853149414062, 'SATURATION_BAND_1': 'Y', 'SATURATION_BAND_2': 'Y', 'SATURATION_BAND_3': 'Y', 'SATURATION_BAND_4': 'Y', 'SATURATION_BAND_5': 'Y', 'MAP_PROJECTION': 'UTM', 'SATURATION_BAND_6': 'Y', 'SENSOR_ID': 'OLI_TIRS', 'SATURATION_BAND_7': 'Y', 'K1_CONSTANT_BAND_11': 480.8883056640625, 'SATURATION_BAND_8': 'N', 'SATURATION_BAND_9': 'N', 'TARGET_WRS_PATH': 44, 'RADIANCE_MULT_BAND_11': 0.00033420001273043454, 'RADIANCE_MULT_BAND_10': 0.00033420001273043454, 'GROUND_CONTROL_POINTS_MODEL': 549, 'SPACECRAFT_ID': 'LANDSAT_8', 'ELEVATION_SOURCE': 'GLS2000', 'THERMAL_SAMPLES': 7671, 'GROUND_CONTROL_POINTS_VERIFY': 192}}, {'type': 'Image', 'bands': [{'id': 'B1', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 464685, 0, -30, 4264515]}, {'id': 'B2', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 464685, 0, -30, 4264515]}, {'id': 'B3', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 464685, 0, -30, 4264515]}, {'id': 'B4', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 464685, 0, -30, 4264515]}, {'id': 'B5', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 464685, 0, -30, 4264515]}, {'id': 'B6', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 464685, 0, -30, 4264515]}, {'id': 'B7', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 464685, 0, -30, 4264515]}, {'id': 'B8', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [15341, 15601], 'crs': 'EPSG:32610', 'crs_transform': [15, 0, 464692.5, 0, -15, 4264507.5]}, {'id': 'B9', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 464685, 0, -30, 4264515]}, {'id': 'B10', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 464685, 0, -30, 4264515]}, {'id': 'B11', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 464685, 0, -30, 4264515]}, {'id': 'BQA', 'data_type': {'type': 'PixelType', 
'precision': 'int', 'min': 0, 'max': 65535}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 464685, 0, -30, 4264515]}], 'version': 1580563126058134, 'id': 'LANDSAT/LC08/C01/T1_TOA/LC08_044034_20140521', 'properties': {'RADIANCE_MULT_BAND_5': 0.005967800039798021, 'RADIANCE_MULT_BAND_6': 0.0014841000083833933, 'RADIANCE_MULT_BAND_3': 0.01156499981880188, 'RADIANCE_MULT_BAND_4': 0.009752199985086918, 'RADIANCE_MULT_BAND_1': 0.012256000190973282, 'RADIANCE_MULT_BAND_2': 0.012550000101327896, 'K2_CONSTANT_BAND_11': 1201.1441650390625, 'K2_CONSTANT_BAND_10': 1321.078857421875, 'system:footprint': {'type': 'LinearRing', 'coordinates': [[-120.9221114406814, 37.68244619012667], [-120.89633560745239, 37.76390614408945], [-120.83746336237951, 37.94945600779687], [-120.82098495481172, 38.00141006480963], [-120.78179975086263, 38.125049388247994], [-120.78173908398541, 38.12705556142276], [-120.79512978776856, 38.12976361438609], [-121.73406240469221, 38.31178421248136], [-122.79279800879766, 38.50701449179694], [-122.88876971795369, 38.5241778933743], [-122.9038553878929, 38.52682543966657], [-123.3934724535376, 36.80801002145629], [-123.3934642377511, 36.80639615821769], [-123.14252377291987, 36.76031119223474], [-121.39556579260922, 36.42323515794831], [-121.3201532766815, 36.40807244280241], [-121.31926234184606, 36.40876798117092], [-121.1964526203538, 36.807060467012924], [-121.07492303846685, 37.19674766434507], [-120.94691203296651, 37.60392056819356], [-120.9221114406814, 37.68244619012667]]}, 'REFLECTIVE_SAMPLES': 7671, 'SUN_AZIMUTH': 129.40968322753906, 'CPF_NAME': 'LC08CPF_20140401_20140630_01.01', 'DATE_ACQUIRED': '2014-05-21', 'ELLIPSOID': 'WGS84', 'google:registration_offset_x': 93.1732177734375, 'google:registration_offset_y': -389.06402587890625, 'STATION_ID': 'LGN', 'RESAMPLING_OPTION': 'CUBIC_CONVOLUTION', 'ORIENTATION': 'NORTH_UP', 'WRS_ROW': 34, 'RADIANCE_MULT_BAND_9': 0.0023324000649154186, 'TARGET_WRS_ROW': 34, 'RADIANCE_MULT_BAND_7': 0.0005002400139346719, 'RADIANCE_MULT_BAND_8': 0.011037000454962254, 'IMAGE_QUALITY_TIRS': 9, 'TRUNCATION_OLI': 'UPPER', 'CLOUD_COVER': 35.439998626708984, 'GEOMETRIC_RMSE_VERIFY': 3.2890000343322754, 'COLLECTION_CATEGORY': 'T1', 'GRID_CELL_SIZE_REFLECTIVE': 30, 'CLOUD_COVER_LAND': 14.020000457763672, 'GEOMETRIC_RMSE_MODEL': 5.670000076293945, 'COLLECTION_NUMBER': 1, 'IMAGE_QUALITY_OLI': 9, 'LANDSAT_SCENE_ID': 'LC80440342014141LGN01', 'WRS_PATH': 44, 'google:registration_count': 66, 'PANCHROMATIC_SAMPLES': 15341, 'PANCHROMATIC_LINES': 15601, 'GEOMETRIC_RMSE_MODEL_Y': 3.8980000019073486, 'REFLECTIVE_LINES': 7801, 'TIRS_STRAY_LIGHT_CORRECTION_SOURCE': 'TIRS', 'GEOMETRIC_RMSE_MODEL_X': 4.117000102996826, 'system:asset_size': 1261385761, 'system:index': 'LC08_044034_20140521', 'REFLECTANCE_ADD_BAND_1': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_2': -0.10000000149011612, 'DATUM': 'WGS84', 'REFLECTANCE_ADD_BAND_3': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_4': -0.10000000149011612, 'RLUT_FILE_NAME': 'LC08RLUT_20130211_20150302_01_11.h5', 'REFLECTANCE_ADD_BAND_5': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_6': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_7': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_8': -0.10000000149011612, 'BPF_NAME_TIRS': 'LT8BPF20140521180614_20140521190408.02', 'GROUND_CONTROL_POINTS_VERSION': 4, 'DATA_TYPE': 'L1TP', 'UTM_ZONE': 10, 'LANDSAT_PRODUCT_ID': 'LC08_L1TP_044034_20140521_20170307_01_T1', 'REFLECTANCE_ADD_BAND_9': -0.10000000149011612, 'google:registration_ratio': 0.4370861053466797, 
'GRID_CELL_SIZE_PANCHROMATIC': 15, 'RADIANCE_ADD_BAND_4': -48.76087951660156, 'REFLECTANCE_MULT_BAND_7': 1.9999999494757503e-05, 'system:time_start': 1400697934830, 'RADIANCE_ADD_BAND_5': -29.839229583740234, 'REFLECTANCE_MULT_BAND_6': 1.9999999494757503e-05, 'RADIANCE_ADD_BAND_6': -7.420740127563477, 'REFLECTANCE_MULT_BAND_9': 1.9999999494757503e-05, 'PROCESSING_SOFTWARE_VERSION': 'LPGS_2.7.0', 'RADIANCE_ADD_BAND_7': -2.501189947128296, 'REFLECTANCE_MULT_BAND_8': 1.9999999494757503e-05, 'RADIANCE_ADD_BAND_1': -61.279541015625, 'RADIANCE_ADD_BAND_2': -62.75099182128906, 'RADIANCE_ADD_BAND_3': -57.824501037597656, 'REFLECTANCE_MULT_BAND_1': 1.9999999494757503e-05, 'RADIANCE_ADD_BAND_8': -55.18389892578125, 'REFLECTANCE_MULT_BAND_3': 1.9999999494757503e-05, 'RADIANCE_ADD_BAND_9': -11.661849975585938, 'REFLECTANCE_MULT_BAND_2': 1.9999999494757503e-05, 'REFLECTANCE_MULT_BAND_5': 1.9999999494757503e-05, 'REFLECTANCE_MULT_BAND_4': 1.9999999494757503e-05, 'THERMAL_LINES': 7801, 'TIRS_SSM_POSITION_STATUS': 'NOMINAL', 'GRID_CELL_SIZE_THERMAL': 30, 'NADIR_OFFNADIR': 'NADIR', 'RADIANCE_ADD_BAND_11': 0.10000000149011612, 'REQUEST_ID': '0501703064217_00034', 'EARTH_SUN_DISTANCE': 1.0121588706970215, 'TIRS_SSM_MODEL': 'ACTUAL', 'FILE_DATE': 1488873846000, 'SCENE_CENTER_TIME': '18:45:34.8277940Z', 'SUN_ELEVATION': 65.65296173095703, 'BPF_NAME_OLI': 'LO8BPF20140521183116_20140521190315.02', 'RADIANCE_ADD_BAND_10': 0.10000000149011612, 'ROLL_ANGLE': -0.0010000000474974513, 'K1_CONSTANT_BAND_10': 774.8853149414062, 'SATURATION_BAND_1': 'Y', 'SATURATION_BAND_2': 'Y', 'SATURATION_BAND_3': 'Y', 'SATURATION_BAND_4': 'Y', 'SATURATION_BAND_5': 'Y', 'MAP_PROJECTION': 'UTM', 'SATURATION_BAND_6': 'Y', 'SENSOR_ID': 'OLI_TIRS', 'SATURATION_BAND_7': 'Y', 'K1_CONSTANT_BAND_11': 480.8883056640625, 'SATURATION_BAND_8': 'N', 'SATURATION_BAND_9': 'N', 'TARGET_WRS_PATH': 44, 'RADIANCE_MULT_BAND_11': 0.00033420001273043454, 'RADIANCE_MULT_BAND_10': 0.00033420001273043454, 'GROUND_CONTROL_POINTS_MODEL': 404, 'SPACECRAFT_ID': 'LANDSAT_8', 'ELEVATION_SOURCE': 'GLS2000', 'THERMAL_SAMPLES': 7671, 'GROUND_CONTROL_POINTS_VERIFY': 150}}, {'type': 'Image', 'bands': [{'id': 'B1', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7791], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 461385, 0, -30, 4264215]}, {'id': 'B2', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7791], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 461385, 0, -30, 4264215]}, {'id': 'B3', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7791], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 461385, 0, -30, 4264215]}, {'id': 'B4', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7791], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 461385, 0, -30, 4264215]}, {'id': 'B5', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7791], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 461385, 0, -30, 4264215]}, {'id': 'B6', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7791], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 461385, 0, -30, 4264215]}, {'id': 'B7', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7791], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 461385, 0, -30, 4264215]}, {'id': 'B8', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [15341, 15581], 'crs': 'EPSG:32610', 'crs_transform': [15, 0, 461392.5, 0, -15, 4264207.5]}, {'id': 'B9', 
'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7791], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 461385, 0, -30, 4264215]}, {'id': 'B10', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7791], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 461385, 0, -30, 4264215]}, {'id': 'B11', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7791], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 461385, 0, -30, 4264215]}, {'id': 'BQA', 'data_type': {'type': 'PixelType', 'precision': 'int', 'min': 0, 'max': 65535}, 'dimensions': [7671, 7791], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 461385, 0, -30, 4264215]}], 'version': 1580563126058134, 'id': 'LANDSAT/LC08/C01/T1_TOA/LC08_044034_20140505', 'properties': {'RADIANCE_MULT_BAND_5': 0.006009500008076429, 'RADIANCE_MULT_BAND_6': 0.0014944999711588025, 'RADIANCE_MULT_BAND_3': 0.011645999737083912, 'RADIANCE_MULT_BAND_4': 0.009820199571549892, 'RADIANCE_MULT_BAND_1': 0.012341000139713287, 'RADIANCE_MULT_BAND_2': 0.012637999840080738, 'K2_CONSTANT_BAND_11': 1201.1441650390625, 'K2_CONSTANT_BAND_10': 1321.078857421875, 'system:footprint': {'type': 'LinearRing', 'coordinates': [[-121.23130694632096, 38.20890167865334], [-122.47808618435543, 38.442905249886934], [-122.9416241270812, 38.52616106461051], [-122.94257304228283, 38.52467261055228], [-122.94438908458714, 38.518980549130696], [-122.9480116995035, 38.506814434795785], [-123.42945547884437, 36.807365583536495], [-123.42944546960602, 36.80558241062019], [-121.35650439967876, 36.40925950162913], [-121.35462928167787, 36.409233706436694], [-121.2209704109367, 36.84467814167406], [-121.09380664017438, 37.25395464587639], [-120.98744109880928, 37.59368464704816], [-120.92971288838983, 37.77715018781449], [-120.874792117132, 37.95100539896876], [-120.85505283148036, 38.013433126642376], [-120.83525753541217, 38.07639805962481], [-120.81911222539682, 38.12806656677994], [-120.8214394607643, 38.1287277611953], [-120.83942642052946, 38.13230813141151], [-121.23130694632096, 38.20890167865334]]}, 'REFLECTIVE_SAMPLES': 7671, 'SUN_AZIMUTH': 134.8988800048828, 'CPF_NAME': 'LC08CPF_20140401_20140630_01.01', 'DATE_ACQUIRED': '2014-05-05', 'ELLIPSOID': 'WGS84', 'google:registration_offset_x': 0, 'google:registration_offset_y': 0, 'STATION_ID': 'LGN', 'RESAMPLING_OPTION': 'CUBIC_CONVOLUTION', 'ORIENTATION': 'NORTH_UP', 'WRS_ROW': 34, 'RADIANCE_MULT_BAND_9': 0.0023485999554395676, 'TARGET_WRS_ROW': 34, 'RADIANCE_MULT_BAND_7': 0.0005037300288677216, 'RADIANCE_MULT_BAND_8': 0.011114000342786312, 'IMAGE_QUALITY_TIRS': 9, 'TRUNCATION_OLI': 'UPPER', 'CLOUD_COVER': 24.25, 'GEOMETRIC_RMSE_VERIFY': 3.5369999408721924, 'COLLECTION_CATEGORY': 'T1', 'GRID_CELL_SIZE_REFLECTIVE': 30, 'CLOUD_COVER_LAND': 30.09000015258789, 'GEOMETRIC_RMSE_MODEL': 7.320000171661377, 'COLLECTION_NUMBER': 1, 'IMAGE_QUALITY_OLI': 9, 'LANDSAT_SCENE_ID': 'LC80440342014125LGN01', 'WRS_PATH': 44, 'google:registration_count': 0, 'PANCHROMATIC_SAMPLES': 15341, 'PANCHROMATIC_LINES': 15581, 'GEOMETRIC_RMSE_MODEL_Y': 4.623000144958496, 'REFLECTIVE_LINES': 7791, 'TIRS_STRAY_LIGHT_CORRECTION_SOURCE': 'TIRS', 'GEOMETRIC_RMSE_MODEL_X': 5.675000190734863, 'system:asset_size': 1263423627, 'system:index': 'LC08_044034_20140505', 'REFLECTANCE_ADD_BAND_1': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_2': -0.10000000149011612, 'DATUM': 'WGS84', 'REFLECTANCE_ADD_BAND_3': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_4': -0.10000000149011612, 'RLUT_FILE_NAME': 
'LC08RLUT_20130211_20150302_01_11.h5', 'REFLECTANCE_ADD_BAND_5': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_6': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_7': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_8': -0.10000000149011612, 'BPF_NAME_TIRS': 'LT8BPF20140505181139_20140505190416.01', 'GROUND_CONTROL_POINTS_VERSION': 4, 'DATA_TYPE': 'L1TP', 'UTM_ZONE': 10, 'LANDSAT_PRODUCT_ID': 'LC08_L1TP_044034_20140505_20170307_01_T1', 'REFLECTANCE_ADD_BAND_9': -0.10000000149011612, 'google:registration_ratio': 0, 'GRID_CELL_SIZE_PANCHROMATIC': 15, 'RADIANCE_ADD_BAND_4': -49.10100173950195, 'REFLECTANCE_MULT_BAND_7': 1.9999999494757503e-05, 'system:time_start': 1399315542790, 'RADIANCE_ADD_BAND_5': -30.047359466552734, 'REFLECTANCE_MULT_BAND_6': 1.9999999494757503e-05, 'RADIANCE_ADD_BAND_6': -7.472509860992432, 'REFLECTANCE_MULT_BAND_9': 1.9999999494757503e-05, 'PROCESSING_SOFTWARE_VERSION': 'LPGS_2.7.0', 'RADIANCE_ADD_BAND_7': -2.518630027770996, 'REFLECTANCE_MULT_BAND_8': 1.9999999494757503e-05, 'RADIANCE_ADD_BAND_1': -61.70698165893555, 'RADIANCE_ADD_BAND_2': -63.18870162963867, 'RADIANCE_ADD_BAND_3': -58.227840423583984, 'REFLECTANCE_MULT_BAND_1': 1.9999999494757503e-05, 'RADIANCE_ADD_BAND_8': -55.56882095336914, 'REFLECTANCE_MULT_BAND_3': 1.9999999494757503e-05, 'RADIANCE_ADD_BAND_9': -11.743189811706543, 'REFLECTANCE_MULT_BAND_2': 1.9999999494757503e-05, 'REFLECTANCE_MULT_BAND_5': 1.9999999494757503e-05, 'REFLECTANCE_MULT_BAND_4': 1.9999999494757503e-05, 'THERMAL_LINES': 7791, 'TIRS_SSM_POSITION_STATUS': 'NOMINAL', 'GRID_CELL_SIZE_THERMAL': 30, 'NADIR_OFFNADIR': 'NADIR', 'RADIANCE_ADD_BAND_11': 0.10000000149011612, 'REQUEST_ID': '0501703064572_00027', 'EARTH_SUN_DISTANCE': 1.0086472034454346, 'TIRS_SSM_MODEL': 'ACTUAL', 'FILE_DATE': 1488903671000, 'SCENE_CENTER_TIME': '18:45:42.7916370Z', 'SUN_ELEVATION': 62.584102630615234, 'BPF_NAME_OLI': 'LO8BPF20140505183026_20140505190323.01', 'RADIANCE_ADD_BAND_10': 0.10000000149011612, 'ROLL_ANGLE': -0.0010000000474974513, 'K1_CONSTANT_BAND_10': 774.8853149414062, 'SATURATION_BAND_1': 'Y', 'SATURATION_BAND_2': 'Y', 'SATURATION_BAND_3': 'Y', 'SATURATION_BAND_4': 'Y', 'SATURATION_BAND_5': 'Y', 'MAP_PROJECTION': 'UTM', 'SATURATION_BAND_6': 'Y', 'SENSOR_ID': 'OLI_TIRS', 'SATURATION_BAND_7': 'Y', 'K1_CONSTANT_BAND_11': 480.8883056640625, 'SATURATION_BAND_8': 'N', 'SATURATION_BAND_9': 'N', 'TARGET_WRS_PATH': 44, 'RADIANCE_MULT_BAND_11': 0.00033420001273043454, 'RADIANCE_MULT_BAND_10': 0.00033420001273043454, 'GROUND_CONTROL_POINTS_MODEL': 289, 'SPACECRAFT_ID': 'LANDSAT_8', 'ELEVATION_SOURCE': 'GLS2000', 'THERMAL_SAMPLES': 7671, 'GROUND_CONTROL_POINTS_VERIFY': 62}}, {'type': 'Image', 'bands': [{'id': 'B1', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7791], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 461385, 0, -30, 4264215]}, {'id': 'B2', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7791], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 461385, 0, -30, 4264215]}, {'id': 'B3', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7791], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 461385, 0, -30, 4264215]}, {'id': 'B4', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7791], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 461385, 0, -30, 4264215]}, {'id': 'B5', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7791], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 461385, 0, -30, 4264215]}, {'id': 'B6', 
'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7791], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 461385, 0, -30, 4264215]}, {'id': 'B7', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7791], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 461385, 0, -30, 4264215]}, {'id': 'B8', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [15341, 15581], 'crs': 'EPSG:32610', 'crs_transform': [15, 0, 461392.5, 0, -15, 4264207.5]}, {'id': 'B9', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7791], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 461385, 0, -30, 4264215]}, {'id': 'B10', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7791], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 461385, 0, -30, 4264215]}, {'id': 'B11', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7791], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 461385, 0, -30, 4264215]}, {'id': 'BQA', 'data_type': {'type': 'PixelType', 'precision': 'int', 'min': 0, 'max': 65535}, 'dimensions': [7671, 7791], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 461385, 0, -30, 4264215]}], 'version': 1580563126058134, 'id': 'LANDSAT/LC08/C01/T1_TOA/LC08_044034_20140419', 'properties': {'RADIANCE_MULT_BAND_5': 0.006059799809008837, 'RADIANCE_MULT_BAND_6': 0.0015069999499246478, 'RADIANCE_MULT_BAND_3': 0.011742999777197838, 'RADIANCE_MULT_BAND_4': 0.009902399964630604, 'RADIANCE_MULT_BAND_1': 0.012445000000298023, 'RADIANCE_MULT_BAND_2': 0.012744000181555748, 'K2_CONSTANT_BAND_11': 1201.1441650390625, 'K2_CONSTANT_BAND_10': 1321.078857421875, 'system:footprint': {'type': 'LinearRing', 'coordinates': [[-120.8431379362771, 38.052617966765766], [-120.83578218089683, 38.07600217001765], [-120.81963729012756, 38.12767081181165], [-120.82234049239531, 38.12843879727159], [-122.94102091600229, 38.525570980595205], [-122.94293147316415, 38.52557196694168], [-122.94542248503689, 38.51776440194044], [-122.9490448046238, 38.50559823329617], [-123.430644945337, 36.8057166125035], [-123.42903372114263, 36.80507606772225], [-122.57913602686314, 36.64741782585057], [-121.50262683064466, 36.438064670880586], [-121.35593613505138, 36.40870641506648], [-121.35503796940482, 36.40940804319249], [-121.22502589113704, 36.8329762319502], [-121.10052631685265, 37.23379807333198], [-120.9755883879769, 37.632705519232594], [-120.88376082672839, 37.92399755184342], [-120.85385887049235, 38.01862509330369], [-120.8431379362771, 38.052617966765766]]}, 'REFLECTIVE_SAMPLES': 7671, 'SUN_AZIMUTH': 139.7012176513672, 'CPF_NAME': 'LC08CPF_20140401_20140630_01.01', 'DATE_ACQUIRED': '2014-04-19', 'ELLIPSOID': 'WGS84', 'google:registration_offset_x': 0, 'google:registration_offset_y': 0, 'STATION_ID': 'LGN', 'RESAMPLING_OPTION': 'CUBIC_CONVOLUTION', 'ORIENTATION': 'NORTH_UP', 'WRS_ROW': 34, 'RADIANCE_MULT_BAND_9': 0.002368299989029765, 'TARGET_WRS_ROW': 34, 'RADIANCE_MULT_BAND_7': 0.000507950026076287, 'RADIANCE_MULT_BAND_8': 0.011207000352442265, 'IMAGE_QUALITY_TIRS': 9, 'TRUNCATION_OLI': 'UPPER', 'CLOUD_COVER': 12.920000076293945, 'GEOMETRIC_RMSE_VERIFY': 3.380000114440918, 'COLLECTION_CATEGORY': 'T1', 'GRID_CELL_SIZE_REFLECTIVE': 30, 'CLOUD_COVER_LAND': 0.75, 'GEOMETRIC_RMSE_MODEL': 6.547999858856201, 'COLLECTION_NUMBER': 1, 'IMAGE_QUALITY_OLI': 9, 'LANDSAT_SCENE_ID': 'LC80440342014109LGN01', 'WRS_PATH': 44, 'google:registration_count': 0, 'PANCHROMATIC_SAMPLES': 15341, 'PANCHROMATIC_LINES': 15581, 
'GEOMETRIC_RMSE_MODEL_Y': 4.453999996185303, 'REFLECTIVE_LINES': 7791, 'TIRS_STRAY_LIGHT_CORRECTION_SOURCE': 'TIRS', 'GEOMETRIC_RMSE_MODEL_X': 4.798999786376953, 'system:asset_size': 1203236382, 'system:index': 'LC08_044034_20140419', 'REFLECTANCE_ADD_BAND_1': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_2': -0.10000000149011612, 'DATUM': 'WGS84', 'REFLECTANCE_ADD_BAND_3': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_4': -0.10000000149011612, 'RLUT_FILE_NAME': 'LC08RLUT_20130211_20150302_01_11.h5', 'REFLECTANCE_ADD_BAND_5': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_6': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_7': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_8': -0.10000000149011612, 'BPF_NAME_TIRS': 'LT8BPF20140419183133_20140419190432.01', 'GROUND_CONTROL_POINTS_VERSION': 4, 'DATA_TYPE': 'L1TP', 'UTM_ZONE': 10, 'LANDSAT_PRODUCT_ID': 'LC08_L1TP_044034_20140419_20170307_01_T1', 'REFLECTANCE_ADD_BAND_9': -0.10000000149011612, 'google:registration_ratio': 0, 'GRID_CELL_SIZE_PANCHROMATIC': 15, 'RADIANCE_ADD_BAND_4': -49.512229919433594, 'REFLECTANCE_MULT_BAND_7': 1.9999999494757503e-05, 'system:time_start': 1397933159240, 'RADIANCE_ADD_BAND_5': -30.299020767211914, 'REFLECTANCE_MULT_BAND_6': 1.9999999494757503e-05, 'RADIANCE_ADD_BAND_6': -7.53508996963501, 'REFLECTANCE_MULT_BAND_9': 1.9999999494757503e-05, 'PROCESSING_SOFTWARE_VERSION': 'LPGS_2.7.0', 'RADIANCE_ADD_BAND_7': -2.5397300720214844, 'REFLECTANCE_MULT_BAND_8': 1.9999999494757503e-05, 'RADIANCE_ADD_BAND_1': -62.22378921508789, 'RADIANCE_ADD_BAND_2': -63.717918395996094, 'RADIANCE_ADD_BAND_3': -58.715518951416016, 'REFLECTANCE_MULT_BAND_1': 1.9999999494757503e-05, 'RADIANCE_ADD_BAND_8': -56.03422164916992, 'REFLECTANCE_MULT_BAND_3': 1.9999999494757503e-05, 'RADIANCE_ADD_BAND_9': -11.841540336608887, 'REFLECTANCE_MULT_BAND_2': 1.9999999494757503e-05, 'REFLECTANCE_MULT_BAND_5': 1.9999999494757503e-05, 'REFLECTANCE_MULT_BAND_4': 1.9999999494757503e-05, 'THERMAL_LINES': 7791, 'TIRS_SSM_POSITION_STATUS': 'NOMINAL', 'GRID_CELL_SIZE_THERMAL': 30, 'NADIR_OFFNADIR': 'NADIR', 'RADIANCE_ADD_BAND_11': 0.10000000149011612, 'REQUEST_ID': '0501703064332_00025', 'EARTH_SUN_DISTANCE': 1.004449725151062, 'TIRS_SSM_MODEL': 'ACTUAL', 'FILE_DATE': 1488882124000, 'SCENE_CENTER_TIME': '18:45:59.2402600Z', 'SUN_ELEVATION': 58.094696044921875, 'BPF_NAME_OLI': 'LO8BPF20140419183527_20140419190339.01', 'RADIANCE_ADD_BAND_10': 0.10000000149011612, 'ROLL_ANGLE': -0.0010000000474974513, 'K1_CONSTANT_BAND_10': 774.8853149414062, 'SATURATION_BAND_1': 'Y', 'SATURATION_BAND_2': 'Y', 'SATURATION_BAND_3': 'Y', 'SATURATION_BAND_4': 'Y', 'SATURATION_BAND_5': 'Y', 'MAP_PROJECTION': 'UTM', 'SATURATION_BAND_6': 'Y', 'SENSOR_ID': 'OLI_TIRS', 'SATURATION_BAND_7': 'Y', 'K1_CONSTANT_BAND_11': 480.8883056640625, 'SATURATION_BAND_8': 'N', 'SATURATION_BAND_9': 'N', 'TARGET_WRS_PATH': 44, 'RADIANCE_MULT_BAND_11': 0.00033420001273043454, 'RADIANCE_MULT_BAND_10': 0.00033420001273043454, 'GROUND_CONTROL_POINTS_MODEL': 509, 'SPACECRAFT_ID': 'LANDSAT_8', 'ELEVATION_SOURCE': 'GLS2000', 'THERMAL_SAMPLES': 7671, 'GROUND_CONTROL_POINTS_VERIFY': 169}}, {'type': 'Image', 'bands': [{'id': 'B1', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7791], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 460785, 0, -30, 4264215]}, {'id': 'B2', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7791], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 460785, 0, -30, 4264215]}, {'id': 'B3', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 
'dimensions': [7671, 7791], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 460785, 0, -30, 4264215]}, {'id': 'B4', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7791], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 460785, 0, -30, 4264215]}, {'id': 'B5', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7791], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 460785, 0, -30, 4264215]}, {'id': 'B6', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7791], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 460785, 0, -30, 4264215]}, {'id': 'B7', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7791], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 460785, 0, -30, 4264215]}, {'id': 'B8', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [15341, 15581], 'crs': 'EPSG:32610', 'crs_transform': [15, 0, 460792.5, 0, -15, 4264207.5]}, {'id': 'B9', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7791], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 460785, 0, -30, 4264215]}, {'id': 'B10', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7791], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 460785, 0, -30, 4264215]}, {'id': 'B11', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7791], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 460785, 0, -30, 4264215]}, {'id': 'BQA', 'data_type': {'type': 'PixelType', 'precision': 'int', 'min': 0, 'max': 65535}, 'dimensions': [7671, 7791], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 460785, 0, -30, 4264215]}], 'version': 1580563126058134, 'id': 'LANDSAT/LC08/C01/T1_TOA/LC08_044034_20140403', 'properties': {'RADIANCE_MULT_BAND_5': 0.00611429987475276, 'RADIANCE_MULT_BAND_6': 0.0015206000534817576, 'RADIANCE_MULT_BAND_3': 0.011849000118672848, 'RADIANCE_MULT_BAND_4': 0.009991499595344067, 'RADIANCE_MULT_BAND_1': 0.012556999921798706, 'RADIANCE_MULT_BAND_2': 0.01285799965262413, 'K2_CONSTANT_BAND_11': 1201.1441650390625, 'K2_CONSTANT_BAND_10': 1321.078857421875, 'system:footprint': {'type': 'LinearRing', 'coordinates': [[-120.8473141778081, 38.05593855929062], [-120.8399593728871, 38.079323071287384], [-120.82522434534502, 38.126298845124154], [-120.82517062317932, 38.12810935862697], [-120.8677905264658, 38.13653674526281], [-121.37735830917396, 38.23574890955089], [-122.92397603591857, 38.5218201625494], [-122.94540185152168, 38.52557313562304], [-122.94781508421401, 38.52557420469068], [-122.9538620955667, 38.50519466790785], [-123.43541566635548, 36.80572425461524], [-123.43388775775958, 36.8051169737102], [-121.36103157158686, 36.408726677230895], [-121.3601864919046, 36.410036730606365], [-121.3547960201613, 36.42754948797928], [-121.22805212441246, 36.84032220234662], [-121.10161450053057, 37.247264521511426], [-120.99043851266156, 37.60225211028372], [-120.94687053372499, 37.7406010941523], [-120.88475337745422, 37.93745112674764], [-120.8473141778081, 38.05593855929062]]}, 'REFLECTIVE_SAMPLES': 7671, 'SUN_AZIMUTH': 143.3709716796875, 'CPF_NAME': 'LC08CPF_20140401_20140630_01.01', 'DATE_ACQUIRED': '2014-04-03', 'ELLIPSOID': 'WGS84', 'google:registration_offset_x': 0, 'google:registration_offset_y': 0, 'STATION_ID': 'LGN', 'RESAMPLING_OPTION': 'CUBIC_CONVOLUTION', 'ORIENTATION': 'NORTH_UP', 'WRS_ROW': 34, 'RADIANCE_MULT_BAND_9': 0.002389600034803152, 'TARGET_WRS_ROW': 34, 'RADIANCE_MULT_BAND_7': 0.0005125200259499252, 'RADIANCE_MULT_BAND_8': 
0.011308000423014164, 'IMAGE_QUALITY_TIRS': 9, 'TRUNCATION_OLI': 'UPPER', 'CLOUD_COVER': 28.1200008392334, 'GEOMETRIC_RMSE_VERIFY': 3.2160000801086426, 'COLLECTION_CATEGORY': 'T1', 'GRID_CELL_SIZE_REFLECTIVE': 30, 'CLOUD_COVER_LAND': 31.59000015258789, 'GEOMETRIC_RMSE_MODEL': 6.959000110626221, 'COLLECTION_NUMBER': 1, 'IMAGE_QUALITY_OLI': 9, 'LANDSAT_SCENE_ID': 'LC80440342014093LGN01', 'WRS_PATH': 44, 'google:registration_count': 0, 'PANCHROMATIC_SAMPLES': 15341, 'PANCHROMATIC_LINES': 15581, 'GEOMETRIC_RMSE_MODEL_Y': 4.63700008392334, 'REFLECTIVE_LINES': 7791, 'TIRS_STRAY_LIGHT_CORRECTION_SOURCE': 'TIRS', 'GEOMETRIC_RMSE_MODEL_X': 5.188000202178955, 'system:asset_size': 1208697743, 'system:index': 'LC08_044034_20140403', 'REFLECTANCE_ADD_BAND_1': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_2': -0.10000000149011612, 'DATUM': 'WGS84', 'REFLECTANCE_ADD_BAND_3': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_4': -0.10000000149011612, 'RLUT_FILE_NAME': 'LC08RLUT_20130211_20150302_01_11.h5', 'REFLECTANCE_ADD_BAND_5': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_6': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_7': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_8': -0.10000000149011612, 'BPF_NAME_TIRS': 'LT8BPF20140403182815_20140403190449.01', 'GROUND_CONTROL_POINTS_VERSION': 4, 'DATA_TYPE': 'L1TP', 'UTM_ZONE': 10, 'LANDSAT_PRODUCT_ID': 'LC08_L1TP_044034_20140403_20170306_01_T1', 'REFLECTANCE_ADD_BAND_9': -0.10000000149011612, 'google:registration_ratio': 0, 'GRID_CELL_SIZE_PANCHROMATIC': 15, 'RADIANCE_ADD_BAND_4': -49.95764923095703, 'REFLECTANCE_MULT_BAND_7': 1.9999999494757503e-05, 'system:time_start': 1396550776290, 'RADIANCE_ADD_BAND_5': -30.571590423583984, 'REFLECTANCE_MULT_BAND_6': 1.9999999494757503e-05, 'RADIANCE_ADD_BAND_6': -7.602880001068115, 'REFLECTANCE_MULT_BAND_9': 1.9999999494757503e-05, 'PROCESSING_SOFTWARE_VERSION': 'LPGS_2.7.0', 'RADIANCE_ADD_BAND_7': -2.562580108642578, 'REFLECTANCE_MULT_BAND_8': 1.9999999494757503e-05, 'RADIANCE_ADD_BAND_1': -62.78356170654297, 'RADIANCE_ADD_BAND_2': -64.29113006591797, 'RADIANCE_ADD_BAND_3': -59.24372863769531, 'REFLECTANCE_MULT_BAND_1': 1.9999999494757503e-05, 'RADIANCE_ADD_BAND_8': -56.53831100463867, 'REFLECTANCE_MULT_BAND_3': 1.9999999494757503e-05, 'RADIANCE_ADD_BAND_9': -11.94806957244873, 'REFLECTANCE_MULT_BAND_2': 1.9999999494757503e-05, 'REFLECTANCE_MULT_BAND_5': 1.9999999494757503e-05, 'REFLECTANCE_MULT_BAND_4': 1.9999999494757503e-05, 'THERMAL_LINES': 7791, 'TIRS_SSM_POSITION_STATUS': 'NOMINAL', 'GRID_CELL_SIZE_THERMAL': 30, 'NADIR_OFFNADIR': 'NADIR', 'RADIANCE_ADD_BAND_11': 0.10000000149011612, 'REQUEST_ID': '0501703063782_00025', 'EARTH_SUN_DISTANCE': 0.9999619126319885, 'TIRS_SSM_MODEL': 'ACTUAL', 'FILE_DATE': 1488829355000, 'SCENE_CENTER_TIME': '18:46:16.2881730Z', 'SUN_ELEVATION': 52.549800872802734, 'BPF_NAME_OLI': 'LO8BPF20140403183209_20140403190356.01', 'RADIANCE_ADD_BAND_10': 0.10000000149011612, 'ROLL_ANGLE': -0.0010000000474974513, 'K1_CONSTANT_BAND_10': 774.8853149414062, 'SATURATION_BAND_1': 'N', 'SATURATION_BAND_2': 'N', 'SATURATION_BAND_3': 'N', 'SATURATION_BAND_4': 'N', 'SATURATION_BAND_5': 'Y', 'MAP_PROJECTION': 'UTM', 'SATURATION_BAND_6': 'Y', 'SENSOR_ID': 'OLI_TIRS', 'SATURATION_BAND_7': 'Y', 'K1_CONSTANT_BAND_11': 480.8883056640625, 'SATURATION_BAND_8': 'N', 'SATURATION_BAND_9': 'N', 'TARGET_WRS_PATH': 44, 'RADIANCE_MULT_BAND_11': 0.00033420001273043454, 'RADIANCE_MULT_BAND_10': 0.00033420001273043454, 'GROUND_CONTROL_POINTS_MODEL': 385, 'SPACECRAFT_ID': 'LANDSAT_8', 'ELEVATION_SOURCE': 'GLS2000', 'THERMAL_SAMPLES': 
7671, 'GROUND_CONTROL_POINTS_VERIFY': 98}}, {'type': 'Image', 'bands': [{'id': 'B1', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7661, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 460785, 0, -30, 4264215]}, {'id': 'B2', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7661, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 460785, 0, -30, 4264215]}, {'id': 'B3', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7661, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 460785, 0, -30, 4264215]}, {'id': 'B4', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7661, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 460785, 0, -30, 4264215]}, {'id': 'B5', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7661, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 460785, 0, -30, 4264215]}, {'id': 'B6', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7661, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 460785, 0, -30, 4264215]}, {'id': 'B7', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7661, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 460785, 0, -30, 4264215]}, {'id': 'B8', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [15321, 15601], 'crs': 'EPSG:32610', 'crs_transform': [15, 0, 460792.5, 0, -15, 4264207.5]}, {'id': 'B9', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7661, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 460785, 0, -30, 4264215]}, {'id': 'B10', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7661, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 460785, 0, -30, 4264215]}, {'id': 'B11', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7661, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 460785, 0, -30, 4264215]}, {'id': 'BQA', 'data_type': {'type': 'PixelType', 'precision': 'int', 'min': 0, 'max': 65535}, 'dimensions': [7661, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 460785, 0, -30, 4264215]}], 'version': 1580563126058134, 'id': 'LANDSAT/LC08/C01/T1_TOA/LC08_044034_20140318', 'properties': {'RADIANCE_MULT_BAND_5': 0.006170900072902441, 'RADIANCE_MULT_BAND_6': 0.001534600043669343, 'RADIANCE_MULT_BAND_3': 0.011958000250160694, 'RADIANCE_MULT_BAND_4': 0.010084000416100025, 'RADIANCE_MULT_BAND_1': 0.012672999873757362, 'RADIANCE_MULT_BAND_2': 0.012977000325918198, 'K2_CONSTANT_BAND_11': 1201.1441650390625, 'K2_CONSTANT_BAND_10': 1321.078857421875, 'system:footprint': {'type': 'LinearRing', 'coordinates': [[-121.3637119499993, 36.41016684133052], [-121.35905784815819, 36.42528989660049], [-121.2315833015866, 36.840374852891664], [-121.09978718573184, 37.26438246506325], [-121.00571062336425, 37.564795515259384], [-120.98453376062118, 37.632161601008896], [-120.95100979452299, 37.73864548098522], [-120.90277241165228, 37.89149086576169], [-120.8836409072059, 37.951976016520376], [-120.85713152433351, 38.03584247073611], [-120.82804345546616, 38.12789513604401], [-122.38148159443172, 38.42337450676813], [-122.9500220192271, 38.525813632077686], [-122.95103687833704, 38.52422133103557], [-122.9569591344694, 38.504384836247866], [-123.43853932998316, 36.805122381748035], [-123.18722447462653, 36.759167415189125], [-121.5105534682754, 36.43765126135182], [-121.36447385999617, 36.408418528930035], [-121.3637119499993, 36.41016684133052]]}, 'REFLECTIVE_SAMPLES': 
7661, 'SUN_AZIMUTH': 146.2395782470703, 'CPF_NAME': 'LC08CPF_20140101_20140331_01.01', 'DATE_ACQUIRED': '2014-03-18', 'ELLIPSOID': 'WGS84', 'google:registration_offset_x': 0, 'google:registration_offset_y': 0, 'STATION_ID': 'LGN', 'RESAMPLING_OPTION': 'CUBIC_CONVOLUTION', 'ORIENTATION': 'NORTH_UP', 'WRS_ROW': 34, 'RADIANCE_MULT_BAND_9': 0.0024117000866681337, 'TARGET_WRS_ROW': 34, 'RADIANCE_MULT_BAND_7': 0.0005172499804757535, 'RADIANCE_MULT_BAND_8': 0.0114120002835989, 'IMAGE_QUALITY_TIRS': 9, 'TRUNCATION_OLI': 'UPPER', 'CLOUD_COVER': 0.05999999865889549, 'GEOMETRIC_RMSE_VERIFY': 3.249000072479248, 'COLLECTION_CATEGORY': 'T1', 'GRID_CELL_SIZE_REFLECTIVE': 30, 'CLOUD_COVER_LAND': 0.10000000149011612, 'GEOMETRIC_RMSE_MODEL': 6.78000020980835, 'COLLECTION_NUMBER': 1, 'IMAGE_QUALITY_OLI': 9, 'LANDSAT_SCENE_ID': 'LC80440342014077LGN01', 'WRS_PATH': 44, 'google:registration_count': 0, 'PANCHROMATIC_SAMPLES': 15321, 'PANCHROMATIC_LINES': 15601, 'GEOMETRIC_RMSE_MODEL_Y': 4.747000217437744, 'REFLECTIVE_LINES': 7801, 'TIRS_STRAY_LIGHT_CORRECTION_SOURCE': 'TIRS', 'GEOMETRIC_RMSE_MODEL_X': 4.841000080108643, 'system:asset_size': 1105511852, 'system:index': 'LC08_044034_20140318', 'REFLECTANCE_ADD_BAND_1': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_2': -0.10000000149011612, 'DATUM': 'WGS84', 'REFLECTANCE_ADD_BAND_3': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_4': -0.10000000149011612, 'RLUT_FILE_NAME': 'LC08RLUT_20130211_20150302_01_11.h5', 'REFLECTANCE_ADD_BAND_5': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_6': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_7': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_8': -0.10000000149011612, 'BPF_NAME_TIRS': 'LT8BPF20140318182855_20140318190505.01', 'GROUND_CONTROL_POINTS_VERSION': 4, 'DATA_TYPE': 'L1TP', 'UTM_ZONE': 10, 'LANDSAT_PRODUCT_ID': 'LC08_L1TP_044034_20140318_20170307_01_T1', 'REFLECTANCE_ADD_BAND_9': -0.10000000149011612, 'google:registration_ratio': 0, 'GRID_CELL_SIZE_PANCHROMATIC': 15, 'RADIANCE_ADD_BAND_4': -50.419559478759766, 'REFLECTANCE_MULT_BAND_7': 1.9999999494757503e-05, 'system:time_start': 1395168392050, 'RADIANCE_ADD_BAND_5': -30.854249954223633, 'REFLECTANCE_MULT_BAND_6': 1.9999999494757503e-05, 'RADIANCE_ADD_BAND_6': -7.67317008972168, 'REFLECTANCE_MULT_BAND_9': 1.9999999494757503e-05, 'PROCESSING_SOFTWARE_VERSION': 'LPGS_2.7.0', 'RADIANCE_ADD_BAND_7': -2.5862700939178467, 'REFLECTANCE_MULT_BAND_8': 1.9999999494757503e-05, 'RADIANCE_ADD_BAND_1': -63.364051818847656, 'RADIANCE_ADD_BAND_2': -64.88555908203125, 'RADIANCE_ADD_BAND_3': -59.79148864746094, 'REFLECTANCE_MULT_BAND_1': 1.9999999494757503e-05, 'RADIANCE_ADD_BAND_8': -57.06106185913086, 'REFLECTANCE_MULT_BAND_3': 1.9999999494757503e-05, 'RADIANCE_ADD_BAND_9': -12.058540344238281, 'REFLECTANCE_MULT_BAND_2': 1.9999999494757503e-05, 'REFLECTANCE_MULT_BAND_5': 1.9999999494757503e-05, 'REFLECTANCE_MULT_BAND_4': 1.9999999494757503e-05, 'THERMAL_LINES': 7801, 'TIRS_SSM_POSITION_STATUS': 'NOMINAL', 'GRID_CELL_SIZE_THERMAL': 30, 'NADIR_OFFNADIR': 'NADIR', 'RADIANCE_ADD_BAND_11': 0.10000000149011612, 'REQUEST_ID': '0501703063989_00019', 'EARTH_SUN_DISTANCE': 0.9953709244728088, 'TIRS_SSM_MODEL': 'ACTUAL', 'FILE_DATE': 1488849349000, 'SCENE_CENTER_TIME': '18:46:32.0535800Z', 'SUN_ELEVATION': 46.471065521240234, 'BPF_NAME_OLI': 'LO8BPF20140318183249_20140318190412.01', 'RADIANCE_ADD_BAND_10': 0.10000000149011612, 'ROLL_ANGLE': -0.0010000000474974513, 'K1_CONSTANT_BAND_10': 774.8853149414062, 'SATURATION_BAND_1': 'Y', 'SATURATION_BAND_2': 'Y', 'SATURATION_BAND_3': 'Y', 'SATURATION_BAND_4': 
'Y', 'SATURATION_BAND_5': 'Y', 'MAP_PROJECTION': 'UTM', 'SATURATION_BAND_6': 'Y', 'SENSOR_ID': 'OLI_TIRS', 'SATURATION_BAND_7': 'Y', 'K1_CONSTANT_BAND_11': 480.8883056640625, 'SATURATION_BAND_8': 'N', 'SATURATION_BAND_9': 'N', 'TARGET_WRS_PATH': 44, 'RADIANCE_MULT_BAND_11': 0.00033420001273043454, 'RADIANCE_MULT_BAND_10': 0.00033420001273043454, 'GROUND_CONTROL_POINTS_MODEL': 527, 'SPACECRAFT_ID': 'LANDSAT_8', 'ELEVATION_SOURCE': 'GLS2000', 'THERMAL_SAMPLES': 7661, 'GROUND_CONTROL_POINTS_VERIFY': 164}}]}
###Markdown
Display Earth Engine data layers
###Code
Map.setControlVisibility(layerControl=True, fullscreenControl=True, latLngPopup=True)
Map
###Output
_____no_output_____
###Markdown
Install Earth Engine API. Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geehydro](https://github.com/giswqs/geehydro). The **geehydro** Python package builds on the [folium](https://github.com/python-visualization/folium) package and implements several methods for displaying Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, `Map.centerObject()`, and `Map.setOptions()`. The magic command `%%capture` can be used to hide the output of a specific cell.
###Code
# %%capture
# !pip install earthengine-api
# !pip install geehydro
###Output
_____no_output_____
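###Markdown
As a quick preview of the geehydro methods listed above, here is a minimal sketch (kept commented out because the imports and initialization happen in the cells below; the image ID and view settings are purely illustrative):
###Code
# Map = folium.Map(location=[37.5, -122.0], zoom_start=8)
# Map.setCenter(-122.0, 37.5, 8)  # geehydro adds this (lon, lat, zoom) signature to folium.Map
# Map.addLayer(ee.Image('LANDSAT/LC08/C01/T1_TOA/LC08_044034_20140318'),
#              {'bands': ['B4', 'B3', 'B2'], 'max': 0.3}, 'Landsat 8 TOA')
###Output
_____no_output_____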
###Markdown
Import libraries
###Code
import ee
import folium
import geehydro
###Output
_____no_output_____
###Markdown
Authenticate and initialize Earth Engine API. You only need to authenticate the Earth Engine API once. Uncomment the line `ee.Authenticate()` if you are running this notebook for the first time or if you are getting an authentication error.
###Code
# ee.Authenticate()
ee.Initialize()
###Output
_____no_output_____
###Markdown
Create an interactive map. This step creates an interactive map using [folium](https://github.com/python-visualization/folium). The default basemap is OpenStreetMap. Additional basemaps can be selected using the `Map.setOptions()` function; the available options are `ROADMAP`, `SATELLITE`, `HYBRID`, `TERRAIN`, and `ESRI`.
###Code
Map = folium.Map(location=[40, -100], zoom_start=4)
Map.setOptions('HYBRID')
###Output
_____no_output_____
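###Markdown
Any of the other basemap strings listed above can be passed the same way, e.g. `Map.setOptions('TERRAIN')` for a terrain-style basemap.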
###Markdown
Add Earth Engine Python script
###Code
# Load a Landsat 8 ImageCollection for a single path-row.
collection = ee.ImageCollection('LANDSAT/LC08/C01/T1_TOA') \
.filter(ee.Filter.eq('WRS_PATH', 44)) \
.filter(ee.Filter.eq('WRS_ROW', 34)) \
.filterDate('2014-03-01', '2014-08-01')
print('Collection: ', collection.getInfo())
# Get the number of images.
count = collection.size()
print('Count: ', count.getInfo())
# Get the date range of images in the collection.
date_range = collection.reduceColumns(ee.Reducer.minMax(), ["system:time_start"])  # avoid shadowing the built-in range
print('Date range: ', ee.Date(date_range.get('min')).getInfo(), ee.Date(date_range.get('max')).getInfo())
# Get statistics for a property of the images in the collection.
sunStats = collection.aggregate_stats('SUN_ELEVATION')
print('Sun elevation statistics: ', sunStats.getInfo())
# Sort by a cloud cover property, get the least cloudy image.
image = ee.Image(collection.sort('CLOUD_COVER').first())
print('Least cloudy image: ', image.getInfo())
# Limit the collection to the 10 most recent images.
recent = collection.sort('system:time_start', False).limit(10)
print('Recent images: ', recent.getInfo())
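# The two WRS filters above can equivalently be expressed as a single combined
# filter; a sketch (not part of the original script) using ee.Filter.And:
# combined = ee.Filter.And(ee.Filter.eq('WRS_PATH', 44),
#                          ee.Filter.eq('WRS_ROW', 34))
# collection = (ee.ImageCollection('LANDSAT/LC08/C01/T1_TOA')
#               .filter(combined)
#               .filterDate('2014-03-01', '2014-08-01'))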
###Output
Collection: {'type': 'ImageCollection', 'id': 'LANDSAT/LC08/C01/T1_TOA', 'bands': [B1-B11 (float), BQA (int, bitmask)], 'properties': {'title': 'USGS Landsat 8 Collection 1 Tier 1 TOA Reflectance', ...}, 'features': [LC08_044034_20140318, LC08_044034_20140403, LC08_044034_20140419, LC08_044034_20140505, LC08_044034_20140521, ...]}
(The full getInfo() dump, including the dataset description, per-band calibration coefficients, and per-scene footprints, is truncated here for brevity.)
0, 463785, 0, -30, 4264515]}, {'id': 'B10', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 463785, 0, -30, 4264515]}, {'id': 'B11', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 463785, 0, -30, 4264515]}, {'id': 'BQA', 'data_type': {'type': 'PixelType', 'precision': 'int', 'min': 0, 'max': 65535}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 463785, 0, -30, 4264515]}], 'version': 1580044754541352, 'id': 'LANDSAT/LC08/C01/T1_TOA/LC08_044034_20140606', 'properties': {'RADIANCE_MULT_BAND_5': 0.005937200039625168, 'RADIANCE_MULT_BAND_6': 0.0014764999505132437, 'RADIANCE_MULT_BAND_3': 0.011505999602377415, 'RADIANCE_MULT_BAND_4': 0.009702100418508053, 'RADIANCE_MULT_BAND_1': 0.012192999944090843, 'RADIANCE_MULT_BAND_2': 0.01248599961400032, 'K2_CONSTANT_BAND_11': 1201.1441650390625, 'K2_CONSTANT_BAND_10': 1321.078857421875, 'system:footprint': {'type': 'LinearRing', 'coordinates': [[-120.79200539048736, 38.12706906512293], [-120.79323597868374, 38.12758439698958], [-120.82683301978153, 38.13425518072935], [-122.57369124774934, 38.465867462644404], [-122.91132538951987, 38.52663370240754], [-122.91414613702007, 38.526635850439405], [-122.9189327723941, 38.510718361283075], [-123.40419439796977, 36.80678576741027], [-121.36227701906473, 36.41476296352091], [-121.32989516455781, 36.40824848906167], [-121.20432618246714, 36.815494543804164], [-121.07428782575109, 37.232255532839595], [-120.95966651326353, 37.59672218968956], [-120.90596782826022, 37.76651090203559], [-120.86494805861443, 37.895947164272634], [-120.83393920808882, 37.993514542680224], [-120.82433446488996, 38.02375043851124], [-120.79204501354904, 38.125755061557996], [-120.79200539048736, 38.12706906512293]]}, 'REFLECTIVE_SAMPLES': 7671, 'SUN_AZIMUTH': 124.43635559082031, 'CPF_NAME': 'LC08CPF_20140401_20140630_01.01', 'DATE_ACQUIRED': '2014-06-06', 'ELLIPSOID': 'WGS84', 'google:registration_offset_x': 0, 'google:registration_offset_y': 0, 'STATION_ID': 'LGN', 'RESAMPLING_OPTION': 'CUBIC_CONVOLUTION', 'ORIENTATION': 'NORTH_UP', 'WRS_ROW': 34, 'RADIANCE_MULT_BAND_9': 0.002320399973541498, 'TARGET_WRS_ROW': 34, 'RADIANCE_MULT_BAND_7': 0.0004976699710823596, 'RADIANCE_MULT_BAND_8': 0.010979999788105488, 'IMAGE_QUALITY_TIRS': 9, 'TRUNCATION_OLI': 'UPPER', 'CLOUD_COVER': 35.709999084472656, 'GEOMETRIC_RMSE_VERIFY': 2.7200000286102295, 'COLLECTION_CATEGORY': 'T1', 'GRID_CELL_SIZE_REFLECTIVE': 30, 'CLOUD_COVER_LAND': 3.930000066757202, 'GEOMETRIC_RMSE_MODEL': 5.419000148773193, 'COLLECTION_NUMBER': 1, 'IMAGE_QUALITY_OLI': 9, 'LANDSAT_SCENE_ID': 'LC80440342014157LGN01', 'WRS_PATH': 44, 'google:registration_count': 0, 'PANCHROMATIC_SAMPLES': 15341, 'PANCHROMATIC_LINES': 15601, 'GEOMETRIC_RMSE_MODEL_Y': 3.4519999027252197, 'REFLECTIVE_LINES': 7801, 'TIRS_STRAY_LIGHT_CORRECTION_SOURCE': 'TIRS', 'GEOMETRIC_RMSE_MODEL_X': 4.177000045776367, 'system:asset_size': 1264461529, 'system:index': 'LC08_044034_20140606', 'REFLECTANCE_ADD_BAND_1': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_2': -0.10000000149011612, 'DATUM': 'WGS84', 'REFLECTANCE_ADD_BAND_3': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_4': -0.10000000149011612, 'RLUT_FILE_NAME': 'LC08RLUT_20130211_20150302_01_11.h5', 'REFLECTANCE_ADD_BAND_5': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_6': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_7': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_8': 
-0.10000000149011612, 'BPF_NAME_TIRS': 'LT8BPF20140606181212_20140606190417.01', 'GROUND_CONTROL_POINTS_VERSION': 4, 'DATA_TYPE': 'L1TP', 'UTM_ZONE': 10, 'LANDSAT_PRODUCT_ID': 'LC08_L1TP_044034_20140606_20170305_01_T1', 'REFLECTANCE_ADD_BAND_9': -0.10000000149011612, 'google:registration_ratio': 0, 'GRID_CELL_SIZE_PANCHROMATIC': 15, 'RADIANCE_ADD_BAND_4': -48.51054000854492, 'REFLECTANCE_MULT_BAND_7': 1.9999999494757503e-05, 'system:time_start': 1402080344240, 'RADIANCE_ADD_BAND_5': -29.6860294342041, 'REFLECTANCE_MULT_BAND_6': 1.9999999494757503e-05, 'RADIANCE_ADD_BAND_6': -7.382649898529053, 'REFLECTANCE_MULT_BAND_9': 1.9999999494757503e-05, 'PROCESSING_SOFTWARE_VERSION': 'LPGS_2.7.0', 'RADIANCE_ADD_BAND_7': -2.4883499145507812, 'REFLECTANCE_MULT_BAND_8': 1.9999999494757503e-05, 'RADIANCE_ADD_BAND_1': -60.96493148803711, 'RADIANCE_ADD_BAND_2': -62.428829193115234, 'RADIANCE_ADD_BAND_3': -57.52762985229492, 'REFLECTANCE_MULT_BAND_1': 1.9999999494757503e-05, 'RADIANCE_ADD_BAND_8': -54.90058135986328, 'REFLECTANCE_MULT_BAND_3': 1.9999999494757503e-05, 'RADIANCE_ADD_BAND_9': -11.601969718933105, 'REFLECTANCE_MULT_BAND_2': 1.9999999494757503e-05, 'REFLECTANCE_MULT_BAND_5': 1.9999999494757503e-05, 'REFLECTANCE_MULT_BAND_4': 1.9999999494757503e-05, 'THERMAL_LINES': 7801, 'TIRS_SSM_POSITION_STATUS': 'NOMINAL', 'GRID_CELL_SIZE_THERMAL': 30, 'NADIR_OFFNADIR': 'NADIR', 'RADIANCE_ADD_BAND_11': 0.10000000149011612, 'REQUEST_ID': '0501703043447_00036', 'EARTH_SUN_DISTANCE': 1.014767050743103, 'TIRS_SSM_MODEL': 'ACTUAL', 'FILE_DATE': 1488689158000, 'SCENE_CENTER_TIME': '18:45:44.2439160Z', 'SUN_ELEVATION': 67.10252380371094, 'BPF_NAME_OLI': 'LO8BPF20140606171321_20140606190324.01', 'RADIANCE_ADD_BAND_10': 0.10000000149011612, 'ROLL_ANGLE': -0.0010000000474974513, 'K1_CONSTANT_BAND_10': 774.8853149414062, 'SATURATION_BAND_1': 'Y', 'SATURATION_BAND_2': 'Y', 'SATURATION_BAND_3': 'Y', 'SATURATION_BAND_4': 'Y', 'SATURATION_BAND_5': 'Y', 'MAP_PROJECTION': 'UTM', 'SATURATION_BAND_6': 'Y', 'SENSOR_ID': 'OLI_TIRS', 'SATURATION_BAND_7': 'Y', 'K1_CONSTANT_BAND_11': 480.8883056640625, 'SATURATION_BAND_8': 'N', 'SATURATION_BAND_9': 'N', 'TARGET_WRS_PATH': 44, 'RADIANCE_MULT_BAND_11': 0.00033420001273043454, 'RADIANCE_MULT_BAND_10': 0.00033420001273043454, 'GROUND_CONTROL_POINTS_MODEL': 549, 'SPACECRAFT_ID': 'LANDSAT_8', 'ELEVATION_SOURCE': 'GLS2000', 'THERMAL_SAMPLES': 7671, 'GROUND_CONTROL_POINTS_VERIFY': 192}}, {'type': 'Image', 'bands': [{'id': 'B1', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 464985, 0, -30, 4264515]}, {'id': 'B2', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 464985, 0, -30, 4264515]}, {'id': 'B3', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 464985, 0, -30, 4264515]}, {'id': 'B4', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 464985, 0, -30, 4264515]}, {'id': 'B5', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 464985, 0, -30, 4264515]}, {'id': 'B6', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 464985, 0, -30, 4264515]}, {'id': 'B7', 'data_type': {'type': 'PixelType', 'precision': 
'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 464985, 0, -30, 4264515]}, {'id': 'B8', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [15341, 15601], 'crs': 'EPSG:32610', 'crs_transform': [15, 0, 464992.5, 0, -15, 4264507.5]}, {'id': 'B9', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 464985, 0, -30, 4264515]}, {'id': 'B10', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 464985, 0, -30, 4264515]}, {'id': 'B11', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 464985, 0, -30, 4264515]}, {'id': 'BQA', 'data_type': {'type': 'PixelType', 'precision': 'int', 'min': 0, 'max': 65535}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 464985, 0, -30, 4264515]}], 'version': 1580044754541352, 'id': 'LANDSAT/LC08/C01/T1_TOA/LC08_044034_20140622', 'properties': {'RADIANCE_MULT_BAND_5': 0.005919000133872032, 'RADIANCE_MULT_BAND_6': 0.0014720000326633453, 'RADIANCE_MULT_BAND_3': 0.011470000259578228, 'RADIANCE_MULT_BAND_4': 0.00967239961028099, 'RADIANCE_MULT_BAND_1': 0.01215600036084652, 'RADIANCE_MULT_BAND_2': 0.01244799979031086, 'K2_CONSTANT_BAND_11': 1201.1441650390625, 'K2_CONSTANT_BAND_10': 1321.078857421875, 'system:footprint': {'type': 'LinearRing', 'coordinates': [[-121.31788298539182, 36.408586408656575], [-121.31606048880933, 36.40856066998137], [-121.31430578209141, 36.41384029313054], [-121.19200158675721, 36.81044051106826], [-121.0698899591177, 37.20193823329732], [-120.93870690267133, 37.61909130033321], [-120.89293182605338, 37.76384529883042], [-120.83512328469709, 37.946118996073274], [-120.81773649437956, 38.00098066904156], [-120.7804031777974, 38.11877040222991], [-120.77836404766627, 38.12549776014683], [-120.77830846404605, 38.127328891154846], [-120.8524461141277, 38.14202547398031], [-122.7997909930455, 38.50911447061385], [-122.89773302105861, 38.526622656657345], [-122.90027762321128, 38.526624804291615], [-122.90672528283095, 38.50462571143406], [-123.39027158134067, 36.80670618253543], [-121.39230131401504, 36.42355089690084], [-121.31788298539182, 36.408586408656575]]}, 'REFLECTIVE_SAMPLES': 7671, 'SUN_AZIMUTH': 121.76666259765625, 'CPF_NAME': 'LC08CPF_20140401_20140630_01.01', 'DATE_ACQUIRED': '2014-06-22', 'ELLIPSOID': 'WGS84', 'google:registration_offset_x': -153.57119750976562, 'google:registration_offset_y': -44.11845779418945, 'STATION_ID': 'LGN', 'RESAMPLING_OPTION': 'CUBIC_CONVOLUTION', 'ORIENTATION': 'NORTH_UP', 'WRS_ROW': 34, 'RADIANCE_MULT_BAND_9': 0.002313300035893917, 'TARGET_WRS_ROW': 34, 'RADIANCE_MULT_BAND_7': 0.0004961499944329262, 'RADIANCE_MULT_BAND_8': 0.010947000235319138, 'IMAGE_QUALITY_TIRS': 9, 'TRUNCATION_OLI': 'UPPER', 'CLOUD_COVER': 33.029998779296875, 'GEOMETRIC_RMSE_VERIFY': 2.681999921798706, 'COLLECTION_CATEGORY': 'T1', 'GRID_CELL_SIZE_REFLECTIVE': 30, 'CLOUD_COVER_LAND': 3.390000104904175, 'GEOMETRIC_RMSE_MODEL': 5.414999961853027, 'COLLECTION_NUMBER': 1, 'IMAGE_QUALITY_OLI': 9, 'LANDSAT_SCENE_ID': 'LC80440342014173LGN01', 'WRS_PATH': 44, 'google:registration_count': 57, 'PANCHROMATIC_SAMPLES': 15341, 'PANCHROMATIC_LINES': 15601, 'GEOMETRIC_RMSE_MODEL_Y': 3.4539999961853027, 'REFLECTIVE_LINES': 7801, 'TIRS_STRAY_LIGHT_CORRECTION_SOURCE': 'TIRS', 'GEOMETRIC_RMSE_MODEL_X': 4.170000076293945, 
'system:asset_size': 1269718296, 'system:index': 'LC08_044034_20140622', 'REFLECTANCE_ADD_BAND_1': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_2': -0.10000000149011612, 'DATUM': 'WGS84', 'REFLECTANCE_ADD_BAND_3': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_4': -0.10000000149011612, 'RLUT_FILE_NAME': 'LC08RLUT_20130211_20150302_01_11.h5', 'REFLECTANCE_ADD_BAND_5': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_6': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_7': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_8': -0.10000000149011612, 'BPF_NAME_TIRS': 'LT8BPF20140622181215_20140622190420.01', 'GROUND_CONTROL_POINTS_VERSION': 4, 'DATA_TYPE': 'L1TP', 'UTM_ZONE': 10, 'LANDSAT_PRODUCT_ID': 'LC08_L1TP_044034_20140622_20170304_01_T1', 'REFLECTANCE_ADD_BAND_9': -0.10000000149011612, 'google:registration_ratio': 0.36538460850715637, 'GRID_CELL_SIZE_PANCHROMATIC': 15, 'RADIANCE_ADD_BAND_4': -48.362091064453125, 'REFLECTANCE_MULT_BAND_7': 1.9999999494757503e-05, 'system:time_start': 1403462747540, 'RADIANCE_ADD_BAND_5': -29.595190048217773, 'REFLECTANCE_MULT_BAND_6': 1.9999999494757503e-05, 'RADIANCE_ADD_BAND_6': -7.360050201416016, 'REFLECTANCE_MULT_BAND_9': 1.9999999494757503e-05, 'PROCESSING_SOFTWARE_VERSION': 'LPGS_2.7.0', 'RADIANCE_ADD_BAND_7': -2.4807300567626953, 'REFLECTANCE_MULT_BAND_8': 1.9999999494757503e-05, 'RADIANCE_ADD_BAND_1': -60.778358459472656, 'RADIANCE_ADD_BAND_2': -62.2377815246582, 'RADIANCE_ADD_BAND_3': -57.35158157348633, 'REFLECTANCE_MULT_BAND_1': 1.9999999494757503e-05, 'RADIANCE_ADD_BAND_8': -54.73257064819336, 'REFLECTANCE_MULT_BAND_3': 1.9999999494757503e-05, 'RADIANCE_ADD_BAND_9': -11.5664701461792, 'REFLECTANCE_MULT_BAND_2': 1.9999999494757503e-05, 'REFLECTANCE_MULT_BAND_5': 1.9999999494757503e-05, 'REFLECTANCE_MULT_BAND_4': 1.9999999494757503e-05, 'THERMAL_LINES': 7801, 'TIRS_SSM_POSITION_STATUS': 'NOMINAL', 'GRID_CELL_SIZE_THERMAL': 30, 'NADIR_OFFNADIR': 'NADIR', 'RADIANCE_ADD_BAND_11': 0.10000000149011612, 'REQUEST_ID': '0501703043315_00059', 'EARTH_SUN_DISTANCE': 1.016323447227478, 'TIRS_SSM_MODEL': 'ACTUAL', 'FILE_DATE': 1488670339000, 'SCENE_CENTER_TIME': '18:45:47.5389440Z', 'SUN_ELEVATION': 67.07411193847656, 'BPF_NAME_OLI': 'LO8BPF20140622182144_20140622190327.01', 'RADIANCE_ADD_BAND_10': 0.10000000149011612, 'ROLL_ANGLE': 0, 'K1_CONSTANT_BAND_10': 774.8853149414062, 'SATURATION_BAND_1': 'N', 'SATURATION_BAND_2': 'Y', 'SATURATION_BAND_3': 'Y', 'SATURATION_BAND_4': 'Y', 'SATURATION_BAND_5': 'Y', 'MAP_PROJECTION': 'UTM', 'SATURATION_BAND_6': 'Y', 'SENSOR_ID': 'OLI_TIRS', 'SATURATION_BAND_7': 'Y', 'K1_CONSTANT_BAND_11': 480.8883056640625, 'SATURATION_BAND_8': 'N', 'SATURATION_BAND_9': 'N', 'TARGET_WRS_PATH': 44, 'RADIANCE_MULT_BAND_11': 0.00033420001273043454, 'RADIANCE_MULT_BAND_10': 0.00033420001273043454, 'GROUND_CONTROL_POINTS_MODEL': 558, 'SPACECRAFT_ID': 'LANDSAT_8', 'ELEVATION_SOURCE': 'GLS2000', 'THERMAL_SAMPLES': 7671, 'GROUND_CONTROL_POINTS_VERIFY': 215}}, {'type': 'Image', 'bands': [{'id': 'B1', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 464385, 0, -30, 4264515]}, {'id': 'B2', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 464385, 0, -30, 4264515]}, {'id': 'B3', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 464385, 0, -30, 4264515]}, {'id': 'B4', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 
'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 464385, 0, -30, 4264515]}, {'id': 'B5', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 464385, 0, -30, 4264515]}, {'id': 'B6', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 464385, 0, -30, 4264515]}, {'id': 'B7', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 464385, 0, -30, 4264515]}, {'id': 'B8', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [15341, 15601], 'crs': 'EPSG:32610', 'crs_transform': [15, 0, 464392.5, 0, -15, 4264507.5]}, {'id': 'B9', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 464385, 0, -30, 4264515]}, {'id': 'B10', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 464385, 0, -30, 4264515]}, {'id': 'B11', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 464385, 0, -30, 4264515]}, {'id': 'BQA', 'data_type': {'type': 'PixelType', 'precision': 'int', 'min': 0, 'max': 65535}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 464385, 0, -30, 4264515]}], 'version': 1580044754541352, 'id': 'LANDSAT/LC08/C01/T1_TOA/LC08_044034_20140708', 'properties': {'RADIANCE_MULT_BAND_5': 0.005915500223636627, 'RADIANCE_MULT_BAND_6': 0.0014711000258103013, 'RADIANCE_MULT_BAND_3': 0.011463000439107418, 'RADIANCE_MULT_BAND_4': 0.00966660026460886, 'RADIANCE_MULT_BAND_1': 0.012148000299930573, 'RADIANCE_MULT_BAND_2': 0.012439999729394913, 'K2_CONSTANT_BAND_11': 1201.1441650390625, 'K2_CONSTANT_BAND_10': 1321.078857421875, 'system:footprint': {'type': 'LinearRing', 'coordinates': [[-121.05901267311019, 38.1813072066471], [-122.67195531509144, 38.48483894042248], [-122.90730152252529, 38.52707032726991], [-122.90792970602998, 38.52608585671175], [-122.91360364873563, 38.50706451546646], [-123.39734192537979, 36.8083407130841], [-123.39733405223458, 36.80681601889492], [-123.39114513279036, 36.8055936364345], [-123.34317991176952, 36.79681686843965], [-122.28073257380132, 36.59717466111698], [-121.36957092975639, 36.417575938065966], [-121.32540815303872, 36.40869214276654], [-121.32304292059108, 36.40865900248354], [-121.19650818732099, 36.81902664136925], [-121.07109421952906, 37.221019713169355], [-120.94367715094019, 37.62606705102397], [-120.90082928429048, 37.761553141330744], [-120.84740670701625, 37.93009641124127], [-120.82257019700445, 38.00830842766878], [-120.78499155821282, 38.12676852456719], [-120.78581606001764, 38.12745169022067], [-121.05901267311019, 38.1813072066471]]}, 'REFLECTIVE_SAMPLES': 7671, 'SUN_AZIMUTH': 122.4483642578125, 'CPF_NAME': 'LC08CPF_20140701_20140930_01.01', 'DATE_ACQUIRED': '2014-07-08', 'ELLIPSOID': 'WGS84', 'google:registration_offset_x': 537.4625854492188, 'google:registration_offset_y': 10.817861557006836, 'STATION_ID': 'LGN', 'RESAMPLING_OPTION': 'CUBIC_CONVOLUTION', 'ORIENTATION': 'NORTH_UP', 'WRS_ROW': 34, 'RADIANCE_MULT_BAND_9': 0.0023119000252336264, 'TARGET_WRS_ROW': 34, 'RADIANCE_MULT_BAND_7': 0.0004958499921485782, 'RADIANCE_MULT_BAND_8': 0.010940000414848328, 'IMAGE_QUALITY_TIRS': 9, 'TRUNCATION_OLI': 'UPPER', 'CLOUD_COVER': 
39.97999954223633, 'GEOMETRIC_RMSE_VERIFY': 2.5929999351501465, 'COLLECTION_CATEGORY': 'T1', 'GRID_CELL_SIZE_REFLECTIVE': 30, 'CLOUD_COVER_LAND': 12.0600004196167, 'GEOMETRIC_RMSE_MODEL': 5.275000095367432, 'COLLECTION_NUMBER': 1, 'IMAGE_QUALITY_OLI': 9, 'LANDSAT_SCENE_ID': 'LC80440342014189LGN01', 'WRS_PATH': 44, 'google:registration_count': 96, 'PANCHROMATIC_SAMPLES': 15341, 'PANCHROMATIC_LINES': 15601, 'GEOMETRIC_RMSE_MODEL_Y': 3.4619998931884766, 'REFLECTIVE_LINES': 7801, 'TIRS_STRAY_LIGHT_CORRECTION_SOURCE': 'TIRS', 'GEOMETRIC_RMSE_MODEL_X': 3.9800000190734863, 'system:asset_size': 1303038285, 'system:index': 'LC08_044034_20140708', 'REFLECTANCE_ADD_BAND_1': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_2': -0.10000000149011612, 'DATUM': 'WGS84', 'REFLECTANCE_ADD_BAND_3': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_4': -0.10000000149011612, 'RLUT_FILE_NAME': 'LC08RLUT_20130211_20150302_01_11.h5', 'REFLECTANCE_ADD_BAND_5': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_6': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_7': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_8': -0.10000000149011612, 'BPF_NAME_TIRS': 'LT8BPF20140708181845_20140708190428.01', 'GROUND_CONTROL_POINTS_VERSION': 4, 'DATA_TYPE': 'L1TP', 'UTM_ZONE': 10, 'LANDSAT_PRODUCT_ID': 'LC08_L1TP_044034_20140708_20170304_01_T1', 'REFLECTANCE_ADD_BAND_9': -0.10000000149011612, 'google:registration_ratio': 0.6486486196517944, 'GRID_CELL_SIZE_PANCHROMATIC': 15, 'RADIANCE_ADD_BAND_4': -48.33314895629883, 'REFLECTANCE_MULT_BAND_7': 1.9999999494757503e-05, 'system:time_start': 1404845155330, 'RADIANCE_ADD_BAND_5': -29.57748031616211, 'REFLECTANCE_MULT_BAND_6': 1.9999999494757503e-05, 'RADIANCE_ADD_BAND_6': -7.355649948120117, 'REFLECTANCE_MULT_BAND_9': 1.9999999494757503e-05, 'PROCESSING_SOFTWARE_VERSION': 'LPGS_2.7.0', 'RADIANCE_ADD_BAND_7': -2.479249954223633, 'REFLECTANCE_MULT_BAND_8': 1.9999999494757503e-05, 'RADIANCE_ADD_BAND_1': -60.74198913574219, 'RADIANCE_ADD_BAND_2': -62.200538635253906, 'RADIANCE_ADD_BAND_3': -57.31726837158203, 'REFLECTANCE_MULT_BAND_1': 1.9999999494757503e-05, 'RADIANCE_ADD_BAND_8': -54.69982147216797, 'REFLECTANCE_MULT_BAND_3': 1.9999999494757503e-05, 'RADIANCE_ADD_BAND_9': -11.559550285339355, 'REFLECTANCE_MULT_BAND_2': 1.9999999494757503e-05, 'REFLECTANCE_MULT_BAND_5': 1.9999999494757503e-05, 'REFLECTANCE_MULT_BAND_4': 1.9999999494757503e-05, 'THERMAL_LINES': 7801, 'TIRS_SSM_POSITION_STATUS': 'NOMINAL', 'GRID_CELL_SIZE_THERMAL': 30, 'NADIR_OFFNADIR': 'NADIR', 'RADIANCE_ADD_BAND_11': 0.10000000149011612, 'REQUEST_ID': '0501703043141_00057', 'EARTH_SUN_DISTANCE': 1.016627550125122, 'TIRS_SSM_MODEL': 'ACTUAL', 'FILE_DATE': 1488642210000, 'SCENE_CENTER_TIME': '18:45:55.3336140Z', 'SUN_ELEVATION': 65.8777847290039, 'BPF_NAME_OLI': 'LO8BPF20140708182239_20140708190335.01', 'RADIANCE_ADD_BAND_10': 0.10000000149011612, 'ROLL_ANGLE': 0, 'K1_CONSTANT_BAND_10': 774.8853149414062, 'SATURATION_BAND_1': 'N', 'SATURATION_BAND_2': 'Y', 'SATURATION_BAND_3': 'Y', 'SATURATION_BAND_4': 'Y', 'SATURATION_BAND_5': 'Y', 'MAP_PROJECTION': 'UTM', 'SATURATION_BAND_6': 'Y', 'SENSOR_ID': 'OLI_TIRS', 'SATURATION_BAND_7': 'Y', 'K1_CONSTANT_BAND_11': 480.8883056640625, 'SATURATION_BAND_8': 'N', 'SATURATION_BAND_9': 'N', 'TARGET_WRS_PATH': 44, 'RADIANCE_MULT_BAND_11': 0.00033420001273043454, 'RADIANCE_MULT_BAND_10': 0.00033420001273043454, 'GROUND_CONTROL_POINTS_MODEL': 506, 'SPACECRAFT_ID': 'LANDSAT_8', 'ELEVATION_SOURCE': 'GLS2000', 'THERMAL_SAMPLES': 7671, 'GROUND_CONTROL_POINTS_VERIFY': 187}}, {'type': 'Image', 'bands': [{'id': 'B1', 
'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 465585, 0, -30, 4264515]}, {'id': 'B2', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 465585, 0, -30, 4264515]}, {'id': 'B3', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 465585, 0, -30, 4264515]}, {'id': 'B4', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 465585, 0, -30, 4264515]}, {'id': 'B5', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 465585, 0, -30, 4264515]}, {'id': 'B6', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 465585, 0, -30, 4264515]}, {'id': 'B7', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 465585, 0, -30, 4264515]}, {'id': 'B8', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [15341, 15601], 'crs': 'EPSG:32610', 'crs_transform': [15, 0, 465592.5, 0, -15, 4264507.5]}, {'id': 'B9', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 465585, 0, -30, 4264515]}, {'id': 'B10', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 465585, 0, -30, 4264515]}, {'id': 'B11', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 465585, 0, -30, 4264515]}, {'id': 'BQA', 'data_type': {'type': 'PixelType', 'precision': 'int', 'min': 0, 'max': 65535}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 465585, 0, -30, 4264515]}], 'version': 1580044754541352, 'id': 'LANDSAT/LC08/C01/T1_TOA/LC08_044034_20140724', 'properties': {'RADIANCE_MULT_BAND_5': 0.005925099831074476, 'RADIANCE_MULT_BAND_6': 0.0014735000440850854, 'RADIANCE_MULT_BAND_3': 0.011482000350952148, 'RADIANCE_MULT_BAND_4': 0.00968219991773367, 'RADIANCE_MULT_BAND_1': 0.01216800045222044, 'RADIANCE_MULT_BAND_2': 0.01245999988168478, 'K2_CONSTANT_BAND_11': 1201.1441650390625, 'K2_CONSTANT_BAND_10': 1321.078857421875, 'system:footprint': {'type': 'LinearRing', 'coordinates': [[-120.76979738859893, 38.12703313441971], [-120.77158274982695, 38.12754115630432], [-121.42134145512932, 38.25446242506864], [-122.22152328015193, 38.40525302827857], [-122.89018639673915, 38.5266157865438], [-122.89242341654155, 38.526617857573015], [-122.89466073063308, 38.519621963160894], [-123.38187286142927, 36.80872337128997], [-123.38186259045791, 36.806647917217006], [-123.35901116184863, 36.80244946066433], [-122.88546161915531, 36.71490670011608], [-121.3092309147788, 36.40846418437395], [-121.30782254819886, 36.40844420946354], [-121.08696039686296, 37.12150541970936], [-121.06667332030511, 37.186300761679455], [-120.9265815780102, 37.63183285571133], [-120.88231915679422, 37.77176559071555], [-120.83617669320071, 37.917319414649754], [-120.8201155519523, 37.96798246241547], [-120.7756360373179, 38.10840000147115], [-120.76979738859893, 38.12703313441971]]}, 'REFLECTIVE_SAMPLES': 7671, 'SUN_AZIMUTH': 126.32495880126953, 
'CPF_NAME': 'LC08CPF_20140701_20140930_01.01', 'DATE_ACQUIRED': '2014-07-24', 'ELLIPSOID': 'WGS84', 'google:registration_offset_x': 407.9683837890625, 'google:registration_offset_y': -124.7548828125, 'STATION_ID': 'LGN', 'RESAMPLING_OPTION': 'CUBIC_CONVOLUTION', 'ORIENTATION': 'NORTH_UP', 'WRS_ROW': 34, 'RADIANCE_MULT_BAND_9': 0.00231559993699193, 'TARGET_WRS_ROW': 34, 'RADIANCE_MULT_BAND_7': 0.0004966499982401729, 'RADIANCE_MULT_BAND_8': 0.010958000086247921, 'IMAGE_QUALITY_TIRS': 9, 'TRUNCATION_OLI': 'UPPER', 'CLOUD_COVER': 0.3199999928474426, 'GEOMETRIC_RMSE_VERIFY': 2.700000047683716, 'COLLECTION_CATEGORY': 'T1', 'GRID_CELL_SIZE_REFLECTIVE': 30, 'CLOUD_COVER_LAND': 0.23000000417232513, 'GEOMETRIC_RMSE_MODEL': 5.454999923706055, 'COLLECTION_NUMBER': 1, 'IMAGE_QUALITY_OLI': 9, 'LANDSAT_SCENE_ID': 'LC80440342014205LGN01', 'WRS_PATH': 44, 'google:registration_count': 10, 'PANCHROMATIC_SAMPLES': 15341, 'PANCHROMATIC_LINES': 15601, 'GEOMETRIC_RMSE_MODEL_Y': 3.703000068664551, 'REFLECTIVE_LINES': 7801, 'TIRS_STRAY_LIGHT_CORRECTION_SOURCE': 'TIRS', 'GEOMETRIC_RMSE_MODEL_X': 4.00600004196167, 'system:asset_size': 1201420225, 'system:index': 'LC08_044034_20140724', 'REFLECTANCE_ADD_BAND_1': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_2': -0.10000000149011612, 'DATUM': 'WGS84', 'REFLECTANCE_ADD_BAND_3': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_4': -0.10000000149011612, 'RLUT_FILE_NAME': 'LC08RLUT_20130211_20150302_01_11.h5', 'REFLECTANCE_ADD_BAND_5': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_6': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_7': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_8': -0.10000000149011612, 'BPF_NAME_TIRS': 'LT8BPF20140724181847_20140724190430.01', 'GROUND_CONTROL_POINTS_VERSION': 4, 'DATA_TYPE': 'L1TP', 'UTM_ZONE': 10, 'LANDSAT_PRODUCT_ID': 'LC08_L1TP_044034_20140724_20170304_01_T1', 'REFLECTANCE_ADD_BAND_9': -0.10000000149011612, 'google:registration_ratio': 0.3333333432674408, 'GRID_CELL_SIZE_PANCHROMATIC': 15, 'RADIANCE_ADD_BAND_4': -48.41123962402344, 'REFLECTANCE_MULT_BAND_7': 1.9999999494757503e-05, 'system:time_start': 1406227557220, 'RADIANCE_ADD_BAND_5': -29.625259399414062, 'REFLECTANCE_MULT_BAND_6': 1.9999999494757503e-05, 'RADIANCE_ADD_BAND_6': -7.36752986907959, 'REFLECTANCE_MULT_BAND_9': 1.9999999494757503e-05, 'PROCESSING_SOFTWARE_VERSION': 'LPGS_2.7.0', 'RADIANCE_ADD_BAND_7': -2.4832499027252197, 'REFLECTANCE_MULT_BAND_8': 1.9999999494757503e-05, 'RADIANCE_ADD_BAND_1': -60.84012985229492, 'RADIANCE_ADD_BAND_2': -62.301029205322266, 'RADIANCE_ADD_BAND_3': -57.40987014770508, 'REFLECTANCE_MULT_BAND_1': 1.9999999494757503e-05, 'RADIANCE_ADD_BAND_8': -54.78820037841797, 'REFLECTANCE_MULT_BAND_3': 1.9999999494757503e-05, 'RADIANCE_ADD_BAND_9': -11.57822036743164, 'REFLECTANCE_MULT_BAND_2': 1.9999999494757503e-05, 'REFLECTANCE_MULT_BAND_5': 1.9999999494757503e-05, 'REFLECTANCE_MULT_BAND_4': 1.9999999494757503e-05, 'THERMAL_LINES': 7801, 'TIRS_SSM_POSITION_STATUS': 'NOMINAL', 'GRID_CELL_SIZE_THERMAL': 30, 'NADIR_OFFNADIR': 'NADIR', 'RADIANCE_ADD_BAND_11': 0.10000000149011612, 'REQUEST_ID': '0501703043005_00057', 'EARTH_SUN_DISTANCE': 1.0158073902130127, 'TIRS_SSM_MODEL': 'ACTUAL', 'FILE_DATE': 1488621091000, 'SCENE_CENTER_TIME': '18:45:57.2197370Z', 'SUN_ELEVATION': 63.77280807495117, 'BPF_NAME_OLI': 'LO8BPF20140724182241_20140724190337.01', 'RADIANCE_ADD_BAND_10': 0.10000000149011612, 'ROLL_ANGLE': 0, 'K1_CONSTANT_BAND_10': 774.8853149414062, 'SATURATION_BAND_1': 'Y', 'SATURATION_BAND_2': 'Y', 'SATURATION_BAND_3': 'Y', 'SATURATION_BAND_4': 'Y', 
'SATURATION_BAND_5': 'Y', 'MAP_PROJECTION': 'UTM', 'SATURATION_BAND_6': 'Y', 'SENSOR_ID': 'OLI_TIRS', 'SATURATION_BAND_7': 'Y', 'K1_CONSTANT_BAND_11': 480.8883056640625, 'SATURATION_BAND_8': 'N', 'SATURATION_BAND_9': 'N', 'TARGET_WRS_PATH': 44, 'RADIANCE_MULT_BAND_11': 0.00033420001273043454, 'RADIANCE_MULT_BAND_10': 0.00033420001273043454, 'GROUND_CONTROL_POINTS_MODEL': 568, 'SPACECRAFT_ID': 'LANDSAT_8', 'ELEVATION_SOURCE': 'GLS2000', 'THERMAL_SAMPLES': 7671, 'GROUND_CONTROL_POINTS_VERIFY': 213}}]}
Count: 9
Date range: 2014-03-18 to 2014-07-24 (system:time_start 1395168392050 to 1406227557220 ms since epoch)
Sun elevation statistics: n = 9, min 46.47°, max 67.10°, mean 61.02°, sample SD 7.25° [full DataDictionary output truncated]
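###Markdown
The code that generated the printed summaries above is not shown in this section. As a minimal, hypothetical sketch of how such output is typically produced with the Earth Engine Python API — assuming the collection was filtered to WRS path 44, row 34 over spring and summer 2014, which is what the scene IDs (LC08_044034_*) suggest:
###Code
import ee

ee.Initialize()

# Hypothetical reconstruction of the filtering implied by the scene IDs above
# (LC08_044034_*, i.e. WRS path 44, row 34, March-July 2014).
collection = (ee.ImageCollection('LANDSAT/LC08/C01/T1_TOA')
              .filter(ee.Filter.eq('WRS_PATH', 44))
              .filter(ee.Filter.eq('WRS_ROW', 34))
              .filterDate('2014-03-01', '2014-08-01'))

# Number of images in the filtered collection.
print('Count:', collection.size().getInfo())

# system:time_start is stored in milliseconds since epoch, as in the raw output.
start = ee.Date(collection.aggregate_min('system:time_start'))
end = ee.Date(collection.aggregate_max('system:time_start'))
print('Date range:', start.format('YYYY-MM-dd').getInfo(), 'to',
      end.format('YYYY-MM-dd').getInfo())

# aggregate_stats returns the DataDictionary (min/max/mean/sample_sd/...) printed above.
print('Sun elevation statistics:', collection.aggregate_stats('SUN_ELEVATION').getInfo())
###Output
_____no_output_____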
Least cloudy image: LANDSAT/LC08/C01/T1_TOA/LC08_044034_20140318, acquired 2014-03-18, CLOUD_COVER 0.06%, sun elevation 46.47° [full per-image metadata dictionary truncated]
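###Markdown
The "least cloudy image" summary above can be reproduced by sorting the collection on its CLOUD_COVER property (ascending by default) and taking the first element. A sketch under the same assumed `collection` as before:
###Code
# Sort ascending on cloud cover; first() then yields the clearest scene
# (LC08_044034_20140318, 0.06% cloud cover in the output above).
least_cloudy = ee.Image(collection.sort('CLOUD_COVER').first())
print('Least cloudy image:', least_cloudy.getInfo())
###Output
_____no_output_____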
Recent images: ImageCollection LANDSAT/LC08/C01/T1_TOA — Landsat 8 Collection 1 Tier 1 calibrated top-of-atmosphere (TOA) reflectance; bands B1–B11 plus the BQA quality bitmask, 30 m resolution for the reflective bands, 15 m for the panchromatic band B8, 16-day revisit [collection-level HTML description and property table truncated]
= month</li>\n<li>dd = day</li>\n</ul></td>\n</tr>\n<tr>\n<td>RESAMPLING_OPTION</td>\n<td>STRING</td>\n<td><p>Resampling option used in creating the image.</p></td>\n</tr>\n<tr>\n<td>RLUT_FILE_NAME</td>\n<td>STRING</td>\n<td><p>The file name for the Response Linearization Lookup Table (RLUT) used to generate the product, if applicable.</p></td>\n</tr>\n<tr>\n<td>ROLL_ANGLE</td>\n<td>DOUBLE</td>\n<td><p>The amount of spacecraft roll angle at the scene center.</p></td>\n</tr>\n<tr>\n<td>SATURATION_BAND_1</td>\n<td>STRING</td>\n<td><p>Flag indicating saturated pixels for band 1 ('Y'/'N')</p></td>\n</tr>\n<tr>\n<td>SATURATION_BAND_10</td>\n<td>STRING</td>\n<td><p>Flag indicating saturated pixels for band 10 ('Y'/'N')</p></td>\n</tr>\n<tr>\n<td>SATURATION_BAND_11</td>\n<td>STRING</td>\n<td><p>Flag indicating saturated pixels for band 11 ('Y'/'N')</p></td>\n</tr>\n<tr>\n<td>SATURATION_BAND_2</td>\n<td>STRING</td>\n<td><p>Flag indicating saturated pixels for band 2 ('Y'/'N')</p></td>\n</tr>\n<tr>\n<td>SATURATION_BAND_3</td>\n<td>STRING</td>\n<td><p>Flag indicating saturated pixels for band 3 ('Y'/'N')</p></td>\n</tr>\n<tr>\n<td>SATURATION_BAND_4</td>\n<td>STRING</td>\n<td><p>Flag indicating saturated pixels for band 4 ('Y'/'N')</p></td>\n</tr>\n<tr>\n<td>SATURATION_BAND_5</td>\n<td>STRING</td>\n<td><p>Flag indicating saturated pixels for band 5 ('Y'/'N')</p></td>\n</tr>\n<tr>\n<td>SATURATION_BAND_6</td>\n<td>STRING</td>\n<td><p>Flag indicating saturated pixels for band 6 ('Y'/'N')</p></td>\n</tr>\n<tr>\n<td>SATURATION_BAND_7</td>\n<td>STRING</td>\n<td><p>Flag indicating saturated pixels for band 7 ('Y'/'N')</p></td>\n</tr>\n<tr>\n<td>SATURATION_BAND_8</td>\n<td>STRING</td>\n<td><p>Flag indicating saturated pixels for band 8 ('Y'/'N')</p></td>\n</tr>\n<tr>\n<td>SATURATION_BAND_9</td>\n<td>STRING</td>\n<td><p>Flag indicating saturated pixels for band 9 ('Y'/'N')</p></td>\n</tr>\n<tr>\n<td>SCENE_CENTER_TIME</td>\n<td>STRING</td>\n<td><p>Scene center time of acquired image. HH:MM:SS.SSSSSSSZ</p>\n<ul>\n<li>HH = Hour (00-23)</li>\n<li>MM = Minutes</li>\n<li>SS.SSSSSSS = Fractional seconds</li>\n<li>Z = "Zulu" time (same as GMT)</li>\n</ul></td>\n</tr>\n<tr>\n<td>SENSOR_ID</td>\n<td>STRING</td>\n<td><p>Sensor used to capture data.</p></td>\n</tr>\n<tr>\n<td>SPACECRAFT_ID</td>\n<td>STRING</td>\n<td><p>Spacecraft identification.</p></td>\n</tr>\n<tr>\n<td>STATION_ID</td>\n<td>STRING</td>\n<td><p>Ground Station/Organisation that received the data.</p></td>\n</tr>\n<tr>\n<td>SUN_AZIMUTH</td>\n<td>DOUBLE</td>\n<td><p>Sun azimuth angle in degrees for the image center location at the image centre acquisition time.</p></td>\n</tr>\n<tr>\n<td>SUN_ELEVATION</td>\n<td>DOUBLE</td>\n<td><p>Sun elevation angle in degrees for the image center location at the image centre acquisition time.</p></td>\n</tr>\n<tr>\n<td>TARGET_WRS_PATH</td>\n<td>DOUBLE</td>\n<td><p>Nearest WRS-2 path to the line-of-sight scene center of the image.</p></td>\n</tr>\n<tr>\n<td>TARGET_WRS_ROW</td>\n<td>DOUBLE</td>\n<td><p>Nearest WRS-2 row to the line-of-sight scene center of the image. 
Rows 880-889 and 990-999 are reserved for the polar regions where it is undefined in the WRS-2.</p></td>\n</tr>\n<tr>\n<td>THERMAL_LINES</td>\n<td>DOUBLE</td>\n<td><p>Number of product lines for the thermal band.</p></td>\n</tr>\n<tr>\n<td>THERMAL_SAMPLES</td>\n<td>DOUBLE</td>\n<td><p>Number of product samples for the thermal band.</p></td>\n</tr>\n<tr>\n<td>TIRS_SSM_MODEL</td>\n<td>STRING</td>\n<td><p>Due to an anomalous condition on the Thermal Infrared\nSensor (TIRS) Scene Select Mirror (SSM) encoder electronics,\nthis field has been added to indicate which model was used to process the data.\n(Actual, Preliminary, Final)</p></td>\n</tr>\n<tr>\n<td>TIRS_SSM_POSITION_STATUS</td>\n<td>STRING</td>\n<td><p>TIRS SSM position status.</p></td>\n</tr>\n<tr>\n<td>TIRS_STRAY_LIGHT_CORRECTION_SOURCE</td>\n<td>STRING</td>\n<td><p>TIRS stray light correction source.</p></td>\n</tr>\n<tr>\n<td>TRUNCATION_OLI</td>\n<td>STRING</td>\n<td><p>Region of OLCI truncated.</p></td>\n</tr>\n<tr>\n<td>UTM_ZONE</td>\n<td>DOUBLE</td>\n<td><p>UTM zone number used in product map projection.</p></td>\n</tr>\n<tr>\n<td>WRS_PATH</td>\n<td>DOUBLE</td>\n<td><p>The WRS orbital path number (001 - 251).</p></td>\n</tr>\n<tr>\n<td>WRS_ROW</td>\n<td>DOUBLE</td>\n<td><p>Landsat satellite WRS row (001-248).</p></td>\n</tr>\n</table>\n<style>\n table.eecat {\n border: 1px solid black;\n border-collapse: collapse;\n font-size: 13px;\n }\n table.eecat td, tr, th {\n text-align: left; vertical-align: top;\n border: 1px solid gray; padding: 3px;\n }\n td.nobreak { white-space: nowrap; }\n</style>', 'source_tags': ['landsat', 'usgs'], 'visualization_1_name': 'Near Infrared (543)', 'visualization_0_max': '30000.0', 'title': 'USGS Landsat 8 Collection 1 Tier 1 TOA Reflectance', 'visualization_0_gain': '500.0', 'system:visualization_2_max': '30000.0', 'product_tags': ['global', 'toa', 'tier1', 'oli_tirs', 'c1', 'radiance', 'lc8', 'l8', 't1'], 'visualization_1_gain': '500.0', 'provider': 'USGS/Google', 'visualization_1_min': '0.0', 'system:visualization_2_name': 'Shortwave Infrared (753)', 'visualization_0_min': '0.0', 'system:visualization_1_bands': 'B5,B4,B3', 'system:visualization_1_max': '30000.0', 'visualization_0_name': 'True Color (432)', 'date_range': [1365638400000, 1578700800000], 'visualization_2_bands': 'B7,B5,B3', 'visualization_2_name': 'Shortwave Infrared (753)', 'period': 0, 'system:visualization_2_min': '0.0', 'system:visualization_0_bands': 'B4,B3,B2', 'visualization_2_min': '0.0', 'visualization_2_gain': '500.0', 'provider_url': 'http://landsat.usgs.gov/', 'sample': 'https://mw1.google.com/ges/dd/images/LANDSAT_TOA_sample.png', 'system:visualization_1_name': 'Near Infrared (543)', 'tags': ['landsat', 'usgs', 'global', 'toa', 'tier1', 'oli_tirs', 'c1', 'radiance', 'lc8', 'l8', 't1'], 'system:visualization_0_max': '30000.0', 'visualization_2_max': '30000.0', 'system:visualization_2_bands': 'B7,B5,B3', 'system:visualization_1_min': '0.0', 'system:visualization_0_name': 'True Color (432)', 'visualization_0_bands': 'B4,B3,B2'}, 'features': [{'type': 'Image', 'bands': [{'id': 'B1', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 465585, 0, -30, 4264515]}, {'id': 'B2', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 465585, 0, -30, 4264515]}, {'id': 'B3', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7671, 7801], 'crs': 'EPSG:32610', 
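For reference, these `MULT`/`ADD` and `K1`/`K2` fields parameterize the standard USGS Landsat 8 Level-1 conversions ($Q_{cal}$ is the quantized, calibrated pixel value and $\theta_{SE}$ the `SUN_ELEVATION`):

$$L_\lambda = M_L\,Q_{cal} + A_L, \qquad \rho_\lambda' = M_\rho\,Q_{cal} + A_\rho, \qquad \rho_\lambda = \frac{\rho_\lambda'}{\sin\theta_{SE}},$$

and, for the thermal Bands 10/11, brightness temperature

$$T = \frac{K_2}{\ln\!\left(K_1/L_\lambda + 1\right)}.$$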
[Features condensed — each element of `features` is an `Image` with float bands `B1`-`B11` plus the integer `BQA` quality band on a 30 m grid (7671×7801 or 7671×7791 pixels; `B8` panchromatic at 15 m, 15341×15601 or 15341×15581), all in `EPSG:32610`, carrying the per-scene calibration constants, footprint, sun-geometry, cloud-cover and quality properties defined above. The scenes shown are WRS path 44 / row 34 acquisitions from 2014: `LC08_044034_20140724`, `_20140708`, `_20140622`, `_20140606`, `_20140521`, `_20140505`, `_20140419` and `_20140403`, whose property listing continues below.]
0.011308000423014164, 'IMAGE_QUALITY_TIRS': 9, 'TRUNCATION_OLI': 'UPPER', 'CLOUD_COVER': 28.1200008392334, 'GEOMETRIC_RMSE_VERIFY': 3.2160000801086426, 'COLLECTION_CATEGORY': 'T1', 'GRID_CELL_SIZE_REFLECTIVE': 30, 'CLOUD_COVER_LAND': 31.59000015258789, 'GEOMETRIC_RMSE_MODEL': 6.959000110626221, 'COLLECTION_NUMBER': 1, 'IMAGE_QUALITY_OLI': 9, 'LANDSAT_SCENE_ID': 'LC80440342014093LGN01', 'WRS_PATH': 44, 'google:registration_count': 0, 'PANCHROMATIC_SAMPLES': 15341, 'PANCHROMATIC_LINES': 15581, 'GEOMETRIC_RMSE_MODEL_Y': 4.63700008392334, 'REFLECTIVE_LINES': 7791, 'TIRS_STRAY_LIGHT_CORRECTION_SOURCE': 'TIRS', 'GEOMETRIC_RMSE_MODEL_X': 5.188000202178955, 'system:asset_size': 1208697743, 'system:index': 'LC08_044034_20140403', 'REFLECTANCE_ADD_BAND_1': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_2': -0.10000000149011612, 'DATUM': 'WGS84', 'REFLECTANCE_ADD_BAND_3': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_4': -0.10000000149011612, 'RLUT_FILE_NAME': 'LC08RLUT_20130211_20150302_01_11.h5', 'REFLECTANCE_ADD_BAND_5': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_6': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_7': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_8': -0.10000000149011612, 'BPF_NAME_TIRS': 'LT8BPF20140403182815_20140403190449.01', 'GROUND_CONTROL_POINTS_VERSION': 4, 'DATA_TYPE': 'L1TP', 'UTM_ZONE': 10, 'LANDSAT_PRODUCT_ID': 'LC08_L1TP_044034_20140403_20170306_01_T1', 'REFLECTANCE_ADD_BAND_9': -0.10000000149011612, 'google:registration_ratio': 0, 'GRID_CELL_SIZE_PANCHROMATIC': 15, 'RADIANCE_ADD_BAND_4': -49.95764923095703, 'REFLECTANCE_MULT_BAND_7': 1.9999999494757503e-05, 'system:time_start': 1396550776290, 'RADIANCE_ADD_BAND_5': -30.571590423583984, 'REFLECTANCE_MULT_BAND_6': 1.9999999494757503e-05, 'RADIANCE_ADD_BAND_6': -7.602880001068115, 'REFLECTANCE_MULT_BAND_9': 1.9999999494757503e-05, 'PROCESSING_SOFTWARE_VERSION': 'LPGS_2.7.0', 'RADIANCE_ADD_BAND_7': -2.562580108642578, 'REFLECTANCE_MULT_BAND_8': 1.9999999494757503e-05, 'RADIANCE_ADD_BAND_1': -62.78356170654297, 'RADIANCE_ADD_BAND_2': -64.29113006591797, 'RADIANCE_ADD_BAND_3': -59.24372863769531, 'REFLECTANCE_MULT_BAND_1': 1.9999999494757503e-05, 'RADIANCE_ADD_BAND_8': -56.53831100463867, 'REFLECTANCE_MULT_BAND_3': 1.9999999494757503e-05, 'RADIANCE_ADD_BAND_9': -11.94806957244873, 'REFLECTANCE_MULT_BAND_2': 1.9999999494757503e-05, 'REFLECTANCE_MULT_BAND_5': 1.9999999494757503e-05, 'REFLECTANCE_MULT_BAND_4': 1.9999999494757503e-05, 'THERMAL_LINES': 7791, 'TIRS_SSM_POSITION_STATUS': 'NOMINAL', 'GRID_CELL_SIZE_THERMAL': 30, 'NADIR_OFFNADIR': 'NADIR', 'RADIANCE_ADD_BAND_11': 0.10000000149011612, 'REQUEST_ID': '0501703063782_00025', 'EARTH_SUN_DISTANCE': 0.9999619126319885, 'TIRS_SSM_MODEL': 'ACTUAL', 'FILE_DATE': 1488829355000, 'SCENE_CENTER_TIME': '18:46:16.2881730Z', 'SUN_ELEVATION': 52.549800872802734, 'BPF_NAME_OLI': 'LO8BPF20140403183209_20140403190356.01', 'RADIANCE_ADD_BAND_10': 0.10000000149011612, 'ROLL_ANGLE': -0.0010000000474974513, 'K1_CONSTANT_BAND_10': 774.8853149414062, 'SATURATION_BAND_1': 'N', 'SATURATION_BAND_2': 'N', 'SATURATION_BAND_3': 'N', 'SATURATION_BAND_4': 'N', 'SATURATION_BAND_5': 'Y', 'MAP_PROJECTION': 'UTM', 'SATURATION_BAND_6': 'Y', 'SENSOR_ID': 'OLI_TIRS', 'SATURATION_BAND_7': 'Y', 'K1_CONSTANT_BAND_11': 480.8883056640625, 'SATURATION_BAND_8': 'N', 'SATURATION_BAND_9': 'N', 'TARGET_WRS_PATH': 44, 'RADIANCE_MULT_BAND_11': 0.00033420001273043454, 'RADIANCE_MULT_BAND_10': 0.00033420001273043454, 'GROUND_CONTROL_POINTS_MODEL': 385, 'SPACECRAFT_ID': 'LANDSAT_8', 'ELEVATION_SOURCE': 'GLS2000', 'THERMAL_SAMPLES': 
7671, 'GROUND_CONTROL_POINTS_VERIFY': 98}}, {'type': 'Image', 'bands': [{'id': 'B1', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7661, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 460785, 0, -30, 4264215]}, {'id': 'B2', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7661, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 460785, 0, -30, 4264215]}, {'id': 'B3', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7661, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 460785, 0, -30, 4264215]}, {'id': 'B4', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7661, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 460785, 0, -30, 4264215]}, {'id': 'B5', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7661, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 460785, 0, -30, 4264215]}, {'id': 'B6', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7661, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 460785, 0, -30, 4264215]}, {'id': 'B7', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7661, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 460785, 0, -30, 4264215]}, {'id': 'B8', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [15321, 15601], 'crs': 'EPSG:32610', 'crs_transform': [15, 0, 460792.5, 0, -15, 4264207.5]}, {'id': 'B9', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7661, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 460785, 0, -30, 4264215]}, {'id': 'B10', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7661, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 460785, 0, -30, 4264215]}, {'id': 'B11', 'data_type': {'type': 'PixelType', 'precision': 'float'}, 'dimensions': [7661, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 460785, 0, -30, 4264215]}, {'id': 'BQA', 'data_type': {'type': 'PixelType', 'precision': 'int', 'min': 0, 'max': 65535}, 'dimensions': [7661, 7801], 'crs': 'EPSG:32610', 'crs_transform': [30, 0, 460785, 0, -30, 4264215]}], 'version': 1580044754541352, 'id': 'LANDSAT/LC08/C01/T1_TOA/LC08_044034_20140318', 'properties': {'RADIANCE_MULT_BAND_5': 0.006170900072902441, 'RADIANCE_MULT_BAND_6': 0.001534600043669343, 'RADIANCE_MULT_BAND_3': 0.011958000250160694, 'RADIANCE_MULT_BAND_4': 0.010084000416100025, 'RADIANCE_MULT_BAND_1': 0.012672999873757362, 'RADIANCE_MULT_BAND_2': 0.012977000325918198, 'K2_CONSTANT_BAND_11': 1201.1441650390625, 'K2_CONSTANT_BAND_10': 1321.078857421875, 'system:footprint': {'type': 'LinearRing', 'coordinates': [[-121.3637119499993, 36.41016684133052], [-121.35905784815819, 36.42528989660049], [-121.2315833015866, 36.840374852891664], [-121.09978718573184, 37.26438246506325], [-121.00571062336425, 37.564795515259384], [-120.98453376062118, 37.632161601008896], [-120.95100979452299, 37.73864548098522], [-120.90277241165228, 37.89149086576169], [-120.8836409072059, 37.951976016520376], [-120.85713152433351, 38.03584247073611], [-120.82804345546616, 38.12789513604401], [-122.38148159443172, 38.42337450676813], [-122.9500220192271, 38.525813632077686], [-122.95103687833704, 38.52422133103557], [-122.9569591344694, 38.504384836247866], [-123.43853932998316, 36.805122381748035], [-123.18722447462653, 36.759167415189125], [-121.5105534682754, 36.43765126135182], [-121.36447385999617, 36.408418528930035], [-121.3637119499993, 36.41016684133052]]}, 'REFLECTIVE_SAMPLES': 
7661, 'SUN_AZIMUTH': 146.2395782470703, 'CPF_NAME': 'LC08CPF_20140101_20140331_01.01', 'DATE_ACQUIRED': '2014-03-18', 'ELLIPSOID': 'WGS84', 'google:registration_offset_x': 0, 'google:registration_offset_y': 0, 'STATION_ID': 'LGN', 'RESAMPLING_OPTION': 'CUBIC_CONVOLUTION', 'ORIENTATION': 'NORTH_UP', 'WRS_ROW': 34, 'RADIANCE_MULT_BAND_9': 0.0024117000866681337, 'TARGET_WRS_ROW': 34, 'RADIANCE_MULT_BAND_7': 0.0005172499804757535, 'RADIANCE_MULT_BAND_8': 0.0114120002835989, 'IMAGE_QUALITY_TIRS': 9, 'TRUNCATION_OLI': 'UPPER', 'CLOUD_COVER': 0.05999999865889549, 'GEOMETRIC_RMSE_VERIFY': 3.249000072479248, 'COLLECTION_CATEGORY': 'T1', 'GRID_CELL_SIZE_REFLECTIVE': 30, 'CLOUD_COVER_LAND': 0.10000000149011612, 'GEOMETRIC_RMSE_MODEL': 6.78000020980835, 'COLLECTION_NUMBER': 1, 'IMAGE_QUALITY_OLI': 9, 'LANDSAT_SCENE_ID': 'LC80440342014077LGN01', 'WRS_PATH': 44, 'google:registration_count': 0, 'PANCHROMATIC_SAMPLES': 15321, 'PANCHROMATIC_LINES': 15601, 'GEOMETRIC_RMSE_MODEL_Y': 4.747000217437744, 'REFLECTIVE_LINES': 7801, 'TIRS_STRAY_LIGHT_CORRECTION_SOURCE': 'TIRS', 'GEOMETRIC_RMSE_MODEL_X': 4.841000080108643, 'system:asset_size': 1105511852, 'system:index': 'LC08_044034_20140318', 'REFLECTANCE_ADD_BAND_1': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_2': -0.10000000149011612, 'DATUM': 'WGS84', 'REFLECTANCE_ADD_BAND_3': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_4': -0.10000000149011612, 'RLUT_FILE_NAME': 'LC08RLUT_20130211_20150302_01_11.h5', 'REFLECTANCE_ADD_BAND_5': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_6': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_7': -0.10000000149011612, 'REFLECTANCE_ADD_BAND_8': -0.10000000149011612, 'BPF_NAME_TIRS': 'LT8BPF20140318182855_20140318190505.01', 'GROUND_CONTROL_POINTS_VERSION': 4, 'DATA_TYPE': 'L1TP', 'UTM_ZONE': 10, 'LANDSAT_PRODUCT_ID': 'LC08_L1TP_044034_20140318_20170307_01_T1', 'REFLECTANCE_ADD_BAND_9': -0.10000000149011612, 'google:registration_ratio': 0, 'GRID_CELL_SIZE_PANCHROMATIC': 15, 'RADIANCE_ADD_BAND_4': -50.419559478759766, 'REFLECTANCE_MULT_BAND_7': 1.9999999494757503e-05, 'system:time_start': 1395168392050, 'RADIANCE_ADD_BAND_5': -30.854249954223633, 'REFLECTANCE_MULT_BAND_6': 1.9999999494757503e-05, 'RADIANCE_ADD_BAND_6': -7.67317008972168, 'REFLECTANCE_MULT_BAND_9': 1.9999999494757503e-05, 'PROCESSING_SOFTWARE_VERSION': 'LPGS_2.7.0', 'RADIANCE_ADD_BAND_7': -2.5862700939178467, 'REFLECTANCE_MULT_BAND_8': 1.9999999494757503e-05, 'RADIANCE_ADD_BAND_1': -63.364051818847656, 'RADIANCE_ADD_BAND_2': -64.88555908203125, 'RADIANCE_ADD_BAND_3': -59.79148864746094, 'REFLECTANCE_MULT_BAND_1': 1.9999999494757503e-05, 'RADIANCE_ADD_BAND_8': -57.06106185913086, 'REFLECTANCE_MULT_BAND_3': 1.9999999494757503e-05, 'RADIANCE_ADD_BAND_9': -12.058540344238281, 'REFLECTANCE_MULT_BAND_2': 1.9999999494757503e-05, 'REFLECTANCE_MULT_BAND_5': 1.9999999494757503e-05, 'REFLECTANCE_MULT_BAND_4': 1.9999999494757503e-05, 'THERMAL_LINES': 7801, 'TIRS_SSM_POSITION_STATUS': 'NOMINAL', 'GRID_CELL_SIZE_THERMAL': 30, 'NADIR_OFFNADIR': 'NADIR', 'RADIANCE_ADD_BAND_11': 0.10000000149011612, 'REQUEST_ID': '0501703063989_00019', 'EARTH_SUN_DISTANCE': 0.9953709244728088, 'TIRS_SSM_MODEL': 'ACTUAL', 'FILE_DATE': 1488849349000, 'SCENE_CENTER_TIME': '18:46:32.0535800Z', 'SUN_ELEVATION': 46.471065521240234, 'BPF_NAME_OLI': 'LO8BPF20140318183249_20140318190412.01', 'RADIANCE_ADD_BAND_10': 0.10000000149011612, 'ROLL_ANGLE': -0.0010000000474974513, 'K1_CONSTANT_BAND_10': 774.8853149414062, 'SATURATION_BAND_1': 'Y', 'SATURATION_BAND_2': 'Y', 'SATURATION_BAND_3': 'Y', 'SATURATION_BAND_4': 
'Y', 'SATURATION_BAND_5': 'Y', 'MAP_PROJECTION': 'UTM', 'SATURATION_BAND_6': 'Y', 'SENSOR_ID': 'OLI_TIRS', 'SATURATION_BAND_7': 'Y', 'K1_CONSTANT_BAND_11': 480.8883056640625, 'SATURATION_BAND_8': 'N', 'SATURATION_BAND_9': 'N', 'TARGET_WRS_PATH': 44, 'RADIANCE_MULT_BAND_11': 0.00033420001273043454, 'RADIANCE_MULT_BAND_10': 0.00033420001273043454, 'GROUND_CONTROL_POINTS_MODEL': 527, 'SPACECRAFT_ID': 'LANDSAT_8', 'ELEVATION_SOURCE': 'GLS2000', 'THERMAL_SAMPLES': 7661, 'GROUND_CONTROL_POINTS_VERIFY': 164}}]}
###Markdown
Display Earth Engine data layers
###Code
Map.setControlVisibility(layerControl=True, fullscreenControl=True, latLngPopup=True)
Map
###Output
_____no_output_____
###Markdown
Install Earth Engine API and geemap. Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geemap](https://github.com/giswqs/geemap). The **geemap** Python package is built upon the [ipyleaflet](https://github.com/jupyter-widgets/ipyleaflet) and [folium](https://github.com/python-visualization/folium) packages and implements several methods for interacting with Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, and `Map.centerObject()`. The following script checks if the geemap package has been installed. If not, it will install geemap, which automatically installs its [dependencies](https://github.com/giswqs/geemap#dependencies), including earthengine-api, folium, and ipyleaflet. **Important note**: A key difference between folium and ipyleaflet is that ipyleaflet is built upon ipywidgets and allows bidirectional communication between the front-end and the backend, enabling the use of the map to capture user input, while folium is meant for displaying static data only ([source](https://blog.jupyter.org/interactive-gis-in-jupyter-with-ipyleaflet-52f9657fa7a)). Note that [Google Colab](https://colab.research.google.com/) currently does not support ipyleaflet ([source](https://github.com/googlecolab/colabtools/issues/60#issuecomment-596225619)). Therefore, if you are using geemap with Google Colab, you should use [`import geemap.eefolium`](https://github.com/giswqs/geemap/blob/master/geemap/eefolium.py). If you are using geemap with [binder](https://mybinder.org/) or a local Jupyter notebook server, you can use [`import geemap`](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py), which provides more functionalities for capturing user input (e.g., mouse-clicking and moving).
###Code
# Installs geemap package
import subprocess
try:
import geemap
except ImportError:
print('geemap package not installed. Installing ...')
subprocess.check_call(["python", '-m', 'pip', 'install', 'geemap'])
# Checks whether this notebook is running on Google Colab
try:
import google.colab
import geemap.eefolium as geemap
except:
import geemap
# Authenticates and initializes Earth Engine
import ee
try:
ee.Initialize()
except Exception as e:
ee.Authenticate()
ee.Initialize()
###Output
_____no_output_____
###Markdown
Create an interactive map. The default basemap is `Google Maps`. [Additional basemaps](https://github.com/giswqs/geemap/blob/master/geemap/basemaps.py) can be added using the `Map.add_basemap()` function.
###Code
Map = geemap.Map(center=[40,-100], zoom=4)
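# As mentioned above, geemap ships additional basemaps; this line is a sketch —
# 'HYBRID' is one of the basemap keys provided by geemap's basemaps module.
Map.add_basemap('HYBRID')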
Map
###Output
_____no_output_____
###Markdown
Add Earth Engine Python script
###Code
# Add Earth Engine dataset
# Load a Landsat 8 ImageCollection for a single path-row.
collection = ee.ImageCollection('LANDSAT/LC08/C01/T1_TOA') \
.filter(ee.Filter.eq('WRS_PATH', 44)) \
.filter(ee.Filter.eq('WRS_ROW', 34)) \
.filterDate('2014-03-01', '2014-08-01')
print('Collection: ', collection.getInfo())
# Get the number of images.
count = collection.size()
print('Count: ', count.getInfo())
# Get the date range of images in the collection.
range = collection.reduceColumns(ee.Reducer.minMax(), ["system:time_start"])
print('Date range: ', ee.Date(range.get('min')).getInfo(), ee.Date(range.get('max')).getInfo())
# Get statistics for a property of the images in the collection.
sunStats = collection.aggregate_stats('SUN_ELEVATION')
print('Sun elevation statistics: ', sunStats.getInfo())
# Sort by a cloud cover property, get the least cloudy image.
image = ee.Image(collection.sort('CLOUD_COVER').first())
print('Least cloudy image: ', image.getInfo())
# Limit the collection to the 10 most recent images.
recent = collection.sort('system:time_start', False).limit(10)
print('Recent images: ', recent.getInfo())
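# A hedged extra step (not in the original script): reduce the filtered
# collection to a single image and display it. median() is a standard
# ImageCollection reducer; the band names and max value below are typical
# choices for visualizing Landsat 8 TOA reflectance.
median = collection.median()
Map.addLayer(median, {'bands': ['B4', 'B3', 'B2'], 'max': 0.3}, 'median composite')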
###Output
_____no_output_____
###Markdown
Display Earth Engine data layers
###Code
Map.addLayerControl() # This line is not needed for ipyleaflet-based Map.
Map
###Output
_____no_output_____ |
.ipynb_checkpoints/Lipidtools-checkpoint.ipynb | ###Markdown
Lipidtools

Utility classes for creating a new topology (.itp) file, making a sample minimal membrane containing that species, minimizing that system, and generating and showing an image of that species.

Add the following to the top of any Notebook to be able to use these tools:

>**%run Lipidtools.ipynb**

Class **lipid**(name, head, link, alpha, beta)
method **lipid**.write(fileName) : **lipid**
method **lipid**.create(verbose) : **lipid**
method **lipid**.description(descr) : **lipid**
method **lipid**.appendTo(fileName) : **lipid**
method **lipid**.buildMembrane(name) : **lipid**
method **lipid**.minimize(parameterFile, verbose) : **lipid**
method **lipid**.show(x,y,z) : **lipid**

* name: Martini notation for the lipid, eg: "POPC"
* head: Martini notation for the head group, eg: "C P", "P E", or "PI"
* link: Martini notation for the linking groups, eg: "G G" or "A A"
* alpha: Martini notation for the alpha acyl tail, eg: "CDCC" for oleic
* beta: Martini notation for the beta acyl tail, eg: "CCCC" for palmitic
* methods
  * **create**(verbose) creates a topology, writes a summary or the full output of that process to the stream depending on "verbose", and returns this lipid instance
  * **description**(descr) sets the description for the species and returns this lipid instance
  * **write**(fileName) writes the topology to fileName.itp and returns this lipid instance
  * **appendTo**(fileName) appends the topology to fileName.itp (removing any previous instance of this name) and returns this lipid instance
  * **buildMembrane**(name) creates a minimal membrane system name.gro containing this species
  * **minimize**(parameterFile, verbose) makes a membrane system if one doesn't exist yet, then minimizes it using the parameter (.mdp) file, writing a summary (if verbose=False) or the complete output to the stream, and returns this lipid instance
  * **show**(x,y,z) writes an image of the species to the output stream, using x,y,z to translate (move) the image in the camera field
* fields
  * topology : string
  * created : boolean - whether minimize completed all steps (reported by the success() method)

**USAGE:** lipid("POPC","C P","G G","CDCC","CCCC").description("A general model phosphatidylcholine (PC) lipid corresponding to atomistic e.g. C16:0/18:1 1-palmitoyl-2-oleoyl (POPC) tails.").appendTo("./martini.ff/myLipids.itp").minimize("Test.mdp", False).show().success()

Will add the lipid to myLipids.itp, print the summary result of minimizing a membrane containing this lipid using Test.mdp and an image of the lipid, and return whether the creation/embedding/minimization was successful.
###Code
# import libraries used in these utilities
import os # Operating system specific commands
import re # Regular expression library
import datetime # Date and Time routines
import weakref # Object lifetime management
import tempfile # Temporary paths and files
import shutil # Shell utilities
#defaults
defaultpath="./"
defaultmdp = os.path.join(defaultpath,"test.mdp")
defaultmartini = os.path.join(defaultpath,"martini.ff")
defaultinsane = os.path.join(defaultpath,"insane+SF.py")
#verify defaults exist
if not os.path.isfile(defaultmdp):
print("WARNING: default MDP [{}] missing".format(defaultmdp))
if not os.path.isfile(defaultinsane):
print("WARNING: default insane [{}] missing".format(defaultinsane))
if not os.path.isdir(defaultmartini):
print("WARNING: default martini path [{}] missing".format(defaultmartini))
#Exception class for an immediate stop
class StopExecution(Exception):
def _render_traceback_(self):
pass
class lipid:
def __init__(self,name,head,link,alpha,beta):
if not name:
print("usage lipids(name,head,link,alpha,beta)")
self.name=name
self.head=head
self.link=link
self.alpha = alpha
self.beta = beta
self.topology = ""
        self.created = False  # whether creation/minimization completed; reported by success()
self.parameterization = "; This topology follows the standard Martini 2.0 lipid definitions and building block rules.\n; Reference(s): \n; S.J. Marrink, A.H. de Vries, A.E. Mark. Coarse grained model for semi-quantitative lipid simulations. JPC-B, 108:750-760, \n; 2004. doi:10.1021/jp036508g \n; S.J. Marrink, H.J. Risselada, S. Yefimov, D.P. Tieleman, A.H. de Vries. The MARTINI force field: coarse grained model for \n; biomolecular simulations. JPC-B, 111:7812-7824, 2007. doi:10.1021/jp071097f \n; T.A. Wassenaar, H.I. Ingolfsson, R.A. Bockmann, D.P. Tieleman, S.J. Marrink. Computational lipidomics with insane: a versatile \n; tool for generating custom membranes for molecular simulations. JCTC, 150410125128004, 2015. doi:10.1021/acs.jctc.5b00209\n; Created: "+datetime.datetime.now().strftime("%Y.%m.%d")
        self.startingdir = os.getcwd()  # remembered so __exit__ can restore the working directory
self.temp = tempfile.mkdtemp()
def create(self,verbose):
return self
def description(self,descr):
self.description = descr
return self
def appendTo(self,filename):
return self
def buildMembrane(self,name):
return self
def minimize(self,parameterFile,verbose):
return self
def show(self,x=0,y=0,z=0):
return self
def success(self):
return self.created
def __repr__(self):
return "Lipid ({})".format(self.name)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
        shutil.rmtree(self.temp)  # remove the temporary directory created in __init__
        os.chdir(self.startingdir)  # restore the original working directory
        return False  # do not suppress exceptions raised inside the with-block
###Output
_____no_output_____
###Markdown
Tests NB: these will only run when the notebook is loaded directly and run; they will not run when the notebook is loaded as a %run module from other notebooks. Run this notebook stand-alone to run all tests.
###Code
with lipid("POPC","C P","G G","CDCC","CCCC") as Lipid:
Lipid.description("phosphatidylcholine Lipid")
Lipid.appendTo("./martini.ff/myLipids.itp")
Lipid.minimize("Test.mdp",False)
Lipid.show()
import datetime
lipidname = "OIPC"
tail = "CDDC CDCC"
link = "G G"
head = "C P"
description = "; A general model phosphatidylcholine (PC) lipid \n; C18:1(9c) oleic acid, and C18:2(9c,12c) linoleic acid\n"
modeledOn="; This topology follows the standard Martini 2.0 lipid definitions and building block rules.\n; Reference(s): \n; S.J. Marrink, A.H. de Vries, A.E. Mark. Coarse grained model for semi-quantitative lipid simulations. JPC-B, 108:750-760, \n; 2004. doi:10.1021/jp036508g \n; S.J. Marrink, H.J. Risselada, S. Yefimov, D.P. Tieleman, A.H. de Vries. The MARTINI force field: coarse grained model for \n; biomolecular simulations. JPC-B, 111:7812-7824, 2007. doi:10.1021/jp071097f \n; T.A. Wassenaar, H.I. Ingolfsson, R.A. Bockmann, D.P. Tieleman, S.J. Marrink. Computational lipidomics with insane: a versatile \n; tool for generating custom membranes for molecular simulations. JCTC, 150410125128004, 2015. doi:10.1021/acs.jctc.5b00209\n; Created: "
now = datetime.datetime.now()
membrane="testmembrane"
insane="../insane+SF.py"
mdparams="../test.mdp"
martinipath="../martini.ff/"
ITPCatalogue="./epithelial.cat"
ITPMasterFile="martini_v2_epithelial.itp"
modeledOn+= now.strftime("%Y.%m.%d")+"\n"
# Cleaning up intermediate files from previous runs
!rm -f *#*
!rm -f *step*
!rm -f {membrane}*
import fileinput
import os.path
print("Create itp")
!python {martinipath}/lipid-martini-itp-v06.py -o {lipidname}.itp -alname {lipidname} -name {lipidname} -alhead '{head}' -allink '{link}' -altail '{tail}'
#update description and parameters
with fileinput.FileInput(lipidname+".itp", inplace=True) as file:
for line in file:
if line == "; This is a ...\n":
print(description, end='')
elif line == "; Was modeled on ...\n":
print(modeledOn, end='')
else:
print(line, end='')
#Add this ITP file to the catalogue file
if not os.path.exists(ITPCatalogue):
ITPCatalogueData = []
else:
with open(ITPCatalogue, 'r') as file :
ITPCatalogueData = file.read().splitlines()
ITPCatalogueData = [x for x in ITPCatalogueData if not x==lipidname+".itp"]
ITPCatalogueData.append(lipidname+".itp")
with open(ITPCatalogue, 'w') as file :
file.writelines("%s\n" % item for item in ITPCatalogueData)
#build ITPFile
with open(martinipath+ITPMasterFile, 'w') as masterfile:
for ITPfilename in ITPCatalogueData:
with open(ITPfilename, 'r') as ITPfile :
for line in ITPfile:
masterfile.write(line)
print("Done")
# build a simple membrane to visualize this species
!python2 {insane} -o {membrane}.gro -p {membrane}.top -d 0 -x 3 -y 3 -z 3 -sol PW -center -charge 0 -orient -u {lipidname}:1 -l {lipidname}:1 -itpPath {martinipath}
import os #Operating system specific commands
import re #Regular expression library
print("Test")
print("Grompp")
grompp = !gmx grompp -f {mdparams} -c {membrane}.gro -p {membrane}.top -o {membrane}.tpr
success=True
for line in grompp:
if re.search("ERROR", line):
success=False
if re.search("Fatal error", line):
success=False
#if not success:
print(line)
if success:
print("Run")
    # NB: each "!" command runs in its own subshell, so a plain export would not
    # persist; set the variables in the kernel's environment so gmx inherits them.
    os.environ["GMX_MAXCONSTRWARN"] = "-1"
    os.environ["GMX_SUPPRESS_DUMP"] = "1"
run = !gmx mdrun -v -deffnm {membrane}
summary=""
logfile = membrane+".log"
if not os.path.exists(logfile):
print("no log file")
print("== === ====")
for line in run:
print(line)
else:
try:
file = open(logfile, "r")
fe = False
for line in file:
if fe:
success=False
summary=line
elif re.search("^Steepest Descents.*converge", line):
success=True
summary=line
break
elif re.search("Fatal error", line):
fe = True
except IOError as exc:
        success = False
        summary = str(exc)
if success:
print("Success")
else:
print(summary)
###Output
Test
Grompp
:-) GROMACS - gmx grompp, 2018.1 (-:
GROMACS is written by:
Emile Apol Rossen Apostolov Paul Bauer Herman J.C. Berendsen
Par Bjelkmar Aldert van Buuren Rudi van Drunen Anton Feenstra
Gerrit Groenhof Aleksei Iupinov Christoph Junghans Anca Hamuraru
Vincent Hindriksen Dimitrios Karkoulis Peter Kasson Jiri Kraus
Carsten Kutzner Per Larsson Justin A. Lemkul Viveca Lindahl
Magnus Lundborg Pieter Meulenhoff Erik Marklund Teemu Murtola
Szilard Pall Sander Pronk Roland Schulz Alexey Shvetsov
Michael Shirts Alfons Sijbers Peter Tieleman Teemu Virolainen
Christian Wennberg Maarten Wolf
and the project leaders:
Mark Abraham, Berk Hess, Erik Lindahl, and David van der Spoel
Copyright (c) 1991-2000, University of Groningen, The Netherlands.
Copyright (c) 2001-2017, The GROMACS development team at
Uppsala University, Stockholm University and
the Royal Institute of Technology, Sweden.
check out http://www.gromacs.org for more information.
GROMACS is free software; you can redistribute it and/or modify it
under the terms of the GNU Lesser General Public License
as published by the Free Software Foundation; either version 2.1
of the License, or (at your option) any later version.
GROMACS: gmx grompp, version 2018.1
Executable: /usr/bin/gmx
Data prefix: /usr
Working dir: /home/richard/projects/epithelial/buildITP
Command line:
gmx grompp -f ../test.mdp -c testmembrane.gro -p testmembrane.top -o testmembrane.tpr
Ignoring obsolete mdp entry 'title'
Setting the LD random seed to -913580286
Generated 1 of the 741 non-bonded parameter combinations
Excluding 1 bonded neighbours molecule type 'OIPC'
Excluding 1 bonded neighbours molecule type 'OIPC'
Excluding 1 bonded neighbours molecule type 'PW'
Removing all charge groups because cutoff-scheme=Verlet
Number of degrees of freedom in T-Coupling group System is 1149.00
GROMACS reminds you: "Harvard makes mistakes too, you know. Kissinger taught there." (Woody Allen)
Analysing residue names:
There are: 32 Other residues
Analysing residues not classified as Protein/DNA/RNA/Water and splitting into groups...
This run will generate roughly 0 Mb of data
Run
Success
|
archive/logistic/logistic.ipynb | ###Markdown
Experimenting with random data
###Code
import numpy as np
import matplotlib.pyplot as plt

# Standard helper definitions assumed by the cells below (the original
# notebook presumably defined these elsewhere, e.g. in an earlier cell).
def sigmoid(z):
    return 1 / (1 + np.exp(-z))

def cross_entropy(T, Y):
    return -np.mean(T*np.log(Y) + (1 - T)*np.log(1 - Y))  # mean binary cross-entropy

def forward(X, W, b):
    return sigmoid(X.dot(W) + b)

def classification_rate(Y, P):
    return np.mean(Y == P)

N = 1000
D = 2
X = np.random.randn(N,D)
ones = np.ones((N,1))
print(X.shape, ones.shape)
n=500
X[:n,:] = X[:n,:] - 2*np.ones((n,D))
X[n:,:] = X[n:,:] + 2*np.ones((n,D))
Xb = np.concatenate((ones,X), axis=1)
T = np.array([0]*n + [1]*(N-n))
print(Xb.shape)
w = np.random.randn(D+1)
print(w.shape)
z = Xb.dot(w)
print(z.shape)
w_sol = np.array([0,4,4])
plt.scatter(X[:,0],X[:,1], c=T, alpha=0.5)
learning_rate = 0.1
lambda_reg = 0.1
for i in range(10000):
Y = sigmoid(Xb.dot(w))
w += learning_rate*(np.dot((T-Y).T,Xb) - lambda_reg*w)
if i%1000==0:
print(cross_entropy(T,Y))
print('Final w = ', w)
xx, yy = np.mgrid[-6:6:.01, -6:6:.01]
grid = np.c_[xx.ravel(), yy.ravel()]
probs = forward(grid, w[1:3], w[0]).reshape(xx.shape)  # P(y=1) over the grid
f, ax = plt.subplots(figsize=(8, 6))
contour = ax.contourf(xx, yy, probs, 25, cmap="RdBu",
                      vmin=0, vmax=1)
print(np.sum(probs > 1e-5))
###Output
1103335
###Markdown
With e-commerce data
###Code
file_name = "../ann_logistic_extra/ecommerce_data.csv"
from process import get_binary_data, get_data
X, Y = get_binary_data(file_name)
print(X.shape)
np.savetxt("foo.csv", X, delimiter=",")
from sklearn.utils import shuffle
X, Y = shuffle(X,Y)
D = X.shape[1]
W = np.random.randn(D)
b = 0
X_train = X[:-100,:]
Y_train = Y[:-100]
X_test = X[-100:,:]
Y_test = Y[-100:]
train_costs = []
test_costs = []
learning_rate = 0.1
lambda_reg = 0.1
for i in range(50000):
pYtrain = forward(X_train, W, b)
pYtest = forward(X_test, W, b)
ctrain = cross_entropy(Y_train,pYtrain)
ctest = cross_entropy(Y_test,pYtest)
W -= learning_rate*(X_train.T.dot(pYtrain-Y_train) + lambda_reg*W)
b -= learning_rate*(pYtrain-Y_train).sum()
if i%1000==0:
print(i, ": ", ctrain, ", ", ctest)
train_costs.append(ctrain)
test_costs.append(ctest)
print("Final training classification rate :", classification_rate(P=np.round(pYtrain), Y=Y_train))
print("Final test classification rate :", classification_rate(P=np.round(pYtest), Y=Y_test))
plt.plot(train_costs, label='train_costs')
plt.plot(test_costs, label='test_costs')
print("Final weights: ", W)
tmp = np.array(test_costs)
np.savetxt('foo.csv', tmp, delimiter=',')
train_costs = []
test_costs = []
learning_rate = 0.1
lambda_reg = 0.5
for i in range(10000):
pYtrain = forward(X_train, W, b)
pYtest = forward(X_test, W, b)
ctrain = cross_entropy(Y_train,pYtrain)
ctest = cross_entropy(Y_test,pYtest)
    W -= learning_rate*(X_train.T.dot(pYtrain-Y_train) + lambda_reg*W) # gradient descent step (the original "+=" stepped up the loss)
    b -= learning_rate*(pYtrain-Y_train).sum()
if i%500==0:
print(i, ": ", ctrain, ", ", ctest)
train_costs.append(ctrain)
test_costs.append(ctest)
print("Final training classification rate :", classification_rate(P=np.round(pYtrain), Y=Y_train))
print("Final test classification rate :", classification_rate(P=np.round(pYtest), Y=Y_test))
plt.plot(train_costs, label='train_costs')
plt.plot(test_costs, label='test_costs')
print("Final weights: ", W)
###Output
0 : 0.491100161491 , 0.439036971167
500 : 0.172261416722 , 0.222558414698
1000 : 0.17181989045 , 0.220833733962
1500 : 0.172182938304 , 0.222286570665
2000 : 0.179050113087 , 0.237655150202
2500 : 0.189640473069 , 0.254723850106
3000 : 0.258387382684 , 0.323268906003
3500 : 0.171939474022 , 0.22136977376
4000 : 0.173798197528 , 0.226658992562
4500 : 0.171621051307 , 0.219243114577
5000 : 0.172631989486 , 0.223710152074
5500 : 0.172343768005 , 0.222827960732
6000 : 0.171683149903 , 0.22005159866
6500 : 0.17347333421 , 0.225969366835
7000 : 0.171620503577 , 0.219349648486
7500 : 0.334022244668 , 0.41909865773
8000 : 0.194753784851 , 0.261478134258
8500 : 0.17450304299 , 0.228419301815
9000 : 0.683330281243 , 0.69135591332
9500 : 0.171646713793 , 0.21948867148
Final training classification rate : 0.973154362416
Final test classification rate : 0.96
Final weights: [ 2.57661175 10.89220217 1.03284006 3.24934831 0.71448471
-0.15681118 -0.32330776 -1.5218886 ]
###Markdown
L1 regularization
###Code
N = 50
D = 50
# uniformly distributed numbers between -5, +5
X = (np.random.random((N, D)) - 0.5)*10
# true weights - only the first 3 dimensions of X affect Y
true_w = np.array([1, 0.5, -0.5] + [0]*(D - 3))
print(true_w)
# generate Y - add noise with variance 0.5
Y = np.round(sigmoid(X.dot(true_w) + np.random.randn(N)*0.5))
print(Y)
costs = [] # keep track of squared error cost
w = np.random.randn(D) / np.sqrt(D) # randomly initialize w
learning_rate = 0.001
l1 = 3.0 # try different values - what effect does it have on w?
for t in range(5000):
# update w
Yhat = sigmoid(X.dot(w))
delta = Yhat - Y
w -= learning_rate*(X.T.dot(delta) + l1*np.sign(w))
# find and store the cost
cost = -(Y*np.log(Yhat) + (1-Y)*np.log(1 - Yhat)).mean() + l1*np.abs(w).mean()
costs.append(cost)
plt.plot(costs, label='cost')
plt.plot(true_w, label='true w')
plt.plot(w, label='w_map')
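plt.legend()
# L1 regularization pushes most irrelevant weights toward exactly zero; a quick
# count makes the sparsity visible (the 0.05 threshold is an arbitrary choice).
print("near-zero weights:", np.sum(np.abs(w) < 0.05), "out of", D)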
###Output
_____no_output_____
###Markdown
Donut
###Code
N = 1000
D = 2
R_inner = 5
R_outer = 10
# distance from origin is radius + random normal
# angle theta is uniformly distributed between (0, 2pi)
R1 = np.random.randn(N//2) + R_inner
theta = 2*np.pi*np.random.random(N//2)
X_inner = np.concatenate([[R1 * np.cos(theta)], [R1 * np.sin(theta)]]).T
R2 = np.random.randn(N//2) + R_outer
theta = 2*np.pi*np.random.random(N//2)
X_outer = np.concatenate([[R2 * np.cos(theta)], [R2 * np.sin(theta)]]).T
X = np.concatenate([ X_inner, X_outer ])
T = np.array([0]*(N//2) + [1]*(N//2)) # labels: first N//2 are 0, last N//2 are 1
plt.scatter(X[:,0], X[:,1], c=T)
plt.show()
ones = np.ones((N,1))
r = np.sqrt(np.sum(X*X, axis=1)).reshape((N,1)) # radius of each point (sqrt must come after the sum)
Xb = np.concatenate((ones, r, X), axis=1)
# print(X.shape, r.shape, ones.shape)
w = np.random.randn(D+2)
error = []
for i in range(5000):
Y = sigmoid(Xb.dot(w))
e = cross_entropy(T, Y)
error.append(e)
if i%1000==0:
print("error: ", e)
w += 0.0001*(Xb.T.dot(T-Y) - 0.1*w)
plt.plot(error)
plt.title("Cross-entropy error")
print("Final weights: ", w)
print("Classification rate: ", 1 - np.abs(np.round(Y)-T).sum()/N)
###Output
error: 3.81394995915
error: 0.174204332976
error: 0.12143722358
error: 0.101130008159
error: 0.0900940518642
Final weights: [ -1.12938022e+01 1.23940316e+00 -1.00532830e-02 -8.89319354e-03]
Classification rate: 0.979
###Markdown
XOR
###Code
N = 4
D = 2
# XOR
X = np.array([
[0, 0],
[0, 1],
[1, 0],
[1, 1],
])
T = np.array([0, 1, 1, 0])
# add a column of ones
# ones = np.array([[1]*N]).T
ones = np.ones((N, 1))
# add a column of xy = x*y
xy = (X[:,0] * X[:,1]).reshape(N, 1)
Xb = np.concatenate((ones, xy, X), axis=1)
print(Xb)
plt.scatter(X[:,0], X[:,1], c=T) # the four XOR points, colored by label
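# A short gradient-descent loop — a sketch reusing the same recipe as the
# earlier sections — confirming that the xy interaction feature makes XOR
# linearly separable in the augmented space.
w = np.random.randn(D + 2) / np.sqrt(D + 2)
for i in range(10000):
    Y = sigmoid(Xb.dot(w))
    w -= 0.01*(Xb.T.dot(Y - T) + 0.01*w)
print("XOR classification rate:", np.mean(np.round(Y) == T))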
###Output
_____no_output_____ |
chapter09.ipynb | ###Markdown
9. Overview of time-domain EEG analyses
###Code
import numpy as np
import scipy.io
from matplotlib import pyplot as plt
###Output
_____no_output_____
###Markdown
Figure 9.1a
###Code
data = scipy.io.loadmat('sampleEEGdata')
#get all the data we need from the eeg file. Working with .mat files like this is not ideal, as you can clearly see below.
#A better way to access this data would be to re-save the sampleEEGdata.mat file as v-7.3 in matlab, or convert it to hdf5,
#then open it in python using h5py or pytables. Since I'd rather not mess with the batteries-included-ness of this book,
#I'll keep the data as-is and extract what we'll need.
eeg_data = data["EEG"][0,0]["data"]
eeg_pts = data["EEG"][0,0]["pnts"][0,0] #number of points in EEG data
eeg_times = data["EEG"][0,0]["times"][0]
eeg_rate = float(data["EEG"][0,0]["srate"][0]) #make float for division purposes later
eeg_trials = data["EEG"][0,0]["trials"][0,0]
eeg_epoch=data["EEG"][0,0]["epoch"][0]
which_channel_to_plot = 'FCz' #specify label of channel to plot
eeg_chan_locs_labels=data["EEG"][0,0]["chanlocs"][0]["labels"]
channel_index = (eeg_chan_locs_labels == which_channel_to_plot) #specify index (channel number) of label
x_axis_limit = (-200, 1000) #in milliseconds
num_trials2plot = 12
plt.figure(figsize=(10, 6))
# pick random trials using random.choice (from numpy.random)
random_trial_to_plot = np.random.choice(np.arange(eeg_trials), num_trials2plot)
# figure out how many subplots we need
n_rows = np.ceil(num_trials2plot/np.ceil(np.sqrt(num_trials2plot))).astype(int)
n_cols = np.ceil(np.sqrt(num_trials2plot)).astype(int)
fig, ax = plt.subplots(n_rows, n_cols, sharex='all')
for ii in range(num_trials2plot):
idx = np.unravel_index(ii, (n_rows, n_cols))
#plot trial and specify x-axis and title
ax[idx].plot(eeg_times, np.squeeze(eeg_data[channel_index,:,random_trial_to_plot[ii] - 1]))
ax[idx].set(title=f"Trial {random_trial_to_plot[ii]}", yticks=[])
fig.tight_layout()
###Output
_____no_output_____
###Markdown
Figure 9.1b
###Code
#plot all trials
plt.plot(eeg_times,np.squeeze(eeg_data[channel_index,:,:]),'y')
#plot the event-related potential (ERP), i.e. the average time-domain signal
plt.plot(eeg_times,np.squeeze(np.mean(eeg_data[channel_index,:,:],axis=2)),'k',linewidth=2)
_=plt.title("All EEG traces, and their average")
#now plot only the ERP
plt.plot(eeg_times,np.squeeze(np.mean(eeg_data[channel_index,:,:],axis=2))) #axis=2 specifies which axis to compute the mean along
plt.vlines(0,-10,10,linestyles='dashed')
plt.hlines(0,-1000,1500)
plt.axis([-300,1000,-10,10])
plt.xlabel("Time from stimulus onset (ms)")
plt.ylabel(r'$ \mu V $') #latex interpreter looks for dollar signs
plt.title("ERP (average of " + str(eeg_trials) + " trials) from electrode " + eeg_chan_locs_labels[channel_index][0][0])
plt.gca().invert_yaxis() #EEG convention to flip y axis
###Output
_____no_output_____
###Markdown
Figure 9.2 To my knowledge, Python (specifically, scipy) did not have a function completely analogous to MATLAB's firls() when this was written; newer SciPy releases do provide `scipy.signal.firls`. A very close approximation that I will use instead is an n-th order Butterworth bandpass filter. TODO
###Code
import scipy.signal as sig
chan2plot = "P7"
channel_index = eeg_chan_locs_labels == chan2plot #specify index (channel number) of label
erp = np.squeeze(np.mean(eeg_data[channel_index,:,:],axis=2))
nyquist = eeg_rate/2.
transition_width = 0.15
#low-pass filter data
#we'll look at filtering in detail in chapter 14
#filter from 0-40
filter_high = 40 #Hz; high cut off
b, a = sig.butter(5, np.array([filter_high*(1+transition_width)])/nyquist,btype="lowpass")
erp_0to40 = sig.filtfilt(b, a, erp, padlen=150) #use filtfilt (filters forwards and backwards to eliminate phase shift)
#next, filter from 0-10
filter_high = 10 #Hz
b, a = sig.butter(5, np.array([filter_high*(1+transition_width)])/nyquist,btype="lowpass")
erp_0to10 = sig.filtfilt(b, a, erp, padlen=150)
#next, filter from 5-15
filter_low = 5 # Hz
filter_high = 15 # Hz
b, a = sig.butter(5, np.array([filter_low*(1-transition_width), filter_high*(1+transition_width)])/nyquist,btype="bandpass")
erp_5to15 = sig.filtfilt(b, a, erp, padlen=150)
plt.figure()
plt.plot(eeg_times,erp,'k')
plt.plot(eeg_times,erp_0to40,'c')
plt.plot(eeg_times,erp_0to10,'r')
plt.plot(eeg_times,erp_5to15,'m')
plt.xlim([-200,1200])
plt.gca().invert_yaxis()
plt.xlabel("time (ms)")
plt.ylabel("voltage " + r"$(\mu V)$")
plt.title("Raw and filtered signal")
_=plt.legend(['raw','0-40 Hz','0-10Hz','5-15Hz'])
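# Recent SciPy versions do provide a least-squares FIR designer,
# scipy.signal.firls, which is closer to MATLAB's firls(); this sketch redoes
# the 0-40 Hz low-pass with it (the 101-tap filter order is an arbitrary choice).
taps = sig.firls(101, [0, 40, 40*(1 + transition_width), nyquist], [1, 1, 0, 0], fs=eeg_rate)
erp_0to40_fir = sig.filtfilt(taps, 1, erp, padlen=150)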
###Output
_____no_output_____
###Markdown
Figure 9.3
###Code
plt.figure()
plt.subplot(211)
plt.plot(eeg_times,np.squeeze(eeg_data.mean(axis=0)))
plt.xlim([-200, 1000])
plt.gca().invert_yaxis() #flip for EEG conventions
plt.title("ERP from all sensors")
#topographical variance plot
plt.subplot(212)
plt.plot(eeg_times,np.squeeze(eeg_data.mean(axis=0).var(axis=1)))
plt.xlim([-200,1000])
plt.xlabel("Time (ms)")
plt.ylabel("var "+r'$ (\mu V) $')
plt.title("Topographical variance")
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Figures 9.4-9.5 use the function topoplot from the MATLAB toolbox EEGLAB. TODO Figure 9.6
###Code
use_rts = True #or false
#get RTs from each trial to use for sorting trials. In this experiment,
#the RT was always the first event after the stimulus (the time=0 event).
#Normally, you should build in exceptions in case there was no response or
#another event occured between the stimulus and response. This was already
#done for the current dataset.
rts = np.zeros(len(eeg_epoch))
for ei in range(len(eeg_epoch)):
#first, find the index at which time = 0 event occurs
time0event = eeg_epoch[ei]["eventlatency"][0] == 0 #bool array of where time=0 occurs
time0event = np.where(time0event == time0event.max())[0][0] # find the index of the True value in this array
rts[ei] = eeg_epoch[ei]["eventlatency"][0][time0event+1]
if use_rts:
rts_idx=np.argsort(rts)
else:
rts_idx = np.argsort(np.squeeze(eeg_data[46,333,:]))
#plot the trials for one channel, in (un)sorted order
plt.imshow(np.squeeze(eeg_data[46,:,rts_idx]),
extent=[eeg_times[0], eeg_times[-1], 1, eeg_trials],
aspect="auto",
cmap=plt.get_cmap("jet"),
origin="lower",
interpolation="none")
plt.xlabel("time from stim onset (ms)")
plt.ylabel("trial number")
plt.clim([-30,30])
plt.colorbar(label=r"$\mu V$")
plt.axis([-200,1200,1,99])
plt.grid(False)
if use_rts:
rtplot=plt.plot(rts[rts_idx],np.arange(1,eeg_trials+1),'k',linewidth=3, label= "Reaction time")
plt.legend(bbox_to_anchor=[1.5,1]) #put the legend outside of the image
###Output
_____no_output_____
###Markdown
Chapter 9: RNN, CNN
80. Conversion to ID numbers
***
Assign a unique ID number to each word in the training data constructed in Problem 51. Give the ID 1 to the most frequent word in the training data, 2 to the second most frequent word, and so on, so that every word appearing two or more times in the training data receives an ID number. Then implement a function that returns the sequence of ID numbers for a given word sequence. The ID number of every word appearing fewer than two times should be 0.
###Code
!wget https://archive.ics.uci.edu/ml/machine-learning-databases/00359/NewsAggregatorDataset.zip
!unzip NewsAggregatorDataset.zip
!wc -l ./newsCorpora.csv
!head -10 ./newsCorpora.csv
# Replace double quotes with single quotes to avoid errors when reading the file
!sed -e 's/"/'\''/g' ./newsCorpora.csv > ./newsCorpora_re.csv
import pandas as pd
from sklearn.model_selection import train_test_split
# Load the data
df = pd.read_csv('./newsCorpora_re.csv', header=None, sep='\t', names=['ID', 'TITLE', 'URL', 'PUBLISHER', 'CATEGORY', 'STORY', 'HOSTNAME', 'TIMESTAMP'])
# Extract the relevant publishers and columns
df = df.loc[df['PUBLISHER'].isin(['Reuters', 'Huffington Post', 'Businessweek', 'Contactmusic.com', 'Daily Mail']), ['TITLE', 'CATEGORY']]
# Split the data
train, valid_test = train_test_split(df, test_size=0.2, shuffle=True, random_state=123, stratify=df['CATEGORY'])
valid, test = train_test_split(valid_test, test_size=0.5, shuffle=True, random_state=123, stratify=valid_test['CATEGORY'])
train.reset_index(drop=True, inplace=True)
valid.reset_index(drop=True, inplace=True)
test.reset_index(drop=True, inplace=True)
# Check the number of examples per class
print('[Training data]')
print(train['CATEGORY'].value_counts())
print('[Validation data]')
print(valid['CATEGORY'].value_counts())
print('[Test data]')
print(test['CATEGORY'].value_counts())
from collections import defaultdict
import string
# Count word frequencies
d = defaultdict(int)
table = str.maketrans(string.punctuation, ' '*len(string.punctuation)) # table that replaces punctuation with spaces
for text in train['TITLE']:
for word in text.translate(table).split():
d[word] += 1
d = sorted(d.items(), key=lambda x:x[1], reverse=True)
# Build the word-to-ID dictionary
word2id = {word: i + 1 for i, (word, cnt) in enumerate(d) if cnt > 1} # register only words that occur at least twice
print(f'Number of IDs: {len(set(word2id.values()))}\n')
print('---Top 20 words by frequency---')
for key in list(word2id)[:20]:
print(f'{key}: {word2id[key]}')
import torch
def tokenizer(text, word2id=word2id, unk=0):
""" ๅ
ฅๅใใญในใใในใใผในใงๅๅฒใIDๅใซๅคๆ(่พๆธใซใชใใใฐunkใงๆๅฎใใๆฐๅญใ่จญๅฎ)"""
table = str.maketrans(string.punctuation, ' '*len(string.punctuation))
return [word2id.get(word, unk) for word in text.translate(table).split()]
# Quick check
text = train.iloc[1, train.columns.get_loc('TITLE')]
print(f'Text: {text}')
print(f'ID sequence: {tokenizer(text)}')
###Output
Text: Amazon Plans to Fight FTC Over Mobile-App Purchases
ID sequence: [169, 539, 1, 683, 1237, 82, 279, 1898, 4199]
###Markdown
81. Prediction with an RNN
***
There is a word sequence $\boldsymbol{x} = (x_1, x_2, \dots, x_T)$ represented by ID numbers, where $T$ is the length of the word sequence and $x_t \in \mathbb{R}^{V}$ is the one-hot representation of a word ID ($V$ is the total number of words). Using a recurrent neural network (RNN), implement the following model that predicts the category $y$ from the word sequence $\boldsymbol{x}$:

$\overrightarrow h_0 = 0, \\
\overrightarrow h_t = {\rm \overrightarrow{RNN}}(\mathrm{emb}(x_t), \overrightarrow h_{t-1}), \\
y = {\rm softmax}(W^{(yh)} \overrightarrow h_T + b^{(y)})$

Here, $\mathrm{emb}(x) \in \mathbb{R}^{d_w}$ is a word embedding (a function converting the one-hot representation of a word into a word vector), $\overrightarrow h_t \in \mathbb{R}^{d_h}$ is the hidden state vector at time $t$, ${\rm \overrightarrow{RNN}}(x,h)$ is an RNN unit computing the next state from the input $x$ and the hidden state $h$ of the previous time step, $W^{(yh)} \in \mathbb{R}^{L \times d_h}$ is a matrix for predicting the category from the hidden state vector, and $b^{(y)} \in \mathbb{R}^{L}$ is a bias term ($d_w, d_h, L$ are the dimensionality of the word embeddings, the dimensionality of the hidden state vectors, and the number of labels, respectively). Various configurations are possible for the RNN unit ${\rm \overrightarrow{RNN}}(x,h)$; a typical example is

${\rm \overrightarrow{RNN}}(x,h) = g(W^{(hx)} x + W^{(hh)}h + b^{(h)})$

where $W^{(hx)} \in \mathbb{R}^{d_h \times d_w}$, $W^{(hh)} \in \mathbb{R}^{d_h \times d_h}$, and $b^{(h)} \in \mathbb{R}^{d_h}$ are the parameters of the RNN unit and $g$ is an activation function (e.g. $\tanh$ or ReLU). Note that this problem does not require training the parameters; it is enough to compute $y$ with randomly initialized parameters. Hyperparameters such as the dimensionalities should be set to suitable values, e.g. $d_w = 300$, $d_h = 50$ (the same applies to the following problems).
###Code
import torch
from torch import nn
class RNN(nn.Module):
def __init__(self, vocab_size, emb_size, padding_idx, output_size, hidden_size):
super().__init__()
self.hidden_size = hidden_size
self.emb = nn.Embedding(vocab_size, emb_size, padding_idx=padding_idx)
self.rnn = nn.RNN(emb_size, hidden_size, nonlinearity='tanh', batch_first=True)
self.fc = nn.Linear(hidden_size, output_size)
def forward(self, x):
self.batch_size = x.size()[0]
        hidden = self.init_hidden() # create the zero vector for h0
emb = self.emb(x)
# emb.size() = (batch_size, seq_len, emb_size)
out, hidden = self.rnn(emb, hidden)
# out.size() = (batch_size, seq_len, hidden_size)
out = self.fc(out[:, -1, :])
# out.size() = (batch_size, output_size)
return out
def init_hidden(self):
hidden = torch.zeros(1, self.batch_size, self.hidden_size)
return hidden
from torch.utils.data import Dataset
class CreateDataset(Dataset):
def __init__(self, X, y, tokenizer):
self.X = X
self.y = y
self.tokenizer = tokenizer
    def __len__(self): # value returned by len(Dataset)
return len(self.y)
    def __getitem__(self, index): # value returned by Dataset[index]
text = self.X[index]
inputs = self.tokenizer(text)
return {
'inputs': torch.tensor(inputs, dtype=torch.int64),
'labels': torch.tensor(self.y[index], dtype=torch.int64)
}
# Build the label vectors
category_dict = {'b': 0, 't': 1, 'e':2, 'm':3}
y_train = train['CATEGORY'].map(lambda x: category_dict[x]).values
y_valid = valid['CATEGORY'].map(lambda x: category_dict[x]).values
y_test = test['CATEGORY'].map(lambda x: category_dict[x]).values
# Create the Datasets
dataset_train = CreateDataset(train['TITLE'], y_train, tokenizer)
dataset_valid = CreateDataset(valid['TITLE'], y_valid, tokenizer)
dataset_test = CreateDataset(test['TITLE'], y_test, tokenizer)
print(f'len(Dataset) output: {len(dataset_train)}')
print('Dataset[index] output:')
for var in dataset_train[1]:
print(f' {var}: {dataset_train[1][var]}')
# Set the parameters
VOCAB_SIZE = len(set(word2id.values())) + 1 # number of IDs in the dictionary + one padding ID
EMB_SIZE = 300
PADDING_IDX = len(set(word2id.values()))
OUTPUT_SIZE = 4
HIDDEN_SIZE = 50
# Define the model
model = RNN(VOCAB_SIZE, EMB_SIZE, PADDING_IDX, OUTPUT_SIZE, HIDDEN_SIZE)
# Get predictions for the first 10 examples
for i in range(10):
X = dataset_train[i]['inputs']
print(torch.softmax(model(X.unsqueeze(0)), dim=-1))
###Output
tensor([[0.3273, 0.2282, 0.2454, 0.1992]], grad_fn=<SoftmaxBackward>)
tensor([[0.1324, 0.4295, 0.2220, 0.2162]], grad_fn=<SoftmaxBackward>)
tensor([[0.4091, 0.2159, 0.1736, 0.2014]], grad_fn=<SoftmaxBackward>)
tensor([[0.2081, 0.3668, 0.2390, 0.1861]], grad_fn=<SoftmaxBackward>)
tensor([[0.2383, 0.3205, 0.2695, 0.1717]], grad_fn=<SoftmaxBackward>)
tensor([[0.3224, 0.1460, 0.1993, 0.3324]], grad_fn=<SoftmaxBackward>)
tensor([[0.2012, 0.2345, 0.3660, 0.1982]], grad_fn=<SoftmaxBackward>)
tensor([[0.2072, 0.2365, 0.2525, 0.3038]], grad_fn=<SoftmaxBackward>)
tensor([[0.2681, 0.3235, 0.1889, 0.2195]], grad_fn=<SoftmaxBackward>)
tensor([[0.1969, 0.3336, 0.3064, 0.1631]], grad_fn=<SoftmaxBackward>)
###Markdown
82. Training with stochastic gradient descent
***
Train the model constructed in Problem 81 using stochastic gradient descent (SGD). Train the model while displaying the loss and accuracy on the training data and the loss and accuracy on the evaluation data, and terminate with an appropriate criterion (for example, 10 epochs).
###Code
from torch.utils.data import DataLoader
import time
from torch import optim
def calculate_loss_and_accuracy(model, dataset, device=None, criterion=None):
"""ๆๅคฑใปๆญฃ่งฃ็ใ่จ็ฎ"""
dataloader = DataLoader(dataset, batch_size=1, shuffle=False)
loss = 0.0
total = 0
correct = 0
with torch.no_grad():
for data in dataloader:
            # move tensors to the device
inputs = data['inputs'].to(device)
labels = data['labels'].to(device)
            # forward pass
outputs = model(inputs)
            # accumulate the loss
if criterion != None:
loss += criterion(outputs, labels).item()
            # accumulate accuracy counts
pred = torch.argmax(outputs, dim=-1)
total += len(inputs)
correct += (pred == labels).sum().item()
return loss / len(dataset), correct / total
def train_model(dataset_train, dataset_valid, batch_size, model, criterion, optimizer, num_epochs, collate_fn=None, device=None):
"""ใขใใซใฎๅญฆ็ฟใๅฎ่กใใๆๅคฑใปๆญฃ่งฃ็ใฎใญใฐใ่ฟใ"""
    # move the model to the device
model.to(device)
    # create the dataloaders
dataloader_train = DataLoader(dataset_train, batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
dataloader_valid = DataLoader(dataset_valid, batch_size=1, shuffle=False)
    # set up the learning-rate scheduler
scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, num_epochs, eta_min=1e-5, last_epoch=-1)
    # training loop
log_train = []
log_valid = []
for epoch in range(num_epochs):
        # record the start time
s_time = time.time()
        # switch to training mode
model.train()
for data in dataloader_train:
            # zero the gradients
optimizer.zero_grad()
            # forward pass + backpropagation + weight update
inputs = data['inputs'].to(device)
labels = data['labels'].to(device)
outputs = model.forward(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
        # switch to evaluation mode
model.eval()
        # compute the loss and accuracy
loss_train, acc_train = calculate_loss_and_accuracy(model, dataset_train, device, criterion=criterion)
loss_valid, acc_valid = calculate_loss_and_accuracy(model, dataset_valid, device, criterion=criterion)
log_train.append([loss_train, acc_train])
log_valid.append([loss_valid, acc_valid])
        # save a checkpoint
torch.save({'epoch': epoch, 'model_state_dict': model.state_dict(), 'optimizer_state_dict': optimizer.state_dict()}, f'checkpoint{epoch + 1}.pt')
        # record the end time
e_time = time.time()
        # print the log
print(f'epoch: {epoch + 1}, loss_train: {loss_train:.4f}, accuracy_train: {acc_train:.4f}, loss_valid: {loss_valid:.4f}, accuracy_valid: {acc_valid:.4f}, {(e_time - s_time):.4f}sec')
# ๆค่จผใใผใฟใฎๆๅคฑใ3ใจใใใฏ้ฃ็ถใงไฝไธใใชใใฃใๅ ดๅใฏๅญฆ็ฟ็ตไบ
if epoch > 2 and log_valid[epoch - 3][0] <= log_valid[epoch - 2][0] <= log_valid[epoch - 1][0] <= log_valid[epoch][0]:
break
# ในใฑใธใฅใผใฉใ1ในใใใ้ฒใใ
scheduler.step()
return {'train': log_train, 'valid': log_valid}
def visualize_logs(log):
fig, ax = plt.subplots(1, 2, figsize=(15, 5))
ax[0].plot(np.array(log['train']).T[0], label='train')
ax[0].plot(np.array(log['valid']).T[0], label='valid')
ax[0].set_xlabel('epoch')
ax[0].set_ylabel('loss')
ax[0].legend()
ax[1].plot(np.array(log['train']).T[1], label='train')
ax[1].plot(np.array(log['valid']).T[1], label='valid')
ax[1].set_xlabel('epoch')
ax[1].set_ylabel('accuracy')
ax[1].legend()
plt.show()
# Set parameters
VOCAB_SIZE = len(set(word2id.values())) + 1  # number of IDs in the vocabulary + the padding ID
EMB_SIZE = 300
PADDING_IDX = len(set(word2id.values()))
OUTPUT_SIZE = 4
HIDDEN_SIZE = 50
LEARNING_RATE = 1e-3
BATCH_SIZE = 1
NUM_EPOCHS = 10
# Define the model
model = RNN(VOCAB_SIZE, EMB_SIZE, PADDING_IDX, OUTPUT_SIZE, HIDDEN_SIZE)
# Define the loss function
criterion = nn.CrossEntropyLoss()
# Define the optimizer
optimizer = torch.optim.SGD(model.parameters(), lr=LEARNING_RATE)
# Train the model
log = train_model(dataset_train, dataset_valid, BATCH_SIZE, model, criterion, optimizer, NUM_EPOCHS)
# Visualize the logs
visualize_logs(log)
# Compute accuracy
_, acc_train = calculate_loss_and_accuracy(model, dataset_train)
_, acc_test = calculate_loss_and_accuracy(model, dataset_test)
print(f'Accuracy (train): {acc_train:.3f}')
print(f'Accuracy (test): {acc_test:.3f}')
###Output
_____no_output_____
###Markdown
83. Mini-batching and training on GPU***Modify the code from problem 82 so that the loss and gradients are computed for every $B$ samples at a time (choose an appropriate value for $B$). Also, run the training on a GPU.
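For reference, with a mini-batch $\mathcal{B}$ of size $B$, the loss whose gradient drives each update is the average $L_\mathcal{B}(\theta) = \frac{1}{B} \sum_{i \in \mathcal{B}} \ell(\theta; x_i, y_i)$; this is what `nn.CrossEntropyLoss` computes by default over a batched input.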
###Code
class Padsequence():
    """Pad each mini-batch drawn from the DataLoader to its maximum sequence length"""
    def __init__(self, padding_idx):
        self.padding_idx = padding_idx

    def __call__(self, batch):
        sorted_batch = sorted(batch, key=lambda x: x['inputs'].shape[0], reverse=True)
        sequences = [x['inputs'] for x in sorted_batch]
        sequences_padded = torch.nn.utils.rnn.pad_sequence(sequences, batch_first=True, padding_value=self.padding_idx)
        labels = torch.LongTensor([x['labels'] for x in sorted_batch])
        return {'inputs': sequences_padded, 'labels': labels}
# Set parameters
VOCAB_SIZE = len(set(word2id.values())) + 1  # number of IDs in the vocabulary + the padding ID
EMB_SIZE = 300
PADDING_IDX = len(set(word2id.values()))
OUTPUT_SIZE = 4
HIDDEN_SIZE = 50
LEARNING_RATE = 5e-2
BATCH_SIZE = 32
NUM_EPOCHS = 10
# Define the model
model = RNN(VOCAB_SIZE, EMB_SIZE, PADDING_IDX, OUTPUT_SIZE, HIDDEN_SIZE)
# Define the loss function
criterion = nn.CrossEntropyLoss()
# Define the optimizer
optimizer = torch.optim.SGD(model.parameters(), lr=LEARNING_RATE)
# Specify the device
#device = torch.device('cuda')
device = torch.device('cpu')
# Train the model
log = train_model(dataset_train, dataset_valid, BATCH_SIZE, model, criterion, optimizer, NUM_EPOCHS, collate_fn=Padsequence(PADDING_IDX), device=device)
# Visualize the logs
visualize_logs(log)
# Compute accuracy
_, acc_train = calculate_loss_and_accuracy(model, dataset_train, device)
_, acc_test = calculate_loss_and_accuracy(model, dataset_test, device)
print(f'Accuracy (train): {acc_train:.3f}')
print(f'Accuracy (test): {acc_test:.3f}')
###Output
_____no_output_____
###Markdown
84. Introducing word vectors***Initialize the word embeddings $emb(x)$ with pre-trained word vectors (for example, the word vectors trained on the Google News dataset (about 100 billion words)) and train the model.
###Code
# Download the pre-trained word vectors
FILE_ID = "0B7XkCwpI5KDYNlNUTTlSS21pQmM"
FILE_NAME = "GoogleNews-vectors-negative300.bin.gz"
!wget --load-cookies /tmp/cookies.txt "https://docs.google.com/uc?export=download&confirm=$(wget --quiet --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=$FILE_ID' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')&id=$FILE_ID" -O $FILE_NAME && rm -rf /tmp/cookies.txt
from gensim.models import KeyedVectors
# Load the pre-trained model
model = KeyedVectors.load_word2vec_format('./GoogleNews-vectors-negative300.bin.gz', binary=True)
# Extract the pre-trained word vectors
VOCAB_SIZE = len(set(word2id.values())) + 1
EMB_SIZE = 300
weights = np.zeros((VOCAB_SIZE, EMB_SIZE))
words_in_pretrained = 0
for i, word in enumerate(word2id.keys()):
    try:
        weights[i] = model[word]
        words_in_pretrained += 1
    except KeyError:
        weights[i] = np.random.normal(scale=0.4, size=(EMB_SIZE,))
weights = torch.from_numpy(weights.astype(np.float32))
print(f'Words covered by the pre-trained vectors: {words_in_pretrained} / {VOCAB_SIZE}')
print(weights.size())
class RNN(nn.Module):
    def __init__(self, vocab_size, emb_size, padding_idx, output_size, hidden_size, num_layers, emb_weights=None, bidirectional=False):
        super().__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_directions = bidirectional + 1  # unidirectional: 1, bidirectional: 2
        if emb_weights is not None:  # if given, initialize the embedding layer weights with emb_weights
            self.emb = nn.Embedding.from_pretrained(emb_weights, padding_idx=padding_idx)
        else:
            self.emb = nn.Embedding(vocab_size, emb_size, padding_idx=padding_idx)
        self.rnn = nn.RNN(emb_size, hidden_size, num_layers, nonlinearity='tanh', bidirectional=bidirectional, batch_first=True)
        self.fc = nn.Linear(hidden_size * self.num_directions, output_size)

    def forward(self, x):
        self.batch_size = x.size()[0]
        hidden = self.init_hidden().to(x.device)  # create the zero vector for h0 on the input's device
        emb = self.emb(x)
        # emb.size() = (batch_size, seq_len, emb_size)
        out, hidden = self.rnn(emb, hidden)
        # out.size() = (batch_size, seq_len, hidden_size * num_directions)
        out = self.fc(out[:, -1, :])
        # out.size() = (batch_size, output_size)
        return out

    def init_hidden(self):
        hidden = torch.zeros(self.num_layers * self.num_directions, self.batch_size, self.hidden_size)
        return hidden
# Set parameters
VOCAB_SIZE = len(set(word2id.values())) + 1  # number of IDs in the vocabulary + the padding ID
EMB_SIZE = 300
PADDING_IDX = len(set(word2id.values()))
OUTPUT_SIZE = 4
HIDDEN_SIZE = 50
NUM_LAYERS = 1
LEARNING_RATE = 5e-2
BATCH_SIZE = 32
NUM_EPOCHS = 10
# Define the model
model = RNN(VOCAB_SIZE, EMB_SIZE, PADDING_IDX, OUTPUT_SIZE, HIDDEN_SIZE, NUM_LAYERS, emb_weights=weights)
# Define the loss function
criterion = nn.CrossEntropyLoss()
# Define the optimizer
optimizer = torch.optim.SGD(model.parameters(), lr=LEARNING_RATE)
# Specify the device
#device = torch.device('cuda')
device = torch.device('cpu')
# Train the model
log = train_model(dataset_train, dataset_valid, BATCH_SIZE, model, criterion, optimizer, NUM_EPOCHS, collate_fn=Padsequence(PADDING_IDX), device=device)
# Visualize the logs
visualize_logs(log)
# Compute accuracy
_, acc_train = calculate_loss_and_accuracy(model, dataset_train, device)
_, acc_test = calculate_loss_and_accuracy(model, dataset_test, device)
print(f'Accuracy (train): {acc_train:.3f}')
print(f'Accuracy (test): {acc_test:.3f}')
###Output
_____no_output_____
###Markdown
85. Bidirectional and multi-layer RNN***Encode the input text using both a forward and a backward RNN, and train the model: $\overleftarrow{h}_{T+1} = 0, \\ \overleftarrow{h}_t = {\rm \overleftarrow{RNN}}(\mathrm{emb}(x_t), \overleftarrow{h}_{t+1}), \\ y = {\rm softmax}(W^{(yh)} [\overrightarrow{h}_T; \overleftarrow{h}_1] + b^{(y)})$ Here, $\overrightarrow{h}_t \in \mathbb{R}^{d_h}$ and $\overleftarrow{h}_t \in \mathbb{R}^{d_h}$ are the hidden state vectors at time $t$ obtained by the forward and backward RNNs, respectively; ${\rm \overleftarrow{RNN}}(x,h)$ is the RNN unit that computes the hidden state from the input $x$ and the hidden state $h$ of the next time step; $W^{(yh)} \in \mathbb{R}^{L \times 2d_h}$ is the matrix for predicting a category from the hidden state vectors; and $b^{(y)} \in \mathbb{R}^{L}$ is a bias term. Also, $[a; b]$ denotes the concatenation of vectors $a$ and $b$. In addition, experiment with stacking the bidirectional RNN into multiple layers.
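One subtlety when mapping this onto PyTorch's `nn.RNN` (a minimal sketch with illustrative sizes): at the last output position, the backward half of the output is the backward state computed at $t=T$, so reproducing $[\overrightarrow{h}_T; \overleftarrow{h}_1]$ exactly means reading the backward direction from the first position instead. The `RNN` class defined earlier uses `out[:, -1, :]`, which is a common simplification.
```
import torch
import torch.nn as nn

rnn = nn.RNN(input_size=8, hidden_size=4, bidirectional=True, batch_first=True)
x = torch.randn(2, 5, 8)              # (batch, seq_len, emb_size)
out, hidden = rnn(x)                  # out: (batch, seq_len, 2 * hidden_size)
h_fwd_T = out[:, -1, :4]              # forward direction at t = T
h_bwd_1 = out[:, 0, 4:]               # backward direction at t = 1
feat = torch.cat([h_fwd_T, h_bwd_1], dim=-1)  # matches the formula's concatenation
```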
###Code
# Set parameters
VOCAB_SIZE = len(set(word2id.values())) + 1  # number of IDs in the vocabulary + the padding ID
EMB_SIZE = 300
PADDING_IDX = len(set(word2id.values()))
OUTPUT_SIZE = 4
HIDDEN_SIZE = 50
NUM_LAYERS = 2
LEARNING_RATE = 5e-2
BATCH_SIZE = 32
NUM_EPOCHS = 10
# Define the model
model = RNN(VOCAB_SIZE, EMB_SIZE, PADDING_IDX, OUTPUT_SIZE, HIDDEN_SIZE, NUM_LAYERS, emb_weights=weights, bidirectional=True)
# Define the loss function
criterion = nn.CrossEntropyLoss()
# Define the optimizer
optimizer = torch.optim.SGD(model.parameters(), lr=LEARNING_RATE)
# Specify the device
#device = torch.device('cuda')
device = torch.device('cpu')
# Train the model
log = train_model(dataset_train, dataset_valid, BATCH_SIZE, model, criterion, optimizer, NUM_EPOCHS, collate_fn=Padsequence(PADDING_IDX), device=device)
# Visualize the logs
visualize_logs(log)
# Compute accuracy
_, acc_train = calculate_loss_and_accuracy(model, dataset_train, device)
_, acc_test = calculate_loss_and_accuracy(model, dataset_test, device)
print(f'Accuracy (train): {acc_train:.3f}')
print(f'Accuracy (test): {acc_test:.3f}')
###Output
_____no_output_____
###Markdown
86. Convolutional neural network (CNN)***We have a word sequence $\boldsymbol{x} = (x_1, x_2, \dots, x_T)$ represented by ID numbers, where $T$ is the length of the word sequence and $x_t \in \mathbb{R}^{V}$ is the one-hot representation of a word ID ($V$ is the total number of words). Using a convolutional neural network (CNN), implement a model that predicts the category $y$ from the word sequence $\boldsymbol{x}$. The configuration of the CNN is as follows: + dimensionality of the word embeddings: $d_w$ + convolution filter size: 3 tokens + convolution stride: 1 token + convolution padding: yes + dimensionality of the vector at each time step after convolution: $d_h$ + after convolution, apply max pooling to represent the input document as a $d_h$-dimensional hidden vector. That is, the feature vector at time $t$, $p_t \in \mathbb{R}^{d_h}$, is given by $p_t = g(W^{(px)} [\mathrm{emb}(x_{t-1}); \mathrm{emb}(x_t); \mathrm{emb}(x_{t+1})] + b^{(p)})$, where $W^{(px)} \in \mathbb{R}^{d_h \times 3d_w}, b^{(p)} \in \mathbb{R}^{d_h}$ are CNN parameters, $g$ is an activation function (e.g. $\tanh$ or ReLU), and $[a; b; c]$ denotes the concatenation of vectors $a, b, c$. The matrix $W^{(px)}$ has $3d_w$ columns because the linear transformation is applied to the concatenated word embeddings of three tokens. In max pooling, the maximum over all time steps is taken for each dimension of the feature vectors, yielding the feature vector $c \in \mathbb{R}^{d_h}$ of the input document. Writing $c[i]$ for the value of the $i$-th dimension of $c$, max pooling is expressed as $c[i] = \max_{1 \leq t \leq T} p_t[i]$. Finally, predict the category $y$ by applying a linear transformation with the matrix $W^{(yc)} \in \mathbb{R}^{L \times d_h}$ and the bias term $b^{(y)} \in \mathbb{R}^{L}$ to the document feature vector $c$, followed by the softmax function: $y = {\rm softmax}(W^{(yc)} c + b^{(y)})$. Note that in this problem we do not train the model; we only compute $y$ with randomly initialized weight matrices.
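Before reading the implementation, it may help to see how this formula maps onto `nn.Conv2d`. A minimal, self-contained sketch (sizes are illustrative, not from the task): the width-3 filter over three concatenated embeddings is a 2-D convolution with kernel $(3, d_w)$, and the max pooling is a max over the time axis.
```
import torch
import torch.nn as nn

d_w, d_h, T = 6, 4, 10
emb = torch.randn(2, 1, T, d_w)              # (batch, 1, seq_len, emb dim)
conv = nn.Conv2d(1, d_h, (3, d_w), padding=(1, 0))
p = torch.relu(conv(emb)).squeeze(3)         # (batch, d_h, T): the p_t vectors
c = torch.max(p, dim=2).values               # max over time: c[i] = max_t p_t[i]
```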
###Code
from torch.nn import functional as F
class CNN(nn.Module):
    def __init__(self, vocab_size, emb_size, padding_idx, output_size, out_channels, kernel_heights, stride, padding, emb_weights=None):
        super().__init__()
        if emb_weights is not None:  # if given, initialize the embedding layer weights with emb_weights
            self.emb = nn.Embedding.from_pretrained(emb_weights, padding_idx=padding_idx)
        else:
            self.emb = nn.Embedding(vocab_size, emb_size, padding_idx=padding_idx)
        self.conv = nn.Conv2d(1, out_channels, (kernel_heights, emb_size), stride, (padding, 0))
        self.drop = nn.Dropout(0.3)
        self.fc = nn.Linear(out_channels, output_size)

    def forward(self, x):
        # x.size() = (batch_size, seq_len)
        emb = self.emb(x).unsqueeze(1)
        # emb.size() = (batch_size, 1, seq_len, emb_size)
        conv = self.conv(emb)
        # conv.size() = (batch_size, out_channels, seq_len, 1)
        act = F.relu(conv.squeeze(3))
        # act.size() = (batch_size, out_channels, seq_len)
        max_pool = F.max_pool1d(act, act.size()[2])
        # max_pool.size() = (batch_size, out_channels, 1) -> takes the max along the seq_len dimension
        out = self.fc(self.drop(max_pool.squeeze(2)))
        # out.size() = (batch_size, output_size)
        return out
# Set parameters
VOCAB_SIZE = len(set(word2id.values())) + 1  # number of IDs in the vocabulary + the padding ID
EMB_SIZE = 300
PADDING_IDX = len(set(word2id.values()))
OUTPUT_SIZE = 4
OUT_CHANNELS = 100
KERNEL_HEIGHTS = 3
STRIDE = 1
PADDING = 1
# Define the model
model = CNN(VOCAB_SIZE, EMB_SIZE, PADDING_IDX, OUTPUT_SIZE, OUT_CHANNELS, KERNEL_HEIGHTS, STRIDE, PADDING, emb_weights=weights)
# Get predictions for the first 10 samples
for i in range(10):
X = dataset_train[i]['inputs']
print(torch.softmax(model(X.unsqueeze(0)), dim=-1))
###Output
tensor([[0.2147, 0.2028, 0.3156, 0.2669]], grad_fn=<SoftmaxBackward>)
tensor([[0.2341, 0.2047, 0.2999, 0.2612]], grad_fn=<SoftmaxBackward>)
tensor([[0.2225, 0.2443, 0.2795, 0.2538]], grad_fn=<SoftmaxBackward>)
tensor([[0.2110, 0.2021, 0.3058, 0.2811]], grad_fn=<SoftmaxBackward>)
tensor([[0.2178, 0.2192, 0.2740, 0.2890]], grad_fn=<SoftmaxBackward>)
tensor([[0.2105, 0.1714, 0.3283, 0.2899]], grad_fn=<SoftmaxBackward>)
tensor([[0.2456, 0.2161, 0.2943, 0.2440]], grad_fn=<SoftmaxBackward>)
tensor([[0.2146, 0.1642, 0.3159, 0.3053]], grad_fn=<SoftmaxBackward>)
tensor([[0.2119, 0.2169, 0.3154, 0.2557]], grad_fn=<SoftmaxBackward>)
tensor([[0.2306, 0.2179, 0.2853, 0.2662]], grad_fn=<SoftmaxBackward>)
###Markdown
87. Training the CNN with stochastic gradient descent***Using stochastic gradient descent (SGD: Stochastic Gradient Descent), train the model built in problem 86. Train while printing the loss and accuracy on the training data and the loss and accuracy on the validation data, and stop according to an appropriate criterion (e.g., 10 epochs).
###Code
# Set parameters
VOCAB_SIZE = len(set(word2id.values())) + 1
EMB_SIZE = 300
PADDING_IDX = len(set(word2id.values()))
OUTPUT_SIZE = 4
OUT_CHANNELS = 100
KERNEL_HEIGHTS = 3
STRIDE = 1
PADDING = 1
LEARNING_RATE = 5e-2
BATCH_SIZE = 64
NUM_EPOCHS = 10
# Define the model
model = CNN(VOCAB_SIZE, EMB_SIZE, PADDING_IDX, OUTPUT_SIZE, OUT_CHANNELS, KERNEL_HEIGHTS, STRIDE, PADDING, emb_weights=weights)
# Define the loss function
criterion = nn.CrossEntropyLoss()
# Define the optimizer
optimizer = torch.optim.SGD(model.parameters(), lr=LEARNING_RATE)
# Specify the device
device = torch.device('cuda')
# Train the model
log = train_model(dataset_train, dataset_valid, BATCH_SIZE, model, criterion, optimizer, NUM_EPOCHS, collate_fn=Padsequence(PADDING_IDX), device=device)
# Visualize the logs
visualize_logs(log)
# Compute accuracy
_, acc_train = calculate_loss_and_accuracy(model, dataset_train, device)
_, acc_test = calculate_loss_and_accuracy(model, dataset_test, device)
print(f'Accuracy (train): {acc_train:.3f}')
print(f'Accuracy (test): {acc_test:.3f}')
###Output
_____no_output_____
###Markdown
88. Hyperparameter tuning***Modify the code from problem 85 or problem 87, and tune the network architecture and hyperparameters to build a high-performing category classifier.
###Code
!pip install optuna
from torch.nn import functional as F
class textCNN(nn.Module):
    def __init__(self, vocab_size, emb_size, padding_idx, output_size, out_channels, conv_params, drop_rate, emb_weights=None):
        super().__init__()
        if emb_weights is not None:  # if given, initialize the embedding layer weights with emb_weights
            self.emb = nn.Embedding.from_pretrained(emb_weights, padding_idx=padding_idx)
        else:
            self.emb = nn.Embedding(vocab_size, emb_size, padding_idx=padding_idx)
        self.convs = nn.ModuleList([nn.Conv2d(1, out_channels, (kernel_height, emb_size), padding=(padding, 0)) for kernel_height, padding in conv_params])
        self.drop = nn.Dropout(drop_rate)
        self.fc = nn.Linear(len(conv_params) * out_channels, output_size)

    def forward(self, x):
        # x.size() = (batch_size, seq_len)
        emb = self.emb(x).unsqueeze(1)
        # emb.size() = (batch_size, 1, seq_len, emb_size)
        conv = [F.relu(conv(emb)).squeeze(3) for i, conv in enumerate(self.convs)]
        # conv[i].size() = (batch_size, out_channels, seq_len + padding * 2 - kernel_height + 1)
        max_pool = [F.max_pool1d(i, i.size(2)) for i in conv]
        # max_pool[i].size() = (batch_size, out_channels, 1) -> takes the max along the seq_len dimension
        max_pool_cat = torch.cat(max_pool, 1)
        # max_pool_cat.size() = (batch_size, len(conv_params) * out_channels, 1) -> concatenates the per-filter results
        out = self.fc(self.drop(max_pool_cat.squeeze(2)))
        # out.size() = (batch_size, output_size)
        return out
import optuna
def objective(trial):
    # Parameters to tune
    emb_size = int(trial.suggest_discrete_uniform('emb_size', 100, 400, 100))
    out_channels = int(trial.suggest_discrete_uniform('out_channels', 50, 200, 50))
    drop_rate = trial.suggest_discrete_uniform('drop_rate', 0.0, 0.5, 0.1)
    learning_rate = trial.suggest_loguniform('learning_rate', 5e-4, 5e-2)
    momentum = trial.suggest_discrete_uniform('momentum', 0.5, 0.9, 0.1)
    batch_size = int(trial.suggest_discrete_uniform('batch_size', 16, 128, 16))
    # Fixed parameters
    VOCAB_SIZE = len(set(word2id.values())) + 1
    PADDING_IDX = len(set(word2id.values()))
    OUTPUT_SIZE = 4
    CONV_PARAMS = [[2, 0], [3, 1], [4, 2]]
    NUM_EPOCHS = 30
    # Define the model
    # Note: the global EMB_SIZE (300) is used instead of the tuned emb_size because
    # the pre-trained embeddings passed via emb_weights fix the embedding width at 300
    model = textCNN(VOCAB_SIZE, EMB_SIZE, PADDING_IDX, OUTPUT_SIZE, out_channels, CONV_PARAMS, drop_rate, emb_weights=weights)
    # Define the loss function
    criterion = nn.CrossEntropyLoss()
    # Define the optimizer
    optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate, momentum=momentum)
    # Specify the device
    device = torch.device('cuda')
    # Train the model
    log = train_model(dataset_train, dataset_valid, batch_size, model, criterion, optimizer, NUM_EPOCHS, collate_fn=Padsequence(PADDING_IDX), device=device)
    # Compute the validation loss
    loss_valid, _ = calculate_loss_and_accuracy(model, dataset_valid, device, criterion=criterion)
    return loss_valid

# Run the optimization
study = optuna.create_study()
study.optimize(objective, timeout=7200)
# Show the results
print('Best trial:')
trial = study.best_trial
print('  Value: {:.3f}'.format(trial.value))
print('  Params: ')
for key, value in trial.params.items():
    print('    {}: {}'.format(key, value))
# Set parameters
VOCAB_SIZE = len(set(word2id.values())) + 1
EMB_SIZE = int(trial.params['emb_size'])
PADDING_IDX = len(set(word2id.values()))
OUTPUT_SIZE = 4
OUT_CHANNELS = int(trial.params['out_channels'])
CONV_PARAMS = [[2, 0], [3, 1], [4, 2]]
DROP_RATE = trial.params['drop_rate']
LEARNING_RATE = trial.params['learning_rate']
BATCH_SIZE = int(trial.params['batch_size'])
NUM_EPOCHS = 30
# Define the model
model = textCNN(VOCAB_SIZE, EMB_SIZE, PADDING_IDX, OUTPUT_SIZE, OUT_CHANNELS, CONV_PARAMS, DROP_RATE, emb_weights=weights)
print(model)
# Define the loss function
criterion = nn.CrossEntropyLoss()
# Define the optimizer
optimizer = torch.optim.SGD(model.parameters(), lr=LEARNING_RATE, momentum=0.9)
# Specify the device
device = torch.device('cuda')
# Train the model
log = train_model(dataset_train, dataset_valid, BATCH_SIZE, model, criterion, optimizer, NUM_EPOCHS, collate_fn=Padsequence(PADDING_IDX), device=device)
# Visualize the logs
visualize_logs(log)
# Compute accuracy
_, acc_train = calculate_loss_and_accuracy(model, dataset_train, device)
_, acc_test = calculate_loss_and_accuracy(model, dataset_test, device)
print(f'Accuracy (train): {acc_train:.3f}')
print(f'Accuracy (test): {acc_test:.3f}')
###Output
_____no_output_____
###Markdown
Import necessary libraries and functions.
###Code
import numpy as np, cmath, scipy as sp
import scipy.io
from matplotlib import pyplot as plt
from numpy import pi, sin, cos, exp, sqrt, log, log10, random, convolve  # import basic functions from numpy that we'll need
from numpy.fft import fft, ifft
%matplotlib inline
###Output
_____no_output_____
###Markdown
Import optional library for pretty plotting.
###Code
import seaborn as sns
sns.set_palette('muted')
sns.set_style('darkgrid')
###Output
_____no_output_____
###Markdown
Figure 9.1a
###Code
data = scipy.io.loadmat('sampleEEGdata')
#get all the data we need from the eeg file. Working with .mat files like this is not ideal, as you can clearly see below.
#A better way to access this data would be to re-save the sampleEEGdata.mat file as v-7.3 in matlab, or convert it to hdf5,
#then open it in python using h5py or pytables. Since I'd rather not mess with the batteries-included-ness of this book,
#I'll keep the data as-is and extract what we'll need.
EEGdata = data["EEG"][0,0]["data"]
EEGpnts = data["EEG"][0,0]["pnts"][0,0] #number of points in EEG data
EEGtimes = data["EEG"][0,0]["times"][0]
EEGsrate = float(data["EEG"][0,0]["srate"][0]) #make float for division purposes later
EEGtrials = data["EEG"][0,0]["trials"][0,0]
EEGepoch=data["EEG"][0,0]["epoch"][0]
which_channel_to_plot = 'FCz'; #specify label of channel to plot
EEGchanlocslabels=data["EEG"][0,0]["chanlocs"][0]["labels"]
channel_index = EEGchanlocslabels == which_channel_to_plot #specify index (channel number) of label
x_axis_limit = [-200,1000] #in milliseconds
num_trials2plot = 12
plt.figure(figsize=(10,6))
#pick random trials using random.choice (from numpy.random)
random_trial_to_plot = random.choice(range(EEGtrials), num_trials2plot)
for ii in range(num_trials2plot):
    #figure out how many subplots we need
    plt.subplot(int(np.ceil(num_trials2plot/np.ceil(sqrt(num_trials2plot)))), int(np.ceil(sqrt(num_trials2plot))), ii+1)
    #plot trial and specify x-axis and title
    plt.plot(EEGtimes, np.squeeze(EEGdata[channel_index,:,random_trial_to_plot[ii] - 1]))
    plt.title("Trial " + str(random_trial_to_plot[ii]))
plt.yticks([])
plt.xlim(x_axis_limit)
_=plt.tight_layout()
###Output
_____no_output_____
###Markdown
Figure 9.1b
###Code
#plot all trials
plt.plot(EEGtimes,np.squeeze(EEGdata[channel_index,:,:]),'y')
#plot the event-related potential (ERP), i.e. the average time-domain signal
plt.plot(EEGtimes,np.squeeze(np.mean(EEGdata[channel_index,:,:],axis=2)),'k',linewidth=2)
_=plt.title("All EEG traces, and their average")
#now plot only the ERP
plt.plot(EEGtimes,np.squeeze(np.mean(EEGdata[channel_index,:,:],axis=2))) #axis=2 specifies which axis to compute the mean along
plt.vlines(0,-10,10,linestyles='dashed')
plt.hlines(0,-1000,1500)
plt.axis([-300,1000,-10,10])
plt.xlabel("Time from stimlulus onset (ms)")
plt.ylabel(r'$ \mu V $') #latex interpreter looks for dollar signs
plt.title("ERP (average of " + str(EEGtrials) + " trials) from electrode " + EEGchanlocslabels[channel_index][0][0])
plt.gca().invert_yaxis() #EEG convention to flip y axis
###Output
_____no_output_____
###Markdown
Figure 9.2. To my knowledge, Python (specifically, scipy) does not have a function that is completely analogous to MATLAB's firls(). A very close approximation that I will use instead is an n-th order Butterworth bandpass filter. TODO
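(An aside: newer SciPy releases do ship `scipy.signal.firls`, a least-squares FIR designer much like MATLAB's. A hedged sketch, where the tap count and band edges are illustrative and `nyquist`/`erp` are the variables defined in the next cell:)
```
import scipy.signal as sig
numtaps = 101  # firls requires an odd number of taps
taps = sig.firls(numtaps, [0, 10/nyquist, 15/nyquist, 1], [1, 1, 0, 0])
erp_fir = sig.filtfilt(taps, 1.0, erp, padlen=150)  # zero-phase low-pass to ~10 Hz
```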
###Code
import scipy.signal as sig
chan2plot = "P7"
channel_index = EEGchanlocslabels == chan2plot #specify index (channel number) of label
erp = np.squeeze(np.mean(EEGdata[channel_index,:,:],axis=2))
nyquist = EEGsrate/2.
transition_width = 0.15
#low-pass filter data
#we'll look at filtering in detail in chapter 14
#filter from 0-40 Hz
filter_high = 40 #Hz; high cut off
b, a = sig.butter(5, np.array([filter_high*(1+transition_width)])/nyquist, btype="lowpass")
erp_0to40 = sig.filtfilt(b, a, erp, padlen=150) #use filtfilt (filters forwards and backwards to eliminate phase shift)
#next, filter from 0-10
filter_high = 10 #Hz
b, a = sig.butter(5, np.array([filter_high*(1+transition_width)])/nyquist,btype="lowpass")
erp_0to10 = sig.filtfilt(b, a, erp, padlen=150)
#next, filter from 5-15
filter_low = 5 #Hz
filter_high = 15 #Hz
b, a = sig.butter(5, np.array([filter_low*(1-transition_width), filter_high*(1+transition_width)])/nyquist,btype="bandpass")
erp_5to15 = sig.filtfilt(b, a, erp, padlen=150)
plt.figure()
plt.plot(EEGtimes,erp,'k')
plt.plot(EEGtimes,erp_0to40,'c')
plt.plot(EEGtimes,erp_0to10,'r')
plt.plot(EEGtimes,erp_5to15,'m')
plt.xlim([-200,1200])
plt.gca().invert_yaxis()
plt.xlabel("time (ms)")
plt.ylabel("voltage " + r"$(\mu V)$")
plt.title("Raw and filtered signal")
_=plt.legend(['raw','0-40 Hz','0-10Hz','5-15Hz'])
###Output
_____no_output_____
###Markdown
Figure 9.3
###Code
fig=plt.figure()
plt.subplot(211)
plt.plot(EEGtimes,np.squeeze(EEGdata.mean(axis=0)))
plt.xlim([-200, 1000])
plt.gca().invert_yaxis() #flip for EEG conventions
plt.title("ERP from all sensors")
#topographical variance plot
plt.subplot(212)
plt.plot(EEGtimes,np.squeeze(EEGdata.mean(axis=0).var(axis=1)))
plt.xlim([-200,1000])
plt.xlabel("Time (ms)")
plt.ylabel("var "+r'$ (\mu V) $')
plt.title("Topographical variance")
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Figures 9.4-9.5 use the function topoplot from the MATLAB toolbox EEGLAB (TODO). Figure 9.6
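A possible Python stand-in, sketched under the assumption that MNE is installed and channel coordinates are available (the MNE-based version of this chapter, later in this document, builds exactly this):
```
import mne
# ch_pos: dict mapping channel labels to (x, y, z) head coordinates
# montage = mne.channels.make_dig_montage(ch_pos=ch_pos, coord_frame='head')
# info = mne.create_info(list(ch_pos), sfreq=EEGsrate, ch_types='eeg')
# evoked = mne.EvokedArray(EEGdata.mean(axis=2), info, tmin=EEGtimes[0]/1000.)
# evoked.set_montage(montage)
# evoked.plot_topomap(times=[0.3])
```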
###Code
useRTs = True #or false
#get RTs from each trial to use for sorting trials. In this experiment,
#the RT was always the first event after the stimulus (the time=0 event).
#Normally, you should build in exceptions in case there was no response or
#another event occured between the stimulus and response. This was already
#done for the current dataset.
rts = np.zeros(len(EEGepoch))
for ei in range(len(EEGepoch)):
#first, find the index at which time = 0 event occurs
time0event = EEGepoch[ei]["eventlatency"][0] == 0 #bool array of where time=0 occurs
time0event = np.where(time0event == time0event.max())[0][0] # find the index of the True value in this array
rts[ei] = EEGepoch[ei]["eventlatency"][0][time0event+1]
if useRTs:
rts_idx=np.argsort(rts)
else:
rts_idx = np.argsort(np.squeeze(EEGdata[46,333,:]))
#plot the trials for one channel, in (un)sorted order
plt.imshow(np.squeeze(EEGdata[46,:,rts_idx]),
extent=[EEGtimes[0], EEGtimes[-1], 1, EEGtrials],
aspect="auto",
cmap=plt.get_cmap("jet"),
origin="lower",
interpolation="none")
plt.xlabel("time from stim onset (ms)")
plt.ylabel("trial number")
plt.clim([-30,30])
plt.colorbar(label=r"$\mu V$")
plt.axis([-200,1200,1,99])
plt.grid(False)
if useRTs:
rtplot=plt.plot(rts[rts_idx],np.arange(1,EEGtrials+1),'k',linewidth=3, label= "Reaction time")
plt.legend(bbox_to_anchor=[1.5,1]) #put the legend outside of the image
###Output
_____no_output_____
###Markdown
9. Overview of time-domain EEG analyses Imports
###Code
import numpy as np
from matplotlib import pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import mne
from mne.externals.pymatreader import read_mat
###Output
_____no_output_____
###Markdown
Load data
###Code
# load data using MNE
data_in = read_mat('C:/users/micha/analyzing_neural_time_series/sampleEEGdata.mat')
EEG = data_in['EEG']
###Output
_____no_output_____
###Markdown
Figure 9.1a
###Code
# Choose example channel to plot
which_channel_to_plot = 'FCz' #specify label of channel to plot
channel_index = (np.asarray(EEG["chanlocs"]["labels"]) == which_channel_to_plot) #specify index (channel number) of label
chan_idx = int(np.linspace(0,len(channel_index)-1,len(channel_index))[channel_index])
# set plotting parameters
x_axis_limit = (-200, 1000) #in milliseconds
num_trials2plot = 12
# pick random trials using random.choice (from numpy.random)
random_trial_to_plot = np.random.choice(np.arange(EEG['trials']), num_trials2plot)
# figure out how many subplots we need
n_rows = np.ceil(num_trials2plot/np.ceil(np.sqrt(num_trials2plot))).astype(int)
n_cols = np.ceil(np.sqrt(num_trials2plot)).astype(int)
fig, ax = plt.subplots(n_rows, n_cols, sharex='all', figsize=(10, 6))
for ii in range(num_trials2plot):
idx = np.unravel_index(ii, (n_rows, n_cols))
#plot trial and specify x-axis and title
ax[idx].plot(EEG['times'], np.squeeze(EEG['data'][channel_index,:,random_trial_to_plot[ii] - 1]))
ax[idx].set(title=f"Trial {random_trial_to_plot[ii]}", yticks=[])
fig.tight_layout();
###Output
_____no_output_____
###Markdown
Figure 9.1b
###Code
#plot all trials
fig, ax = plt.subplots( figsize=(8,6))
ax.plot(EEG['times'],np.squeeze(EEG['data'][channel_index,:,:]),'y')
#plot the event-related potential (ERP), i.e. the average time-domain signal
ax.plot(EEG['times'],np.squeeze(np.mean(EEG['data'][channel_index,:,:],axis=2)),'k',linewidth=2)
ax.set_title("All EEG traces, and their average")
plt.show()
#now plot only the ERP
fig, ax = plt.subplots(figsize=(8,6))
ax.plot(EEG['times'],np.squeeze(np.mean(EEG['data'][channel_index],axis=2))) #axis=2 specifies which axis to compute the mean along
ax.vlines(0,-10,10,linestyles='dashed')
ax.hlines(0,-1000,1500)
ax.axis([-300,1000,-10,10])
ax.set_xlabel("Time from stimlulus onset (ms)")
ax.set_ylabel(r'$ \mu V $') #latex interpreter looks for dollar signs
ax.set(title="ERP (average of " + str(EEG["trials"]) + " trials) from electrode " + EEG["chanlocs"]["labels"][chan_idx])
ax.invert_yaxis() #EEG convention to flip y axis
plt.show()
###Output
_____no_output_____
###Markdown
Figure 9.2. To my knowledge, Python (specifically, scipy) does not have a function that is completely analogous to MATLAB's firls(). A very close approximation that I will use instead is an n-th order Butterworth bandpass filter. TODO
###Code
import scipy.signal as sig
# pick example channel to plot
chan2plot = "P7"
channel_index = np.asarray(EEG["chanlocs"]["labels"]) == chan2plot #specify index (channel number) of label
erp = np.squeeze(np.mean(EEG['data'][channel_index],axis=2))
# filter parameters
nyquist = EEG['srate'] / 2.
transition_width = 0.15
#low-pass filter data
#we'll look at filtering in detail in chapter 14
#filter from 0-40 Hz
filter_high = 40 #Hz; high cut off
b, a = sig.butter(5, np.array([filter_high*(1+transition_width)])/nyquist, btype="lowpass")
erp_0to40 = sig.filtfilt(b, a, erp, padlen=150) #use filtfilt (filters forwards and backwards to eliminate phase shift)
#next, filter from 0-10
filter_high = 10 #Hz
b, a = sig.butter(5, np.array([filter_high*(1+transition_width)])/nyquist,btype="lowpass")
erp_0to10 = sig.filtfilt(b, a, erp, padlen=150)
#next, filter from 5-15
filter_low = 5 # Hz
filter_high = 15 # Hz
b, a = sig.butter(5, np.array([filter_low*(1-transition_width), filter_high*(1+transition_width)])/nyquist,btype="bandpass")
erp_5to15 = sig.filtfilt(b, a, erp, padlen=150)
# plot results
fig, ax = plt.subplots(figsize=[8,6])
ax.plot(EEG['times'],erp,'k')
ax.plot(EEG['times'],erp_0to40,'c')
ax.plot(EEG['times'],erp_0to10,'r')
ax.plot(EEG['times'],erp_5to15,'m')
# label plot
ax.set_xlim([-200,1200])
ax.invert_yaxis()
ax.set_xlabel("time (ms)")
ax.set_ylabel("voltage " + r"$(\mu V)$")
ax.set(title="Raw and filtered signal")
ax.legend(['raw','0-40 Hz','0-10Hz','5-15Hz'])
plt.show()
###Output
_____no_output_____
###Markdown
Figure 9.3
###Code
fig, (ax_1, ax_2) = plt.subplots(nrows=2, figsize=[12,6], tight_layout=True)
ax_1.plot(EEG['times'],np.squeeze(EEG['data'].mean(axis=2)).T)
ax_1.set_xlim([-200, 1000])
ax_1.invert_yaxis() #flip for EEG conventions
ax_1.set(title="ERP from all sensors")
#topographical variance plot
ax_2.plot(EEG['times'], np.squeeze(EEG['data'].mean(axis=2)).var(axis=0))
ax_2.set_xlim([-200,1000])
ax_2.set_xlabel("Time (ms)")
ax_2.set_ylabel("var "+r'$ (\mu V) $')
ax_2.set(title="Topographical variance")
###Output
_____no_output_____
###Markdown
Figure 9.4
###Code
# create mne Evoked object
# create channel montage
chan_labels = EEG['chanlocs']['labels']
coords = np.vstack([EEG['chanlocs']['Y'],EEG['chanlocs']['X'],EEG['chanlocs']['Z']]).T
montage = mne.channels.make_dig_montage(ch_pos=dict(zip(chan_labels, coords)), coord_frame='head')
# create MNE Info and Evoked object
info = mne.create_info(chan_labels, EEG['srate'] ,ch_types='eeg')
evoked = mne.EvokedArray(EEG['data'].mean(axis=2), info, tmin=EEG['xmin'])
evoked.set_montage(montage);
# topoplot with colored dots vs. interpolated surface
# average voltage over trials for a given timepoint
TOI = 300 # ms
c = EEG['data'].mean(axis=2)[:, np.argmin(abs(EEG['times'] - TOI))]
# create figure
fig = plt.figure(figsize=(12,4), tight_layout=True)
# plot topomap without interpolation, 3D
clim = np.max(np.abs(c))
ax_1 = fig.add_subplot(131, projection='3d')
ax_1.scatter(EEG['chanlocs']['Y'], EEG['chanlocs']['X'],
EEG['chanlocs']['Z'], s=50, c=c,
cmap='coolwarm', vmin=-clim, vmax=clim)
ax_1.set(title='no interpolation, 3D')
# plot topomap without interpolation
ax_2 = fig.add_subplot(132)
ax_2.scatter(EEG['chanlocs']['Y'], EEG['chanlocs']['X'], s=50, c=c,
cmap='coolwarm', vmin=-clim, vmax=clim)
ax_2.set(title='no interpolation, 2D')
ax_2.set_xlim([-120,120])
ax_2.set_ylim([-100,100])
# plot interpolated data
# make colorbar axis
ax_3 = fig.add_subplot(133)
divider = make_axes_locatable(ax_3)
cax = divider.append_axes("right", size="5%", pad=0.05)
ax_3.set(title='interpolated')
evoked.plot_topomap(times=TOI/1000, axes=(ax_3, cax), time_format='');
###Output
_____no_output_____
###Markdown
Figure 9.5
###Code
# plot topomap, interpolated surface, 50 ms intervals
evoked.plot_topomap(times=np.linspace(-100,600,15)/1000, nrows=3);
###Output
_____no_output_____
###Markdown
Figure 9.6
###Code
use_rts = True #or false
#get RTs from each trial to use for sorting trials. In this experiment,
#the RT was always the first event after the stimulus (the time=0 event).
#Normally, you should build in exceptions in case there was no response or
#another event occured between the stimulus and response. This was already
#done for the current dataset.
rts = np.zeros(EEG['trials'])
for ei in range(EEG['trials']):
#first, find the index at which time = 0 event occurs
time0event = np.asarray(EEG['epoch']["eventlatency"][ei]) == 0 #bool array of where time=0 occurs
time0event = np.where(time0event == time0event.max())[0][0] # find the index of the True value in this array
rts[ei] = EEG['epoch']["eventlatency"][ei][time0event+1]
if use_rts:
rts_idx=np.argsort(rts)
else:
rts_idx = np.argsort(np.squeeze(EEG['data'][46,333,:]))
#plot the trials for one channel, in (un)sorted order
fig, ax = plt.subplots(figsize=[8,6])
im = ax.imshow(np.squeeze(EEG['data'][46,:,rts_idx]),
extent=[EEG['times'][0], EEG['times'][-1], 1, EEG['trials']],
aspect="auto",
cmap=plt.get_cmap("jet"),
origin="lower",
interpolation="none")
# add colorbar
fig.colorbar(im, label=r"$\mu V$")
im.set_clim([-30,30])
# label fig
ax.set_xlabel("time from stim onset (ms)")
ax.set_ylabel("trial number")
ax.axis([-200,1200,1,99])
# add legend
if use_rts:
rtplot=plt.plot(rts[rts_idx],np.arange(1,EEG['trials']+1),'k',linewidth=3, label= "Reaction time")
ax.legend(bbox_to_anchor=[1.5,1]) #put the legend outside of the image
###Output
_____no_output_____
###Markdown
Chapter 09: Tuples and ranges - integer type: int - floating-point type: float - string type: str (string) - boolean type: bool (boolean) - list type: list - tuple: tpl (tuple) - similar to a list (i.e., an array) - a list can have values added, removed, and changed, whereas a tuple is read-only (a tuple is always a constant) 09-1. Tuples
###Code
lst = [1, 2, 3]
lst
lst = [1, 2, 3]
lst[0]
type(lst)
tpl = (1, 2, 3)
tpl
tpl = (1, 2, 3)
tpl[0]
type(tpl)
lst = [1, 2, 3]
lst.append(3)
lst
lst = [1, 2, 3]
lst[0] = 7
lst
tpl = (1, 2, 3)
tpl.append(3)
tpl
tpl = (1, 2, 3)
tpl[0] = 7
tpl
###Output
_____no_output_____
###Markdown
09-2. Where are tuples used?
###Code
frns = [['๋์', 131120], ['์ง์ฐ', 130312], ['์ ์', 130904]]
frns
frns[2]
frns = [('๋์', 131120), ('์ง์ฐ', 130312), ('์ ์', 130904)]
frns
frns[2]
###Output
_____no_output_____
###Markdown
09-3. Tuple-related functions and operations
###Code
nums = (3, 2, 5, 7, 1)
len(nums)    # how many values are there?
max(nums)    # the maximum value?
min(nums)    # the minimum value?
nums = (1, 2, 3, 1, 2)
nums.count(2)    # how many times does 2 appear?
nums.index(1)    # the index of the first (leftmost) stored 1?
nums = (1, 2, 3)
3 in nums    # is 3 in nums?
2 not in nums    # is 2 absent from nums?
###Output
_____no_output_____
###Markdown
- Create a new tuple combining the tuple stored in nums with (4, 5)
###Code
nums + (4, 5)    # the result of appending (4, 5) to nums?
###Output
_____no_output_____
###Markdown
- Create a new tuple made by joining two copies of the tuple stored in nums
###Code
nums * 2    # the result of concatenating nums twice?
###Output
_____no_output_____
###Markdown
- Create a new tuple made up of only part of the tuple stored in nums
###Code
nums[0:3]    # extracting nums[0] through nums[2]?
for i in (1, 3, 5, 7, 9):
print(i, end = ' ')
###Output
_____no_output_____
###Markdown
09-4. Speaking of which, let's try changing the data stored in a list
###Code
frns = [('๋์', 131120), ('์ง์ฐ', 130312), ('์ ์', 130904)]
frns[0][0]
frns[0][1]
frns[0][0] = '๋์ค'
frns[0][1] = 123456
frns[0] = ('๋์ค', 123456)
frns
###Output
_____no_output_____
###Markdown
09-5. Ranges that specify a span
###Code
for i in range(1, 11):
print(i, end = ' ')
r = range(1, 10)
type(r)
r = range(1, 10)
9 in r
10 not in r
list((1, 2, 3))    # tuple to list
list(range(1, 5))    # range to list
list("Hello")    # string to list
tuple([1, 2, 3])    # list to tuple
tuple(range(1, 5))    # range to tuple
tuple("Hello")    # string to tuple
lst = list(range(1, 16))
lst
tpl = tuple(range(1, 16))
tpl
range(1, 10, 2)    # a range from 1 up to (but not including) 10, in steps of 2
range(1, 10, 3)    # a range from 1 up to (but not including) 10, in steps of 3
list(range(1, 10, 2))    # make a list from 1 up to (but not including) 10, in steps of 2
list(range(1, 10, 3))    # make a list from 1 up to (but not including) 10, in steps of 3
###Output
_____no_output_____
###Markdown
09-6. Specifying a range in reverse
###Code
list(range(2, 10))
list(range(2, 10, 1))
list(range(10, 2))
list(range(10, 2, 1))
list(range(10, 2, -1))    # from 10, decreasing by 1, reaching down as far as 3
list(range(10, 2, -2))    # from 10, decreasing by 2, reaching down as far as 3
list(range(10, 2, -3))    # from 10, decreasing by 3, reaching down as far as 3
###Output
_____no_output_____
###Markdown
Assignment - Now that we know how to use ranges, it's time to get serious about solving Baekjoon problems, right?
###Code
###Output
_____no_output_____ |
Transfer_Learning_Lab.ipynb | ###Markdown
Lab: Transfer Learning. Training a network with ImageNet pre-trained weights as a base, but with additional network layers of your own added on. You'll also get to see the difference between using frozen weights and training on all layers.
Frozen weights are typically used when you are only fine-tuning a model, because backpropagation and weight updates are not applied to frozen layers during training. If you have an ImageNet pre-trained model, most of the network is likely applicable to your situation, so you may only need to cut off the top fully-connected layer, freeze all the other layers, and add one or more layers on top to perform some fine-tuning. There is also the option of not freezing the weights, which starts your model from the ImageNet pre-trained weights (if applicable) and then performs further training from there. An additional benefit of freezing weights is memory usage and training speed: for larger networks such as VGG, there is a substantial memory and speed benefit, since backpropagation and weight updates only need to be performed for a small subset of the layers rather than all of them. Note: if you run into problems, you can find a solution notebook by clicking the Jupyter logo in the upper left of the workspace.
###Code
# Set a couple flags for training - you can ignore these for now
freeze_flag = True # `True` to freeze layers, `False` for full training
weights_flag = 'imagenet' # 'imagenet' or None
preprocess_flag = True # Should be true for ImageNet pre-trained typically
# Loads in InceptionV3
from keras.applications.inception_v3 import InceptionV3
# We can use smaller than the default 299x299x3 input for InceptionV3
# which will speed up training. Keras v2.0.9 supports down to 139x139x3
input_size = 139
# Using Inception with ImageNet pre-trained weights
inception = InceptionV3(weights=weights_flag, include_top=False,
input_shape=(input_size,input_size,3))
###Output
Using TensorFlow backend.
###Markdown
We'll use Inception V3 for this lab, although you can use the same techniques with any of the models in [Keras Applications](https://keras.io/applications/). Do note that certain models are only available in certain versions of Keras; this workspace uses Keras v2.0.9, for which you can see the available models [here](https://faroit.github.io/keras-docs/2.0.9/applications/).In the above, we've set Inception to use an `input_shape` of 139x139x3 instead of the default 299x299x3. This will help us to speed up our training a bit later (and we'll actually be upsampling from smaller images, so we aren't losing data here). In order to do so, we also must set `include_top` to `False`, which means the final fully-connected layer with 1,000 nodes for each ImageNet class is dropped, as well as a Global Average Pooling layer. Pre-trained with frozen weights. To start, we'll see how an ImageNet pre-trained model with all weights frozen in the InceptionV3 model performs. We will also drop the end layer and append new layers onto it, although you could do this in different ways (not drop the end and add new layers, drop more layers than we will here, etc.).You can freeze layers by setting `layer.trainable` to False for a given `layer`. Within a `model`, you can get the list of layers with `model.layers`.
###Code
if freeze_flag == True:
## TODO: Iterate through the layers of the Inception model
## loaded above and set all of them to have trainable = False
for layer in inception.layers:
layer.trainable = False
###Output
_____no_output_____
###Markdown
Dropping layersYou can drop layers from a model with `model.layers.pop()`. Before you do this, you should check out what the actual layers of the model are with Keras's `.summary()` function.
###Code
## TODO: Use the model summary function to see all layers in the
## loaded Inception model
inception.summary()
###Output
_____no_output_____
###Markdown
In a normal Inception network, you would see from the model summary that the last two layers were a global average pooling layer, and a fully-connected "Dense" layer. However, since we set `include_top` to `False`, both of these get dropped. If you otherwise wanted to drop additional layers, you would use:```inception.layers.pop()```Note that `pop()` works from the end of the model backwards. It's important to note two things here:1. How many layers you drop is up to you, typically. We dropped the final two already by setting `include_top` to False in the original loading of the model, but you could instead just run `pop()` twice to achieve similar results. (*Note:* Keras requires us to set `include_top` to False in order to change the `input_shape`.) Additional layers could be dropped by additional calls to `pop()`.2. If you make a mistake with `pop()`, you'll want to reload the model. If you use it multiple times, the model will continue to drop more and more layers, so you may need to check `model.summary()` again to check your work. Adding new layersNow, you can start to add your own layers. While we've used Keras's `Sequential` model before for simplicity, we'll actually use the [Model API](https://keras.io/models/model/) this time. This functions a little differently, in that instead of using `model.add()`, you explicitly tell the model which previous layer to attach to the current layer. This is useful if you want to use more advanced concepts like [skip layers](https://en.wikipedia.org/wiki/Residual_neural_network), for instance (which were used heavily in ResNet).For example, if you had a previous layer named `inp`:```x = Dropout(0.2)(inp)```is how you would attach a new dropout layer `x`, with its input coming from a layer with the variable name `inp`.We are going to use the [CIFAR-10 dataset](https://www.cs.toronto.edu/~kriz/cifar.html), which consists of 60,000 32x32 images of 10 classes. We need to use Keras's `Input` function to do so, and then we want to re-size the images up to the `input_size` we specified earlier (139x139).
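As a small illustration of why this style matters, here is a hypothetical skip connection written with the same API (the layer sizes are illustrative, not part of this lab):
```
from keras.layers import Input, Dense, Add

inp = Input(shape=(16,))
h = Dense(16, activation='relu')(inp)
out = Add()([inp, h])  # the skip: `out` is fed by both `inp` and `h`
```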
###Code
from keras.layers import Input, Lambda
import tensorflow as tf
# Makes the input placeholder layer 32x32x3 for CIFAR-10
cifar_input = Input(shape=(32,32,3))
# Re-sizes the input with Kera's Lambda layer & attach to cifar_input
resized_input = Lambda(lambda image: tf.image.resize_images(
image, (input_size, input_size)))(cifar_input)
# Feeds the re-sized input into Inception model
# You will need to update the model name if you changed it earlier!
inp = inception(resized_input)
# Imports fully-connected "Dense" layers & Global Average Pooling
from keras.layers import Dense, GlobalAveragePooling2D
## TODO: Setting `include_top` to False earlier also removed the
## GlobalAveragePooling2D layer, but we still want it.
## Add it here, and make sure to connect it to the end of Inception
x = GlobalAveragePooling2D()(inp)
## TODO: Create two new fully-connected layers using the Model API
## format discussed above. The first layer should use `out`
## as its input, along with ReLU activation. You can choose
## how many nodes it has, although 512 or less is a good idea.
## The second layer should take this first layer as input, and
## be named "predictions", with Softmax activation and
## 10 nodes, as we'll be using the CIFAR10 dataset.
#x = Dense(512, activation = 'relu')(x)
predictions = Dense(10, activation = 'softmax')(x)
###Output
_____no_output_____
###Markdown
We're almost done with our new model! Now we just need to use the actual Model API to create the full model.
###Code
# Imports the Model API
from keras.models import Model
# Creates the model, assuming your final layer is named "predictions"
model = Model(inputs=cifar_input, outputs=predictions)
# Compile the model
model.compile(optimizer='Adam', loss='categorical_crossentropy', metrics=['accuracy'])
# Check the summary of this new model to confirm the architecture
model.summary()
###Output
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_5 (InputLayer) (None, 32, 32, 3) 0
_________________________________________________________________
lambda_4 (Lambda) (None, 139, 139, 3) 0
_________________________________________________________________
inception_v3 (Model) (None, 3, 3, 2048) 21802784
_________________________________________________________________
global_average_pooling2d_3 ( (None, 2048) 0
_________________________________________________________________
dense_5 (Dense) (None, 10) 20490
=================================================================
Total params: 21,823,274
Trainable params: 20,490
Non-trainable params: 21,802,784
_________________________________________________________________
###Markdown
Great job creating a new model architecture from Inception! Notice how this method of adding layers before InceptionV3 and appending to the end of it made InceptionV3 condense down into one line in the summary; if you use the Inception model's normal input (which you could gather from `inception.layers.input`), it would instead show all the layers like before. Most of the rest of the code in the notebook just goes toward loading our data, pre-processing it, and starting our training in Keras, although there's one other good point to make here - Keras callbacks.
Keras Callbacks
Keras [callbacks](https://keras.io/callbacks/) allow you to gather and store additional information during training, such as the best model, or even stop training early if the validation accuracy has stopped improving. These methods can help to avoid overfitting, or avoid other issues. There are two key callbacks to mention here, `ModelCheckpoint` and `EarlyStopping`. As the names may suggest, model checkpoint saves down the best model so far based on a given metric, while early stopping will end training before the specified number of epochs if the chosen metric no longer improves after a given amount of time. To set these callbacks, you could do the following:```checkpoint = ModelCheckpoint(filepath=save_path, monitor='val_loss', save_best_only=True)```This would save a model to a specified `save_path`, based on validation loss, and only save down the best models. If you set `save_best_only` to `False`, every single epoch will save down another version of the model.```stopper = EarlyStopping(monitor='val_acc', min_delta=0.0003, patience=5)```This will monitor validation accuracy, and if it has not improved by more than 0.0003 over the previous best validation accuracy for 5 epochs, training will end early. You still need to actually feed these callbacks into `fit()` when you train the model (along with all other relevant data to feed into `fit`):```model.fit(callbacks=[checkpoint, stopper])```
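Putting the pieces together, a minimal sketch (the filename `best_model.h5` is a placeholder of our own choosing, and the commented `fit` call assumes the arrays defined in the cells below):
```
from keras.callbacks import ModelCheckpoint, EarlyStopping

checkpoint = ModelCheckpoint(filepath='best_model.h5', monitor='val_loss', save_best_only=True)
stopper = EarlyStopping(monitor='val_acc', min_delta=0.0003, patience=5)
# model.fit(X_train, y_one_hot_train, validation_data=(X_val, y_one_hot_val),
#           epochs=20, callbacks=[checkpoint, stopper])
```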
###Code
from sklearn.utils import shuffle
from sklearn.preprocessing import LabelBinarizer
from keras.datasets import cifar10
(X_train, y_train), (X_val, y_val) = cifar10.load_data()
# One-hot encode the labels
label_binarizer = LabelBinarizer()
y_one_hot_train = label_binarizer.fit_transform(y_train)
y_one_hot_val = label_binarizer.fit_transform(y_val)
# Shuffle the training & test data
X_train, y_one_hot_train = shuffle(X_train, y_one_hot_train)
X_val, y_one_hot_val = shuffle(X_val, y_one_hot_val)
# We are only going to use the first 2,000 images for speed reasons
# And only the first 400 images from the validation set
X_train = X_train[:2000]
y_one_hot_train = y_one_hot_train[:2000]
X_val = X_val[:400]
y_one_hot_val = y_one_hot_val[:400]
###Output
_____no_output_____
###Markdown
You can check out Keras's [ImageDataGenerator documentation](https://faroit.github.io/keras-docs/2.0.9/preprocessing/image/) for more information on the below - you can also add additional image augmentation through this function, although we are skipping that step here so you can potentially explore it in the upcoming project.
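If you did want to add augmentation, a hedged sketch with illustrative values might look like the following; these keyword arguments are standard `ImageDataGenerator` options, not settings used by this lab:
```
from keras.preprocessing.image import ImageDataGenerator
from keras.applications.inception_v3 import preprocess_input

# Illustrative augmentation settings (assumptions, not from the lab)
aug_datagen = ImageDataGenerator(preprocessing_function=preprocess_input,
                                 rotation_range=15,
                                 width_shift_range=0.1,
                                 height_shift_range=0.1,
                                 horizontal_flip=True)
```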
###Code
# Use a generator to pre-process our images for ImageNet
from keras.preprocessing.image import ImageDataGenerator
from keras.applications.inception_v3 import preprocess_input
if preprocess_flag == True:
datagen = ImageDataGenerator(preprocessing_function=preprocess_input)
val_datagen = ImageDataGenerator(preprocessing_function=preprocess_input)
else:
datagen = ImageDataGenerator()
val_datagen = ImageDataGenerator()
# Train the model
batch_size = 32
epochs = 5
# Note: we aren't using callbacks here since we only are using 5 epochs to conserve GPU time
model.fit_generator(datagen.flow(X_train, y_one_hot_train, batch_size=batch_size),
steps_per_epoch=len(X_train)/batch_size, epochs=epochs, verbose=1,
validation_data=val_datagen.flow(X_val, y_one_hot_val, batch_size=batch_size),
validation_steps=len(X_val)/batch_size)
###Output
Epoch 1/5
63/62 [==============================] - 233s - loss: 1.7256 - acc: 0.4137 - val_loss: 1.1955 - val_acc: 0.6650
Epoch 2/5
63/62 [==============================] - 228s - loss: 1.1022 - acc: 0.6409 - val_loss: 1.1578 - val_acc: 0.6200
Epoch 3/5
63/62 [==============================] - 226s - loss: 0.9580 - acc: 0.6835 - val_loss: 1.0928 - val_acc: 0.6500
Epoch 4/5
63/62 [==============================] - 226s - loss: 0.8361 - acc: 0.7187 - val_loss: 1.0487 - val_acc: 0.6675
Epoch 5/5
63/62 [==============================] - 225s - loss: 0.7097 - acc: 0.7684 - val_loss: 1.1868 - val_acc: 0.6425
###Markdown
Lab: Transfer Learning. Welcome to the lab on Transfer Learning! Here, you'll get a chance to try out training a network with ImageNet pre-trained weights as a base, but with additional network layers of your own added on. You'll also get to see the difference between using frozen weights and training on all layers. GPU usage: In our previous examples in this lesson, we've avoided using GPU, but this time around you'll have the option to enable it. You do not need it on to begin with, but make sure anytime you switch from non-GPU to GPU, or vice versa, that you save your notebook! If not, you'll likely be reverted to the previous checkpoint. We also suggest only using the GPU when performing the (mostly minor) training below - you'll want to conserve GPU hours for your Behavioral Cloning project coming up next!
###Code
# Set a couple flags for training - you can ignore these for now
freeze_flag = True # `True` to freeze layers, `False` for full training
weights_flag = None # 'imagenet' or None
preprocess_flag = True # Should be true for ImageNet pre-trained typically
# Loads in InceptionV3
from keras.applications.inception_v3 import InceptionV3
# We can use smaller than the default 299x299x3 input for InceptionV3
# which will speed up training. Keras v2.0.9 supports down to 139x139x3
input_size = 139
# Using Inception with ImageNet pre-trained weights
inception = InceptionV3(weights=weights_flag, include_top=False,
input_shape=(input_size,input_size,3))
###Output
Using TensorFlow backend.
###Markdown
We'll use Inception V3 for this lab, although you can use the same techniques with any of the models in [Keras Applications](https://keras.io/applications/). Do note that certain models are only available in certain versions of Keras; this workspace uses Keras v2.0.9, for which you can see the available models [here](https://faroit.github.io/keras-docs/2.0.9/applications/).In the above, we've set Inception to use an `input_shape` of 139x139x3 instead of the default 299x299x3. This will help us to speed up our training a bit later (and we'll actually be upsampling from smaller images, so we aren't losing data here). In order to do so, we also must set `include_top` to `False`, which means the final fully-connected layer with 1,000 nodes for each ImageNet class is dropped, as well as a Global Average Pooling layer. Pre-trained with frozen weightsTo start, we'll see how an ImageNet pre-trained model with all weights frozen in the InceptionV3 model performs. We will also drop the end layer and append new layers onto it, although you could do this in different ways (not drop the end and add new layers, drop more layers than we will here, etc.).You can freeze layers by setting `layer.trainable` to False for a given `layer`. Within a `model`, you can get the list of layers with `model.layers`.
###Code
if freeze_flag == True:
## TODO: Iterate through the layers of the Inception model
## loaded above and set all of them to have trainable = False
for layer in inception.layers:
layer.trainable = False
###Output
_____no_output_____
###Markdown
Dropping layersYou can drop layers from a model with `model.layers.pop()`. Before you do this, you should check out what the actual layers of the model are with Keras's `.summary()` function.
###Code
## TODO: Use the model summary function to see all layers in the
## loaded Inception model
print(inception.summary())
###Output
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_1 (InputLayer) (None, 139, 139, 3) 0
__________________________________________________________________________________________________
conv2d_1 (Conv2D) (None, 69, 69, 32) 864 input_1[0][0]
__________________________________________________________________________________________________
batch_normalization_1 (BatchNor (None, 69, 69, 32) 96 conv2d_1[0][0]
__________________________________________________________________________________________________
activation_1 (Activation) (None, 69, 69, 32) 0 batch_normalization_1[0][0]
__________________________________________________________________________________________________
conv2d_2 (Conv2D) (None, 67, 67, 32) 9216 activation_1[0][0]
__________________________________________________________________________________________________
batch_normalization_2 (BatchNor (None, 67, 67, 32) 96 conv2d_2[0][0]
__________________________________________________________________________________________________
activation_2 (Activation) (None, 67, 67, 32) 0 batch_normalization_2[0][0]
__________________________________________________________________________________________________
conv2d_3 (Conv2D) (None, 67, 67, 64) 18432 activation_2[0][0]
__________________________________________________________________________________________________
batch_normalization_3 (BatchNor (None, 67, 67, 64) 192 conv2d_3[0][0]
__________________________________________________________________________________________________
activation_3 (Activation) (None, 67, 67, 64) 0 batch_normalization_3[0][0]
__________________________________________________________________________________________________
max_pooling2d_1 (MaxPooling2D) (None, 33, 33, 64) 0 activation_3[0][0]
__________________________________________________________________________________________________
conv2d_4 (Conv2D) (None, 33, 33, 80) 5120 max_pooling2d_1[0][0]
__________________________________________________________________________________________________
batch_normalization_4 (BatchNor (None, 33, 33, 80) 240 conv2d_4[0][0]
__________________________________________________________________________________________________
activation_4 (Activation) (None, 33, 33, 80) 0 batch_normalization_4[0][0]
__________________________________________________________________________________________________
conv2d_5 (Conv2D) (None, 31, 31, 192) 138240 activation_4[0][0]
__________________________________________________________________________________________________
batch_normalization_5 (BatchNor (None, 31, 31, 192) 576 conv2d_5[0][0]
__________________________________________________________________________________________________
activation_5 (Activation) (None, 31, 31, 192) 0 batch_normalization_5[0][0]
__________________________________________________________________________________________________
max_pooling2d_2 (MaxPooling2D) (None, 15, 15, 192) 0 activation_5[0][0]
__________________________________________________________________________________________________
conv2d_9 (Conv2D) (None, 15, 15, 64) 12288 max_pooling2d_2[0][0]
__________________________________________________________________________________________________
batch_normalization_9 (BatchNor (None, 15, 15, 64) 192 conv2d_9[0][0]
__________________________________________________________________________________________________
activation_9 (Activation) (None, 15, 15, 64) 0 batch_normalization_9[0][0]
__________________________________________________________________________________________________
conv2d_7 (Conv2D) (None, 15, 15, 48) 9216 max_pooling2d_2[0][0]
__________________________________________________________________________________________________
conv2d_10 (Conv2D) (None, 15, 15, 96) 55296 activation_9[0][0]
__________________________________________________________________________________________________
batch_normalization_7 (BatchNor (None, 15, 15, 48) 144 conv2d_7[0][0]
__________________________________________________________________________________________________
batch_normalization_10 (BatchNo (None, 15, 15, 96) 288 conv2d_10[0][0]
__________________________________________________________________________________________________
activation_7 (Activation) (None, 15, 15, 48) 0 batch_normalization_7[0][0]
__________________________________________________________________________________________________
activation_10 (Activation) (None, 15, 15, 96) 0 batch_normalization_10[0][0]
__________________________________________________________________________________________________
average_pooling2d_1 (AveragePoo (None, 15, 15, 192) 0 max_pooling2d_2[0][0]
__________________________________________________________________________________________________
conv2d_6 (Conv2D) (None, 15, 15, 64) 12288 max_pooling2d_2[0][0]
__________________________________________________________________________________________________
conv2d_8 (Conv2D) (None, 15, 15, 64) 76800 activation_7[0][0]
__________________________________________________________________________________________________
conv2d_11 (Conv2D) (None, 15, 15, 96) 82944 activation_10[0][0]
__________________________________________________________________________________________________
conv2d_12 (Conv2D) (None, 15, 15, 32) 6144 average_pooling2d_1[0][0]
__________________________________________________________________________________________________
batch_normalization_6 (BatchNor (None, 15, 15, 64) 192 conv2d_6[0][0]
__________________________________________________________________________________________________
batch_normalization_8 (BatchNor (None, 15, 15, 64) 192 conv2d_8[0][0]
__________________________________________________________________________________________________
batch_normalization_11 (BatchNo (None, 15, 15, 96) 288 conv2d_11[0][0]
__________________________________________________________________________________________________
batch_normalization_12 (BatchNo (None, 15, 15, 32) 96 conv2d_12[0][0]
__________________________________________________________________________________________________
activation_6 (Activation) (None, 15, 15, 64) 0 batch_normalization_6[0][0]
__________________________________________________________________________________________________
activation_8 (Activation) (None, 15, 15, 64) 0 batch_normalization_8[0][0]
__________________________________________________________________________________________________
activation_11 (Activation) (None, 15, 15, 96) 0 batch_normalization_11[0][0]
__________________________________________________________________________________________________
activation_12 (Activation) (None, 15, 15, 32) 0 batch_normalization_12[0][0]
__________________________________________________________________________________________________
mixed0 (Concatenate) (None, 15, 15, 256) 0 activation_6[0][0]
activation_8[0][0]
activation_11[0][0]
activation_12[0][0]
__________________________________________________________________________________________________
conv2d_16 (Conv2D) (None, 15, 15, 64) 16384 mixed0[0][0]
__________________________________________________________________________________________________
batch_normalization_16 (BatchNo (None, 15, 15, 64) 192 conv2d_16[0][0]
__________________________________________________________________________________________________
activation_16 (Activation) (None, 15, 15, 64) 0 batch_normalization_16[0][0]
__________________________________________________________________________________________________
conv2d_14 (Conv2D) (None, 15, 15, 48) 12288 mixed0[0][0]
__________________________________________________________________________________________________
conv2d_17 (Conv2D) (None, 15, 15, 96) 55296 activation_16[0][0]
__________________________________________________________________________________________________
batch_normalization_14 (BatchNo (None, 15, 15, 48) 144 conv2d_14[0][0]
__________________________________________________________________________________________________
batch_normalization_17 (BatchNo (None, 15, 15, 96) 288 conv2d_17[0][0]
__________________________________________________________________________________________________
activation_14 (Activation) (None, 15, 15, 48) 0 batch_normalization_14[0][0]
__________________________________________________________________________________________________
activation_17 (Activation) (None, 15, 15, 96) 0 batch_normalization_17[0][0]
__________________________________________________________________________________________________
average_pooling2d_2 (AveragePoo (None, 15, 15, 256) 0 mixed0[0][0]
__________________________________________________________________________________________________
conv2d_13 (Conv2D) (None, 15, 15, 64) 16384 mixed0[0][0]
__________________________________________________________________________________________________
conv2d_15 (Conv2D) (None, 15, 15, 64) 76800 activation_14[0][0]
__________________________________________________________________________________________________
conv2d_18 (Conv2D) (None, 15, 15, 96) 82944 activation_17[0][0]
__________________________________________________________________________________________________
conv2d_19 (Conv2D) (None, 15, 15, 64) 16384 average_pooling2d_2[0][0]
__________________________________________________________________________________________________
batch_normalization_13 (BatchNo (None, 15, 15, 64) 192 conv2d_13[0][0]
__________________________________________________________________________________________________
batch_normalization_15 (BatchNo (None, 15, 15, 64) 192 conv2d_15[0][0]
__________________________________________________________________________________________________
batch_normalization_18 (BatchNo (None, 15, 15, 96) 288 conv2d_18[0][0]
__________________________________________________________________________________________________
batch_normalization_19 (BatchNo (None, 15, 15, 64) 192 conv2d_19[0][0]
__________________________________________________________________________________________________
activation_13 (Activation) (None, 15, 15, 64) 0 batch_normalization_13[0][0]
__________________________________________________________________________________________________
activation_15 (Activation) (None, 15, 15, 64) 0 batch_normalization_15[0][0]
__________________________________________________________________________________________________
activation_18 (Activation) (None, 15, 15, 96) 0 batch_normalization_18[0][0]
__________________________________________________________________________________________________
activation_19 (Activation) (None, 15, 15, 64) 0 batch_normalization_19[0][0]
__________________________________________________________________________________________________
mixed1 (Concatenate) (None, 15, 15, 288) 0 activation_13[0][0]
activation_15[0][0]
activation_18[0][0]
activation_19[0][0]
__________________________________________________________________________________________________
conv2d_23 (Conv2D) (None, 15, 15, 64) 18432 mixed1[0][0]
__________________________________________________________________________________________________
batch_normalization_23 (BatchNo (None, 15, 15, 64) 192 conv2d_23[0][0]
__________________________________________________________________________________________________
activation_23 (Activation) (None, 15, 15, 64) 0 batch_normalization_23[0][0]
__________________________________________________________________________________________________
conv2d_21 (Conv2D) (None, 15, 15, 48) 13824 mixed1[0][0]
__________________________________________________________________________________________________
conv2d_24 (Conv2D) (None, 15, 15, 96) 55296 activation_23[0][0]
__________________________________________________________________________________________________
batch_normalization_21 (BatchNo (None, 15, 15, 48) 144 conv2d_21[0][0]
__________________________________________________________________________________________________
batch_normalization_24 (BatchNo (None, 15, 15, 96) 288 conv2d_24[0][0]
__________________________________________________________________________________________________
activation_21 (Activation) (None, 15, 15, 48) 0 batch_normalization_21[0][0]
__________________________________________________________________________________________________
activation_24 (Activation) (None, 15, 15, 96) 0 batch_normalization_24[0][0]
__________________________________________________________________________________________________
average_pooling2d_3 (AveragePoo (None, 15, 15, 288) 0 mixed1[0][0]
__________________________________________________________________________________________________
conv2d_20 (Conv2D) (None, 15, 15, 64) 18432 mixed1[0][0]
__________________________________________________________________________________________________
conv2d_22 (Conv2D) (None, 15, 15, 64) 76800 activation_21[0][0]
__________________________________________________________________________________________________
conv2d_25 (Conv2D) (None, 15, 15, 96) 82944 activation_24[0][0]
__________________________________________________________________________________________________
conv2d_26 (Conv2D) (None, 15, 15, 64) 18432 average_pooling2d_3[0][0]
__________________________________________________________________________________________________
batch_normalization_20 (BatchNo (None, 15, 15, 64) 192 conv2d_20[0][0]
__________________________________________________________________________________________________
batch_normalization_22 (BatchNo (None, 15, 15, 64) 192 conv2d_22[0][0]
__________________________________________________________________________________________________
batch_normalization_25 (BatchNo (None, 15, 15, 96) 288 conv2d_25[0][0]
__________________________________________________________________________________________________
batch_normalization_26 (BatchNo (None, 15, 15, 64) 192 conv2d_26[0][0]
__________________________________________________________________________________________________
activation_20 (Activation) (None, 15, 15, 64) 0 batch_normalization_20[0][0]
__________________________________________________________________________________________________
activation_22 (Activation) (None, 15, 15, 64) 0 batch_normalization_22[0][0]
__________________________________________________________________________________________________
activation_25 (Activation) (None, 15, 15, 96) 0 batch_normalization_25[0][0]
__________________________________________________________________________________________________
activation_26 (Activation) (None, 15, 15, 64) 0 batch_normalization_26[0][0]
__________________________________________________________________________________________________
mixed2 (Concatenate) (None, 15, 15, 288) 0 activation_20[0][0]
activation_22[0][0]
activation_25[0][0]
activation_26[0][0]
__________________________________________________________________________________________________
conv2d_28 (Conv2D) (None, 15, 15, 64) 18432 mixed2[0][0]
__________________________________________________________________________________________________
batch_normalization_28 (BatchNo (None, 15, 15, 64) 192 conv2d_28[0][0]
__________________________________________________________________________________________________
activation_28 (Activation) (None, 15, 15, 64) 0 batch_normalization_28[0][0]
__________________________________________________________________________________________________
conv2d_29 (Conv2D) (None, 15, 15, 96) 55296 activation_28[0][0]
__________________________________________________________________________________________________
batch_normalization_29 (BatchNo (None, 15, 15, 96) 288 conv2d_29[0][0]
__________________________________________________________________________________________________
activation_29 (Activation) (None, 15, 15, 96) 0 batch_normalization_29[0][0]
__________________________________________________________________________________________________
conv2d_27 (Conv2D) (None, 7, 7, 384) 995328 mixed2[0][0]
__________________________________________________________________________________________________
conv2d_30 (Conv2D) (None, 7, 7, 96) 82944 activation_29[0][0]
__________________________________________________________________________________________________
batch_normalization_27 (BatchNo (None, 7, 7, 384) 1152 conv2d_27[0][0]
__________________________________________________________________________________________________
batch_normalization_30 (BatchNo (None, 7, 7, 96) 288 conv2d_30[0][0]
__________________________________________________________________________________________________
activation_27 (Activation) (None, 7, 7, 384) 0 batch_normalization_27[0][0]
__________________________________________________________________________________________________
activation_30 (Activation) (None, 7, 7, 96) 0 batch_normalization_30[0][0]
__________________________________________________________________________________________________
max_pooling2d_3 (MaxPooling2D) (None, 7, 7, 288) 0 mixed2[0][0]
__________________________________________________________________________________________________
mixed3 (Concatenate) (None, 7, 7, 768) 0 activation_27[0][0]
activation_30[0][0]
max_pooling2d_3[0][0]
__________________________________________________________________________________________________
conv2d_35 (Conv2D) (None, 7, 7, 128) 98304 mixed3[0][0]
__________________________________________________________________________________________________
batch_normalization_35 (BatchNo (None, 7, 7, 128) 384 conv2d_35[0][0]
__________________________________________________________________________________________________
activation_35 (Activation) (None, 7, 7, 128) 0 batch_normalization_35[0][0]
__________________________________________________________________________________________________
conv2d_36 (Conv2D) (None, 7, 7, 128) 114688 activation_35[0][0]
__________________________________________________________________________________________________
batch_normalization_36 (BatchNo (None, 7, 7, 128) 384 conv2d_36[0][0]
__________________________________________________________________________________________________
activation_36 (Activation) (None, 7, 7, 128) 0 batch_normalization_36[0][0]
__________________________________________________________________________________________________
conv2d_32 (Conv2D) (None, 7, 7, 128) 98304 mixed3[0][0]
__________________________________________________________________________________________________
conv2d_37 (Conv2D) (None, 7, 7, 128) 114688 activation_36[0][0]
__________________________________________________________________________________________________
batch_normalization_32 (BatchNo (None, 7, 7, 128) 384 conv2d_32[0][0]
__________________________________________________________________________________________________
batch_normalization_37 (BatchNo (None, 7, 7, 128) 384 conv2d_37[0][0]
__________________________________________________________________________________________________
activation_32 (Activation) (None, 7, 7, 128) 0 batch_normalization_32[0][0]
__________________________________________________________________________________________________
activation_37 (Activation) (None, 7, 7, 128) 0 batch_normalization_37[0][0]
__________________________________________________________________________________________________
conv2d_33 (Conv2D) (None, 7, 7, 128) 114688 activation_32[0][0]
__________________________________________________________________________________________________
conv2d_38 (Conv2D) (None, 7, 7, 128) 114688 activation_37[0][0]
__________________________________________________________________________________________________
batch_normalization_33 (BatchNo (None, 7, 7, 128) 384 conv2d_33[0][0]
__________________________________________________________________________________________________
batch_normalization_38 (BatchNo (None, 7, 7, 128) 384 conv2d_38[0][0]
__________________________________________________________________________________________________
activation_33 (Activation) (None, 7, 7, 128) 0 batch_normalization_33[0][0]
__________________________________________________________________________________________________
activation_38 (Activation) (None, 7, 7, 128) 0 batch_normalization_38[0][0]
__________________________________________________________________________________________________
average_pooling2d_4 (AveragePoo (None, 7, 7, 768) 0 mixed3[0][0]
__________________________________________________________________________________________________
conv2d_31 (Conv2D) (None, 7, 7, 192) 147456 mixed3[0][0]
__________________________________________________________________________________________________
conv2d_34 (Conv2D) (None, 7, 7, 192) 172032 activation_33[0][0]
__________________________________________________________________________________________________
conv2d_39 (Conv2D) (None, 7, 7, 192) 172032 activation_38[0][0]
__________________________________________________________________________________________________
conv2d_40 (Conv2D) (None, 7, 7, 192) 147456 average_pooling2d_4[0][0]
__________________________________________________________________________________________________
batch_normalization_31 (BatchNo (None, 7, 7, 192) 576 conv2d_31[0][0]
__________________________________________________________________________________________________
batch_normalization_34 (BatchNo (None, 7, 7, 192) 576 conv2d_34[0][0]
__________________________________________________________________________________________________
batch_normalization_39 (BatchNo (None, 7, 7, 192) 576 conv2d_39[0][0]
__________________________________________________________________________________________________
batch_normalization_40 (BatchNo (None, 7, 7, 192) 576 conv2d_40[0][0]
__________________________________________________________________________________________________
activation_31 (Activation) (None, 7, 7, 192) 0 batch_normalization_31[0][0]
__________________________________________________________________________________________________
activation_34 (Activation) (None, 7, 7, 192) 0 batch_normalization_34[0][0]
__________________________________________________________________________________________________
activation_39 (Activation) (None, 7, 7, 192) 0 batch_normalization_39[0][0]
__________________________________________________________________________________________________
activation_40 (Activation) (None, 7, 7, 192) 0 batch_normalization_40[0][0]
__________________________________________________________________________________________________
mixed4 (Concatenate) (None, 7, 7, 768) 0 activation_31[0][0]
activation_34[0][0]
activation_39[0][0]
activation_40[0][0]
__________________________________________________________________________________________________
conv2d_45 (Conv2D) (None, 7, 7, 160) 122880 mixed4[0][0]
__________________________________________________________________________________________________
batch_normalization_45 (BatchNo (None, 7, 7, 160) 480 conv2d_45[0][0]
__________________________________________________________________________________________________
activation_45 (Activation) (None, 7, 7, 160) 0 batch_normalization_45[0][0]
__________________________________________________________________________________________________
conv2d_46 (Conv2D) (None, 7, 7, 160) 179200 activation_45[0][0]
__________________________________________________________________________________________________
batch_normalization_46 (BatchNo (None, 7, 7, 160) 480 conv2d_46[0][0]
__________________________________________________________________________________________________
activation_46 (Activation) (None, 7, 7, 160) 0 batch_normalization_46[0][0]
__________________________________________________________________________________________________
conv2d_42 (Conv2D) (None, 7, 7, 160) 122880 mixed4[0][0]
__________________________________________________________________________________________________
conv2d_47 (Conv2D) (None, 7, 7, 160) 179200 activation_46[0][0]
__________________________________________________________________________________________________
batch_normalization_42 (BatchNo (None, 7, 7, 160) 480 conv2d_42[0][0]
__________________________________________________________________________________________________
batch_normalization_47 (BatchNo (None, 7, 7, 160) 480 conv2d_47[0][0]
__________________________________________________________________________________________________
activation_42 (Activation) (None, 7, 7, 160) 0 batch_normalization_42[0][0]
__________________________________________________________________________________________________
activation_47 (Activation) (None, 7, 7, 160) 0 batch_normalization_47[0][0]
__________________________________________________________________________________________________
conv2d_43 (Conv2D) (None, 7, 7, 160) 179200 activation_42[0][0]
__________________________________________________________________________________________________
conv2d_48 (Conv2D) (None, 7, 7, 160) 179200 activation_47[0][0]
__________________________________________________________________________________________________
batch_normalization_43 (BatchNo (None, 7, 7, 160) 480 conv2d_43[0][0]
__________________________________________________________________________________________________
batch_normalization_48 (BatchNo (None, 7, 7, 160) 480 conv2d_48[0][0]
__________________________________________________________________________________________________
activation_43 (Activation) (None, 7, 7, 160) 0 batch_normalization_43[0][0]
__________________________________________________________________________________________________
activation_48 (Activation) (None, 7, 7, 160) 0 batch_normalization_48[0][0]
__________________________________________________________________________________________________
average_pooling2d_5 (AveragePoo (None, 7, 7, 768) 0 mixed4[0][0]
__________________________________________________________________________________________________
conv2d_41 (Conv2D) (None, 7, 7, 192) 147456 mixed4[0][0]
__________________________________________________________________________________________________
conv2d_44 (Conv2D) (None, 7, 7, 192) 215040 activation_43[0][0]
__________________________________________________________________________________________________
conv2d_49 (Conv2D) (None, 7, 7, 192) 215040 activation_48[0][0]
__________________________________________________________________________________________________
conv2d_50 (Conv2D) (None, 7, 7, 192) 147456 average_pooling2d_5[0][0]
__________________________________________________________________________________________________
batch_normalization_41 (BatchNo (None, 7, 7, 192) 576 conv2d_41[0][0]
__________________________________________________________________________________________________
batch_normalization_44 (BatchNo (None, 7, 7, 192) 576 conv2d_44[0][0]
__________________________________________________________________________________________________
batch_normalization_49 (BatchNo (None, 7, 7, 192) 576 conv2d_49[0][0]
__________________________________________________________________________________________________
batch_normalization_50 (BatchNo (None, 7, 7, 192) 576 conv2d_50[0][0]
__________________________________________________________________________________________________
activation_41 (Activation) (None, 7, 7, 192) 0 batch_normalization_41[0][0]
__________________________________________________________________________________________________
activation_44 (Activation) (None, 7, 7, 192) 0 batch_normalization_44[0][0]
__________________________________________________________________________________________________
activation_49 (Activation) (None, 7, 7, 192) 0 batch_normalization_49[0][0]
__________________________________________________________________________________________________
activation_50 (Activation) (None, 7, 7, 192) 0 batch_normalization_50[0][0]
__________________________________________________________________________________________________
mixed5 (Concatenate) (None, 7, 7, 768) 0 activation_41[0][0]
activation_44[0][0]
activation_49[0][0]
activation_50[0][0]
__________________________________________________________________________________________________
conv2d_55 (Conv2D) (None, 7, 7, 160) 122880 mixed5[0][0]
__________________________________________________________________________________________________
batch_normalization_55 (BatchNo (None, 7, 7, 160) 480 conv2d_55[0][0]
__________________________________________________________________________________________________
activation_55 (Activation) (None, 7, 7, 160) 0 batch_normalization_55[0][0]
__________________________________________________________________________________________________
conv2d_56 (Conv2D) (None, 7, 7, 160) 179200 activation_55[0][0]
__________________________________________________________________________________________________
batch_normalization_56 (BatchNo (None, 7, 7, 160) 480 conv2d_56[0][0]
__________________________________________________________________________________________________
activation_56 (Activation) (None, 7, 7, 160) 0 batch_normalization_56[0][0]
__________________________________________________________________________________________________
conv2d_52 (Conv2D) (None, 7, 7, 160) 122880 mixed5[0][0]
__________________________________________________________________________________________________
conv2d_57 (Conv2D) (None, 7, 7, 160) 179200 activation_56[0][0]
__________________________________________________________________________________________________
batch_normalization_52 (BatchNo (None, 7, 7, 160) 480 conv2d_52[0][0]
__________________________________________________________________________________________________
batch_normalization_57 (BatchNo (None, 7, 7, 160) 480 conv2d_57[0][0]
__________________________________________________________________________________________________
activation_52 (Activation) (None, 7, 7, 160) 0 batch_normalization_52[0][0]
__________________________________________________________________________________________________
activation_57 (Activation) (None, 7, 7, 160) 0 batch_normalization_57[0][0]
__________________________________________________________________________________________________
conv2d_53 (Conv2D) (None, 7, 7, 160) 179200 activation_52[0][0]
__________________________________________________________________________________________________
conv2d_58 (Conv2D) (None, 7, 7, 160) 179200 activation_57[0][0]
__________________________________________________________________________________________________
batch_normalization_53 (BatchNo (None, 7, 7, 160) 480 conv2d_53[0][0]
__________________________________________________________________________________________________
batch_normalization_58 (BatchNo (None, 7, 7, 160) 480 conv2d_58[0][0]
__________________________________________________________________________________________________
activation_53 (Activation) (None, 7, 7, 160) 0 batch_normalization_53[0][0]
__________________________________________________________________________________________________
activation_58 (Activation) (None, 7, 7, 160) 0 batch_normalization_58[0][0]
__________________________________________________________________________________________________
average_pooling2d_6 (AveragePoo (None, 7, 7, 768) 0 mixed5[0][0]
__________________________________________________________________________________________________
conv2d_51 (Conv2D) (None, 7, 7, 192) 147456 mixed5[0][0]
__________________________________________________________________________________________________
conv2d_54 (Conv2D) (None, 7, 7, 192) 215040 activation_53[0][0]
__________________________________________________________________________________________________
conv2d_59 (Conv2D) (None, 7, 7, 192) 215040 activation_58[0][0]
__________________________________________________________________________________________________
conv2d_60 (Conv2D) (None, 7, 7, 192) 147456 average_pooling2d_6[0][0]
__________________________________________________________________________________________________
batch_normalization_51 (BatchNo (None, 7, 7, 192) 576 conv2d_51[0][0]
__________________________________________________________________________________________________
batch_normalization_54 (BatchNo (None, 7, 7, 192) 576 conv2d_54[0][0]
__________________________________________________________________________________________________
batch_normalization_59 (BatchNo (None, 7, 7, 192) 576 conv2d_59[0][0]
__________________________________________________________________________________________________
batch_normalization_60 (BatchNo (None, 7, 7, 192) 576 conv2d_60[0][0]
__________________________________________________________________________________________________
activation_51 (Activation) (None, 7, 7, 192) 0 batch_normalization_51[0][0]
__________________________________________________________________________________________________
activation_54 (Activation) (None, 7, 7, 192) 0 batch_normalization_54[0][0]
__________________________________________________________________________________________________
activation_59 (Activation) (None, 7, 7, 192) 0 batch_normalization_59[0][0]
__________________________________________________________________________________________________
activation_60 (Activation) (None, 7, 7, 192) 0 batch_normalization_60[0][0]
__________________________________________________________________________________________________
mixed6 (Concatenate) (None, 7, 7, 768) 0 activation_51[0][0]
activation_54[0][0]
activation_59[0][0]
activation_60[0][0]
__________________________________________________________________________________________________
conv2d_65 (Conv2D) (None, 7, 7, 192) 147456 mixed6[0][0]
__________________________________________________________________________________________________
batch_normalization_65 (BatchNo (None, 7, 7, 192) 576 conv2d_65[0][0]
__________________________________________________________________________________________________
activation_65 (Activation) (None, 7, 7, 192) 0 batch_normalization_65[0][0]
__________________________________________________________________________________________________
conv2d_66 (Conv2D) (None, 7, 7, 192) 258048 activation_65[0][0]
__________________________________________________________________________________________________
batch_normalization_66 (BatchNo (None, 7, 7, 192) 576 conv2d_66[0][0]
__________________________________________________________________________________________________
activation_66 (Activation) (None, 7, 7, 192) 0 batch_normalization_66[0][0]
__________________________________________________________________________________________________
conv2d_62 (Conv2D) (None, 7, 7, 192) 147456 mixed6[0][0]
__________________________________________________________________________________________________
conv2d_67 (Conv2D) (None, 7, 7, 192) 258048 activation_66[0][0]
__________________________________________________________________________________________________
batch_normalization_62 (BatchNo (None, 7, 7, 192) 576 conv2d_62[0][0]
__________________________________________________________________________________________________
batch_normalization_67 (BatchNo (None, 7, 7, 192) 576 conv2d_67[0][0]
__________________________________________________________________________________________________
activation_62 (Activation) (None, 7, 7, 192) 0 batch_normalization_62[0][0]
__________________________________________________________________________________________________
activation_67 (Activation) (None, 7, 7, 192) 0 batch_normalization_67[0][0]
__________________________________________________________________________________________________
conv2d_63 (Conv2D) (None, 7, 7, 192) 258048 activation_62[0][0]
__________________________________________________________________________________________________
conv2d_68 (Conv2D) (None, 7, 7, 192) 258048 activation_67[0][0]
__________________________________________________________________________________________________
batch_normalization_63 (BatchNo (None, 7, 7, 192) 576 conv2d_63[0][0]
__________________________________________________________________________________________________
batch_normalization_68 (BatchNo (None, 7, 7, 192) 576 conv2d_68[0][0]
__________________________________________________________________________________________________
activation_63 (Activation) (None, 7, 7, 192) 0 batch_normalization_63[0][0]
__________________________________________________________________________________________________
activation_68 (Activation) (None, 7, 7, 192) 0 batch_normalization_68[0][0]
__________________________________________________________________________________________________
average_pooling2d_7 (AveragePoo (None, 7, 7, 768) 0 mixed6[0][0]
__________________________________________________________________________________________________
conv2d_61 (Conv2D) (None, 7, 7, 192) 147456 mixed6[0][0]
__________________________________________________________________________________________________
conv2d_64 (Conv2D) (None, 7, 7, 192) 258048 activation_63[0][0]
__________________________________________________________________________________________________
conv2d_69 (Conv2D) (None, 7, 7, 192) 258048 activation_68[0][0]
__________________________________________________________________________________________________
conv2d_70 (Conv2D) (None, 7, 7, 192) 147456 average_pooling2d_7[0][0]
__________________________________________________________________________________________________
batch_normalization_61 (BatchNo (None, 7, 7, 192) 576 conv2d_61[0][0]
__________________________________________________________________________________________________
batch_normalization_64 (BatchNo (None, 7, 7, 192) 576 conv2d_64[0][0]
__________________________________________________________________________________________________
batch_normalization_69 (BatchNo (None, 7, 7, 192) 576 conv2d_69[0][0]
__________________________________________________________________________________________________
batch_normalization_70 (BatchNo (None, 7, 7, 192) 576 conv2d_70[0][0]
__________________________________________________________________________________________________
activation_61 (Activation) (None, 7, 7, 192) 0 batch_normalization_61[0][0]
__________________________________________________________________________________________________
activation_64 (Activation) (None, 7, 7, 192) 0 batch_normalization_64[0][0]
__________________________________________________________________________________________________
activation_69 (Activation) (None, 7, 7, 192) 0 batch_normalization_69[0][0]
__________________________________________________________________________________________________
activation_70 (Activation) (None, 7, 7, 192) 0 batch_normalization_70[0][0]
__________________________________________________________________________________________________
mixed7 (Concatenate) (None, 7, 7, 768) 0 activation_61[0][0]
activation_64[0][0]
activation_69[0][0]
activation_70[0][0]
__________________________________________________________________________________________________
conv2d_73 (Conv2D) (None, 7, 7, 192) 147456 mixed7[0][0]
__________________________________________________________________________________________________
batch_normalization_73 (BatchNo (None, 7, 7, 192) 576 conv2d_73[0][0]
__________________________________________________________________________________________________
activation_73 (Activation) (None, 7, 7, 192) 0 batch_normalization_73[0][0]
__________________________________________________________________________________________________
conv2d_74 (Conv2D) (None, 7, 7, 192) 258048 activation_73[0][0]
__________________________________________________________________________________________________
batch_normalization_74 (BatchNo (None, 7, 7, 192) 576 conv2d_74[0][0]
__________________________________________________________________________________________________
activation_74 (Activation) (None, 7, 7, 192) 0 batch_normalization_74[0][0]
__________________________________________________________________________________________________
conv2d_71 (Conv2D) (None, 7, 7, 192) 147456 mixed7[0][0]
__________________________________________________________________________________________________
conv2d_75 (Conv2D) (None, 7, 7, 192) 258048 activation_74[0][0]
__________________________________________________________________________________________________
batch_normalization_71 (BatchNo (None, 7, 7, 192) 576 conv2d_71[0][0]
__________________________________________________________________________________________________
batch_normalization_75 (BatchNo (None, 7, 7, 192) 576 conv2d_75[0][0]
__________________________________________________________________________________________________
activation_71 (Activation) (None, 7, 7, 192) 0 batch_normalization_71[0][0]
__________________________________________________________________________________________________
activation_75 (Activation) (None, 7, 7, 192) 0 batch_normalization_75[0][0]
__________________________________________________________________________________________________
conv2d_72 (Conv2D) (None, 3, 3, 320) 552960 activation_71[0][0]
__________________________________________________________________________________________________
conv2d_76 (Conv2D) (None, 3, 3, 192) 331776 activation_75[0][0]
__________________________________________________________________________________________________
batch_normalization_72 (BatchNo (None, 3, 3, 320) 960 conv2d_72[0][0]
__________________________________________________________________________________________________
batch_normalization_76 (BatchNo (None, 3, 3, 192) 576 conv2d_76[0][0]
__________________________________________________________________________________________________
activation_72 (Activation) (None, 3, 3, 320) 0 batch_normalization_72[0][0]
__________________________________________________________________________________________________
activation_76 (Activation) (None, 3, 3, 192) 0 batch_normalization_76[0][0]
__________________________________________________________________________________________________
max_pooling2d_4 (MaxPooling2D) (None, 3, 3, 768) 0 mixed7[0][0]
__________________________________________________________________________________________________
mixed8 (Concatenate) (None, 3, 3, 1280) 0 activation_72[0][0]
activation_76[0][0]
max_pooling2d_4[0][0]
__________________________________________________________________________________________________
conv2d_81 (Conv2D) (None, 3, 3, 448) 573440 mixed8[0][0]
__________________________________________________________________________________________________
batch_normalization_81 (BatchNo (None, 3, 3, 448) 1344 conv2d_81[0][0]
__________________________________________________________________________________________________
activation_81 (Activation) (None, 3, 3, 448) 0 batch_normalization_81[0][0]
__________________________________________________________________________________________________
conv2d_78 (Conv2D) (None, 3, 3, 384) 491520 mixed8[0][0]
__________________________________________________________________________________________________
conv2d_82 (Conv2D) (None, 3, 3, 384) 1548288 activation_81[0][0]
__________________________________________________________________________________________________
batch_normalization_78 (BatchNo (None, 3, 3, 384) 1152 conv2d_78[0][0]
__________________________________________________________________________________________________
batch_normalization_82 (BatchNo (None, 3, 3, 384) 1152 conv2d_82[0][0]
__________________________________________________________________________________________________
activation_78 (Activation) (None, 3, 3, 384) 0 batch_normalization_78[0][0]
__________________________________________________________________________________________________
activation_82 (Activation) (None, 3, 3, 384) 0 batch_normalization_82[0][0]
__________________________________________________________________________________________________
conv2d_79 (Conv2D) (None, 3, 3, 384) 442368 activation_78[0][0]
__________________________________________________________________________________________________
conv2d_80 (Conv2D) (None, 3, 3, 384) 442368 activation_78[0][0]
__________________________________________________________________________________________________
conv2d_83 (Conv2D) (None, 3, 3, 384) 442368 activation_82[0][0]
__________________________________________________________________________________________________
conv2d_84 (Conv2D) (None, 3, 3, 384) 442368 activation_82[0][0]
__________________________________________________________________________________________________
average_pooling2d_8 (AveragePoo (None, 3, 3, 1280) 0 mixed8[0][0]
__________________________________________________________________________________________________
conv2d_77 (Conv2D) (None, 3, 3, 320) 409600 mixed8[0][0]
__________________________________________________________________________________________________
batch_normalization_79 (BatchNo (None, 3, 3, 384) 1152 conv2d_79[0][0]
__________________________________________________________________________________________________
batch_normalization_80 (BatchNo (None, 3, 3, 384) 1152 conv2d_80[0][0]
__________________________________________________________________________________________________
batch_normalization_83 (BatchNo (None, 3, 3, 384) 1152 conv2d_83[0][0]
__________________________________________________________________________________________________
batch_normalization_84 (BatchNo (None, 3, 3, 384) 1152 conv2d_84[0][0]
__________________________________________________________________________________________________
conv2d_85 (Conv2D) (None, 3, 3, 192) 245760 average_pooling2d_8[0][0]
__________________________________________________________________________________________________
batch_normalization_77 (BatchNo (None, 3, 3, 320) 960 conv2d_77[0][0]
__________________________________________________________________________________________________
activation_79 (Activation) (None, 3, 3, 384) 0 batch_normalization_79[0][0]
__________________________________________________________________________________________________
activation_80 (Activation) (None, 3, 3, 384) 0 batch_normalization_80[0][0]
__________________________________________________________________________________________________
activation_83 (Activation) (None, 3, 3, 384) 0 batch_normalization_83[0][0]
__________________________________________________________________________________________________
activation_84 (Activation) (None, 3, 3, 384) 0 batch_normalization_84[0][0]
__________________________________________________________________________________________________
batch_normalization_85 (BatchNo (None, 3, 3, 192) 576 conv2d_85[0][0]
__________________________________________________________________________________________________
activation_77 (Activation) (None, 3, 3, 320) 0 batch_normalization_77[0][0]
__________________________________________________________________________________________________
mixed9_0 (Concatenate) (None, 3, 3, 768) 0 activation_79[0][0]
activation_80[0][0]
__________________________________________________________________________________________________
concatenate_1 (Concatenate) (None, 3, 3, 768) 0 activation_83[0][0]
activation_84[0][0]
__________________________________________________________________________________________________
activation_85 (Activation) (None, 3, 3, 192) 0 batch_normalization_85[0][0]
__________________________________________________________________________________________________
mixed9 (Concatenate) (None, 3, 3, 2048) 0 activation_77[0][0]
mixed9_0[0][0]
concatenate_1[0][0]
activation_85[0][0]
__________________________________________________________________________________________________
conv2d_90 (Conv2D) (None, 3, 3, 448) 917504 mixed9[0][0]
__________________________________________________________________________________________________
batch_normalization_90 (BatchNo (None, 3, 3, 448) 1344 conv2d_90[0][0]
__________________________________________________________________________________________________
activation_90 (Activation) (None, 3, 3, 448) 0 batch_normalization_90[0][0]
__________________________________________________________________________________________________
conv2d_87 (Conv2D) (None, 3, 3, 384) 786432 mixed9[0][0]
__________________________________________________________________________________________________
conv2d_91 (Conv2D) (None, 3, 3, 384) 1548288 activation_90[0][0]
__________________________________________________________________________________________________
batch_normalization_87 (BatchNo (None, 3, 3, 384) 1152 conv2d_87[0][0]
__________________________________________________________________________________________________
batch_normalization_91 (BatchNo (None, 3, 3, 384) 1152 conv2d_91[0][0]
__________________________________________________________________________________________________
activation_87 (Activation) (None, 3, 3, 384) 0 batch_normalization_87[0][0]
__________________________________________________________________________________________________
activation_91 (Activation) (None, 3, 3, 384) 0 batch_normalization_91[0][0]
__________________________________________________________________________________________________
conv2d_88 (Conv2D) (None, 3, 3, 384) 442368 activation_87[0][0]
__________________________________________________________________________________________________
conv2d_89 (Conv2D) (None, 3, 3, 384) 442368 activation_87[0][0]
__________________________________________________________________________________________________
conv2d_92 (Conv2D) (None, 3, 3, 384) 442368 activation_91[0][0]
__________________________________________________________________________________________________
conv2d_93 (Conv2D) (None, 3, 3, 384) 442368 activation_91[0][0]
__________________________________________________________________________________________________
average_pooling2d_9 (AveragePoo (None, 3, 3, 2048) 0 mixed9[0][0]
__________________________________________________________________________________________________
conv2d_86 (Conv2D) (None, 3, 3, 320) 655360 mixed9[0][0]
__________________________________________________________________________________________________
batch_normalization_88 (BatchNo (None, 3, 3, 384) 1152 conv2d_88[0][0]
__________________________________________________________________________________________________
batch_normalization_89 (BatchNo (None, 3, 3, 384) 1152 conv2d_89[0][0]
__________________________________________________________________________________________________
batch_normalization_92 (BatchNo (None, 3, 3, 384) 1152 conv2d_92[0][0]
__________________________________________________________________________________________________
batch_normalization_93 (BatchNo (None, 3, 3, 384) 1152 conv2d_93[0][0]
__________________________________________________________________________________________________
conv2d_94 (Conv2D) (None, 3, 3, 192) 393216 average_pooling2d_9[0][0]
__________________________________________________________________________________________________
batch_normalization_86 (BatchNo (None, 3, 3, 320) 960 conv2d_86[0][0]
__________________________________________________________________________________________________
activation_88 (Activation) (None, 3, 3, 384) 0 batch_normalization_88[0][0]
__________________________________________________________________________________________________
activation_89 (Activation) (None, 3, 3, 384) 0 batch_normalization_89[0][0]
__________________________________________________________________________________________________
activation_92 (Activation) (None, 3, 3, 384) 0 batch_normalization_92[0][0]
__________________________________________________________________________________________________
activation_93 (Activation) (None, 3, 3, 384) 0 batch_normalization_93[0][0]
__________________________________________________________________________________________________
batch_normalization_94 (BatchNo (None, 3, 3, 192) 576 conv2d_94[0][0]
__________________________________________________________________________________________________
activation_86 (Activation) (None, 3, 3, 320) 0 batch_normalization_86[0][0]
__________________________________________________________________________________________________
mixed9_1 (Concatenate) (None, 3, 3, 768) 0 activation_88[0][0]
activation_89[0][0]
__________________________________________________________________________________________________
concatenate_2 (Concatenate) (None, 3, 3, 768) 0 activation_92[0][0]
activation_93[0][0]
__________________________________________________________________________________________________
activation_94 (Activation) (None, 3, 3, 192) 0 batch_normalization_94[0][0]
__________________________________________________________________________________________________
mixed10 (Concatenate) (None, 3, 3, 2048) 0 activation_86[0][0]
mixed9_1[0][0]
concatenate_2[0][0]
activation_94[0][0]
==================================================================================================
Total params: 21,802,784
Trainable params: 0
Non-trainable params: 21,802,784
__________________________________________________________________________________________________
###Markdown
In a normal Inception network, you would see from the model summary that the last two layers were a global average pooling layer and a fully-connected "Dense" layer. However, since we set `include_top` to `False`, both of these get dropped. If you otherwise wanted to drop additional layers, you would use:```inception.layers.pop()```Note that `pop()` works from the end of the model backwards. There are two important things to keep in mind here:1. How many layers you drop is up to you, typically. We dropped the final two already by setting `include_top` to `False` when loading the model, but you could instead have run `pop()` twice to achieve a similar result. (*Note:* Keras requires us to set `include_top` to `False` in order to change the `input_shape`.) Additional layers can be dropped with additional calls to `pop()`.2. If you make a mistake with `pop()`, you'll want to reload the model. Each call drops another layer, so if you run it multiple times you may need to check `model.summary()` again to verify your work. (A short, illustration-only sketch of this appears just below.) Adding new layers Now, you can start to add your own layers. While we've used Keras's `Sequential` model before for simplicity, we'll actually use the [Model API](https://keras.io/models/model/) this time. This functions a little differently, in that instead of using `model.add()`, you explicitly tell the model which previous layer to attach the current layer to. This is useful if you want to use more advanced concepts like [skip layers](https://en.wikipedia.org/wiki/Residual_neural_network), for instance (which were used heavily in ResNet).For example, if you had a previous layer named `inp`:```x = Dropout(0.2)(inp)```is how you would attach a new dropout layer `x`, with its input coming from the layer with the variable name `inp`.We are going to use the [CIFAR-10 dataset](https://www.cs.toronto.edu/~kriz/cifar.html), which consists of 60,000 32x32 images across 10 classes. We'll use Keras's `Input` function to create a placeholder for these images, and then re-size them up to the `input_size` we specified earlier (139x139).
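Before we start building, the next cell gives the short, illustration-only sketch of the `pop()` behavior described above. It works on a scratch copy so the `inception` model we loaded earlier stays untouched - feel free to skip running it.
###Code
# Illustration only: dropping layers with pop(), then recovering by reloading.
# We use a scratch copy so the `inception` model above is left untouched.
scratch = InceptionV3(weights=weights_flag, include_top=False,
                      input_shape=(input_size, input_size, 3))
scratch.layers.pop()              # pop() removes layers from the END of the list
print(scratch.layers[-1].name)    # check what the new final layer is
# Popped too much, or made a mistake? Just reload the model:
scratch = InceptionV3(weights=weights_flag, include_top=False,
                      input_shape=(input_size, input_size, 3))
del scratch                       # we won't use the scratch copy again
###Output
_____no_output_____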
###Code
from keras.layers import Input, Lambda
import tensorflow as tf
# Makes the input placeholder layer 32x32x3 for CIFAR-10
cifar_input = Input(shape=(32,32,3))
# Re-sizes the input with Keras's Lambda layer & attaches it to cifar_input
resized_input = Lambda(lambda image: tf.image.resize_images(
image, (input_size, input_size)))(cifar_input)
# Feeds the re-sized input into Inception model
# You will need to update the model name if you changed it earlier!
inp = inception(resized_input)
# Imports fully-connected "Dense" layers & Global Average Pooling
from keras.layers import Dense, GlobalAveragePooling2D
## TODO: Setting `include_top` to False earlier also removed the
## GlobalAveragePooling2D layer, but we still want it.
## Add it here, and make sure to connect it to the end of Inception
out = GlobalAveragePooling2D()(inp)
## TODO: Create two new fully-connected layers using the Model API
## format discussed above. The first layer should use `out`
## as its input, along with ReLU activation. You can choose
## how many nodes it has, although 512 or less is a good idea.
## The second layer should take this first layer as input, and
## be named "predictions", with Softmax activation and
## 10 nodes, as we'll be using the CIFAR10 dataset.
x = Dense(512, activation='relu')(out)
predictions = Dense(10, activation='softmax')(x)
###Output
_____no_output_____
###Markdown
We're almost done with our new model! Now we just need to use the actual Model API to create the full model.
###Code
# Imports the Model API
from keras.models import Model
# Creates the model, assuming your final layer is named "predictions"
model = Model(inputs=cifar_input, outputs=predictions)
# Compile the model
model.compile(optimizer='Adam', loss='categorical_crossentropy', metrics=['accuracy'])
# Check the summary of this new model to confirm the architecture
model.summary()
###Output
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_2 (InputLayer) (None, 32, 32, 3) 0
_________________________________________________________________
lambda_1 (Lambda) (None, 139, 139, 3) 0
_________________________________________________________________
inception_v3 (Model) (None, 3, 3, 2048) 21802784
_________________________________________________________________
global_average_pooling2d_1 ( (None, 2048) 0
_________________________________________________________________
dense_1 (Dense) (None, 512) 1049088
_________________________________________________________________
dense_2 (Dense) (None, 10) 5130
=================================================================
Total params: 22,857,002
Trainable params: 1,054,218
Non-trainable params: 21,802,784
_________________________________________________________________
###Markdown
Great job creating a new model architecture from Inception! Notice how this method of adding layers before InceptionV3 and appending to the end of it made InceptionV3 condense down into one line in the summary; if you use the Inception model's normal input (which you could gather from `inception.layers.input`), it would instead show all the layers like before. Note also that all 1,054,218 trainable parameters come from our two new Dense layers (2048×512 + 512 = 1,049,088 and 512×10 + 10 = 5,130); everything inside Inception stays frozen.

Most of the rest of the code in the notebook just goes toward loading our data, pre-processing it, and starting our training in Keras, although there's one other good point to make here: Keras callbacks.

Keras Callbacks

Keras [callbacks](https://keras.io/callbacks/) allow you to gather and store additional information during training, such as the best model so far, or even to stop training early if the validation accuracy has stopped improving. These methods can help to avoid overfitting, or avoid other issues.

There are two key callbacks to mention here, `ModelCheckpoint` and `EarlyStopping`. As the names suggest, model checkpoint saves the best model so far based on a given metric, while early stopping ends training before the specified number of epochs if the chosen metric stops improving for a given number of epochs.

To set these callbacks, you could do the following:

```
checkpoint = ModelCheckpoint(filepath=save_path, monitor='val_loss', save_best_only=True)
```

This would save a model to a specified `save_path` based on validation loss, keeping only the best model so far. If you set `save_best_only` to `False`, every single epoch will save another version of the model.

```
stopper = EarlyStopping(monitor='val_acc', min_delta=0.0003, patience=5)
```

This will monitor validation accuracy, and if it has not improved by more than 0.0003 over the previous best validation accuracy for 5 epochs, training will end early.

You still need to actually feed these callbacks into `fit()` when you train the model, along with all other relevant data to feed into `fit` (a fuller sketch follows at the end of this cell):

```
model.fit(callbacks=[checkpoint, stopper])
```

GPU time

The rest of the notebook will give you the code for training, so you can turn on the GPU at this point - but first, **make sure to save your jupyter notebook**. Once the GPU is turned on, it will load whatever your last notebook checkpoint is. While we suggest reading through the code below to make sure you understand it, you can otherwise go ahead and select *Cell > Run All* (or *Kernel > Restart & Run All* if already using GPU) to run through all cells in the notebook.
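Before running everything, here is a minimal sketch of wiring both callbacks into training. The `save_path` value and the `fit()` arguments are placeholders you would swap for your own; the `fit()` call itself is left commented out since it needs a compiled model and data:

```
from keras.callbacks import ModelCheckpoint, EarlyStopping

save_path = 'best_model.h5'  # placeholder path for the checkpointed model
checkpoint = ModelCheckpoint(filepath=save_path, monitor='val_loss', save_best_only=True)
stopper = EarlyStopping(monitor='val_acc', min_delta=0.0003, patience=5)

# model.fit(X_train, y_train, validation_data=(X_val, y_val),
#           epochs=20, callbacks=[checkpoint, stopper])
```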
###Code
from sklearn.utils import shuffle
from sklearn.preprocessing import LabelBinarizer
from keras.datasets import cifar10
(X_train, y_train), (X_val, y_val) = cifar10.load_data()
# One-hot encode the labels
label_binarizer = LabelBinarizer()
y_one_hot_train = label_binarizer.fit_transform(y_train)
# Use transform (not fit_transform) here so the validation labels
# re-use the classes fitted on the training labels
y_one_hot_val = label_binarizer.transform(y_val)
# Shuffle the training & validation data
X_train, y_one_hot_train = shuffle(X_train, y_one_hot_train)
X_val, y_one_hot_val = shuffle(X_val, y_one_hot_val)
# We are only going to use the first 10,000 training images for speed reasons
# And only the first 2,000 images from the validation set
X_train = X_train[:10000]
y_one_hot_train = y_one_hot_train[:10000]
X_val = X_val[:2000]
y_one_hot_val = y_one_hot_val[:2000]
###Output
_____no_output_____
###Markdown
You can check out Keras's [ImageDataGenerator documentation](https://faroit.github.io/keras-docs/2.0.9/preprocessing/image/) for more information on the cell below. You can also add additional image augmentation through this function; we skip that step here so you can potentially explore it in the upcoming project, but a brief sketch of what it could look like follows at the end of this cell.
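For reference, here is that sketch of adding augmentation through the same generator; the specific ranges are illustrative choices, not values from this lab:

```
from keras.preprocessing.image import ImageDataGenerator
from keras.applications.inception_v3 import preprocess_input

# Same ImageNet pre-processing as in the cell below, plus a few simple augmentations
aug_datagen = ImageDataGenerator(
    preprocessing_function=preprocess_input,
    rotation_range=10,       # random rotations of up to 10 degrees
    width_shift_range=0.1,   # horizontal shifts of up to 10% of the image width
    height_shift_range=0.1,  # vertical shifts of up to 10% of the image height
    horizontal_flip=True)    # random left-right flips
```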
###Code
# Use a generator to pre-process our images for ImageNet
from keras.preprocessing.image import ImageDataGenerator
from keras.applications.inception_v3 import preprocess_input
if preprocess_flag:
    datagen = ImageDataGenerator(preprocessing_function=preprocess_input)
    val_datagen = ImageDataGenerator(preprocessing_function=preprocess_input)
else:
    datagen = ImageDataGenerator()
    val_datagen = ImageDataGenerator()
# Train the model
batch_size = 32
epochs = 5
# Note: we aren't using callbacks here since we're only using 5 epochs, to conserve GPU time
model.fit_generator(datagen.flow(X_train, y_one_hot_train, batch_size=batch_size),
steps_per_epoch=len(X_train)/batch_size, epochs=epochs, verbose=1,
validation_data=val_datagen.flow(X_val, y_one_hot_val, batch_size=batch_size),
validation_steps=len(X_val)/batch_size)
###Output
Epoch 1/5
313/312 [==============================] - 52s 166ms/step - loss: 2.2473 - acc: 0.1694 - val_loss: 2.4345 - val_acc: 0.1305
Epoch 2/5
313/312 [==============================] - 48s 154ms/step - loss: 2.0575 - acc: 0.2298 - val_loss: 1.9968 - val_acc: 0.2485
Epoch 3/5
313/312 [==============================] - 48s 154ms/step - loss: 2.0088 - acc: 0.2480 - val_loss: 1.9359 - val_acc: 0.2715
Epoch 4/5
313/312 [==============================] - 48s 154ms/step - loss: 1.9933 - acc: 0.2555 - val_loss: 1.9452 - val_acc: 0.2790
Epoch 5/5
313/312 [==============================] - 48s 154ms/step - loss: 1.9740 - acc: 0.2688 - val_loss: 1.8939 - val_acc: 0.2880
###Markdown
Lab: Transfer Learning

Welcome to the lab on Transfer Learning! Here, you'll get a chance to try out training a network with ImageNet pre-trained weights as a base, but with additional network layers of your own added on. You'll also get to see the difference between using frozen weights and training on all layers.

GPU usage

In our previous examples in this lesson, we've avoided using GPU, but this time around you'll have the option to enable it. You do not need it on to begin with, but make sure anytime you switch from non-GPU to GPU, or vice versa, that you save your notebook! If not, you'll likely be reverted to the previous checkpoint. We also suggest only using the GPU when performing the (mostly minor) training below - you'll want to conserve GPU hours for your Behavioral Cloning project coming up next!
###Code
# Set a couple flags for training - you can ignore these for now
freeze_flag = True # `True` to freeze layers, `False` for full training
weights_flag = 'imagenet' # 'imagenet' or None
preprocess_flag = True # Should be true for ImageNet pre-trained typically
# Loads in InceptionV3
from keras.applications.inception_v3 import InceptionV3
# We can use smaller than the default 299x299x3 input for InceptionV3
# which will speed up training. Keras v2.0.9 supports down to 139x139x3
input_size = 139
# Using Inception with ImageNet pre-trained weights
inception = InceptionV3(weights=weights_flag, include_top=False,
input_shape=(input_size,input_size,3))
###Output
Using TensorFlow backend.
###Markdown
We'll use Inception V3 for this lab, although you can use the same techniques with any of the models in [Keras Applications](https://keras.io/applications/). Do note that certain models are only available in certain versions of Keras; this workspace uses Keras v2.0.9, for which you can see the available models [here](https://faroit.github.io/keras-docs/2.0.9/applications/).

In the above, we've set Inception to use an `input_shape` of 139x139x3 instead of the default 299x299x3. This will help us to speed up our training a bit later (and we'll actually be upsampling from smaller images, so we aren't losing data here). In order to do so, we also must set `include_top` to `False`, which means the final fully-connected layer with 1,000 nodes (one per ImageNet class) is dropped, as well as a Global Average Pooling layer.

Pre-trained with frozen weights

To start, we'll see how an ImageNet pre-trained InceptionV3 model performs with all of its weights frozen. We will also drop the end layer and append new layers onto it, although you could do this in different ways (not drop the end and add new layers, drop more layers than we will here, etc.).

You can freeze layers by setting `layer.trainable` to `False` for a given `layer`. Within a `model`, you can get the list of layers with `model.layers`.
###Code
if freeze_flag:
    ## TODO: Iterate through the layers of the Inception model
    ##       loaded above and set all of them to have trainable = False
    for layer in inception.layers:
        layer.trainable = False
    print('All {} layer weights frozen'.format(len(inception.layers)))
###Output
All 311 layer weights frozen
###Markdown
Dropping layers

You can drop layers from a model with `model.layers.pop()`. Before you do this, you should check out what the actual layers of the model are with Keras's `.summary()` function.
###Code
## TODO: Use the model summary function to see all layers in the
## loaded Inception model
inception.summary()  # summary() prints the table itself and returns None, so no print() wrapper is needed
###Output
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_1 (InputLayer) (None, 139, 139, 3) 0
__________________________________________________________________________________________________
conv2d_1 (Conv2D) (None, 69, 69, 32) 864 input_1[0][0]
__________________________________________________________________________________________________
batch_normalization_1 (BatchNor (None, 69, 69, 32) 96 conv2d_1[0][0]
__________________________________________________________________________________________________
activation_1 (Activation) (None, 69, 69, 32) 0 batch_normalization_1[0][0]
__________________________________________________________________________________________________
conv2d_2 (Conv2D) (None, 67, 67, 32) 9216 activation_1[0][0]
__________________________________________________________________________________________________
batch_normalization_2 (BatchNor (None, 67, 67, 32) 96 conv2d_2[0][0]
__________________________________________________________________________________________________
activation_2 (Activation) (None, 67, 67, 32) 0 batch_normalization_2[0][0]
__________________________________________________________________________________________________
conv2d_3 (Conv2D) (None, 67, 67, 64) 18432 activation_2[0][0]
__________________________________________________________________________________________________
batch_normalization_3 (BatchNor (None, 67, 67, 64) 192 conv2d_3[0][0]
__________________________________________________________________________________________________
activation_3 (Activation) (None, 67, 67, 64) 0 batch_normalization_3[0][0]
__________________________________________________________________________________________________
max_pooling2d_1 (MaxPooling2D) (None, 33, 33, 64) 0 activation_3[0][0]
__________________________________________________________________________________________________
conv2d_4 (Conv2D) (None, 33, 33, 80) 5120 max_pooling2d_1[0][0]
__________________________________________________________________________________________________
batch_normalization_4 (BatchNor (None, 33, 33, 80) 240 conv2d_4[0][0]
__________________________________________________________________________________________________
activation_4 (Activation) (None, 33, 33, 80) 0 batch_normalization_4[0][0]
__________________________________________________________________________________________________
conv2d_5 (Conv2D) (None, 31, 31, 192) 138240 activation_4[0][0]
__________________________________________________________________________________________________
batch_normalization_5 (BatchNor (None, 31, 31, 192) 576 conv2d_5[0][0]
__________________________________________________________________________________________________
activation_5 (Activation) (None, 31, 31, 192) 0 batch_normalization_5[0][0]
__________________________________________________________________________________________________
max_pooling2d_2 (MaxPooling2D) (None, 15, 15, 192) 0 activation_5[0][0]
__________________________________________________________________________________________________
conv2d_9 (Conv2D) (None, 15, 15, 64) 12288 max_pooling2d_2[0][0]
__________________________________________________________________________________________________
batch_normalization_9 (BatchNor (None, 15, 15, 64) 192 conv2d_9[0][0]
__________________________________________________________________________________________________
activation_9 (Activation) (None, 15, 15, 64) 0 batch_normalization_9[0][0]
__________________________________________________________________________________________________
conv2d_7 (Conv2D) (None, 15, 15, 48) 9216 max_pooling2d_2[0][0]
__________________________________________________________________________________________________
conv2d_10 (Conv2D) (None, 15, 15, 96) 55296 activation_9[0][0]
__________________________________________________________________________________________________
batch_normalization_7 (BatchNor (None, 15, 15, 48) 144 conv2d_7[0][0]
__________________________________________________________________________________________________
batch_normalization_10 (BatchNo (None, 15, 15, 96) 288 conv2d_10[0][0]
__________________________________________________________________________________________________
activation_7 (Activation) (None, 15, 15, 48) 0 batch_normalization_7[0][0]
__________________________________________________________________________________________________
activation_10 (Activation) (None, 15, 15, 96) 0 batch_normalization_10[0][0]
__________________________________________________________________________________________________
average_pooling2d_1 (AveragePoo (None, 15, 15, 192) 0 max_pooling2d_2[0][0]
__________________________________________________________________________________________________
conv2d_6 (Conv2D) (None, 15, 15, 64) 12288 max_pooling2d_2[0][0]
__________________________________________________________________________________________________
conv2d_8 (Conv2D) (None, 15, 15, 64) 76800 activation_7[0][0]
__________________________________________________________________________________________________
conv2d_11 (Conv2D) (None, 15, 15, 96) 82944 activation_10[0][0]
__________________________________________________________________________________________________
conv2d_12 (Conv2D) (None, 15, 15, 32) 6144 average_pooling2d_1[0][0]
__________________________________________________________________________________________________
batch_normalization_6 (BatchNor (None, 15, 15, 64) 192 conv2d_6[0][0]
__________________________________________________________________________________________________
batch_normalization_8 (BatchNor (None, 15, 15, 64) 192 conv2d_8[0][0]
__________________________________________________________________________________________________
batch_normalization_11 (BatchNo (None, 15, 15, 96) 288 conv2d_11[0][0]
__________________________________________________________________________________________________
batch_normalization_12 (BatchNo (None, 15, 15, 32) 96 conv2d_12[0][0]
__________________________________________________________________________________________________
activation_6 (Activation) (None, 15, 15, 64) 0 batch_normalization_6[0][0]
__________________________________________________________________________________________________
activation_8 (Activation) (None, 15, 15, 64) 0 batch_normalization_8[0][0]
__________________________________________________________________________________________________
activation_11 (Activation) (None, 15, 15, 96) 0 batch_normalization_11[0][0]
__________________________________________________________________________________________________
activation_12 (Activation) (None, 15, 15, 32) 0 batch_normalization_12[0][0]
__________________________________________________________________________________________________
mixed0 (Concatenate) (None, 15, 15, 256) 0 activation_6[0][0]
activation_8[0][0]
activation_11[0][0]
activation_12[0][0]
__________________________________________________________________________________________________
conv2d_16 (Conv2D) (None, 15, 15, 64) 16384 mixed0[0][0]
__________________________________________________________________________________________________
batch_normalization_16 (BatchNo (None, 15, 15, 64) 192 conv2d_16[0][0]
__________________________________________________________________________________________________
activation_16 (Activation) (None, 15, 15, 64) 0 batch_normalization_16[0][0]
__________________________________________________________________________________________________
conv2d_14 (Conv2D) (None, 15, 15, 48) 12288 mixed0[0][0]
__________________________________________________________________________________________________
conv2d_17 (Conv2D) (None, 15, 15, 96) 55296 activation_16[0][0]
__________________________________________________________________________________________________
batch_normalization_14 (BatchNo (None, 15, 15, 48) 144 conv2d_14[0][0]
__________________________________________________________________________________________________
batch_normalization_17 (BatchNo (None, 15, 15, 96) 288 conv2d_17[0][0]
__________________________________________________________________________________________________
activation_14 (Activation) (None, 15, 15, 48) 0 batch_normalization_14[0][0]
__________________________________________________________________________________________________
activation_17 (Activation) (None, 15, 15, 96) 0 batch_normalization_17[0][0]
__________________________________________________________________________________________________
average_pooling2d_2 (AveragePoo (None, 15, 15, 256) 0 mixed0[0][0]
__________________________________________________________________________________________________
conv2d_13 (Conv2D) (None, 15, 15, 64) 16384 mixed0[0][0]
__________________________________________________________________________________________________
conv2d_15 (Conv2D) (None, 15, 15, 64) 76800 activation_14[0][0]
__________________________________________________________________________________________________
conv2d_18 (Conv2D) (None, 15, 15, 96) 82944 activation_17[0][0]
__________________________________________________________________________________________________
conv2d_19 (Conv2D) (None, 15, 15, 64) 16384 average_pooling2d_2[0][0]
__________________________________________________________________________________________________
batch_normalization_13 (BatchNo (None, 15, 15, 64) 192 conv2d_13[0][0]
__________________________________________________________________________________________________
batch_normalization_15 (BatchNo (None, 15, 15, 64) 192 conv2d_15[0][0]
__________________________________________________________________________________________________
batch_normalization_18 (BatchNo (None, 15, 15, 96) 288 conv2d_18[0][0]
__________________________________________________________________________________________________
batch_normalization_19 (BatchNo (None, 15, 15, 64) 192 conv2d_19[0][0]
__________________________________________________________________________________________________
activation_13 (Activation) (None, 15, 15, 64) 0 batch_normalization_13[0][0]
__________________________________________________________________________________________________
activation_15 (Activation) (None, 15, 15, 64) 0 batch_normalization_15[0][0]
__________________________________________________________________________________________________
activation_18 (Activation) (None, 15, 15, 96) 0 batch_normalization_18[0][0]
__________________________________________________________________________________________________
activation_19 (Activation) (None, 15, 15, 64) 0 batch_normalization_19[0][0]
__________________________________________________________________________________________________
mixed1 (Concatenate) (None, 15, 15, 288) 0 activation_13[0][0]
activation_15[0][0]
activation_18[0][0]
activation_19[0][0]
__________________________________________________________________________________________________
conv2d_23 (Conv2D) (None, 15, 15, 64) 18432 mixed1[0][0]
__________________________________________________________________________________________________
batch_normalization_23 (BatchNo (None, 15, 15, 64) 192 conv2d_23[0][0]
__________________________________________________________________________________________________
activation_23 (Activation) (None, 15, 15, 64) 0 batch_normalization_23[0][0]
__________________________________________________________________________________________________
conv2d_21 (Conv2D) (None, 15, 15, 48) 13824 mixed1[0][0]
__________________________________________________________________________________________________
conv2d_24 (Conv2D) (None, 15, 15, 96) 55296 activation_23[0][0]
__________________________________________________________________________________________________
batch_normalization_21 (BatchNo (None, 15, 15, 48) 144 conv2d_21[0][0]
__________________________________________________________________________________________________
batch_normalization_24 (BatchNo (None, 15, 15, 96) 288 conv2d_24[0][0]
__________________________________________________________________________________________________
activation_21 (Activation) (None, 15, 15, 48) 0 batch_normalization_21[0][0]
__________________________________________________________________________________________________
activation_24 (Activation) (None, 15, 15, 96) 0 batch_normalization_24[0][0]
__________________________________________________________________________________________________
average_pooling2d_3 (AveragePoo (None, 15, 15, 288) 0 mixed1[0][0]
__________________________________________________________________________________________________
conv2d_20 (Conv2D) (None, 15, 15, 64) 18432 mixed1[0][0]
__________________________________________________________________________________________________
conv2d_22 (Conv2D) (None, 15, 15, 64) 76800 activation_21[0][0]
__________________________________________________________________________________________________
conv2d_25 (Conv2D) (None, 15, 15, 96) 82944 activation_24[0][0]
__________________________________________________________________________________________________
conv2d_26 (Conv2D) (None, 15, 15, 64) 18432 average_pooling2d_3[0][0]
__________________________________________________________________________________________________
batch_normalization_20 (BatchNo (None, 15, 15, 64) 192 conv2d_20[0][0]
__________________________________________________________________________________________________
batch_normalization_22 (BatchNo (None, 15, 15, 64) 192 conv2d_22[0][0]
__________________________________________________________________________________________________
batch_normalization_25 (BatchNo (None, 15, 15, 96) 288 conv2d_25[0][0]
__________________________________________________________________________________________________
batch_normalization_26 (BatchNo (None, 15, 15, 64) 192 conv2d_26[0][0]
__________________________________________________________________________________________________
activation_20 (Activation) (None, 15, 15, 64) 0 batch_normalization_20[0][0]
__________________________________________________________________________________________________
activation_22 (Activation) (None, 15, 15, 64) 0 batch_normalization_22[0][0]
__________________________________________________________________________________________________
activation_25 (Activation) (None, 15, 15, 96) 0 batch_normalization_25[0][0]
__________________________________________________________________________________________________
activation_26 (Activation) (None, 15, 15, 64) 0 batch_normalization_26[0][0]
__________________________________________________________________________________________________
mixed2 (Concatenate) (None, 15, 15, 288) 0 activation_20[0][0]
activation_22[0][0]
activation_25[0][0]
activation_26[0][0]
__________________________________________________________________________________________________
conv2d_28 (Conv2D) (None, 15, 15, 64) 18432 mixed2[0][0]
__________________________________________________________________________________________________
batch_normalization_28 (BatchNo (None, 15, 15, 64) 192 conv2d_28[0][0]
__________________________________________________________________________________________________
activation_28 (Activation) (None, 15, 15, 64) 0 batch_normalization_28[0][0]
__________________________________________________________________________________________________
conv2d_29 (Conv2D) (None, 15, 15, 96) 55296 activation_28[0][0]
__________________________________________________________________________________________________
batch_normalization_29 (BatchNo (None, 15, 15, 96) 288 conv2d_29[0][0]
__________________________________________________________________________________________________
activation_29 (Activation) (None, 15, 15, 96) 0 batch_normalization_29[0][0]
__________________________________________________________________________________________________
conv2d_27 (Conv2D) (None, 7, 7, 384) 995328 mixed2[0][0]
__________________________________________________________________________________________________
conv2d_30 (Conv2D) (None, 7, 7, 96) 82944 activation_29[0][0]
__________________________________________________________________________________________________
batch_normalization_27 (BatchNo (None, 7, 7, 384) 1152 conv2d_27[0][0]
__________________________________________________________________________________________________
batch_normalization_30 (BatchNo (None, 7, 7, 96) 288 conv2d_30[0][0]
__________________________________________________________________________________________________
activation_27 (Activation) (None, 7, 7, 384) 0 batch_normalization_27[0][0]
__________________________________________________________________________________________________
activation_30 (Activation) (None, 7, 7, 96) 0 batch_normalization_30[0][0]
__________________________________________________________________________________________________
max_pooling2d_3 (MaxPooling2D) (None, 7, 7, 288) 0 mixed2[0][0]
__________________________________________________________________________________________________
mixed3 (Concatenate) (None, 7, 7, 768) 0 activation_27[0][0]
activation_30[0][0]
max_pooling2d_3[0][0]
__________________________________________________________________________________________________
conv2d_35 (Conv2D) (None, 7, 7, 128) 98304 mixed3[0][0]
__________________________________________________________________________________________________
batch_normalization_35 (BatchNo (None, 7, 7, 128) 384 conv2d_35[0][0]
__________________________________________________________________________________________________
activation_35 (Activation) (None, 7, 7, 128) 0 batch_normalization_35[0][0]
__________________________________________________________________________________________________
conv2d_36 (Conv2D) (None, 7, 7, 128) 114688 activation_35[0][0]
__________________________________________________________________________________________________
batch_normalization_36 (BatchNo (None, 7, 7, 128) 384 conv2d_36[0][0]
__________________________________________________________________________________________________
activation_36 (Activation) (None, 7, 7, 128) 0 batch_normalization_36[0][0]
__________________________________________________________________________________________________
conv2d_32 (Conv2D) (None, 7, 7, 128) 98304 mixed3[0][0]
__________________________________________________________________________________________________
conv2d_37 (Conv2D) (None, 7, 7, 128) 114688 activation_36[0][0]
__________________________________________________________________________________________________
batch_normalization_32 (BatchNo (None, 7, 7, 128) 384 conv2d_32[0][0]
__________________________________________________________________________________________________
batch_normalization_37 (BatchNo (None, 7, 7, 128) 384 conv2d_37[0][0]
__________________________________________________________________________________________________
activation_32 (Activation) (None, 7, 7, 128) 0 batch_normalization_32[0][0]
__________________________________________________________________________________________________
activation_37 (Activation) (None, 7, 7, 128) 0 batch_normalization_37[0][0]
__________________________________________________________________________________________________
conv2d_33 (Conv2D) (None, 7, 7, 128) 114688 activation_32[0][0]
__________________________________________________________________________________________________
conv2d_38 (Conv2D) (None, 7, 7, 128) 114688 activation_37[0][0]
__________________________________________________________________________________________________
batch_normalization_33 (BatchNo (None, 7, 7, 128) 384 conv2d_33[0][0]
__________________________________________________________________________________________________
batch_normalization_38 (BatchNo (None, 7, 7, 128) 384 conv2d_38[0][0]
__________________________________________________________________________________________________
activation_33 (Activation) (None, 7, 7, 128) 0 batch_normalization_33[0][0]
__________________________________________________________________________________________________
activation_38 (Activation) (None, 7, 7, 128) 0 batch_normalization_38[0][0]
__________________________________________________________________________________________________
average_pooling2d_4 (AveragePoo (None, 7, 7, 768) 0 mixed3[0][0]
__________________________________________________________________________________________________
conv2d_31 (Conv2D) (None, 7, 7, 192) 147456 mixed3[0][0]
__________________________________________________________________________________________________
conv2d_34 (Conv2D) (None, 7, 7, 192) 172032 activation_33[0][0]
__________________________________________________________________________________________________
conv2d_39 (Conv2D) (None, 7, 7, 192) 172032 activation_38[0][0]
__________________________________________________________________________________________________
conv2d_40 (Conv2D) (None, 7, 7, 192) 147456 average_pooling2d_4[0][0]
__________________________________________________________________________________________________
batch_normalization_31 (BatchNo (None, 7, 7, 192) 576 conv2d_31[0][0]
__________________________________________________________________________________________________
batch_normalization_34 (BatchNo (None, 7, 7, 192) 576 conv2d_34[0][0]
__________________________________________________________________________________________________
batch_normalization_39 (BatchNo (None, 7, 7, 192) 576 conv2d_39[0][0]
__________________________________________________________________________________________________
batch_normalization_40 (BatchNo (None, 7, 7, 192) 576 conv2d_40[0][0]
__________________________________________________________________________________________________
activation_31 (Activation) (None, 7, 7, 192) 0 batch_normalization_31[0][0]
__________________________________________________________________________________________________
activation_34 (Activation) (None, 7, 7, 192) 0 batch_normalization_34[0][0]
__________________________________________________________________________________________________
activation_39 (Activation) (None, 7, 7, 192) 0 batch_normalization_39[0][0]
__________________________________________________________________________________________________
activation_40 (Activation) (None, 7, 7, 192) 0 batch_normalization_40[0][0]
__________________________________________________________________________________________________
mixed4 (Concatenate) (None, 7, 7, 768) 0 activation_31[0][0]
activation_34[0][0]
activation_39[0][0]
activation_40[0][0]
__________________________________________________________________________________________________
conv2d_45 (Conv2D) (None, 7, 7, 160) 122880 mixed4[0][0]
__________________________________________________________________________________________________
batch_normalization_45 (BatchNo (None, 7, 7, 160) 480 conv2d_45[0][0]
__________________________________________________________________________________________________
activation_45 (Activation) (None, 7, 7, 160) 0 batch_normalization_45[0][0]
__________________________________________________________________________________________________
conv2d_46 (Conv2D) (None, 7, 7, 160) 179200 activation_45[0][0]
__________________________________________________________________________________________________
batch_normalization_46 (BatchNo (None, 7, 7, 160) 480 conv2d_46[0][0]
__________________________________________________________________________________________________
activation_46 (Activation) (None, 7, 7, 160) 0 batch_normalization_46[0][0]
__________________________________________________________________________________________________
conv2d_42 (Conv2D) (None, 7, 7, 160) 122880 mixed4[0][0]
__________________________________________________________________________________________________
conv2d_47 (Conv2D) (None, 7, 7, 160) 179200 activation_46[0][0]
__________________________________________________________________________________________________
batch_normalization_42 (BatchNo (None, 7, 7, 160) 480 conv2d_42[0][0]
__________________________________________________________________________________________________
batch_normalization_47 (BatchNo (None, 7, 7, 160) 480 conv2d_47[0][0]
__________________________________________________________________________________________________
activation_42 (Activation) (None, 7, 7, 160) 0 batch_normalization_42[0][0]
__________________________________________________________________________________________________
activation_47 (Activation) (None, 7, 7, 160) 0 batch_normalization_47[0][0]
__________________________________________________________________________________________________
conv2d_43 (Conv2D) (None, 7, 7, 160) 179200 activation_42[0][0]
__________________________________________________________________________________________________
conv2d_48 (Conv2D) (None, 7, 7, 160) 179200 activation_47[0][0]
__________________________________________________________________________________________________
batch_normalization_43 (BatchNo (None, 7, 7, 160) 480 conv2d_43[0][0]
__________________________________________________________________________________________________
batch_normalization_48 (BatchNo (None, 7, 7, 160) 480 conv2d_48[0][0]
__________________________________________________________________________________________________
activation_43 (Activation) (None, 7, 7, 160) 0 batch_normalization_43[0][0]
__________________________________________________________________________________________________
activation_48 (Activation) (None, 7, 7, 160) 0 batch_normalization_48[0][0]
__________________________________________________________________________________________________
average_pooling2d_5 (AveragePoo (None, 7, 7, 768) 0 mixed4[0][0]
__________________________________________________________________________________________________
conv2d_41 (Conv2D) (None, 7, 7, 192) 147456 mixed4[0][0]
__________________________________________________________________________________________________
conv2d_44 (Conv2D) (None, 7, 7, 192) 215040 activation_43[0][0]
__________________________________________________________________________________________________
conv2d_49 (Conv2D) (None, 7, 7, 192) 215040 activation_48[0][0]
__________________________________________________________________________________________________
conv2d_50 (Conv2D) (None, 7, 7, 192) 147456 average_pooling2d_5[0][0]
__________________________________________________________________________________________________
batch_normalization_41 (BatchNo (None, 7, 7, 192) 576 conv2d_41[0][0]
__________________________________________________________________________________________________
batch_normalization_44 (BatchNo (None, 7, 7, 192) 576 conv2d_44[0][0]
__________________________________________________________________________________________________
batch_normalization_49 (BatchNo (None, 7, 7, 192) 576 conv2d_49[0][0]
__________________________________________________________________________________________________
batch_normalization_50 (BatchNo (None, 7, 7, 192) 576 conv2d_50[0][0]
__________________________________________________________________________________________________
activation_41 (Activation) (None, 7, 7, 192) 0 batch_normalization_41[0][0]
__________________________________________________________________________________________________
activation_44 (Activation) (None, 7, 7, 192) 0 batch_normalization_44[0][0]
__________________________________________________________________________________________________
activation_49 (Activation) (None, 7, 7, 192) 0 batch_normalization_49[0][0]
__________________________________________________________________________________________________
activation_50 (Activation) (None, 7, 7, 192) 0 batch_normalization_50[0][0]
__________________________________________________________________________________________________
mixed5 (Concatenate) (None, 7, 7, 768) 0 activation_41[0][0]
activation_44[0][0]
activation_49[0][0]
activation_50[0][0]
__________________________________________________________________________________________________
conv2d_55 (Conv2D) (None, 7, 7, 160) 122880 mixed5[0][0]
__________________________________________________________________________________________________
batch_normalization_55 (BatchNo (None, 7, 7, 160) 480 conv2d_55[0][0]
__________________________________________________________________________________________________
activation_55 (Activation) (None, 7, 7, 160) 0 batch_normalization_55[0][0]
__________________________________________________________________________________________________
conv2d_56 (Conv2D) (None, 7, 7, 160) 179200 activation_55[0][0]
__________________________________________________________________________________________________
batch_normalization_56 (BatchNo (None, 7, 7, 160) 480 conv2d_56[0][0]
__________________________________________________________________________________________________
activation_56 (Activation) (None, 7, 7, 160) 0 batch_normalization_56[0][0]
__________________________________________________________________________________________________
conv2d_52 (Conv2D) (None, 7, 7, 160) 122880 mixed5[0][0]
__________________________________________________________________________________________________
conv2d_57 (Conv2D) (None, 7, 7, 160) 179200 activation_56[0][0]
__________________________________________________________________________________________________
batch_normalization_52 (BatchNo (None, 7, 7, 160) 480 conv2d_52[0][0]
__________________________________________________________________________________________________
batch_normalization_57 (BatchNo (None, 7, 7, 160) 480 conv2d_57[0][0]
__________________________________________________________________________________________________
activation_52 (Activation) (None, 7, 7, 160) 0 batch_normalization_52[0][0]
__________________________________________________________________________________________________
activation_57 (Activation) (None, 7, 7, 160) 0 batch_normalization_57[0][0]
__________________________________________________________________________________________________
conv2d_53 (Conv2D) (None, 7, 7, 160) 179200 activation_52[0][0]
__________________________________________________________________________________________________
conv2d_58 (Conv2D) (None, 7, 7, 160) 179200 activation_57[0][0]
__________________________________________________________________________________________________
batch_normalization_53 (BatchNo (None, 7, 7, 160) 480 conv2d_53[0][0]
__________________________________________________________________________________________________
batch_normalization_58 (BatchNo (None, 7, 7, 160) 480 conv2d_58[0][0]
__________________________________________________________________________________________________
activation_53 (Activation) (None, 7, 7, 160) 0 batch_normalization_53[0][0]
__________________________________________________________________________________________________
activation_58 (Activation) (None, 7, 7, 160) 0 batch_normalization_58[0][0]
__________________________________________________________________________________________________
average_pooling2d_6 (AveragePoo (None, 7, 7, 768) 0 mixed5[0][0]
__________________________________________________________________________________________________
conv2d_51 (Conv2D) (None, 7, 7, 192) 147456 mixed5[0][0]
__________________________________________________________________________________________________
conv2d_54 (Conv2D) (None, 7, 7, 192) 215040 activation_53[0][0]
__________________________________________________________________________________________________
conv2d_59 (Conv2D) (None, 7, 7, 192) 215040 activation_58[0][0]
__________________________________________________________________________________________________
conv2d_60 (Conv2D) (None, 7, 7, 192) 147456 average_pooling2d_6[0][0]
__________________________________________________________________________________________________
batch_normalization_51 (BatchNo (None, 7, 7, 192) 576 conv2d_51[0][0]
__________________________________________________________________________________________________
batch_normalization_54 (BatchNo (None, 7, 7, 192) 576 conv2d_54[0][0]
__________________________________________________________________________________________________
batch_normalization_59 (BatchNo (None, 7, 7, 192) 576 conv2d_59[0][0]
__________________________________________________________________________________________________
batch_normalization_60 (BatchNo (None, 7, 7, 192) 576 conv2d_60[0][0]
__________________________________________________________________________________________________
activation_51 (Activation) (None, 7, 7, 192) 0 batch_normalization_51[0][0]
__________________________________________________________________________________________________
activation_54 (Activation) (None, 7, 7, 192) 0 batch_normalization_54[0][0]
__________________________________________________________________________________________________
activation_59 (Activation) (None, 7, 7, 192) 0 batch_normalization_59[0][0]
__________________________________________________________________________________________________
activation_60 (Activation) (None, 7, 7, 192) 0 batch_normalization_60[0][0]
__________________________________________________________________________________________________
mixed6 (Concatenate) (None, 7, 7, 768) 0 activation_51[0][0]
activation_54[0][0]
activation_59[0][0]
activation_60[0][0]
__________________________________________________________________________________________________
conv2d_65 (Conv2D) (None, 7, 7, 192) 147456 mixed6[0][0]
__________________________________________________________________________________________________
batch_normalization_65 (BatchNo (None, 7, 7, 192) 576 conv2d_65[0][0]
__________________________________________________________________________________________________
activation_65 (Activation) (None, 7, 7, 192) 0 batch_normalization_65[0][0]
__________________________________________________________________________________________________
conv2d_66 (Conv2D) (None, 7, 7, 192) 258048 activation_65[0][0]
__________________________________________________________________________________________________
batch_normalization_66 (BatchNo (None, 7, 7, 192) 576 conv2d_66[0][0]
__________________________________________________________________________________________________
activation_66 (Activation) (None, 7, 7, 192) 0 batch_normalization_66[0][0]
__________________________________________________________________________________________________
conv2d_62 (Conv2D) (None, 7, 7, 192) 147456 mixed6[0][0]
__________________________________________________________________________________________________
conv2d_67 (Conv2D) (None, 7, 7, 192) 258048 activation_66[0][0]
__________________________________________________________________________________________________
batch_normalization_62 (BatchNo (None, 7, 7, 192) 576 conv2d_62[0][0]
__________________________________________________________________________________________________
batch_normalization_67 (BatchNo (None, 7, 7, 192) 576 conv2d_67[0][0]
__________________________________________________________________________________________________
activation_62 (Activation) (None, 7, 7, 192) 0 batch_normalization_62[0][0]
__________________________________________________________________________________________________
activation_67 (Activation) (None, 7, 7, 192) 0 batch_normalization_67[0][0]
__________________________________________________________________________________________________
conv2d_63 (Conv2D) (None, 7, 7, 192) 258048 activation_62[0][0]
__________________________________________________________________________________________________
conv2d_68 (Conv2D) (None, 7, 7, 192) 258048 activation_67[0][0]
__________________________________________________________________________________________________
batch_normalization_63 (BatchNo (None, 7, 7, 192) 576 conv2d_63[0][0]
__________________________________________________________________________________________________
batch_normalization_68 (BatchNo (None, 7, 7, 192) 576 conv2d_68[0][0]
__________________________________________________________________________________________________
activation_63 (Activation) (None, 7, 7, 192) 0 batch_normalization_63[0][0]
__________________________________________________________________________________________________
activation_68 (Activation) (None, 7, 7, 192) 0 batch_normalization_68[0][0]
__________________________________________________________________________________________________
average_pooling2d_7 (AveragePoo (None, 7, 7, 768) 0 mixed6[0][0]
__________________________________________________________________________________________________
conv2d_61 (Conv2D) (None, 7, 7, 192) 147456 mixed6[0][0]
__________________________________________________________________________________________________
conv2d_64 (Conv2D) (None, 7, 7, 192) 258048 activation_63[0][0]
__________________________________________________________________________________________________
conv2d_69 (Conv2D) (None, 7, 7, 192) 258048 activation_68[0][0]
__________________________________________________________________________________________________
conv2d_70 (Conv2D) (None, 7, 7, 192) 147456 average_pooling2d_7[0][0]
__________________________________________________________________________________________________
batch_normalization_61 (BatchNo (None, 7, 7, 192) 576 conv2d_61[0][0]
__________________________________________________________________________________________________
batch_normalization_64 (BatchNo (None, 7, 7, 192) 576 conv2d_64[0][0]
__________________________________________________________________________________________________
batch_normalization_69 (BatchNo (None, 7, 7, 192) 576 conv2d_69[0][0]
__________________________________________________________________________________________________
batch_normalization_70 (BatchNo (None, 7, 7, 192) 576 conv2d_70[0][0]
__________________________________________________________________________________________________
activation_61 (Activation) (None, 7, 7, 192) 0 batch_normalization_61[0][0]
__________________________________________________________________________________________________
activation_64 (Activation) (None, 7, 7, 192) 0 batch_normalization_64[0][0]
__________________________________________________________________________________________________
activation_69 (Activation) (None, 7, 7, 192) 0 batch_normalization_69[0][0]
__________________________________________________________________________________________________
activation_70 (Activation) (None, 7, 7, 192) 0 batch_normalization_70[0][0]
__________________________________________________________________________________________________
mixed7 (Concatenate) (None, 7, 7, 768) 0 activation_61[0][0]
activation_64[0][0]
activation_69[0][0]
activation_70[0][0]
__________________________________________________________________________________________________
conv2d_73 (Conv2D) (None, 7, 7, 192) 147456 mixed7[0][0]
__________________________________________________________________________________________________
batch_normalization_73 (BatchNo (None, 7, 7, 192) 576 conv2d_73[0][0]
__________________________________________________________________________________________________
activation_73 (Activation) (None, 7, 7, 192) 0 batch_normalization_73[0][0]
__________________________________________________________________________________________________
conv2d_74 (Conv2D) (None, 7, 7, 192) 258048 activation_73[0][0]
__________________________________________________________________________________________________
batch_normalization_74 (BatchNo (None, 7, 7, 192) 576 conv2d_74[0][0]
__________________________________________________________________________________________________
activation_74 (Activation) (None, 7, 7, 192) 0 batch_normalization_74[0][0]
__________________________________________________________________________________________________
conv2d_71 (Conv2D) (None, 7, 7, 192) 147456 mixed7[0][0]
__________________________________________________________________________________________________
conv2d_75 (Conv2D) (None, 7, 7, 192) 258048 activation_74[0][0]
__________________________________________________________________________________________________
batch_normalization_71 (BatchNo (None, 7, 7, 192) 576 conv2d_71[0][0]
__________________________________________________________________________________________________
batch_normalization_75 (BatchNo (None, 7, 7, 192) 576 conv2d_75[0][0]
__________________________________________________________________________________________________
activation_71 (Activation) (None, 7, 7, 192) 0 batch_normalization_71[0][0]
__________________________________________________________________________________________________
activation_75 (Activation) (None, 7, 7, 192) 0 batch_normalization_75[0][0]
__________________________________________________________________________________________________
conv2d_72 (Conv2D) (None, 3, 3, 320) 552960 activation_71[0][0]
__________________________________________________________________________________________________
conv2d_76 (Conv2D) (None, 3, 3, 192) 331776 activation_75[0][0]
__________________________________________________________________________________________________
batch_normalization_72 (BatchNo (None, 3, 3, 320) 960 conv2d_72[0][0]
__________________________________________________________________________________________________
batch_normalization_76 (BatchNo (None, 3, 3, 192) 576 conv2d_76[0][0]
__________________________________________________________________________________________________
activation_72 (Activation) (None, 3, 3, 320) 0 batch_normalization_72[0][0]
__________________________________________________________________________________________________
activation_76 (Activation) (None, 3, 3, 192) 0 batch_normalization_76[0][0]
__________________________________________________________________________________________________
max_pooling2d_4 (MaxPooling2D) (None, 3, 3, 768) 0 mixed7[0][0]
__________________________________________________________________________________________________
mixed8 (Concatenate) (None, 3, 3, 1280) 0 activation_72[0][0]
activation_76[0][0]
max_pooling2d_4[0][0]
__________________________________________________________________________________________________
conv2d_81 (Conv2D) (None, 3, 3, 448) 573440 mixed8[0][0]
__________________________________________________________________________________________________
batch_normalization_81 (BatchNo (None, 3, 3, 448) 1344 conv2d_81[0][0]
__________________________________________________________________________________________________
activation_81 (Activation) (None, 3, 3, 448) 0 batch_normalization_81[0][0]
__________________________________________________________________________________________________
conv2d_78 (Conv2D) (None, 3, 3, 384) 491520 mixed8[0][0]
__________________________________________________________________________________________________
conv2d_82 (Conv2D) (None, 3, 3, 384) 1548288 activation_81[0][0]
__________________________________________________________________________________________________
batch_normalization_78 (BatchNo (None, 3, 3, 384) 1152 conv2d_78[0][0]
__________________________________________________________________________________________________
batch_normalization_82 (BatchNo (None, 3, 3, 384) 1152 conv2d_82[0][0]
__________________________________________________________________________________________________
activation_78 (Activation) (None, 3, 3, 384) 0 batch_normalization_78[0][0]
__________________________________________________________________________________________________
activation_82 (Activation) (None, 3, 3, 384) 0 batch_normalization_82[0][0]
__________________________________________________________________________________________________
conv2d_79 (Conv2D) (None, 3, 3, 384) 442368 activation_78[0][0]
__________________________________________________________________________________________________
conv2d_80 (Conv2D) (None, 3, 3, 384) 442368 activation_78[0][0]
__________________________________________________________________________________________________
conv2d_83 (Conv2D) (None, 3, 3, 384) 442368 activation_82[0][0]
__________________________________________________________________________________________________
conv2d_84 (Conv2D) (None, 3, 3, 384) 442368 activation_82[0][0]
__________________________________________________________________________________________________
average_pooling2d_8 (AveragePoo (None, 3, 3, 1280) 0 mixed8[0][0]
__________________________________________________________________________________________________
conv2d_77 (Conv2D) (None, 3, 3, 320) 409600 mixed8[0][0]
__________________________________________________________________________________________________
batch_normalization_79 (BatchNo (None, 3, 3, 384) 1152 conv2d_79[0][0]
__________________________________________________________________________________________________
batch_normalization_80 (BatchNo (None, 3, 3, 384) 1152 conv2d_80[0][0]
__________________________________________________________________________________________________
batch_normalization_83 (BatchNo (None, 3, 3, 384) 1152 conv2d_83[0][0]
__________________________________________________________________________________________________
batch_normalization_84 (BatchNo (None, 3, 3, 384) 1152 conv2d_84[0][0]
__________________________________________________________________________________________________
conv2d_85 (Conv2D) (None, 3, 3, 192) 245760 average_pooling2d_8[0][0]
__________________________________________________________________________________________________
batch_normalization_77 (BatchNo (None, 3, 3, 320) 960 conv2d_77[0][0]
__________________________________________________________________________________________________
activation_79 (Activation) (None, 3, 3, 384) 0 batch_normalization_79[0][0]
__________________________________________________________________________________________________
activation_80 (Activation) (None, 3, 3, 384) 0 batch_normalization_80[0][0]
__________________________________________________________________________________________________
activation_83 (Activation) (None, 3, 3, 384) 0 batch_normalization_83[0][0]
__________________________________________________________________________________________________
activation_84 (Activation) (None, 3, 3, 384) 0 batch_normalization_84[0][0]
__________________________________________________________________________________________________
batch_normalization_85 (BatchNo (None, 3, 3, 192) 576 conv2d_85[0][0]
__________________________________________________________________________________________________
activation_77 (Activation) (None, 3, 3, 320) 0 batch_normalization_77[0][0]
__________________________________________________________________________________________________
mixed9_0 (Concatenate) (None, 3, 3, 768) 0 activation_79[0][0]
activation_80[0][0]
__________________________________________________________________________________________________
concatenate_1 (Concatenate) (None, 3, 3, 768) 0 activation_83[0][0]
activation_84[0][0]
__________________________________________________________________________________________________
activation_85 (Activation) (None, 3, 3, 192) 0 batch_normalization_85[0][0]
__________________________________________________________________________________________________
mixed9 (Concatenate) (None, 3, 3, 2048) 0 activation_77[0][0]
mixed9_0[0][0]
concatenate_1[0][0]
activation_85[0][0]
__________________________________________________________________________________________________
conv2d_90 (Conv2D) (None, 3, 3, 448) 917504 mixed9[0][0]
__________________________________________________________________________________________________
batch_normalization_90 (BatchNo (None, 3, 3, 448) 1344 conv2d_90[0][0]
__________________________________________________________________________________________________
activation_90 (Activation) (None, 3, 3, 448) 0 batch_normalization_90[0][0]
__________________________________________________________________________________________________
conv2d_87 (Conv2D) (None, 3, 3, 384) 786432 mixed9[0][0]
__________________________________________________________________________________________________
conv2d_91 (Conv2D) (None, 3, 3, 384) 1548288 activation_90[0][0]
__________________________________________________________________________________________________
batch_normalization_87 (BatchNo (None, 3, 3, 384) 1152 conv2d_87[0][0]
__________________________________________________________________________________________________
batch_normalization_91 (BatchNo (None, 3, 3, 384) 1152 conv2d_91[0][0]
__________________________________________________________________________________________________
activation_87 (Activation) (None, 3, 3, 384) 0 batch_normalization_87[0][0]
__________________________________________________________________________________________________
activation_91 (Activation) (None, 3, 3, 384) 0 batch_normalization_91[0][0]
__________________________________________________________________________________________________
conv2d_88 (Conv2D) (None, 3, 3, 384) 442368 activation_87[0][0]
__________________________________________________________________________________________________
conv2d_89 (Conv2D) (None, 3, 3, 384) 442368 activation_87[0][0]
__________________________________________________________________________________________________
conv2d_92 (Conv2D) (None, 3, 3, 384) 442368 activation_91[0][0]
__________________________________________________________________________________________________
conv2d_93 (Conv2D) (None, 3, 3, 384) 442368 activation_91[0][0]
__________________________________________________________________________________________________
average_pooling2d_9 (AveragePoo (None, 3, 3, 2048) 0 mixed9[0][0]
__________________________________________________________________________________________________
conv2d_86 (Conv2D) (None, 3, 3, 320) 655360 mixed9[0][0]
__________________________________________________________________________________________________
batch_normalization_88 (BatchNo (None, 3, 3, 384) 1152 conv2d_88[0][0]
__________________________________________________________________________________________________
batch_normalization_89 (BatchNo (None, 3, 3, 384) 1152 conv2d_89[0][0]
__________________________________________________________________________________________________
batch_normalization_92 (BatchNo (None, 3, 3, 384) 1152 conv2d_92[0][0]
__________________________________________________________________________________________________
batch_normalization_93 (BatchNo (None, 3, 3, 384) 1152 conv2d_93[0][0]
__________________________________________________________________________________________________
conv2d_94 (Conv2D) (None, 3, 3, 192) 393216 average_pooling2d_9[0][0]
__________________________________________________________________________________________________
batch_normalization_86 (BatchNo (None, 3, 3, 320) 960 conv2d_86[0][0]
__________________________________________________________________________________________________
activation_88 (Activation) (None, 3, 3, 384) 0 batch_normalization_88[0][0]
__________________________________________________________________________________________________
activation_89 (Activation) (None, 3, 3, 384) 0 batch_normalization_89[0][0]
__________________________________________________________________________________________________
activation_92 (Activation) (None, 3, 3, 384) 0 batch_normalization_92[0][0]
__________________________________________________________________________________________________
activation_93 (Activation) (None, 3, 3, 384) 0 batch_normalization_93[0][0]
__________________________________________________________________________________________________
batch_normalization_94 (BatchNo (None, 3, 3, 192) 576 conv2d_94[0][0]
__________________________________________________________________________________________________
activation_86 (Activation) (None, 3, 3, 320) 0 batch_normalization_86[0][0]
__________________________________________________________________________________________________
mixed9_1 (Concatenate) (None, 3, 3, 768) 0 activation_88[0][0]
activation_89[0][0]
__________________________________________________________________________________________________
concatenate_2 (Concatenate) (None, 3, 3, 768) 0 activation_92[0][0]
activation_93[0][0]
__________________________________________________________________________________________________
activation_94 (Activation) (None, 3, 3, 192) 0 batch_normalization_94[0][0]
__________________________________________________________________________________________________
mixed10 (Concatenate) (None, 3, 3, 2048) 0 activation_86[0][0]
mixed9_1[0][0]
concatenate_2[0][0]
activation_94[0][0]
==================================================================================================
Total params: 21,802,784
Trainable params: 0
Non-trainable params: 21,802,784
__________________________________________________________________________________________________
None
###Markdown
In a normal Inception network, you would see from the model summary that the last two layers were a global average pooling layer and a fully-connected "Dense" layer. However, since we set `include_top` to `False`, both of these get dropped. If you otherwise wanted to drop additional layers, you would use:

```
inception.layers.pop()
```

Note that `pop()` works from the end of the model backwards. It's important to note two things here:

1. How many layers you drop is up to you, typically. We dropped the final two already by setting `include_top` to `False` in the original loading of the model, but you could instead just run `pop()` twice to achieve similar results. (*Note:* Keras requires us to set `include_top` to `False` in order to change the `input_shape`.) Additional layers could be dropped by additional calls to `pop()`.
2. If you make a mistake with `pop()`, you'll want to reload the model. If you use it multiple times, the model will continue to drop more and more layers, so you may need to check `model.summary()` again to check your work.

### Adding new layers

Now you can start to add your own layers. While we've used Keras's `Sequential` model before for simplicity, we'll actually use the [Model API](https://keras.io/models/model/) this time. This functions a little differently: instead of using `model.add()`, you explicitly tell the model which previous layer to attach to the current layer. This is useful if you want to use more advanced concepts like [skip layers](https://en.wikipedia.org/wiki/Residual_neural_network), for instance (which were used heavily in ResNet). For example, if you had a previous layer named `inp`:

```
x = Dropout(0.2)(inp)
```

is how you would attach a new dropout layer `x`, with its input coming from the layer with the variable name `inp`. A minimal sketch of a skip connection built this way is shown below.

We are going to use the [CIFAR-10 dataset](https://www.cs.toronto.edu/~kriz/cifar.html), which consists of 60,000 32x32 images of 10 classes. We need to use Keras's `Input` function to do so, and then we want to re-size the images up to the `input_size` we specified earlier (139x139).
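Here is that minimal sketch of a skip connection written with the Model API. It is illustration only, not part of this lab's network, and the layer sizes are arbitrary assumptions:

```python
from keras.layers import Input, Conv2D, Add, Activation
from keras.models import Model

inp = Input(shape=(32, 32, 3))
# A conv that preserves the input shape, so the two branches can be summed
x = Conv2D(3, (3, 3), padding='same')(inp)
# The "skip": merge the original input with the conv branch
merged = Add()([inp, x])
out = Activation('relu')(merged)
skip_model = Model(inputs=inp, outputs=out)
```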
###Code
from keras.layers import Input, Lambda
import tensorflow as tf
# Makes the input placeholder layer 32x32x3 for CIFAR-10
cifar_input = Input(shape=(32,32,3))
# Re-sizes the input with Keras's Lambda layer & attaches it to cifar_input
resized_input = Lambda(lambda image: tf.image.resize_images(
image, (input_size, input_size)))(cifar_input)
# Feeds the re-sized input into Inception model
# You will need to update the model name if you changed it earlier!
inp = inception(resized_input)
# Imports fully-connected "Dense" layers & Global Average Pooling
from keras.layers import Dense, GlobalAveragePooling2D
## TODO: Setting `include_top` to False earlier also removed the
## GlobalAveragePooling2D layer, but we still want it.
## Add it here, and make sure to connect it to the end of Inception
x = GlobalAveragePooling2D()(inp)
## TODO: Create two new fully-connected layers using the Model API
## format discussed above. The first layer should use the pooling
## output `x` as its input, along with ReLU activation. You can choose
## how many nodes it has, although 512 or less is a good idea.
## The second layer should take this first layer as input, and
## be named "predictions", with Softmax activation and
## 10 nodes, as we'll be using the CIFAR10 dataset.
x = Dense(512, activation='relu')(x)
predictions = Dense(10, activation='softmax')(x)
###Output
_____no_output_____
###Markdown
We're almost done with our new model! Now we just need to use the actual Model API to create the full model.
###Code
# Imports the Model API
from keras.models import Model
# Creates the model, assuming your final layer is named "predictions"
model = Model(inputs=cifar_input, outputs=predictions)
# Compile the model
model.compile(optimizer='Adam', loss='categorical_crossentropy', metrics=['accuracy'])
# Check the summary of this new model to confirm the architecture
model.summary()
###Output
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_2 (InputLayer) (None, 32, 32, 3) 0
_________________________________________________________________
lambda_1 (Lambda) (None, 139, 139, 3) 0
_________________________________________________________________
inception_v3 (Model) (None, 3, 3, 2048) 21802784
_________________________________________________________________
global_average_pooling2d_1 ( (None, 2048) 0
_________________________________________________________________
dense_1 (Dense) (None, 512) 1049088
_________________________________________________________________
dense_2 (Dense) (None, 10) 5130
=================================================================
Total params: 22,857,002
Trainable params: 1,054,218
Non-trainable params: 21,802,784
_________________________________________________________________
###Markdown
Great job creating a new model architecture from Inception! Notice how this method of adding layers before InceptionV3 and appending to the end of it made InceptionV3 condense down into one line in the summary; if you use the Inception model's normal input (which you could gather from `inception.layers.input`), it would instead show all the layers like before.

Most of the rest of the code in the notebook just goes toward loading our data, pre-processing it, and starting our training in Keras, although there's one other good point to make here - Keras callbacks.

### Keras Callbacks

Keras [callbacks](https://keras.io/callbacks/) allow you to gather and store additional information during training, such as the best model, or even stop training early if the validation accuracy has stopped improving. These methods can help to avoid overfitting, or avoid other issues.

There are two key callbacks to mention here, `ModelCheckpoint` and `EarlyStopping`. As the names may suggest, model checkpoint saves down the best model so far based on a given metric, while early stopping will end training before the specified number of epochs if the chosen metric no longer improves after a given amount of time.

To set these callbacks, you could do the following:

```
checkpoint = ModelCheckpoint(filepath=save_path, monitor='val_loss', save_best_only=True)
```

This would save a model to a specified `save_path`, based on validation loss, and only save down the best models. If you set `save_best_only` to `False`, every single epoch will save down another version of the model.

```
stopper = EarlyStopping(monitor='val_acc', min_delta=0.0003, patience=5)
```

This will monitor validation accuracy, and if it has not improved by more than 0.0003 over the previous best validation accuracy for 5 epochs, training will end early.

You still need to actually feed these callbacks into `fit()` when you train the model (along with all other relevant data to feed into `fit`):

```
model.fit(callbacks=[checkpoint, stopper])
```

A consolidated sketch of these pieces follows after this cell.

### GPU time

The rest of the notebook will give you the code for training, so you can turn on the GPU at this point - but first, **make sure to save your jupyter notebook**. Once the GPU is turned on, it will load whatever your last notebook checkpoint is. While we suggest reading through the code below to make sure you understand it, you can otherwise go ahead and select *Cell > Run All* (or *Kernel > Restart & Run All* if already using GPU) to run through all cells in the notebook.
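As promised, a consolidated sketch of the callback setup. The `save_path` value is a placeholder of our choosing, and the `fit()` call is left commented out since the actual training below uses `fit_generator` without callbacks:

```python
from keras.callbacks import ModelCheckpoint, EarlyStopping

save_path = 'best_model.h5'  # assumption: any writable path will do
checkpoint = ModelCheckpoint(filepath=save_path, monitor='val_loss',
                             save_best_only=True)
stopper = EarlyStopping(monitor='val_acc', min_delta=0.0003, patience=5)

# model.fit(X_train, y_one_hot_train,
#           validation_data=(X_val, y_one_hot_val),
#           epochs=20, callbacks=[checkpoint, stopper])
```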
###Code
from sklearn.utils import shuffle
from sklearn.preprocessing import LabelBinarizer
from keras.datasets import cifar10
(X_train, y_train), (X_val, y_val) = cifar10.load_data()
# One-hot encode the labels (fit on the training labels only,
# then re-use the same binarizer for validation)
label_binarizer = LabelBinarizer()
y_one_hot_train = label_binarizer.fit_transform(y_train)
y_one_hot_val = label_binarizer.transform(y_val)
# Shuffle the training & validation data
X_train, y_one_hot_train = shuffle(X_train, y_one_hot_train)
X_val, y_one_hot_val = shuffle(X_val, y_one_hot_val)
# We are only going to use the first 10,000 images for speed reasons
# And only the first 2,000 images from the test set
X_train = X_train[:10000]
y_one_hot_train = y_one_hot_train[:10000]
X_val = X_val[:2000]
y_one_hot_val = y_one_hot_val[:2000]
###Output
Downloading data from https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz
170500096/170498071 [==============================] - 3s 0us/step
###Markdown
You can check out Keras's [ImageDataGenerator documentation](https://faroit.github.io/keras-docs/2.0.9/preprocessing/image/) for more information on the below. You can also add additional image augmentation through this function, although we are skipping that step here so you can potentially explore it in the upcoming project; a sketch of what that could look like follows.
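Although we skip augmentation in this lab, here is a hedged sketch of adding it on top of the ImageNet pre-processing. The specific transform ranges are arbitrary assumptions, not tuned values:

```python
from keras.preprocessing.image import ImageDataGenerator
from keras.applications.inception_v3 import preprocess_input

aug_datagen = ImageDataGenerator(
    preprocessing_function=preprocess_input,
    rotation_range=15,       # random rotations up to 15 degrees
    width_shift_range=0.1,   # random horizontal shifts (fraction of width)
    height_shift_range=0.1,  # random vertical shifts (fraction of height)
    horizontal_flip=True)    # random left-right flips

# Drop-in replacement for `datagen` in the training cell below:
# aug_datagen.flow(X_train, y_one_hot_train, batch_size=batch_size)
```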
###Code
# Use a generator to pre-process our images for ImageNet
from keras.preprocessing.image import ImageDataGenerator
from keras.applications.inception_v3 import preprocess_input
if preprocess_flag:
    datagen = ImageDataGenerator(preprocessing_function=preprocess_input)
    val_datagen = ImageDataGenerator(preprocessing_function=preprocess_input)
else:
    datagen = ImageDataGenerator()
    val_datagen = ImageDataGenerator()
# Train the model
batch_size = 32
epochs = 5
# Note: we aren't using callbacks here since we only are using 5 epochs to conserve GPU time
model.fit_generator(datagen.flow(X_train, y_one_hot_train, batch_size=batch_size),
steps_per_epoch=len(X_train)/batch_size, epochs=epochs, verbose=1,
validation_data=val_datagen.flow(X_val, y_one_hot_val, batch_size=batch_size),
validation_steps=len(X_val)/batch_size)
###Output
Epoch 1/5
313/312 [==============================] - 51s 163ms/step - loss: 1.2886 - acc: 0.5687 - val_loss: 0.9030 - val_acc: 0.7180
Epoch 2/5
313/312 [==============================] - 44s 142ms/step - loss: 0.9635 - acc: 0.6691 - val_loss: 0.9498 - val_acc: 0.6900
Epoch 3/5
313/312 [==============================] - 44s 142ms/step - loss: 0.8921 - acc: 0.6943 - val_loss: 0.9054 - val_acc: 0.6975
Epoch 4/5
313/312 [==============================] - 44s 142ms/step - loss: 0.8179 - acc: 0.7214 - val_loss: 0.9029 - val_acc: 0.7145
Epoch 5/5
313/312 [==============================] - 44s 141ms/step - loss: 0.7912 - acc: 0.7230 - val_loss: 0.9993 - val_acc: 0.6965
###Markdown
As you may have noticed, CIFAR-10 is a fairly tough dataset. However, given that we are only training on a small subset of the data, only training for five epochs, and not using any image augmentation, the results are still fairly impressive! We achieved ~70% validation accuracy here, although your results may vary.

### [Optional] Test without frozen weights, or by training from scratch

Since the majority of the model was frozen above, training speed is pretty quick. You may also want to check out the training speed, as well as final accuracy, if you don't freeze the weights. Note that this can be fairly slow, so we're marking this as optional in order to conserve GPU time. If you do want to see the results from doing so, go back to the first code cell and set `freeze_flag` to `False`. If you want to completely train from scratch without ImageNet pre-trained weights, follow the previous step as well as setting `weights_flag` to `None`. Then, go to *Kernel > Restart & Run All*.

### Comparison

So that you don't use up your GPU time, we've tried out these results ourselves as well.

Training Mode | Val Acc @ 1 epoch | Val Acc @ 5 epochs | Time per epoch
---- | :----: | :----: | ----:
Frozen weights | 65.5% | 70.3% | 50 seconds
Unfrozen weights | 50.6% | 71.6% | 142 seconds
No pre-trained weights | 19.2% | 39.2% | 142 seconds

From the above, we can see that the pre-trained model with frozen weights actually began converging the fastest (already at 65.5% after 1 epoch), while the model re-training from the pre-trained weights slightly edged it out after 5 epochs.

However, this does not tell the whole story - the training accuracy was substantially higher, nearing 87% for the unfrozen-weights model, which actually began to overfit the data much more under this method. We would likely be able to counteract some of this issue by using data augmentation. On the flip side, the model using frozen weights could also have been improved by only freezing a portion of the weights; later layers are likely more specific to the ImageNet classes, as opposed to the simpler features extracted early in the network. A sketch of such partial freezing is shown after this cell.

### The Power of Transfer Learning

Comparing the last line to the other two really shows the power of transfer learning. After five epochs, a model without ImageNet pre-training had only achieved 39.2% accuracy, compared to over 70% for the other two. As such, pre-training the network has saved substantial time, especially given the additional training time needed when the weights are not frozen.

There is also evidence in various research that pre-training on ImageNet weights results in a higher overall accuracy than completely training from scratch, even when using a substantially different dataset.
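As mentioned above, here is a minimal sketch of freezing only the earlier portion of Inception while leaving the final layers trainable. The cutoff of 30 layers is an arbitrary assumption, not a tuned value:

```python
# Freeze everything except the last 30 layers of the loaded Inception model
for layer in inception.layers[:-30]:
    layer.trainable = False
for layer in inception.layers[-30:]:
    layer.trainable = True

# Trainability changes only take effect after re-compiling the full model:
# model.compile(optimizer='Adam', loss='categorical_crossentropy',
#               metrics=['accuracy'])
```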
###Code
# Behavioral-cloning model setup (the data-reading code is not included in this cell)
import csv
import cv2
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
from keras.models import Sequential
from keras.layers import Input,Flatten,Dense,Lambda,Cropping2D,Convolution2D,Dropout,MaxPooling2D
from keras.preprocessing.image import ImageDataGenerator
from sklearn.utils import shuffle
import sklearn
# Set up the model (NVIDIA-style end-to-end driving architecture)
model = Sequential()
# Normalize pixel values to the range [-0.5, 0.5]
model.add(Lambda(lambda x: x/255.0 - 0.5, input_shape=(160,320,3)))
# Crop away the sky (top 70 rows) and the car hood (bottom 20 rows)
model.add(Cropping2D(cropping=((70,20), (0,0))))
model.add(Convolution2D(24, (5,5), strides=(2,2), activation='relu'))
model.add(Convolution2D(36, (5,5), strides=(2,2), activation='relu'))
model.add(Convolution2D(48, (5,5), strides=(2,2), activation='relu'))
model.add(Convolution2D(64, (3,3), activation='relu'))
model.add(Dropout(0.75))  # regularization between the conv layers
model.add(Convolution2D(64, (3,3), activation='relu'))
model.add(Flatten())
model.add(Dense(100))
model.add(Dense(50))
model.add(Dense(10))
model.add(Dense(1))  # single continuous output (the steering angle)
print(model.summary())
img = plt.imread('data/IMG/left_2016_12_01_13_30_48_287.jpg')
plt.imshow(img)
flipped = np.fliplr(img)
plt.imsave('examples/left_camera.jpg',img)
plt.imsave('examples/left_camera_flipped.jpg',flipped)
###Output
_____no_output_____
###Markdown
# Lab: Transfer Learning

Welcome to the lab on Transfer Learning! Here, you'll get a chance to try out training a network with ImageNet pre-trained weights as a base, but with additional network layers of your own added on. You'll also get to see the difference between using frozen weights and training on all layers.

### GPU usage

In our previous examples in this lesson, we've avoided using GPU, but this time around you'll have the option to enable it. You do not need it on to begin with, but make sure anytime you switch from non-GPU to GPU, or vice versa, that you save your notebook! If not, you'll likely be reverted to the previous checkpoint. We also suggest only using the GPU when performing the (mostly minor) training below - you'll want to conserve GPU hours for your Behavioral Cloning project coming up next!
###Code
# Set a couple flags for training - you can ignore these for now
freeze_flag = True # `True` to freeze layers, `False` for full training
weights_flag = 'imagenet' # 'imagenet' or None
preprocess_flag = True # Should be true for ImageNet pre-trained typically
# Loads in InceptionV3
from keras.applications.inception_v3 import InceptionV3
# We can use smaller than the default 299x299x3 input for InceptionV3
# which will speed up training. Keras v2.0.9 supports down to 139x139x3
input_size = 139
# Using Inception with ImageNet pre-trained weights
inception = InceptionV3(weights=weights_flag, include_top=False,
input_shape=(input_size,input_size,3))
###Output
Using TensorFlow backend.
###Markdown
We'll use Inception V3 for this lab, although you can use the same techniques with any of the models in [Keras Applications](https://keras.io/applications/). Do note that certain models are only available in certain versions of Keras; this workspace uses Keras v2.0.9, for which you can see the available models [here](https://faroit.github.io/keras-docs/2.0.9/applications/).

In the above, we've set Inception to use an `input_shape` of 139x139x3 instead of the default 299x299x3. This will help us to speed up our training a bit later (and we'll actually be upsampling from smaller images, so we aren't losing data here). In order to do so, we also must set `include_top` to `False`, which means the final fully-connected layer with 1,000 nodes for each ImageNet class is dropped, as well as a Global Average Pooling layer.

### Pre-trained with frozen weights

To start, we'll see how an ImageNet pre-trained InceptionV3 model performs with all of its weights frozen. We will also drop the end layer and append new layers onto it, although you could do this in different ways (not drop the end and add new layers, drop more layers than we will here, etc.).

You can freeze layers by setting `layer.trainable` to `False` for a given `layer`. Within a `model`, you can get the list of layers with `model.layers`.
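Once the freeze in the next cell has run, a quick sanity check (just a sketch) is to count how many layers remain trainable:

```python
# After running the freezing cell below, this should report 0 trainable layers
n_trainable = sum(1 for layer in inception.layers if layer.trainable)
print('{} of {} layers are trainable'.format(n_trainable, len(inception.layers)))
```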
###Code
if freeze_flag == True:
## TODO: Iterate through the layers of the Inception model
## loaded above and set all of them to have trainable = False
for layer in inception.layers:
layer.trainable = False
###Output
_____no_output_____
###Markdown
### Dropping layers

You can drop layers from a model with `model.layers.pop()`. Before you do this, you should check out what the actual layers of the model are with Keras's `.summary()` function.
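As a small sketch of that inspection, you can also look at just the last few layers by name before deciding whether to `pop()` anything:

```python
# Print the name and output shape of the final three layers
for layer in inception.layers[-3:]:
    print(layer.name, layer.output_shape)
```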
###Code
## TODO: Use the model summary function to see all layers in the
## loaded Inception model
inception.summary()
###Output
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_1 (InputLayer) (None, 139, 139, 3) 0
__________________________________________________________________________________________________
conv2d_1 (Conv2D) (None, 69, 69, 32) 864 input_1[0][0]
__________________________________________________________________________________________________
batch_normalization_1 (BatchNor (None, 69, 69, 32) 96 conv2d_1[0][0]
__________________________________________________________________________________________________
activation_1 (Activation) (None, 69, 69, 32) 0 batch_normalization_1[0][0]
__________________________________________________________________________________________________
conv2d_2 (Conv2D) (None, 67, 67, 32) 9216 activation_1[0][0]
__________________________________________________________________________________________________
batch_normalization_2 (BatchNor (None, 67, 67, 32) 96 conv2d_2[0][0]
__________________________________________________________________________________________________
activation_2 (Activation) (None, 67, 67, 32) 0 batch_normalization_2[0][0]
__________________________________________________________________________________________________
conv2d_3 (Conv2D) (None, 67, 67, 64) 18432 activation_2[0][0]
__________________________________________________________________________________________________
batch_normalization_3 (BatchNor (None, 67, 67, 64) 192 conv2d_3[0][0]
__________________________________________________________________________________________________
activation_3 (Activation) (None, 67, 67, 64) 0 batch_normalization_3[0][0]
__________________________________________________________________________________________________
max_pooling2d_1 (MaxPooling2D) (None, 33, 33, 64) 0 activation_3[0][0]
__________________________________________________________________________________________________
conv2d_4 (Conv2D) (None, 33, 33, 80) 5120 max_pooling2d_1[0][0]
__________________________________________________________________________________________________
batch_normalization_4 (BatchNor (None, 33, 33, 80) 240 conv2d_4[0][0]
__________________________________________________________________________________________________
activation_4 (Activation) (None, 33, 33, 80) 0 batch_normalization_4[0][0]
__________________________________________________________________________________________________
conv2d_5 (Conv2D) (None, 31, 31, 192) 138240 activation_4[0][0]
__________________________________________________________________________________________________
batch_normalization_5 (BatchNor (None, 31, 31, 192) 576 conv2d_5[0][0]
__________________________________________________________________________________________________
activation_5 (Activation) (None, 31, 31, 192) 0 batch_normalization_5[0][0]
__________________________________________________________________________________________________
max_pooling2d_2 (MaxPooling2D) (None, 15, 15, 192) 0 activation_5[0][0]
__________________________________________________________________________________________________
conv2d_9 (Conv2D) (None, 15, 15, 64) 12288 max_pooling2d_2[0][0]
__________________________________________________________________________________________________
batch_normalization_9 (BatchNor (None, 15, 15, 64) 192 conv2d_9[0][0]
__________________________________________________________________________________________________
activation_9 (Activation) (None, 15, 15, 64) 0 batch_normalization_9[0][0]
__________________________________________________________________________________________________
conv2d_7 (Conv2D) (None, 15, 15, 48) 9216 max_pooling2d_2[0][0]
__________________________________________________________________________________________________
conv2d_10 (Conv2D) (None, 15, 15, 96) 55296 activation_9[0][0]
__________________________________________________________________________________________________
batch_normalization_7 (BatchNor (None, 15, 15, 48) 144 conv2d_7[0][0]
__________________________________________________________________________________________________
batch_normalization_10 (BatchNo (None, 15, 15, 96) 288 conv2d_10[0][0]
__________________________________________________________________________________________________
activation_7 (Activation) (None, 15, 15, 48) 0 batch_normalization_7[0][0]
__________________________________________________________________________________________________
activation_10 (Activation) (None, 15, 15, 96) 0 batch_normalization_10[0][0]
__________________________________________________________________________________________________
average_pooling2d_1 (AveragePoo (None, 15, 15, 192) 0 max_pooling2d_2[0][0]
__________________________________________________________________________________________________
conv2d_6 (Conv2D) (None, 15, 15, 64) 12288 max_pooling2d_2[0][0]
__________________________________________________________________________________________________
conv2d_8 (Conv2D) (None, 15, 15, 64) 76800 activation_7[0][0]
__________________________________________________________________________________________________
conv2d_11 (Conv2D) (None, 15, 15, 96) 82944 activation_10[0][0]
__________________________________________________________________________________________________
conv2d_12 (Conv2D) (None, 15, 15, 32) 6144 average_pooling2d_1[0][0]
__________________________________________________________________________________________________
batch_normalization_6 (BatchNor (None, 15, 15, 64) 192 conv2d_6[0][0]
__________________________________________________________________________________________________
batch_normalization_8 (BatchNor (None, 15, 15, 64) 192 conv2d_8[0][0]
__________________________________________________________________________________________________
batch_normalization_11 (BatchNo (None, 15, 15, 96) 288 conv2d_11[0][0]
__________________________________________________________________________________________________
batch_normalization_12 (BatchNo (None, 15, 15, 32) 96 conv2d_12[0][0]
__________________________________________________________________________________________________
activation_6 (Activation) (None, 15, 15, 64) 0 batch_normalization_6[0][0]
__________________________________________________________________________________________________
activation_8 (Activation) (None, 15, 15, 64) 0 batch_normalization_8[0][0]
__________________________________________________________________________________________________
activation_11 (Activation) (None, 15, 15, 96) 0 batch_normalization_11[0][0]
__________________________________________________________________________________________________
activation_12 (Activation) (None, 15, 15, 32) 0 batch_normalization_12[0][0]
__________________________________________________________________________________________________
mixed0 (Concatenate) (None, 15, 15, 256) 0 activation_6[0][0]
activation_8[0][0]
activation_11[0][0]
activation_12[0][0]
__________________________________________________________________________________________________
conv2d_16 (Conv2D) (None, 15, 15, 64) 16384 mixed0[0][0]
__________________________________________________________________________________________________
batch_normalization_16 (BatchNo (None, 15, 15, 64) 192 conv2d_16[0][0]
__________________________________________________________________________________________________
activation_16 (Activation) (None, 15, 15, 64) 0 batch_normalization_16[0][0]
__________________________________________________________________________________________________
conv2d_14 (Conv2D) (None, 15, 15, 48) 12288 mixed0[0][0]
__________________________________________________________________________________________________
conv2d_17 (Conv2D) (None, 15, 15, 96) 55296 activation_16[0][0]
__________________________________________________________________________________________________
batch_normalization_14 (BatchNo (None, 15, 15, 48) 144 conv2d_14[0][0]
__________________________________________________________________________________________________
batch_normalization_17 (BatchNo (None, 15, 15, 96) 288 conv2d_17[0][0]
__________________________________________________________________________________________________
activation_14 (Activation) (None, 15, 15, 48) 0 batch_normalization_14[0][0]
__________________________________________________________________________________________________
activation_17 (Activation) (None, 15, 15, 96) 0 batch_normalization_17[0][0]
__________________________________________________________________________________________________
average_pooling2d_2 (AveragePoo (None, 15, 15, 256) 0 mixed0[0][0]
__________________________________________________________________________________________________
conv2d_13 (Conv2D) (None, 15, 15, 64) 16384 mixed0[0][0]
__________________________________________________________________________________________________
conv2d_15 (Conv2D) (None, 15, 15, 64) 76800 activation_14[0][0]
__________________________________________________________________________________________________
conv2d_18 (Conv2D) (None, 15, 15, 96) 82944 activation_17[0][0]
__________________________________________________________________________________________________
conv2d_19 (Conv2D) (None, 15, 15, 64) 16384 average_pooling2d_2[0][0]
__________________________________________________________________________________________________
batch_normalization_13 (BatchNo (None, 15, 15, 64) 192 conv2d_13[0][0]
__________________________________________________________________________________________________
batch_normalization_15 (BatchNo (None, 15, 15, 64) 192 conv2d_15[0][0]
__________________________________________________________________________________________________
batch_normalization_18 (BatchNo (None, 15, 15, 96) 288 conv2d_18[0][0]
__________________________________________________________________________________________________
batch_normalization_19 (BatchNo (None, 15, 15, 64) 192 conv2d_19[0][0]
__________________________________________________________________________________________________
activation_13 (Activation) (None, 15, 15, 64) 0 batch_normalization_13[0][0]
__________________________________________________________________________________________________
activation_15 (Activation) (None, 15, 15, 64) 0 batch_normalization_15[0][0]
__________________________________________________________________________________________________
activation_18 (Activation) (None, 15, 15, 96) 0 batch_normalization_18[0][0]
__________________________________________________________________________________________________
activation_19 (Activation) (None, 15, 15, 64) 0 batch_normalization_19[0][0]
__________________________________________________________________________________________________
mixed1 (Concatenate) (None, 15, 15, 288) 0 activation_13[0][0]
activation_15[0][0]
activation_18[0][0]
activation_19[0][0]
__________________________________________________________________________________________________
conv2d_23 (Conv2D) (None, 15, 15, 64) 18432 mixed1[0][0]
__________________________________________________________________________________________________
batch_normalization_23 (BatchNo (None, 15, 15, 64) 192 conv2d_23[0][0]
__________________________________________________________________________________________________
activation_23 (Activation) (None, 15, 15, 64) 0 batch_normalization_23[0][0]
__________________________________________________________________________________________________
conv2d_21 (Conv2D) (None, 15, 15, 48) 13824 mixed1[0][0]
__________________________________________________________________________________________________
conv2d_24 (Conv2D) (None, 15, 15, 96) 55296 activation_23[0][0]
__________________________________________________________________________________________________
batch_normalization_21 (BatchNo (None, 15, 15, 48) 144 conv2d_21[0][0]
__________________________________________________________________________________________________
batch_normalization_24 (BatchNo (None, 15, 15, 96) 288 conv2d_24[0][0]
__________________________________________________________________________________________________
activation_21 (Activation) (None, 15, 15, 48) 0 batch_normalization_21[0][0]
__________________________________________________________________________________________________
activation_24 (Activation) (None, 15, 15, 96) 0 batch_normalization_24[0][0]
__________________________________________________________________________________________________
average_pooling2d_3 (AveragePoo (None, 15, 15, 288) 0 mixed1[0][0]
__________________________________________________________________________________________________
conv2d_20 (Conv2D) (None, 15, 15, 64) 18432 mixed1[0][0]
__________________________________________________________________________________________________
conv2d_22 (Conv2D) (None, 15, 15, 64) 76800 activation_21[0][0]
__________________________________________________________________________________________________
conv2d_25 (Conv2D) (None, 15, 15, 96) 82944 activation_24[0][0]
__________________________________________________________________________________________________
conv2d_26 (Conv2D) (None, 15, 15, 64) 18432 average_pooling2d_3[0][0]
__________________________________________________________________________________________________
batch_normalization_20 (BatchNo (None, 15, 15, 64) 192 conv2d_20[0][0]
__________________________________________________________________________________________________
batch_normalization_22 (BatchNo (None, 15, 15, 64) 192 conv2d_22[0][0]
__________________________________________________________________________________________________
batch_normalization_25 (BatchNo (None, 15, 15, 96) 288 conv2d_25[0][0]
__________________________________________________________________________________________________
batch_normalization_26 (BatchNo (None, 15, 15, 64) 192 conv2d_26[0][0]
__________________________________________________________________________________________________
activation_20 (Activation) (None, 15, 15, 64) 0 batch_normalization_20[0][0]
__________________________________________________________________________________________________
activation_22 (Activation) (None, 15, 15, 64) 0 batch_normalization_22[0][0]
__________________________________________________________________________________________________
activation_25 (Activation) (None, 15, 15, 96) 0 batch_normalization_25[0][0]
__________________________________________________________________________________________________
activation_26 (Activation) (None, 15, 15, 64) 0 batch_normalization_26[0][0]
__________________________________________________________________________________________________
mixed2 (Concatenate) (None, 15, 15, 288) 0 activation_20[0][0]
activation_22[0][0]
activation_25[0][0]
activation_26[0][0]
__________________________________________________________________________________________________
conv2d_28 (Conv2D) (None, 15, 15, 64) 18432 mixed2[0][0]
__________________________________________________________________________________________________
batch_normalization_28 (BatchNo (None, 15, 15, 64) 192 conv2d_28[0][0]
__________________________________________________________________________________________________
activation_28 (Activation) (None, 15, 15, 64) 0 batch_normalization_28[0][0]
__________________________________________________________________________________________________
conv2d_29 (Conv2D) (None, 15, 15, 96) 55296 activation_28[0][0]
__________________________________________________________________________________________________
batch_normalization_29 (BatchNo (None, 15, 15, 96) 288 conv2d_29[0][0]
__________________________________________________________________________________________________
activation_29 (Activation) (None, 15, 15, 96) 0 batch_normalization_29[0][0]
__________________________________________________________________________________________________
conv2d_27 (Conv2D) (None, 7, 7, 384) 995328 mixed2[0][0]
__________________________________________________________________________________________________
conv2d_30 (Conv2D) (None, 7, 7, 96) 82944 activation_29[0][0]
__________________________________________________________________________________________________
batch_normalization_27 (BatchNo (None, 7, 7, 384) 1152 conv2d_27[0][0]
__________________________________________________________________________________________________
batch_normalization_30 (BatchNo (None, 7, 7, 96) 288 conv2d_30[0][0]
__________________________________________________________________________________________________
activation_27 (Activation) (None, 7, 7, 384) 0 batch_normalization_27[0][0]
__________________________________________________________________________________________________
activation_30 (Activation) (None, 7, 7, 96) 0 batch_normalization_30[0][0]
__________________________________________________________________________________________________
max_pooling2d_3 (MaxPooling2D) (None, 7, 7, 288) 0 mixed2[0][0]
__________________________________________________________________________________________________
mixed3 (Concatenate) (None, 7, 7, 768) 0 activation_27[0][0]
activation_30[0][0]
max_pooling2d_3[0][0]
__________________________________________________________________________________________________
conv2d_35 (Conv2D) (None, 7, 7, 128) 98304 mixed3[0][0]
__________________________________________________________________________________________________
batch_normalization_35 (BatchNo (None, 7, 7, 128) 384 conv2d_35[0][0]
__________________________________________________________________________________________________
activation_35 (Activation) (None, 7, 7, 128) 0 batch_normalization_35[0][0]
__________________________________________________________________________________________________
conv2d_36 (Conv2D) (None, 7, 7, 128) 114688 activation_35[0][0]
__________________________________________________________________________________________________
batch_normalization_36 (BatchNo (None, 7, 7, 128) 384 conv2d_36[0][0]
__________________________________________________________________________________________________
activation_36 (Activation) (None, 7, 7, 128) 0 batch_normalization_36[0][0]
__________________________________________________________________________________________________
conv2d_32 (Conv2D) (None, 7, 7, 128) 98304 mixed3[0][0]
__________________________________________________________________________________________________
conv2d_37 (Conv2D) (None, 7, 7, 128) 114688 activation_36[0][0]
__________________________________________________________________________________________________
batch_normalization_32 (BatchNo (None, 7, 7, 128) 384 conv2d_32[0][0]
__________________________________________________________________________________________________
batch_normalization_37 (BatchNo (None, 7, 7, 128) 384 conv2d_37[0][0]
__________________________________________________________________________________________________
activation_32 (Activation) (None, 7, 7, 128) 0 batch_normalization_32[0][0]
__________________________________________________________________________________________________
activation_37 (Activation) (None, 7, 7, 128) 0 batch_normalization_37[0][0]
__________________________________________________________________________________________________
conv2d_33 (Conv2D) (None, 7, 7, 128) 114688 activation_32[0][0]
__________________________________________________________________________________________________
conv2d_38 (Conv2D) (None, 7, 7, 128) 114688 activation_37[0][0]
__________________________________________________________________________________________________
batch_normalization_33 (BatchNo (None, 7, 7, 128) 384 conv2d_33[0][0]
__________________________________________________________________________________________________
batch_normalization_38 (BatchNo (None, 7, 7, 128) 384 conv2d_38[0][0]
__________________________________________________________________________________________________
activation_33 (Activation) (None, 7, 7, 128) 0 batch_normalization_33[0][0]
__________________________________________________________________________________________________
activation_38 (Activation) (None, 7, 7, 128) 0 batch_normalization_38[0][0]
__________________________________________________________________________________________________
average_pooling2d_4 (AveragePoo (None, 7, 7, 768) 0 mixed3[0][0]
__________________________________________________________________________________________________
conv2d_31 (Conv2D) (None, 7, 7, 192) 147456 mixed3[0][0]
__________________________________________________________________________________________________
conv2d_34 (Conv2D) (None, 7, 7, 192) 172032 activation_33[0][0]
__________________________________________________________________________________________________
conv2d_39 (Conv2D) (None, 7, 7, 192) 172032 activation_38[0][0]
__________________________________________________________________________________________________
conv2d_40 (Conv2D) (None, 7, 7, 192) 147456 average_pooling2d_4[0][0]
__________________________________________________________________________________________________
batch_normalization_31 (BatchNo (None, 7, 7, 192) 576 conv2d_31[0][0]
__________________________________________________________________________________________________
batch_normalization_34 (BatchNo (None, 7, 7, 192) 576 conv2d_34[0][0]
__________________________________________________________________________________________________
batch_normalization_39 (BatchNo (None, 7, 7, 192) 576 conv2d_39[0][0]
__________________________________________________________________________________________________
batch_normalization_40 (BatchNo (None, 7, 7, 192) 576 conv2d_40[0][0]
__________________________________________________________________________________________________
activation_31 (Activation) (None, 7, 7, 192) 0 batch_normalization_31[0][0]
__________________________________________________________________________________________________
activation_34 (Activation) (None, 7, 7, 192) 0 batch_normalization_34[0][0]
__________________________________________________________________________________________________
activation_39 (Activation) (None, 7, 7, 192) 0 batch_normalization_39[0][0]
__________________________________________________________________________________________________
activation_40 (Activation) (None, 7, 7, 192) 0 batch_normalization_40[0][0]
__________________________________________________________________________________________________
mixed4 (Concatenate) (None, 7, 7, 768) 0 activation_31[0][0]
activation_34[0][0]
activation_39[0][0]
activation_40[0][0]
__________________________________________________________________________________________________
conv2d_45 (Conv2D) (None, 7, 7, 160) 122880 mixed4[0][0]
__________________________________________________________________________________________________
batch_normalization_45 (BatchNo (None, 7, 7, 160) 480 conv2d_45[0][0]
__________________________________________________________________________________________________
activation_45 (Activation) (None, 7, 7, 160) 0 batch_normalization_45[0][0]
__________________________________________________________________________________________________
conv2d_46 (Conv2D) (None, 7, 7, 160) 179200 activation_45[0][0]
__________________________________________________________________________________________________
batch_normalization_46 (BatchNo (None, 7, 7, 160) 480 conv2d_46[0][0]
__________________________________________________________________________________________________
activation_46 (Activation) (None, 7, 7, 160) 0 batch_normalization_46[0][0]
__________________________________________________________________________________________________
conv2d_42 (Conv2D) (None, 7, 7, 160) 122880 mixed4[0][0]
__________________________________________________________________________________________________
conv2d_47 (Conv2D) (None, 7, 7, 160) 179200 activation_46[0][0]
__________________________________________________________________________________________________
batch_normalization_42 (BatchNo (None, 7, 7, 160) 480 conv2d_42[0][0]
__________________________________________________________________________________________________
batch_normalization_47 (BatchNo (None, 7, 7, 160) 480 conv2d_47[0][0]
__________________________________________________________________________________________________
activation_42 (Activation) (None, 7, 7, 160) 0 batch_normalization_42[0][0]
__________________________________________________________________________________________________
activation_47 (Activation) (None, 7, 7, 160) 0 batch_normalization_47[0][0]
__________________________________________________________________________________________________
conv2d_43 (Conv2D) (None, 7, 7, 160) 179200 activation_42[0][0]
__________________________________________________________________________________________________
conv2d_48 (Conv2D) (None, 7, 7, 160) 179200 activation_47[0][0]
__________________________________________________________________________________________________
batch_normalization_43 (BatchNo (None, 7, 7, 160) 480 conv2d_43[0][0]
__________________________________________________________________________________________________
batch_normalization_48 (BatchNo (None, 7, 7, 160) 480 conv2d_48[0][0]
__________________________________________________________________________________________________
activation_43 (Activation) (None, 7, 7, 160) 0 batch_normalization_43[0][0]
__________________________________________________________________________________________________
activation_48 (Activation) (None, 7, 7, 160) 0 batch_normalization_48[0][0]
__________________________________________________________________________________________________
average_pooling2d_5 (AveragePoo (None, 7, 7, 768) 0 mixed4[0][0]
__________________________________________________________________________________________________
conv2d_41 (Conv2D) (None, 7, 7, 192) 147456 mixed4[0][0]
__________________________________________________________________________________________________
conv2d_44 (Conv2D) (None, 7, 7, 192) 215040 activation_43[0][0]
__________________________________________________________________________________________________
conv2d_49 (Conv2D) (None, 7, 7, 192) 215040 activation_48[0][0]
__________________________________________________________________________________________________
conv2d_50 (Conv2D) (None, 7, 7, 192) 147456 average_pooling2d_5[0][0]
__________________________________________________________________________________________________
batch_normalization_41 (BatchNo (None, 7, 7, 192) 576 conv2d_41[0][0]
__________________________________________________________________________________________________
batch_normalization_44 (BatchNo (None, 7, 7, 192) 576 conv2d_44[0][0]
__________________________________________________________________________________________________
batch_normalization_49 (BatchNo (None, 7, 7, 192) 576 conv2d_49[0][0]
__________________________________________________________________________________________________
batch_normalization_50 (BatchNo (None, 7, 7, 192) 576 conv2d_50[0][0]
__________________________________________________________________________________________________
activation_41 (Activation) (None, 7, 7, 192) 0 batch_normalization_41[0][0]
__________________________________________________________________________________________________
activation_44 (Activation) (None, 7, 7, 192) 0 batch_normalization_44[0][0]
__________________________________________________________________________________________________
activation_49 (Activation) (None, 7, 7, 192) 0 batch_normalization_49[0][0]
__________________________________________________________________________________________________
activation_50 (Activation) (None, 7, 7, 192) 0 batch_normalization_50[0][0]
__________________________________________________________________________________________________
mixed5 (Concatenate) (None, 7, 7, 768) 0 activation_41[0][0]
activation_44[0][0]
activation_49[0][0]
activation_50[0][0]
__________________________________________________________________________________________________
conv2d_55 (Conv2D) (None, 7, 7, 160) 122880 mixed5[0][0]
__________________________________________________________________________________________________
batch_normalization_55 (BatchNo (None, 7, 7, 160) 480 conv2d_55[0][0]
__________________________________________________________________________________________________
activation_55 (Activation) (None, 7, 7, 160) 0 batch_normalization_55[0][0]
__________________________________________________________________________________________________
conv2d_56 (Conv2D) (None, 7, 7, 160) 179200 activation_55[0][0]
__________________________________________________________________________________________________
batch_normalization_56 (BatchNo (None, 7, 7, 160) 480 conv2d_56[0][0]
__________________________________________________________________________________________________
activation_56 (Activation) (None, 7, 7, 160) 0 batch_normalization_56[0][0]
__________________________________________________________________________________________________
conv2d_52 (Conv2D) (None, 7, 7, 160) 122880 mixed5[0][0]
__________________________________________________________________________________________________
conv2d_57 (Conv2D) (None, 7, 7, 160) 179200 activation_56[0][0]
__________________________________________________________________________________________________
batch_normalization_52 (BatchNo (None, 7, 7, 160) 480 conv2d_52[0][0]
__________________________________________________________________________________________________
batch_normalization_57 (BatchNo (None, 7, 7, 160) 480 conv2d_57[0][0]
__________________________________________________________________________________________________
activation_52 (Activation) (None, 7, 7, 160) 0 batch_normalization_52[0][0]
__________________________________________________________________________________________________
activation_57 (Activation) (None, 7, 7, 160) 0 batch_normalization_57[0][0]
__________________________________________________________________________________________________
conv2d_53 (Conv2D) (None, 7, 7, 160) 179200 activation_52[0][0]
__________________________________________________________________________________________________
conv2d_58 (Conv2D) (None, 7, 7, 160) 179200 activation_57[0][0]
__________________________________________________________________________________________________
batch_normalization_53 (BatchNo (None, 7, 7, 160) 480 conv2d_53[0][0]
__________________________________________________________________________________________________
batch_normalization_58 (BatchNo (None, 7, 7, 160) 480 conv2d_58[0][0]
__________________________________________________________________________________________________
activation_53 (Activation) (None, 7, 7, 160) 0 batch_normalization_53[0][0]
__________________________________________________________________________________________________
activation_58 (Activation) (None, 7, 7, 160) 0 batch_normalization_58[0][0]
__________________________________________________________________________________________________
average_pooling2d_6 (AveragePoo (None, 7, 7, 768) 0 mixed5[0][0]
__________________________________________________________________________________________________
conv2d_51 (Conv2D) (None, 7, 7, 192) 147456 mixed5[0][0]
__________________________________________________________________________________________________
conv2d_54 (Conv2D) (None, 7, 7, 192) 215040 activation_53[0][0]
__________________________________________________________________________________________________
conv2d_59 (Conv2D) (None, 7, 7, 192) 215040 activation_58[0][0]
__________________________________________________________________________________________________
conv2d_60 (Conv2D) (None, 7, 7, 192) 147456 average_pooling2d_6[0][0]
__________________________________________________________________________________________________
batch_normalization_51 (BatchNo (None, 7, 7, 192) 576 conv2d_51[0][0]
__________________________________________________________________________________________________
batch_normalization_54 (BatchNo (None, 7, 7, 192) 576 conv2d_54[0][0]
__________________________________________________________________________________________________
batch_normalization_59 (BatchNo (None, 7, 7, 192) 576 conv2d_59[0][0]
__________________________________________________________________________________________________
batch_normalization_60 (BatchNo (None, 7, 7, 192) 576 conv2d_60[0][0]
__________________________________________________________________________________________________
activation_51 (Activation) (None, 7, 7, 192) 0 batch_normalization_51[0][0]
__________________________________________________________________________________________________
activation_54 (Activation) (None, 7, 7, 192) 0 batch_normalization_54[0][0]
__________________________________________________________________________________________________
activation_59 (Activation) (None, 7, 7, 192) 0 batch_normalization_59[0][0]
__________________________________________________________________________________________________
activation_60 (Activation) (None, 7, 7, 192) 0 batch_normalization_60[0][0]
__________________________________________________________________________________________________
mixed6 (Concatenate) (None, 7, 7, 768) 0 activation_51[0][0]
activation_54[0][0]
activation_59[0][0]
activation_60[0][0]
__________________________________________________________________________________________________
conv2d_65 (Conv2D) (None, 7, 7, 192) 147456 mixed6[0][0]
__________________________________________________________________________________________________
batch_normalization_65 (BatchNo (None, 7, 7, 192) 576 conv2d_65[0][0]
__________________________________________________________________________________________________
activation_65 (Activation) (None, 7, 7, 192) 0 batch_normalization_65[0][0]
__________________________________________________________________________________________________
conv2d_66 (Conv2D) (None, 7, 7, 192) 258048 activation_65[0][0]
__________________________________________________________________________________________________
batch_normalization_66 (BatchNo (None, 7, 7, 192) 576 conv2d_66[0][0]
__________________________________________________________________________________________________
activation_66 (Activation) (None, 7, 7, 192) 0 batch_normalization_66[0][0]
__________________________________________________________________________________________________
conv2d_62 (Conv2D) (None, 7, 7, 192) 147456 mixed6[0][0]
__________________________________________________________________________________________________
conv2d_67 (Conv2D) (None, 7, 7, 192) 258048 activation_66[0][0]
__________________________________________________________________________________________________
batch_normalization_62 (BatchNo (None, 7, 7, 192) 576 conv2d_62[0][0]
__________________________________________________________________________________________________
batch_normalization_67 (BatchNo (None, 7, 7, 192) 576 conv2d_67[0][0]
__________________________________________________________________________________________________
activation_62 (Activation) (None, 7, 7, 192) 0 batch_normalization_62[0][0]
__________________________________________________________________________________________________
activation_67 (Activation) (None, 7, 7, 192) 0 batch_normalization_67[0][0]
__________________________________________________________________________________________________
conv2d_63 (Conv2D) (None, 7, 7, 192) 258048 activation_62[0][0]
__________________________________________________________________________________________________
conv2d_68 (Conv2D) (None, 7, 7, 192) 258048 activation_67[0][0]
__________________________________________________________________________________________________
batch_normalization_63 (BatchNo (None, 7, 7, 192) 576 conv2d_63[0][0]
__________________________________________________________________________________________________
batch_normalization_68 (BatchNo (None, 7, 7, 192) 576 conv2d_68[0][0]
__________________________________________________________________________________________________
activation_63 (Activation) (None, 7, 7, 192) 0 batch_normalization_63[0][0]
__________________________________________________________________________________________________
activation_68 (Activation) (None, 7, 7, 192) 0 batch_normalization_68[0][0]
__________________________________________________________________________________________________
average_pooling2d_7 (AveragePoo (None, 7, 7, 768) 0 mixed6[0][0]
__________________________________________________________________________________________________
conv2d_61 (Conv2D) (None, 7, 7, 192) 147456 mixed6[0][0]
__________________________________________________________________________________________________
conv2d_64 (Conv2D) (None, 7, 7, 192) 258048 activation_63[0][0]
__________________________________________________________________________________________________
conv2d_69 (Conv2D) (None, 7, 7, 192) 258048 activation_68[0][0]
__________________________________________________________________________________________________
conv2d_70 (Conv2D) (None, 7, 7, 192) 147456 average_pooling2d_7[0][0]
__________________________________________________________________________________________________
batch_normalization_61 (BatchNo (None, 7, 7, 192) 576 conv2d_61[0][0]
__________________________________________________________________________________________________
batch_normalization_64 (BatchNo (None, 7, 7, 192) 576 conv2d_64[0][0]
__________________________________________________________________________________________________
batch_normalization_69 (BatchNo (None, 7, 7, 192) 576 conv2d_69[0][0]
__________________________________________________________________________________________________
batch_normalization_70 (BatchNo (None, 7, 7, 192) 576 conv2d_70[0][0]
__________________________________________________________________________________________________
activation_61 (Activation) (None, 7, 7, 192) 0 batch_normalization_61[0][0]
__________________________________________________________________________________________________
activation_64 (Activation) (None, 7, 7, 192) 0 batch_normalization_64[0][0]
__________________________________________________________________________________________________
activation_69 (Activation) (None, 7, 7, 192) 0 batch_normalization_69[0][0]
__________________________________________________________________________________________________
activation_70 (Activation) (None, 7, 7, 192) 0 batch_normalization_70[0][0]
__________________________________________________________________________________________________
mixed7 (Concatenate) (None, 7, 7, 768) 0 activation_61[0][0]
activation_64[0][0]
activation_69[0][0]
activation_70[0][0]
__________________________________________________________________________________________________
conv2d_73 (Conv2D) (None, 7, 7, 192) 147456 mixed7[0][0]
__________________________________________________________________________________________________
batch_normalization_73 (BatchNo (None, 7, 7, 192) 576 conv2d_73[0][0]
__________________________________________________________________________________________________
activation_73 (Activation) (None, 7, 7, 192) 0 batch_normalization_73[0][0]
__________________________________________________________________________________________________
conv2d_74 (Conv2D) (None, 7, 7, 192) 258048 activation_73[0][0]
__________________________________________________________________________________________________
batch_normalization_74 (BatchNo (None, 7, 7, 192) 576 conv2d_74[0][0]
__________________________________________________________________________________________________
activation_74 (Activation) (None, 7, 7, 192) 0 batch_normalization_74[0][0]
__________________________________________________________________________________________________
conv2d_71 (Conv2D) (None, 7, 7, 192) 147456 mixed7[0][0]
__________________________________________________________________________________________________
conv2d_75 (Conv2D) (None, 7, 7, 192) 258048 activation_74[0][0]
__________________________________________________________________________________________________
batch_normalization_71 (BatchNo (None, 7, 7, 192) 576 conv2d_71[0][0]
__________________________________________________________________________________________________
batch_normalization_75 (BatchNo (None, 7, 7, 192) 576 conv2d_75[0][0]
__________________________________________________________________________________________________
activation_71 (Activation) (None, 7, 7, 192) 0 batch_normalization_71[0][0]
__________________________________________________________________________________________________
activation_75 (Activation) (None, 7, 7, 192) 0 batch_normalization_75[0][0]
__________________________________________________________________________________________________
conv2d_72 (Conv2D) (None, 3, 3, 320) 552960 activation_71[0][0]
__________________________________________________________________________________________________
conv2d_76 (Conv2D) (None, 3, 3, 192) 331776 activation_75[0][0]
__________________________________________________________________________________________________
batch_normalization_72 (BatchNo (None, 3, 3, 320) 960 conv2d_72[0][0]
__________________________________________________________________________________________________
batch_normalization_76 (BatchNo (None, 3, 3, 192) 576 conv2d_76[0][0]
__________________________________________________________________________________________________
activation_72 (Activation) (None, 3, 3, 320) 0 batch_normalization_72[0][0]
__________________________________________________________________________________________________
activation_76 (Activation) (None, 3, 3, 192) 0 batch_normalization_76[0][0]
__________________________________________________________________________________________________
max_pooling2d_4 (MaxPooling2D) (None, 3, 3, 768) 0 mixed7[0][0]
__________________________________________________________________________________________________
mixed8 (Concatenate) (None, 3, 3, 1280) 0 activation_72[0][0]
activation_76[0][0]
max_pooling2d_4[0][0]
__________________________________________________________________________________________________
conv2d_81 (Conv2D) (None, 3, 3, 448) 573440 mixed8[0][0]
__________________________________________________________________________________________________
batch_normalization_81 (BatchNo (None, 3, 3, 448) 1344 conv2d_81[0][0]
__________________________________________________________________________________________________
activation_81 (Activation) (None, 3, 3, 448) 0 batch_normalization_81[0][0]
__________________________________________________________________________________________________
conv2d_78 (Conv2D) (None, 3, 3, 384) 491520 mixed8[0][0]
__________________________________________________________________________________________________
conv2d_82 (Conv2D) (None, 3, 3, 384) 1548288 activation_81[0][0]
__________________________________________________________________________________________________
batch_normalization_78 (BatchNo (None, 3, 3, 384) 1152 conv2d_78[0][0]
__________________________________________________________________________________________________
batch_normalization_82 (BatchNo (None, 3, 3, 384) 1152 conv2d_82[0][0]
__________________________________________________________________________________________________
activation_78 (Activation) (None, 3, 3, 384) 0 batch_normalization_78[0][0]
__________________________________________________________________________________________________
activation_82 (Activation) (None, 3, 3, 384) 0 batch_normalization_82[0][0]
__________________________________________________________________________________________________
conv2d_79 (Conv2D) (None, 3, 3, 384) 442368 activation_78[0][0]
__________________________________________________________________________________________________
conv2d_80 (Conv2D) (None, 3, 3, 384) 442368 activation_78[0][0]
__________________________________________________________________________________________________
conv2d_83 (Conv2D) (None, 3, 3, 384) 442368 activation_82[0][0]
__________________________________________________________________________________________________
conv2d_84 (Conv2D) (None, 3, 3, 384) 442368 activation_82[0][0]
__________________________________________________________________________________________________
average_pooling2d_8 (AveragePoo (None, 3, 3, 1280) 0 mixed8[0][0]
__________________________________________________________________________________________________
conv2d_77 (Conv2D) (None, 3, 3, 320) 409600 mixed8[0][0]
__________________________________________________________________________________________________
batch_normalization_79 (BatchNo (None, 3, 3, 384) 1152 conv2d_79[0][0]
__________________________________________________________________________________________________
batch_normalization_80 (BatchNo (None, 3, 3, 384) 1152 conv2d_80[0][0]
__________________________________________________________________________________________________
batch_normalization_83 (BatchNo (None, 3, 3, 384) 1152 conv2d_83[0][0]
__________________________________________________________________________________________________
batch_normalization_84 (BatchNo (None, 3, 3, 384) 1152 conv2d_84[0][0]
__________________________________________________________________________________________________
conv2d_85 (Conv2D) (None, 3, 3, 192) 245760 average_pooling2d_8[0][0]
__________________________________________________________________________________________________
batch_normalization_77 (BatchNo (None, 3, 3, 320) 960 conv2d_77[0][0]
__________________________________________________________________________________________________
activation_79 (Activation) (None, 3, 3, 384) 0 batch_normalization_79[0][0]
__________________________________________________________________________________________________
activation_80 (Activation) (None, 3, 3, 384) 0 batch_normalization_80[0][0]
__________________________________________________________________________________________________
activation_83 (Activation) (None, 3, 3, 384) 0 batch_normalization_83[0][0]
__________________________________________________________________________________________________
activation_84 (Activation) (None, 3, 3, 384) 0 batch_normalization_84[0][0]
__________________________________________________________________________________________________
batch_normalization_85 (BatchNo (None, 3, 3, 192) 576 conv2d_85[0][0]
__________________________________________________________________________________________________
activation_77 (Activation) (None, 3, 3, 320) 0 batch_normalization_77[0][0]
__________________________________________________________________________________________________
mixed9_0 (Concatenate) (None, 3, 3, 768) 0 activation_79[0][0]
activation_80[0][0]
__________________________________________________________________________________________________
concatenate_1 (Concatenate) (None, 3, 3, 768) 0 activation_83[0][0]
activation_84[0][0]
__________________________________________________________________________________________________
activation_85 (Activation) (None, 3, 3, 192) 0 batch_normalization_85[0][0]
__________________________________________________________________________________________________
mixed9 (Concatenate) (None, 3, 3, 2048) 0 activation_77[0][0]
mixed9_0[0][0]
concatenate_1[0][0]
activation_85[0][0]
__________________________________________________________________________________________________
conv2d_90 (Conv2D) (None, 3, 3, 448) 917504 mixed9[0][0]
__________________________________________________________________________________________________
batch_normalization_90 (BatchNo (None, 3, 3, 448) 1344 conv2d_90[0][0]
__________________________________________________________________________________________________
activation_90 (Activation) (None, 3, 3, 448) 0 batch_normalization_90[0][0]
__________________________________________________________________________________________________
conv2d_87 (Conv2D) (None, 3, 3, 384) 786432 mixed9[0][0]
__________________________________________________________________________________________________
conv2d_91 (Conv2D) (None, 3, 3, 384) 1548288 activation_90[0][0]
__________________________________________________________________________________________________
batch_normalization_87 (BatchNo (None, 3, 3, 384) 1152 conv2d_87[0][0]
__________________________________________________________________________________________________
batch_normalization_91 (BatchNo (None, 3, 3, 384) 1152 conv2d_91[0][0]
__________________________________________________________________________________________________
activation_87 (Activation) (None, 3, 3, 384) 0 batch_normalization_87[0][0]
__________________________________________________________________________________________________
activation_91 (Activation) (None, 3, 3, 384) 0 batch_normalization_91[0][0]
__________________________________________________________________________________________________
conv2d_88 (Conv2D) (None, 3, 3, 384) 442368 activation_87[0][0]
__________________________________________________________________________________________________
conv2d_89 (Conv2D) (None, 3, 3, 384) 442368 activation_87[0][0]
__________________________________________________________________________________________________
conv2d_92 (Conv2D) (None, 3, 3, 384) 442368 activation_91[0][0]
__________________________________________________________________________________________________
conv2d_93 (Conv2D) (None, 3, 3, 384) 442368 activation_91[0][0]
__________________________________________________________________________________________________
average_pooling2d_9 (AveragePoo (None, 3, 3, 2048) 0 mixed9[0][0]
__________________________________________________________________________________________________
conv2d_86 (Conv2D) (None, 3, 3, 320) 655360 mixed9[0][0]
__________________________________________________________________________________________________
batch_normalization_88 (BatchNo (None, 3, 3, 384) 1152 conv2d_88[0][0]
__________________________________________________________________________________________________
batch_normalization_89 (BatchNo (None, 3, 3, 384) 1152 conv2d_89[0][0]
__________________________________________________________________________________________________
batch_normalization_92 (BatchNo (None, 3, 3, 384) 1152 conv2d_92[0][0]
__________________________________________________________________________________________________
batch_normalization_93 (BatchNo (None, 3, 3, 384) 1152 conv2d_93[0][0]
__________________________________________________________________________________________________
conv2d_94 (Conv2D) (None, 3, 3, 192) 393216 average_pooling2d_9[0][0]
__________________________________________________________________________________________________
batch_normalization_86 (BatchNo (None, 3, 3, 320) 960 conv2d_86[0][0]
__________________________________________________________________________________________________
activation_88 (Activation) (None, 3, 3, 384) 0 batch_normalization_88[0][0]
__________________________________________________________________________________________________
activation_89 (Activation) (None, 3, 3, 384) 0 batch_normalization_89[0][0]
__________________________________________________________________________________________________
activation_92 (Activation) (None, 3, 3, 384) 0 batch_normalization_92[0][0]
__________________________________________________________________________________________________
activation_93 (Activation) (None, 3, 3, 384) 0 batch_normalization_93[0][0]
__________________________________________________________________________________________________
batch_normalization_94 (BatchNo (None, 3, 3, 192) 576 conv2d_94[0][0]
__________________________________________________________________________________________________
activation_86 (Activation) (None, 3, 3, 320) 0 batch_normalization_86[0][0]
__________________________________________________________________________________________________
mixed9_1 (Concatenate) (None, 3, 3, 768) 0 activation_88[0][0]
activation_89[0][0]
__________________________________________________________________________________________________
concatenate_2 (Concatenate) (None, 3, 3, 768) 0 activation_92[0][0]
activation_93[0][0]
__________________________________________________________________________________________________
activation_94 (Activation) (None, 3, 3, 192) 0 batch_normalization_94[0][0]
__________________________________________________________________________________________________
mixed10 (Concatenate) (None, 3, 3, 2048) 0 activation_86[0][0]
mixed9_1[0][0]
concatenate_2[0][0]
activation_94[0][0]
==================================================================================================
Total params: 21,802,784
Trainable params: 0
Non-trainable params: 21,802,784
__________________________________________________________________________________________________
###Markdown
In a normal Inception network, you would see from the model summary that the last two layers were a global average pooling layer and a fully-connected "Dense" layer. However, since we set `include_top` to `False`, both of these get dropped. If you otherwise wanted to drop additional layers, you would use:

```inception.layers.pop()```

Note that `pop()` works from the end of the model backwards. It's important to note two things here:

1. How many layers you drop is up to you, typically. We dropped the final two already by setting `include_top` to False in the original loading of the model, but you could instead just run `pop()` twice to achieve similar results. (*Note:* Keras requires us to set `include_top` to False in order to change the `input_shape`.) Additional layers could be dropped by additional calls to `pop()`.
2. If you make a mistake with `pop()`, you'll want to reload the model. If you use it multiple times, the model will continue to drop more and more layers, so you may need to check `model.summary()` again to check your work.

Adding new layers

Now, you can start to add your own layers. While we've used Keras's `Sequential` model before for simplicity, we'll actually use the [Model API](https://keras.io/models/model/) this time. This functions a little differently: instead of using `model.add()`, you explicitly tell the model which previous layer to attach to the current layer. This is useful if you want to use more advanced concepts like [skip layers](https://en.wikipedia.org/wiki/Residual_neural_network), for instance (which were used heavily in ResNet).

For example, if you had a previous layer named `inp`:

```x = Dropout(0.2)(inp)```

is how you would attach a new dropout layer `x`, with its input coming from the layer named `inp`.

We are going to use the [CIFAR-10 dataset](https://www.cs.toronto.edu/~kriz/cifar.html), which consists of 60,000 32x32 images of 10 classes. We need to use Keras's `Input` function to do so, and then we want to re-size the images up to the `input_size` we specified earlier (139x139).
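
Before we wire this up for Inception in the next cell, here is a minimal sketch of the attachment pattern in isolation (the layer sizes and names below are illustrative, not part of the lab):

```
from keras.layers import Input, Dense, Dropout
from keras.models import Model

# Each layer is called on the layer that feeds it (names are illustrative)
inp = Input(shape=(32,))                     # placeholder input
x = Dense(64, activation='relu')(inp)        # Dense attached to `inp`
x = Dropout(0.2)(x)                          # Dropout attached to the Dense layer
out = Dense(10, activation='softmax')(x)     # output head
tiny_model = Model(inputs=inp, outputs=out)  # wire input to output
```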
###Code
from keras.layers import Input, Lambda
import tensorflow as tf
# Makes the input placeholder layer 32x32x3 for CIFAR-10
cifar_input = Input(shape=(32,32,3))
# Re-sizes the input with Keras's Lambda layer & attaches it to cifar_input
resized_input = Lambda(lambda image: tf.image.resize_images(
image, (input_size, input_size)))(cifar_input)
# Feeds the re-sized input into Inception model
# You will need to update the model name if you changed it earlier!
inp = inception(resized_input)
# Imports fully-connected "Dense" layers & Global Average Pooling
from keras.layers import Dense, GlobalAveragePooling2D
## TODO: Setting `include_top` to False earlier also removed the
## GlobalAveragePooling2D layer, but we still want it.
## Add it here, and make sure to connect it to the end of Inception
x = GlobalAveragePooling2D()(inp)
## TODO: Create two new fully-connected layers using the Model API
## format discussed above. The first layer should use `x`
## as its input, along with ReLU activation. You can choose
## how many nodes it has, although 512 or less is a good idea.
## The second layer should take this first layer as input, and
## be named "predictions", with Softmax activation and
## 10 nodes, as we'll be using the CIFAR10 dataset.
x = Dense(512, activation='relu')(x)
predictions = Dense(10, activation='softmax')(x)
###Output
_____no_output_____
###Markdown
We're almost done with our new model! Now we just need to use the actual Model API to create the full model.
###Code
# Imports the Model API
from keras.models import Model
# Creates the model, assuming your final layer is named "predictions"
model = Model(inputs=cifar_input, outputs=predictions)
# Compile the model
model.compile(optimizer='Adam', loss='categorical_crossentropy', metrics=['accuracy'])
# Check the summary of this new model to confirm the architecture
model.summary()
###Output
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_2 (InputLayer) (None, 32, 32, 3) 0
_________________________________________________________________
lambda_1 (Lambda) (None, 139, 139, 3) 0
_________________________________________________________________
inception_v3 (Model) (None, 3, 3, 2048) 21802784
_________________________________________________________________
global_average_pooling2d_1 ( (None, 2048) 0
_________________________________________________________________
dense_1 (Dense) (None, 512) 1049088
_________________________________________________________________
dense_2 (Dense) (None, 10) 5130
=================================================================
Total params: 22,857,002
Trainable params: 1,054,218
Non-trainable params: 21,802,784
_________________________________________________________________
###Markdown
Great job creating a new model architecture from Inception! Notice how this method of adding layers before InceptionV3 and appending to the end of it made InceptionV3 condense down into one line in the summary; if you use the Inception model's normal input (which you could gather from `inception.layers.input`), it would instead show all the layers like before.

Most of the rest of the code in the notebook just goes toward loading our data, pre-processing it, and starting our training in Keras, although there's one other good point to make here - Keras callbacks.

Keras Callbacks

Keras [callbacks](https://keras.io/callbacks/) allow you to gather and store additional information during training, such as the best model, or even stop training early if the validation accuracy has stopped improving. These methods can help to avoid overfitting, or avoid other issues.

There are two key callbacks to mention here, `ModelCheckpoint` and `EarlyStopping`. As the names may suggest, model checkpoint saves down the best model so far based on a given metric, while early stopping will end training before the specified number of epochs if the chosen metric no longer improves after a given amount of time.

To set these callbacks, you could do the following:

```checkpoint = ModelCheckpoint(filepath=save_path, monitor='val_loss', save_best_only=True)```

This would save a model to a specified `save_path`, based on validation loss, and only save down the best models. If you set `save_best_only` to `False`, every single epoch will save down another version of the model.

```stopper = EarlyStopping(monitor='val_acc', min_delta=0.0003, patience=5)```

This will monitor validation accuracy, and if it has not improved by more than 0.0003 over the previous best validation accuracy for 5 epochs, training will end early.

You still need to actually feed these callbacks into `fit()` when you train the model (along with all other relevant data to feed into `fit`):

```model.fit(callbacks=[checkpoint, stopper])```

GPU time

The rest of the notebook will give you the code for training, so you can turn on the GPU at this point - but first, **make sure to save your jupyter notebook**. Once the GPU is turned on, it will load whatever your last notebook checkpoint is. While we suggest reading through the code below to make sure you understand it, you can otherwise go ahead and select *Cell > Run All* (or *Kernel > Restart & Run All* if already using GPU) to run through all cells in the notebook.
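
Before running the cells below, here is a hedged sketch tying the callback snippets above together with `fit()` (the file path, epoch count, and data names are illustrative):

```
from keras.callbacks import ModelCheckpoint, EarlyStopping

# Save the best model so far (by validation loss) and stop early if
# validation accuracy stalls for 5 epochs
checkpoint = ModelCheckpoint(filepath='model.h5', monitor='val_loss', save_best_only=True)
stopper = EarlyStopping(monitor='val_acc', min_delta=0.0003, patience=5)

# model.fit(X_train, y_train, validation_data=(X_val, y_val),
#           epochs=20, callbacks=[checkpoint, stopper])
```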
###Code
from sklearn.utils import shuffle
from sklearn.preprocessing import LabelBinarizer
from keras.datasets import cifar10
(X_train, y_train), (X_val, y_val) = cifar10.load_data()
# One-hot encode the labels
label_binarizer = LabelBinarizer()
y_one_hot_train = label_binarizer.fit_transform(y_train)
y_one_hot_val = label_binarizer.transform(y_val)  # re-use the binarizer already fit on the training labels
# Shuffle the training & validation data
X_train, y_one_hot_train = shuffle(X_train, y_one_hot_train)
X_val, y_one_hot_val = shuffle(X_val, y_one_hot_val)
# We are only going to use the first 10,000 images for speed reasons
# And only the first 2,000 images from the test set
X_train = X_train[:10000]
y_one_hot_train = y_one_hot_train[:10000]
X_val = X_val[:2000]
y_one_hot_val = y_one_hot_val[:2000]
###Output
Downloading data from https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz
170500096/170498071 [==============================] - 2s 0us/step
###Markdown
You can check out Keras's [ImageDataGenerator documentation](https://faroit.github.io/keras-docs/2.0.9/preprocessing/image/) for more information on the below - you can also add additional image augmentation through this function, although we are skipping that step here so you can potentially explore it in the upcoming project.
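
If you do want to experiment with augmentation later, here is a hedged sketch of layering a few common options on top of the ImageNet pre-processing (the specific values are illustrative, not tuned):

```
from keras.preprocessing.image import ImageDataGenerator
from keras.applications.inception_v3 import preprocess_input

aug_datagen = ImageDataGenerator(
    preprocessing_function=preprocess_input,
    rotation_range=15,        # random rotations up to 15 degrees
    width_shift_range=0.1,    # horizontal shifts up to 10% of image width
    height_shift_range=0.1,   # vertical shifts up to 10% of image height
    horizontal_flip=True)     # random left-right flips
```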
###Code
# Use a generator to pre-process our images for ImageNet
from keras.preprocessing.image import ImageDataGenerator
from keras.applications.inception_v3 import preprocess_input
if preprocess_flag:
datagen = ImageDataGenerator(preprocessing_function=preprocess_input)
val_datagen = ImageDataGenerator(preprocessing_function=preprocess_input)
else:
datagen = ImageDataGenerator()
val_datagen = ImageDataGenerator()
# Train the model
batch_size = 32
epochs = 5
# Note: we aren't using callbacks here since we are only using 5 epochs to conserve GPU time
model.fit_generator(datagen.flow(X_train, y_one_hot_train, batch_size=batch_size),
steps_per_epoch=len(X_train)/batch_size, epochs=epochs, verbose=1,
validation_data=val_datagen.flow(X_val, y_one_hot_val, batch_size=batch_size),
validation_steps=len(X_val)/batch_size)
###Output
Epoch 1/5
312/312 [============================>.] - ETA: 0s - loss: 0.9493 - acc: 0.6760
###Markdown
Lab: Transfer Learning

Welcome to the lab on Transfer Learning! Here, you'll get a chance to try out training a network with ImageNet pre-trained weights as a base, but with additional network layers of your own added on. You'll also get to see the difference between using frozen weights and training on all layers.

GPU usage

In our previous examples in this lesson, we've avoided using GPU, but this time around you'll have the option to enable it. You do not need it on to begin with, but make sure anytime you switch from non-GPU to GPU, or vice versa, that you save your notebook! If not, you'll likely be reverted to the previous checkpoint. We also suggest only using the GPU when performing the (mostly minor) training below - you'll want to conserve GPU hours for your Behavioral Cloning project coming up next!
###Code
# Set a couple flags for training - you can ignore these for now
freeze_flag = True # `True` to freeze layers, `False` for full training
weights_flag = 'imagenet' # 'imagenet' or None
preprocess_flag = True # Should be true for ImageNet pre-trained typically
# Loads in InceptionV3
from keras.applications.inception_v3 import InceptionV3
# We can use smaller than the default 299x299x3 input for InceptionV3
# which will speed up training. Keras v2.0.9 supports down to 139x139x3
input_size = 139
# Using Inception with ImageNet pre-trained weights. We are setting up our model here.
inception = InceptionV3(weights=weights_flag, include_top=False,
input_shape=(input_size,input_size,3))
"""incepton is equivalent to our model variable that we had been using till now"""
###Output
Using TensorFlow backend.
###Markdown
We'll use Inception V3 for this lab, although you can use the same techniques with any of the models in [Keras Applications](https://keras.io/applications/). Do note that certain models are only available in certain versions of Keras; this workspace uses Keras v2.0.9, for which you can see the available models [here](https://faroit.github.io/keras-docs/2.0.9/applications/).

In the above, we've set Inception to use an `input_shape` of 139x139x3 instead of the default 299x299x3. This will help us to speed up our training a bit later (and we'll actually be upsampling from smaller images, so we aren't losing data here). In order to do so, we also must set `include_top` to `False`, which means the final fully-connected layer with 1,000 nodes for each ImageNet class is dropped, as well as a Global Average Pooling layer.

Pre-trained with frozen weights

To start, we'll see how an ImageNet pre-trained model with all weights frozen in the InceptionV3 model performs. We will also drop the end layer and append new layers onto it, although you could do this in different ways (not drop the end and add new layers, drop more layers than we will here, etc.).

You can freeze layers by setting `layer.trainable` to False for a given `layer`. Within a `model`, you can get the list of layers with `model.layers`.
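
You don't have to freeze everything, either; as a variant you could try later, here is a minimal sketch of freezing all but the last few layers for light fine-tuning (the cutoff of 10 layers is illustrative):

```
# Freeze everything except the last 10 layers (cutoff is illustrative);
# the remaining layers keep their default trainable = True
for layer in inception.layers[:-10]:
    layer.trainable = False
```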
###Code
if freeze_flag:
    ## TODO: Iterate through the layers of the Inception model
    ## loaded above and set all of them to have trainable = False
    for layer in inception.layers:  # our model is `inception` here
layer.trainable = False
###Output
_____no_output_____
###Markdown
Dropping layers

You can drop layers from a model with `model.layers.pop()`. Before you do this, you should check out what the actual layers of the model are with Keras's `.summary()` function.
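
Besides `.summary()`, a quick sketch for printing just the last few layer names and output shapes (the slice of 5 is arbitrary):

```
# Inspect the final layers by name before deciding what to drop
for i, layer in enumerate(inception.layers[-5:]):
    print(i, layer.name, layer.output_shape)
```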
###Code
## TODO: Use the model summary function to see all layers in the
## loaded Inception model
inception.summary()
###Output
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_1 (InputLayer) (None, 139, 139, 3) 0
__________________________________________________________________________________________________
conv2d_1 (Conv2D) (None, 69, 69, 32) 864 input_1[0][0]
__________________________________________________________________________________________________
batch_normalization_1 (BatchNor (None, 69, 69, 32) 96 conv2d_1[0][0]
__________________________________________________________________________________________________
activation_1 (Activation) (None, 69, 69, 32) 0 batch_normalization_1[0][0]
__________________________________________________________________________________________________
conv2d_2 (Conv2D) (None, 67, 67, 32) 9216 activation_1[0][0]
__________________________________________________________________________________________________
batch_normalization_2 (BatchNor (None, 67, 67, 32) 96 conv2d_2[0][0]
__________________________________________________________________________________________________
activation_2 (Activation) (None, 67, 67, 32) 0 batch_normalization_2[0][0]
__________________________________________________________________________________________________
conv2d_3 (Conv2D) (None, 67, 67, 64) 18432 activation_2[0][0]
__________________________________________________________________________________________________
batch_normalization_3 (BatchNor (None, 67, 67, 64) 192 conv2d_3[0][0]
__________________________________________________________________________________________________
activation_3 (Activation) (None, 67, 67, 64) 0 batch_normalization_3[0][0]
__________________________________________________________________________________________________
max_pooling2d_1 (MaxPooling2D) (None, 33, 33, 64) 0 activation_3[0][0]
__________________________________________________________________________________________________
conv2d_4 (Conv2D) (None, 33, 33, 80) 5120 max_pooling2d_1[0][0]
__________________________________________________________________________________________________
batch_normalization_4 (BatchNor (None, 33, 33, 80) 240 conv2d_4[0][0]
__________________________________________________________________________________________________
activation_4 (Activation) (None, 33, 33, 80) 0 batch_normalization_4[0][0]
__________________________________________________________________________________________________
conv2d_5 (Conv2D) (None, 31, 31, 192) 138240 activation_4[0][0]
__________________________________________________________________________________________________
batch_normalization_5 (BatchNor (None, 31, 31, 192) 576 conv2d_5[0][0]
__________________________________________________________________________________________________
activation_5 (Activation) (None, 31, 31, 192) 0 batch_normalization_5[0][0]
__________________________________________________________________________________________________
max_pooling2d_2 (MaxPooling2D) (None, 15, 15, 192) 0 activation_5[0][0]
__________________________________________________________________________________________________
conv2d_9 (Conv2D) (None, 15, 15, 64) 12288 max_pooling2d_2[0][0]
__________________________________________________________________________________________________
batch_normalization_9 (BatchNor (None, 15, 15, 64) 192 conv2d_9[0][0]
__________________________________________________________________________________________________
activation_9 (Activation) (None, 15, 15, 64) 0 batch_normalization_9[0][0]
__________________________________________________________________________________________________
conv2d_7 (Conv2D) (None, 15, 15, 48) 9216 max_pooling2d_2[0][0]
__________________________________________________________________________________________________
conv2d_10 (Conv2D) (None, 15, 15, 96) 55296 activation_9[0][0]
__________________________________________________________________________________________________
batch_normalization_7 (BatchNor (None, 15, 15, 48) 144 conv2d_7[0][0]
__________________________________________________________________________________________________
batch_normalization_10 (BatchNo (None, 15, 15, 96) 288 conv2d_10[0][0]
__________________________________________________________________________________________________
activation_7 (Activation) (None, 15, 15, 48) 0 batch_normalization_7[0][0]
__________________________________________________________________________________________________
activation_10 (Activation) (None, 15, 15, 96) 0 batch_normalization_10[0][0]
__________________________________________________________________________________________________
average_pooling2d_1 (AveragePoo (None, 15, 15, 192) 0 max_pooling2d_2[0][0]
__________________________________________________________________________________________________
conv2d_6 (Conv2D) (None, 15, 15, 64) 12288 max_pooling2d_2[0][0]
__________________________________________________________________________________________________
conv2d_8 (Conv2D) (None, 15, 15, 64) 76800 activation_7[0][0]
__________________________________________________________________________________________________
conv2d_11 (Conv2D) (None, 15, 15, 96) 82944 activation_10[0][0]
__________________________________________________________________________________________________
conv2d_12 (Conv2D) (None, 15, 15, 32) 6144 average_pooling2d_1[0][0]
__________________________________________________________________________________________________
batch_normalization_6 (BatchNor (None, 15, 15, 64) 192 conv2d_6[0][0]
__________________________________________________________________________________________________
batch_normalization_8 (BatchNor (None, 15, 15, 64) 192 conv2d_8[0][0]
__________________________________________________________________________________________________
batch_normalization_11 (BatchNo (None, 15, 15, 96) 288 conv2d_11[0][0]
__________________________________________________________________________________________________
batch_normalization_12 (BatchNo (None, 15, 15, 32) 96 conv2d_12[0][0]
__________________________________________________________________________________________________
activation_6 (Activation) (None, 15, 15, 64) 0 batch_normalization_6[0][0]
__________________________________________________________________________________________________
activation_8 (Activation) (None, 15, 15, 64) 0 batch_normalization_8[0][0]
__________________________________________________________________________________________________
activation_11 (Activation) (None, 15, 15, 96) 0 batch_normalization_11[0][0]
__________________________________________________________________________________________________
activation_12 (Activation) (None, 15, 15, 32) 0 batch_normalization_12[0][0]
__________________________________________________________________________________________________
mixed0 (Concatenate) (None, 15, 15, 256) 0 activation_6[0][0]
activation_8[0][0]
activation_11[0][0]
activation_12[0][0]
__________________________________________________________________________________________________
conv2d_16 (Conv2D) (None, 15, 15, 64) 16384 mixed0[0][0]
__________________________________________________________________________________________________
batch_normalization_16 (BatchNo (None, 15, 15, 64) 192 conv2d_16[0][0]
__________________________________________________________________________________________________
activation_16 (Activation) (None, 15, 15, 64) 0 batch_normalization_16[0][0]
__________________________________________________________________________________________________
conv2d_14 (Conv2D) (None, 15, 15, 48) 12288 mixed0[0][0]
__________________________________________________________________________________________________
conv2d_17 (Conv2D) (None, 15, 15, 96) 55296 activation_16[0][0]
__________________________________________________________________________________________________
batch_normalization_14 (BatchNo (None, 15, 15, 48) 144 conv2d_14[0][0]
__________________________________________________________________________________________________
batch_normalization_17 (BatchNo (None, 15, 15, 96) 288 conv2d_17[0][0]
__________________________________________________________________________________________________
activation_14 (Activation) (None, 15, 15, 48) 0 batch_normalization_14[0][0]
__________________________________________________________________________________________________
activation_17 (Activation) (None, 15, 15, 96) 0 batch_normalization_17[0][0]
__________________________________________________________________________________________________
average_pooling2d_2 (AveragePoo (None, 15, 15, 256) 0 mixed0[0][0]
__________________________________________________________________________________________________
conv2d_13 (Conv2D) (None, 15, 15, 64) 16384 mixed0[0][0]
__________________________________________________________________________________________________
conv2d_15 (Conv2D) (None, 15, 15, 64) 76800 activation_14[0][0]
__________________________________________________________________________________________________
conv2d_18 (Conv2D) (None, 15, 15, 96) 82944 activation_17[0][0]
__________________________________________________________________________________________________
conv2d_19 (Conv2D) (None, 15, 15, 64) 16384 average_pooling2d_2[0][0]
__________________________________________________________________________________________________
batch_normalization_13 (BatchNo (None, 15, 15, 64) 192 conv2d_13[0][0]
__________________________________________________________________________________________________
batch_normalization_15 (BatchNo (None, 15, 15, 64) 192 conv2d_15[0][0]
__________________________________________________________________________________________________
batch_normalization_18 (BatchNo (None, 15, 15, 96) 288 conv2d_18[0][0]
__________________________________________________________________________________________________
batch_normalization_19 (BatchNo (None, 15, 15, 64) 192 conv2d_19[0][0]
__________________________________________________________________________________________________
activation_13 (Activation) (None, 15, 15, 64) 0 batch_normalization_13[0][0]
__________________________________________________________________________________________________
activation_15 (Activation) (None, 15, 15, 64) 0 batch_normalization_15[0][0]
__________________________________________________________________________________________________
activation_18 (Activation) (None, 15, 15, 96) 0 batch_normalization_18[0][0]
__________________________________________________________________________________________________
activation_19 (Activation) (None, 15, 15, 64) 0 batch_normalization_19[0][0]
__________________________________________________________________________________________________
mixed1 (Concatenate) (None, 15, 15, 288) 0 activation_13[0][0]
activation_15[0][0]
activation_18[0][0]
activation_19[0][0]
__________________________________________________________________________________________________
conv2d_23 (Conv2D) (None, 15, 15, 64) 18432 mixed1[0][0]
__________________________________________________________________________________________________
batch_normalization_23 (BatchNo (None, 15, 15, 64) 192 conv2d_23[0][0]
__________________________________________________________________________________________________
activation_23 (Activation) (None, 15, 15, 64) 0 batch_normalization_23[0][0]
__________________________________________________________________________________________________
conv2d_21 (Conv2D) (None, 15, 15, 48) 13824 mixed1[0][0]
__________________________________________________________________________________________________
conv2d_24 (Conv2D) (None, 15, 15, 96) 55296 activation_23[0][0]
__________________________________________________________________________________________________
batch_normalization_21 (BatchNo (None, 15, 15, 48) 144 conv2d_21[0][0]
__________________________________________________________________________________________________
batch_normalization_24 (BatchNo (None, 15, 15, 96) 288 conv2d_24[0][0]
__________________________________________________________________________________________________
activation_21 (Activation) (None, 15, 15, 48) 0 batch_normalization_21[0][0]
__________________________________________________________________________________________________
activation_24 (Activation) (None, 15, 15, 96) 0 batch_normalization_24[0][0]
__________________________________________________________________________________________________
average_pooling2d_3 (AveragePoo (None, 15, 15, 288) 0 mixed1[0][0]
__________________________________________________________________________________________________
conv2d_20 (Conv2D) (None, 15, 15, 64) 18432 mixed1[0][0]
__________________________________________________________________________________________________
conv2d_22 (Conv2D) (None, 15, 15, 64) 76800 activation_21[0][0]
__________________________________________________________________________________________________
conv2d_25 (Conv2D) (None, 15, 15, 96) 82944 activation_24[0][0]
__________________________________________________________________________________________________
conv2d_26 (Conv2D) (None, 15, 15, 64) 18432 average_pooling2d_3[0][0]
__________________________________________________________________________________________________
batch_normalization_20 (BatchNo (None, 15, 15, 64) 192 conv2d_20[0][0]
__________________________________________________________________________________________________
batch_normalization_22 (BatchNo (None, 15, 15, 64) 192 conv2d_22[0][0]
__________________________________________________________________________________________________
batch_normalization_25 (BatchNo (None, 15, 15, 96) 288 conv2d_25[0][0]
__________________________________________________________________________________________________
batch_normalization_26 (BatchNo (None, 15, 15, 64) 192 conv2d_26[0][0]
__________________________________________________________________________________________________
activation_20 (Activation) (None, 15, 15, 64) 0 batch_normalization_20[0][0]
__________________________________________________________________________________________________
activation_22 (Activation) (None, 15, 15, 64) 0 batch_normalization_22[0][0]
__________________________________________________________________________________________________
activation_25 (Activation) (None, 15, 15, 96) 0 batch_normalization_25[0][0]
__________________________________________________________________________________________________
activation_26 (Activation) (None, 15, 15, 64) 0 batch_normalization_26[0][0]
__________________________________________________________________________________________________
mixed2 (Concatenate) (None, 15, 15, 288) 0 activation_20[0][0]
activation_22[0][0]
activation_25[0][0]
activation_26[0][0]
__________________________________________________________________________________________________
conv2d_28 (Conv2D) (None, 15, 15, 64) 18432 mixed2[0][0]
__________________________________________________________________________________________________
batch_normalization_28 (BatchNo (None, 15, 15, 64) 192 conv2d_28[0][0]
__________________________________________________________________________________________________
activation_28 (Activation) (None, 15, 15, 64) 0 batch_normalization_28[0][0]
__________________________________________________________________________________________________
conv2d_29 (Conv2D) (None, 15, 15, 96) 55296 activation_28[0][0]
__________________________________________________________________________________________________
batch_normalization_29 (BatchNo (None, 15, 15, 96) 288 conv2d_29[0][0]
__________________________________________________________________________________________________
activation_29 (Activation) (None, 15, 15, 96) 0 batch_normalization_29[0][0]
__________________________________________________________________________________________________
conv2d_27 (Conv2D) (None, 7, 7, 384) 995328 mixed2[0][0]
__________________________________________________________________________________________________
conv2d_30 (Conv2D) (None, 7, 7, 96) 82944 activation_29[0][0]
__________________________________________________________________________________________________
batch_normalization_27 (BatchNo (None, 7, 7, 384) 1152 conv2d_27[0][0]
__________________________________________________________________________________________________
batch_normalization_30 (BatchNo (None, 7, 7, 96) 288 conv2d_30[0][0]
__________________________________________________________________________________________________
activation_27 (Activation) (None, 7, 7, 384) 0 batch_normalization_27[0][0]
__________________________________________________________________________________________________
activation_30 (Activation) (None, 7, 7, 96) 0 batch_normalization_30[0][0]
__________________________________________________________________________________________________
max_pooling2d_3 (MaxPooling2D) (None, 7, 7, 288) 0 mixed2[0][0]
__________________________________________________________________________________________________
mixed3 (Concatenate) (None, 7, 7, 768) 0 activation_27[0][0]
activation_30[0][0]
max_pooling2d_3[0][0]
__________________________________________________________________________________________________
conv2d_35 (Conv2D) (None, 7, 7, 128) 98304 mixed3[0][0]
__________________________________________________________________________________________________
batch_normalization_35 (BatchNo (None, 7, 7, 128) 384 conv2d_35[0][0]
__________________________________________________________________________________________________
activation_35 (Activation) (None, 7, 7, 128) 0 batch_normalization_35[0][0]
__________________________________________________________________________________________________
conv2d_36 (Conv2D) (None, 7, 7, 128) 114688 activation_35[0][0]
__________________________________________________________________________________________________
batch_normalization_36 (BatchNo (None, 7, 7, 128) 384 conv2d_36[0][0]
__________________________________________________________________________________________________
activation_36 (Activation) (None, 7, 7, 128) 0 batch_normalization_36[0][0]
__________________________________________________________________________________________________
conv2d_32 (Conv2D) (None, 7, 7, 128) 98304 mixed3[0][0]
__________________________________________________________________________________________________
conv2d_37 (Conv2D) (None, 7, 7, 128) 114688 activation_36[0][0]
__________________________________________________________________________________________________
batch_normalization_32 (BatchNo (None, 7, 7, 128) 384 conv2d_32[0][0]
__________________________________________________________________________________________________
batch_normalization_37 (BatchNo (None, 7, 7, 128) 384 conv2d_37[0][0]
__________________________________________________________________________________________________
activation_32 (Activation) (None, 7, 7, 128) 0 batch_normalization_32[0][0]
__________________________________________________________________________________________________
activation_37 (Activation) (None, 7, 7, 128) 0 batch_normalization_37[0][0]
__________________________________________________________________________________________________
conv2d_33 (Conv2D) (None, 7, 7, 128) 114688 activation_32[0][0]
__________________________________________________________________________________________________
conv2d_38 (Conv2D) (None, 7, 7, 128) 114688 activation_37[0][0]
__________________________________________________________________________________________________
batch_normalization_33 (BatchNo (None, 7, 7, 128) 384 conv2d_33[0][0]
__________________________________________________________________________________________________
batch_normalization_38 (BatchNo (None, 7, 7, 128) 384 conv2d_38[0][0]
__________________________________________________________________________________________________
activation_33 (Activation) (None, 7, 7, 128) 0 batch_normalization_33[0][0]
__________________________________________________________________________________________________
activation_38 (Activation) (None, 7, 7, 128) 0 batch_normalization_38[0][0]
__________________________________________________________________________________________________
average_pooling2d_4 (AveragePoo (None, 7, 7, 768) 0 mixed3[0][0]
__________________________________________________________________________________________________
conv2d_31 (Conv2D) (None, 7, 7, 192) 147456 mixed3[0][0]
__________________________________________________________________________________________________
conv2d_34 (Conv2D) (None, 7, 7, 192) 172032 activation_33[0][0]
__________________________________________________________________________________________________
conv2d_39 (Conv2D) (None, 7, 7, 192) 172032 activation_38[0][0]
__________________________________________________________________________________________________
conv2d_40 (Conv2D) (None, 7, 7, 192) 147456 average_pooling2d_4[0][0]
__________________________________________________________________________________________________
batch_normalization_31 (BatchNo (None, 7, 7, 192) 576 conv2d_31[0][0]
__________________________________________________________________________________________________
batch_normalization_34 (BatchNo (None, 7, 7, 192) 576 conv2d_34[0][0]
__________________________________________________________________________________________________
batch_normalization_39 (BatchNo (None, 7, 7, 192) 576 conv2d_39[0][0]
__________________________________________________________________________________________________
batch_normalization_40 (BatchNo (None, 7, 7, 192) 576 conv2d_40[0][0]
__________________________________________________________________________________________________
activation_31 (Activation) (None, 7, 7, 192) 0 batch_normalization_31[0][0]
__________________________________________________________________________________________________
activation_34 (Activation) (None, 7, 7, 192) 0 batch_normalization_34[0][0]
__________________________________________________________________________________________________
activation_39 (Activation) (None, 7, 7, 192) 0 batch_normalization_39[0][0]
__________________________________________________________________________________________________
activation_40 (Activation) (None, 7, 7, 192) 0 batch_normalization_40[0][0]
__________________________________________________________________________________________________
mixed4 (Concatenate) (None, 7, 7, 768) 0 activation_31[0][0]
activation_34[0][0]
activation_39[0][0]
activation_40[0][0]
__________________________________________________________________________________________________
conv2d_45 (Conv2D) (None, 7, 7, 160) 122880 mixed4[0][0]
__________________________________________________________________________________________________
batch_normalization_45 (BatchNo (None, 7, 7, 160) 480 conv2d_45[0][0]
__________________________________________________________________________________________________
activation_45 (Activation) (None, 7, 7, 160) 0 batch_normalization_45[0][0]
__________________________________________________________________________________________________
conv2d_46 (Conv2D) (None, 7, 7, 160) 179200 activation_45[0][0]
__________________________________________________________________________________________________
batch_normalization_46 (BatchNo (None, 7, 7, 160) 480 conv2d_46[0][0]
__________________________________________________________________________________________________
activation_46 (Activation) (None, 7, 7, 160) 0 batch_normalization_46[0][0]
__________________________________________________________________________________________________
conv2d_42 (Conv2D) (None, 7, 7, 160) 122880 mixed4[0][0]
__________________________________________________________________________________________________
conv2d_47 (Conv2D) (None, 7, 7, 160) 179200 activation_46[0][0]
__________________________________________________________________________________________________
batch_normalization_42 (BatchNo (None, 7, 7, 160) 480 conv2d_42[0][0]
__________________________________________________________________________________________________
batch_normalization_47 (BatchNo (None, 7, 7, 160) 480 conv2d_47[0][0]
__________________________________________________________________________________________________
activation_42 (Activation) (None, 7, 7, 160) 0 batch_normalization_42[0][0]
__________________________________________________________________________________________________
activation_47 (Activation) (None, 7, 7, 160) 0 batch_normalization_47[0][0]
__________________________________________________________________________________________________
conv2d_43 (Conv2D) (None, 7, 7, 160) 179200 activation_42[0][0]
__________________________________________________________________________________________________
conv2d_48 (Conv2D) (None, 7, 7, 160) 179200 activation_47[0][0]
__________________________________________________________________________________________________
batch_normalization_43 (BatchNo (None, 7, 7, 160) 480 conv2d_43[0][0]
__________________________________________________________________________________________________
batch_normalization_48 (BatchNo (None, 7, 7, 160) 480 conv2d_48[0][0]
__________________________________________________________________________________________________
activation_43 (Activation) (None, 7, 7, 160) 0 batch_normalization_43[0][0]
__________________________________________________________________________________________________
activation_48 (Activation) (None, 7, 7, 160) 0 batch_normalization_48[0][0]
__________________________________________________________________________________________________
average_pooling2d_5 (AveragePoo (None, 7, 7, 768) 0 mixed4[0][0]
__________________________________________________________________________________________________
conv2d_41 (Conv2D) (None, 7, 7, 192) 147456 mixed4[0][0]
__________________________________________________________________________________________________
conv2d_44 (Conv2D) (None, 7, 7, 192) 215040 activation_43[0][0]
__________________________________________________________________________________________________
conv2d_49 (Conv2D) (None, 7, 7, 192) 215040 activation_48[0][0]
__________________________________________________________________________________________________
conv2d_50 (Conv2D) (None, 7, 7, 192) 147456 average_pooling2d_5[0][0]
__________________________________________________________________________________________________
batch_normalization_41 (BatchNo (None, 7, 7, 192) 576 conv2d_41[0][0]
__________________________________________________________________________________________________
batch_normalization_44 (BatchNo (None, 7, 7, 192) 576 conv2d_44[0][0]
__________________________________________________________________________________________________
batch_normalization_49 (BatchNo (None, 7, 7, 192) 576 conv2d_49[0][0]
__________________________________________________________________________________________________
batch_normalization_50 (BatchNo (None, 7, 7, 192) 576 conv2d_50[0][0]
__________________________________________________________________________________________________
activation_41 (Activation) (None, 7, 7, 192) 0 batch_normalization_41[0][0]
__________________________________________________________________________________________________
activation_44 (Activation) (None, 7, 7, 192) 0 batch_normalization_44[0][0]
__________________________________________________________________________________________________
activation_49 (Activation) (None, 7, 7, 192) 0 batch_normalization_49[0][0]
__________________________________________________________________________________________________
activation_50 (Activation) (None, 7, 7, 192) 0 batch_normalization_50[0][0]
__________________________________________________________________________________________________
mixed5 (Concatenate) (None, 7, 7, 768) 0 activation_41[0][0]
activation_44[0][0]
activation_49[0][0]
activation_50[0][0]
__________________________________________________________________________________________________
conv2d_55 (Conv2D) (None, 7, 7, 160) 122880 mixed5[0][0]
__________________________________________________________________________________________________
batch_normalization_55 (BatchNo (None, 7, 7, 160) 480 conv2d_55[0][0]
__________________________________________________________________________________________________
activation_55 (Activation) (None, 7, 7, 160) 0 batch_normalization_55[0][0]
__________________________________________________________________________________________________
conv2d_56 (Conv2D) (None, 7, 7, 160) 179200 activation_55[0][0]
__________________________________________________________________________________________________
batch_normalization_56 (BatchNo (None, 7, 7, 160) 480 conv2d_56[0][0]
__________________________________________________________________________________________________
activation_56 (Activation) (None, 7, 7, 160) 0 batch_normalization_56[0][0]
__________________________________________________________________________________________________
conv2d_52 (Conv2D) (None, 7, 7, 160) 122880 mixed5[0][0]
__________________________________________________________________________________________________
conv2d_57 (Conv2D) (None, 7, 7, 160) 179200 activation_56[0][0]
__________________________________________________________________________________________________
batch_normalization_52 (BatchNo (None, 7, 7, 160) 480 conv2d_52[0][0]
__________________________________________________________________________________________________
batch_normalization_57 (BatchNo (None, 7, 7, 160) 480 conv2d_57[0][0]
__________________________________________________________________________________________________
activation_52 (Activation) (None, 7, 7, 160) 0 batch_normalization_52[0][0]
__________________________________________________________________________________________________
activation_57 (Activation) (None, 7, 7, 160) 0 batch_normalization_57[0][0]
__________________________________________________________________________________________________
conv2d_53 (Conv2D) (None, 7, 7, 160) 179200 activation_52[0][0]
__________________________________________________________________________________________________
conv2d_58 (Conv2D) (None, 7, 7, 160) 179200 activation_57[0][0]
__________________________________________________________________________________________________
batch_normalization_53 (BatchNo (None, 7, 7, 160) 480 conv2d_53[0][0]
__________________________________________________________________________________________________
batch_normalization_58 (BatchNo (None, 7, 7, 160) 480 conv2d_58[0][0]
__________________________________________________________________________________________________
activation_53 (Activation) (None, 7, 7, 160) 0 batch_normalization_53[0][0]
__________________________________________________________________________________________________
activation_58 (Activation) (None, 7, 7, 160) 0 batch_normalization_58[0][0]
__________________________________________________________________________________________________
average_pooling2d_6 (AveragePoo (None, 7, 7, 768) 0 mixed5[0][0]
__________________________________________________________________________________________________
conv2d_51 (Conv2D) (None, 7, 7, 192) 147456 mixed5[0][0]
__________________________________________________________________________________________________
conv2d_54 (Conv2D) (None, 7, 7, 192) 215040 activation_53[0][0]
__________________________________________________________________________________________________
conv2d_59 (Conv2D) (None, 7, 7, 192) 215040 activation_58[0][0]
__________________________________________________________________________________________________
conv2d_60 (Conv2D) (None, 7, 7, 192) 147456 average_pooling2d_6[0][0]
__________________________________________________________________________________________________
batch_normalization_51 (BatchNo (None, 7, 7, 192) 576 conv2d_51[0][0]
__________________________________________________________________________________________________
batch_normalization_54 (BatchNo (None, 7, 7, 192) 576 conv2d_54[0][0]
__________________________________________________________________________________________________
batch_normalization_59 (BatchNo (None, 7, 7, 192) 576 conv2d_59[0][0]
__________________________________________________________________________________________________
batch_normalization_60 (BatchNo (None, 7, 7, 192) 576 conv2d_60[0][0]
__________________________________________________________________________________________________
activation_51 (Activation) (None, 7, 7, 192) 0 batch_normalization_51[0][0]
__________________________________________________________________________________________________
activation_54 (Activation) (None, 7, 7, 192) 0 batch_normalization_54[0][0]
__________________________________________________________________________________________________
activation_59 (Activation) (None, 7, 7, 192) 0 batch_normalization_59[0][0]
__________________________________________________________________________________________________
activation_60 (Activation) (None, 7, 7, 192) 0 batch_normalization_60[0][0]
__________________________________________________________________________________________________
mixed6 (Concatenate) (None, 7, 7, 768) 0 activation_51[0][0]
activation_54[0][0]
activation_59[0][0]
activation_60[0][0]
__________________________________________________________________________________________________
conv2d_65 (Conv2D) (None, 7, 7, 192) 147456 mixed6[0][0]
__________________________________________________________________________________________________
batch_normalization_65 (BatchNo (None, 7, 7, 192) 576 conv2d_65[0][0]
__________________________________________________________________________________________________
activation_65 (Activation) (None, 7, 7, 192) 0 batch_normalization_65[0][0]
__________________________________________________________________________________________________
conv2d_66 (Conv2D) (None, 7, 7, 192) 258048 activation_65[0][0]
__________________________________________________________________________________________________
batch_normalization_66 (BatchNo (None, 7, 7, 192) 576 conv2d_66[0][0]
__________________________________________________________________________________________________
activation_66 (Activation) (None, 7, 7, 192) 0 batch_normalization_66[0][0]
__________________________________________________________________________________________________
conv2d_62 (Conv2D) (None, 7, 7, 192) 147456 mixed6[0][0]
__________________________________________________________________________________________________
conv2d_67 (Conv2D) (None, 7, 7, 192) 258048 activation_66[0][0]
__________________________________________________________________________________________________
batch_normalization_62 (BatchNo (None, 7, 7, 192) 576 conv2d_62[0][0]
__________________________________________________________________________________________________
batch_normalization_67 (BatchNo (None, 7, 7, 192) 576 conv2d_67[0][0]
__________________________________________________________________________________________________
activation_62 (Activation) (None, 7, 7, 192) 0 batch_normalization_62[0][0]
__________________________________________________________________________________________________
activation_67 (Activation) (None, 7, 7, 192) 0 batch_normalization_67[0][0]
__________________________________________________________________________________________________
conv2d_63 (Conv2D) (None, 7, 7, 192) 258048 activation_62[0][0]
__________________________________________________________________________________________________
conv2d_68 (Conv2D) (None, 7, 7, 192) 258048 activation_67[0][0]
__________________________________________________________________________________________________
batch_normalization_63 (BatchNo (None, 7, 7, 192) 576 conv2d_63[0][0]
__________________________________________________________________________________________________
batch_normalization_68 (BatchNo (None, 7, 7, 192) 576 conv2d_68[0][0]
__________________________________________________________________________________________________
activation_63 (Activation) (None, 7, 7, 192) 0 batch_normalization_63[0][0]
__________________________________________________________________________________________________
activation_68 (Activation) (None, 7, 7, 192) 0 batch_normalization_68[0][0]
__________________________________________________________________________________________________
average_pooling2d_7 (AveragePoo (None, 7, 7, 768) 0 mixed6[0][0]
__________________________________________________________________________________________________
conv2d_61 (Conv2D) (None, 7, 7, 192) 147456 mixed6[0][0]
__________________________________________________________________________________________________
conv2d_64 (Conv2D) (None, 7, 7, 192) 258048 activation_63[0][0]
__________________________________________________________________________________________________
conv2d_69 (Conv2D) (None, 7, 7, 192) 258048 activation_68[0][0]
__________________________________________________________________________________________________
conv2d_70 (Conv2D) (None, 7, 7, 192) 147456 average_pooling2d_7[0][0]
__________________________________________________________________________________________________
batch_normalization_61 (BatchNo (None, 7, 7, 192) 576 conv2d_61[0][0]
__________________________________________________________________________________________________
batch_normalization_64 (BatchNo (None, 7, 7, 192) 576 conv2d_64[0][0]
__________________________________________________________________________________________________
batch_normalization_69 (BatchNo (None, 7, 7, 192) 576 conv2d_69[0][0]
__________________________________________________________________________________________________
batch_normalization_70 (BatchNo (None, 7, 7, 192) 576 conv2d_70[0][0]
__________________________________________________________________________________________________
activation_61 (Activation) (None, 7, 7, 192) 0 batch_normalization_61[0][0]
__________________________________________________________________________________________________
activation_64 (Activation) (None, 7, 7, 192) 0 batch_normalization_64[0][0]
__________________________________________________________________________________________________
activation_69 (Activation) (None, 7, 7, 192) 0 batch_normalization_69[0][0]
__________________________________________________________________________________________________
activation_70 (Activation) (None, 7, 7, 192) 0 batch_normalization_70[0][0]
__________________________________________________________________________________________________
mixed7 (Concatenate) (None, 7, 7, 768) 0 activation_61[0][0]
activation_64[0][0]
activation_69[0][0]
activation_70[0][0]
__________________________________________________________________________________________________
conv2d_73 (Conv2D) (None, 7, 7, 192) 147456 mixed7[0][0]
__________________________________________________________________________________________________
batch_normalization_73 (BatchNo (None, 7, 7, 192) 576 conv2d_73[0][0]
__________________________________________________________________________________________________
activation_73 (Activation) (None, 7, 7, 192) 0 batch_normalization_73[0][0]
__________________________________________________________________________________________________
conv2d_74 (Conv2D) (None, 7, 7, 192) 258048 activation_73[0][0]
__________________________________________________________________________________________________
batch_normalization_74 (BatchNo (None, 7, 7, 192) 576 conv2d_74[0][0]
__________________________________________________________________________________________________
activation_74 (Activation) (None, 7, 7, 192) 0 batch_normalization_74[0][0]
__________________________________________________________________________________________________
conv2d_71 (Conv2D) (None, 7, 7, 192) 147456 mixed7[0][0]
__________________________________________________________________________________________________
conv2d_75 (Conv2D) (None, 7, 7, 192) 258048 activation_74[0][0]
__________________________________________________________________________________________________
batch_normalization_71 (BatchNo (None, 7, 7, 192) 576 conv2d_71[0][0]
__________________________________________________________________________________________________
batch_normalization_75 (BatchNo (None, 7, 7, 192) 576 conv2d_75[0][0]
__________________________________________________________________________________________________
activation_71 (Activation) (None, 7, 7, 192) 0 batch_normalization_71[0][0]
__________________________________________________________________________________________________
activation_75 (Activation) (None, 7, 7, 192) 0 batch_normalization_75[0][0]
__________________________________________________________________________________________________
conv2d_72 (Conv2D) (None, 3, 3, 320) 552960 activation_71[0][0]
__________________________________________________________________________________________________
conv2d_76 (Conv2D) (None, 3, 3, 192) 331776 activation_75[0][0]
__________________________________________________________________________________________________
batch_normalization_72 (BatchNo (None, 3, 3, 320) 960 conv2d_72[0][0]
__________________________________________________________________________________________________
batch_normalization_76 (BatchNo (None, 3, 3, 192) 576 conv2d_76[0][0]
__________________________________________________________________________________________________
activation_72 (Activation) (None, 3, 3, 320) 0 batch_normalization_72[0][0]
__________________________________________________________________________________________________
activation_76 (Activation) (None, 3, 3, 192) 0 batch_normalization_76[0][0]
__________________________________________________________________________________________________
max_pooling2d_4 (MaxPooling2D) (None, 3, 3, 768) 0 mixed7[0][0]
__________________________________________________________________________________________________
mixed8 (Concatenate) (None, 3, 3, 1280) 0 activation_72[0][0]
activation_76[0][0]
max_pooling2d_4[0][0]
__________________________________________________________________________________________________
conv2d_81 (Conv2D) (None, 3, 3, 448) 573440 mixed8[0][0]
__________________________________________________________________________________________________
batch_normalization_81 (BatchNo (None, 3, 3, 448) 1344 conv2d_81[0][0]
__________________________________________________________________________________________________
activation_81 (Activation) (None, 3, 3, 448) 0 batch_normalization_81[0][0]
__________________________________________________________________________________________________
conv2d_78 (Conv2D) (None, 3, 3, 384) 491520 mixed8[0][0]
__________________________________________________________________________________________________
conv2d_82 (Conv2D) (None, 3, 3, 384) 1548288 activation_81[0][0]
__________________________________________________________________________________________________
batch_normalization_78 (BatchNo (None, 3, 3, 384) 1152 conv2d_78[0][0]
__________________________________________________________________________________________________
batch_normalization_82 (BatchNo (None, 3, 3, 384) 1152 conv2d_82[0][0]
__________________________________________________________________________________________________
activation_78 (Activation) (None, 3, 3, 384) 0 batch_normalization_78[0][0]
__________________________________________________________________________________________________
activation_82 (Activation) (None, 3, 3, 384) 0 batch_normalization_82[0][0]
__________________________________________________________________________________________________
conv2d_79 (Conv2D) (None, 3, 3, 384) 442368 activation_78[0][0]
__________________________________________________________________________________________________
conv2d_80 (Conv2D) (None, 3, 3, 384) 442368 activation_78[0][0]
__________________________________________________________________________________________________
conv2d_83 (Conv2D) (None, 3, 3, 384) 442368 activation_82[0][0]
__________________________________________________________________________________________________
conv2d_84 (Conv2D) (None, 3, 3, 384) 442368 activation_82[0][0]
__________________________________________________________________________________________________
average_pooling2d_8 (AveragePoo (None, 3, 3, 1280) 0 mixed8[0][0]
__________________________________________________________________________________________________
conv2d_77 (Conv2D) (None, 3, 3, 320) 409600 mixed8[0][0]
__________________________________________________________________________________________________
batch_normalization_79 (BatchNo (None, 3, 3, 384) 1152 conv2d_79[0][0]
__________________________________________________________________________________________________
batch_normalization_80 (BatchNo (None, 3, 3, 384) 1152 conv2d_80[0][0]
__________________________________________________________________________________________________
batch_normalization_83 (BatchNo (None, 3, 3, 384) 1152 conv2d_83[0][0]
__________________________________________________________________________________________________
batch_normalization_84 (BatchNo (None, 3, 3, 384) 1152 conv2d_84[0][0]
__________________________________________________________________________________________________
conv2d_85 (Conv2D) (None, 3, 3, 192) 245760 average_pooling2d_8[0][0]
__________________________________________________________________________________________________
batch_normalization_77 (BatchNo (None, 3, 3, 320) 960 conv2d_77[0][0]
__________________________________________________________________________________________________
activation_79 (Activation) (None, 3, 3, 384) 0 batch_normalization_79[0][0]
__________________________________________________________________________________________________
activation_80 (Activation) (None, 3, 3, 384) 0 batch_normalization_80[0][0]
__________________________________________________________________________________________________
activation_83 (Activation) (None, 3, 3, 384) 0 batch_normalization_83[0][0]
__________________________________________________________________________________________________
activation_84 (Activation) (None, 3, 3, 384) 0 batch_normalization_84[0][0]
__________________________________________________________________________________________________
batch_normalization_85 (BatchNo (None, 3, 3, 192) 576 conv2d_85[0][0]
__________________________________________________________________________________________________
activation_77 (Activation) (None, 3, 3, 320) 0 batch_normalization_77[0][0]
__________________________________________________________________________________________________
mixed9_0 (Concatenate) (None, 3, 3, 768) 0 activation_79[0][0]
activation_80[0][0]
__________________________________________________________________________________________________
concatenate_1 (Concatenate) (None, 3, 3, 768) 0 activation_83[0][0]
activation_84[0][0]
__________________________________________________________________________________________________
activation_85 (Activation) (None, 3, 3, 192) 0 batch_normalization_85[0][0]
__________________________________________________________________________________________________
mixed9 (Concatenate) (None, 3, 3, 2048) 0 activation_77[0][0]
mixed9_0[0][0]
concatenate_1[0][0]
activation_85[0][0]
__________________________________________________________________________________________________
conv2d_90 (Conv2D) (None, 3, 3, 448) 917504 mixed9[0][0]
__________________________________________________________________________________________________
batch_normalization_90 (BatchNo (None, 3, 3, 448) 1344 conv2d_90[0][0]
__________________________________________________________________________________________________
activation_90 (Activation) (None, 3, 3, 448) 0 batch_normalization_90[0][0]
__________________________________________________________________________________________________
conv2d_87 (Conv2D) (None, 3, 3, 384) 786432 mixed9[0][0]
__________________________________________________________________________________________________
conv2d_91 (Conv2D) (None, 3, 3, 384) 1548288 activation_90[0][0]
__________________________________________________________________________________________________
batch_normalization_87 (BatchNo (None, 3, 3, 384) 1152 conv2d_87[0][0]
__________________________________________________________________________________________________
batch_normalization_91 (BatchNo (None, 3, 3, 384) 1152 conv2d_91[0][0]
__________________________________________________________________________________________________
activation_87 (Activation) (None, 3, 3, 384) 0 batch_normalization_87[0][0]
__________________________________________________________________________________________________
activation_91 (Activation) (None, 3, 3, 384) 0 batch_normalization_91[0][0]
__________________________________________________________________________________________________
conv2d_88 (Conv2D) (None, 3, 3, 384) 442368 activation_87[0][0]
__________________________________________________________________________________________________
conv2d_89 (Conv2D) (None, 3, 3, 384) 442368 activation_87[0][0]
__________________________________________________________________________________________________
conv2d_92 (Conv2D) (None, 3, 3, 384) 442368 activation_91[0][0]
__________________________________________________________________________________________________
conv2d_93 (Conv2D) (None, 3, 3, 384) 442368 activation_91[0][0]
__________________________________________________________________________________________________
average_pooling2d_9 (AveragePoo (None, 3, 3, 2048) 0 mixed9[0][0]
__________________________________________________________________________________________________
conv2d_86 (Conv2D) (None, 3, 3, 320) 655360 mixed9[0][0]
__________________________________________________________________________________________________
batch_normalization_88 (BatchNo (None, 3, 3, 384) 1152 conv2d_88[0][0]
__________________________________________________________________________________________________
batch_normalization_89 (BatchNo (None, 3, 3, 384) 1152 conv2d_89[0][0]
__________________________________________________________________________________________________
batch_normalization_92 (BatchNo (None, 3, 3, 384) 1152 conv2d_92[0][0]
__________________________________________________________________________________________________
batch_normalization_93 (BatchNo (None, 3, 3, 384) 1152 conv2d_93[0][0]
__________________________________________________________________________________________________
conv2d_94 (Conv2D) (None, 3, 3, 192) 393216 average_pooling2d_9[0][0]
__________________________________________________________________________________________________
batch_normalization_86 (BatchNo (None, 3, 3, 320) 960 conv2d_86[0][0]
__________________________________________________________________________________________________
activation_88 (Activation) (None, 3, 3, 384) 0 batch_normalization_88[0][0]
__________________________________________________________________________________________________
activation_89 (Activation) (None, 3, 3, 384) 0 batch_normalization_89[0][0]
__________________________________________________________________________________________________
activation_92 (Activation) (None, 3, 3, 384) 0 batch_normalization_92[0][0]
__________________________________________________________________________________________________
activation_93 (Activation) (None, 3, 3, 384) 0 batch_normalization_93[0][0]
__________________________________________________________________________________________________
batch_normalization_94 (BatchNo (None, 3, 3, 192) 576 conv2d_94[0][0]
__________________________________________________________________________________________________
activation_86 (Activation) (None, 3, 3, 320) 0 batch_normalization_86[0][0]
__________________________________________________________________________________________________
mixed9_1 (Concatenate) (None, 3, 3, 768) 0 activation_88[0][0]
activation_89[0][0]
__________________________________________________________________________________________________
concatenate_2 (Concatenate) (None, 3, 3, 768) 0 activation_92[0][0]
activation_93[0][0]
__________________________________________________________________________________________________
activation_94 (Activation) (None, 3, 3, 192) 0 batch_normalization_94[0][0]
__________________________________________________________________________________________________
mixed10 (Concatenate) (None, 3, 3, 2048) 0 activation_86[0][0]
mixed9_1[0][0]
concatenate_2[0][0]
activation_94[0][0]
==================================================================================================
Total params: 21,802,784
Trainable params: 0
Non-trainable params: 21,802,784
__________________________________________________________________________________________________
###Markdown
In a normal Inception network, you would see from the model summary that the last two layers were a global average pooling layer and a fully-connected "Dense" layer. However, since we set `include_top` to `False`, both of these get dropped. If you otherwise wanted to drop additional layers, you would use:

```
inception.layers.pop()
```

Note that `pop()` works from the end of the model backwards. It's important to note two things here:

1. How many layers you drop is typically up to you. We dropped the final two already by setting `include_top` to `False` in the original loading of the model, but you could instead just run `pop()` twice to achieve similar results. (*Note:* Keras requires us to set `include_top` to `False` in order to change the `input_shape`.) Additional layers could be dropped by additional calls to `pop()`.
2. If you make a mistake with `pop()`, you'll want to reload the model. If you use it multiple times, the model will continue to drop more and more layers, so you may need to check `model.summary()` again to check your work.

Adding new layers

Now you can start to add your own layers. While we've used Keras's `Sequential` model before for simplicity, we'll actually use the [Model API](https://keras.io/models/model/) this time. This functions a little differently, in that instead of using `model.add()`, you explicitly tell the model which previous layer to attach to the current layer. This is useful if you want to use more advanced concepts like [skip layers](https://en.wikipedia.org/wiki/Residual_neural_network), for instance (which were used heavily in ResNet). For example, if you had a previous layer named `inp`:

```
x = Dropout(0.2)(inp)
```

is how you would attach a new dropout layer `x`, with its input coming from the layer with the variable name `inp`.

We are going to use the [CIFAR-10 dataset](https://www.cs.toronto.edu/~kriz/cifar.html), which consists of 60,000 32x32 images of 10 classes. We need to use Keras's `Input` function to do so, and then we want to re-size the images up to the `input_size` we specified earlier (139x139).
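As a minimal, self-contained sketch of this functional (Model) API pattern — the layer names and sizes here are hypothetical, purely for illustration:

```
from keras.layers import Input, Dropout, Dense
from keras.models import Model

# Each layer is called on the previous layer's output tensor
inp = Input(shape=(128,))                     # hypothetical input tensor
x = Dropout(0.2)(inp)                         # dropout attached to the input
out = Dense(10, activation='softmax')(x)      # classifier head attached to x
tiny_model = Model(inputs=inp, outputs=out)   # wrap the graph into a Model
```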
###Code
from keras.layers import Input, Lambda
import tensorflow as tf
# Makes the input placeholder layer 32x32x3 for CIFAR-10
cifar_input = Input(shape=(32,32,3))
# Re-sizes the input with Keras's Lambda layer & attaches it to cifar_input
resized_input = Lambda(lambda image: tf.image.resize_images(
image, (input_size, input_size)))(cifar_input)
# Feeds the re-sized input into Inception model
# You will need to update the model name if you changed it earlier!
inp = inception(resized_input)
"""Last step mein ham mostly input layer ke aage model ko append kar rahe hain"""
# Imports fully-connected "Dense" layers & Global Average Pooling
from keras.layers import Dense, GlobalAveragePooling2D
## TODO: Setting `include_top` to False earlier also removed the
## GlobalAveragePooling2D layer, but we still want it.
## Add it here, and make sure to connect it to the end of Inception
out1 = GlobalAveragePooling2D()(inp)
## TODO: Create two new fully-connected layers using the Model API
## format discussed above. The first layer should use `out`
## as its input, along with ReLU activation. You can choose
## how many nodes it has, although 512 or less is a good idea.
## The second layer should take this first layer as input, and
## be named "predictions", with Softmax activation and
## 10 nodes, as we'll be using the CIFAR10 dataset.
out2 = Dense(512, activation='relu')(out1)
predictions = Dense(10, activation='softmax')(out2)
###Output
_____no_output_____
###Markdown
We're almost done with our new model! Now we just need to use the actual Model API to create the full model.
###Code
# Imports the Model API
from keras.models import Model
# Creates the model, assuming your final layer is named "predictions"
model = Model(inputs=cifar_input, outputs=predictions)
# Compile the model
model.compile(optimizer='Adam', loss='categorical_crossentropy', metrics=['accuracy'])
# Check the summary of this new model to confirm the architecture
model.summary()
###Output
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_2 (InputLayer) (None, 32, 32, 3) 0
_________________________________________________________________
lambda_1 (Lambda) (None, 139, 139, 3) 0
_________________________________________________________________
inception_v3 (Model) (None, 3, 3, 2048) 21802784
_________________________________________________________________
global_average_pooling2d_1 ( (None, 2048) 0
_________________________________________________________________
dense_1 (Dense) (None, 512) 1049088
_________________________________________________________________
dense_2 (Dense) (None, 10) 5130
=================================================================
Total params: 22,857,002
Trainable params: 1,054,218
Non-trainable params: 21,802,784
_________________________________________________________________
###Markdown
Great job creating a new model architecture from Inception! Notice how this method of adding layers before InceptionV3 and appending to the end of it made InceptionV3 condense down into one line in the summary; if you use the Inception model's normal input (which you could gather from `inception.layers.input`), it would instead show all the layers like before.

Most of the rest of the code in the notebook just goes toward loading our data, pre-processing it, and starting our training in Keras, although there's one other good point to make here - Keras callbacks.

Keras Callbacks

Keras [callbacks](https://keras.io/callbacks/) allow you to gather and store additional information during training, such as the best model, or even stop training early if the validation accuracy has stopped improving. These methods can help to avoid overfitting, or avoid other issues.

There are two key callbacks to mention here, `ModelCheckpoint` and `EarlyStopping`. As the names may suggest, model checkpoint saves down the best model so far based on a given metric, while early stopping will end training before the specified number of epochs if the chosen metric no longer improves after a given amount of time.

To set these callbacks, you could do the following:

```
checkpoint = ModelCheckpoint(filepath=save_path, monitor='val_loss', save_best_only=True)
```

This would save a model to a specified `save_path`, based on validation loss, and only save down the best models. If you set `save_best_only` to `False`, every single epoch will save down another version of the model.

```
stopper = EarlyStopping(monitor='val_acc', min_delta=0.0003, patience=5)
```

This will monitor validation accuracy, and if it has not improved by more than 0.0003 over the previous best validation accuracy for 5 epochs, training will end early.

You still need to actually feed these callbacks into `fit()` when you train the model (along with all other relevant data to feed into `fit`):

```
model.fit(callbacks=[checkpoint, stopper])
```

GPU time

The rest of the notebook will give you the code for training, so you can turn on the GPU at this point - but first, **make sure to save your jupyter notebook**. Once the GPU is turned on, it will load whatever your last notebook checkpoint is. While we suggest reading through the code below to make sure you understand it, you can otherwise go ahead and select *Cell > Run All* (or *Kernel > Restart & Run All* if already using GPU) to run through all cells in the notebook.
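A minimal combined sketch of these callbacks, assuming a compiled `model` and a hypothetical `save_path`:

```
from keras.callbacks import ModelCheckpoint, EarlyStopping

save_path = 'best_model.h5'  # hypothetical path - adjust to your environment
# Keep only the best weights seen so far, judged by validation loss
checkpoint = ModelCheckpoint(filepath=save_path, monitor='val_loss', save_best_only=True)
# Stop if validation accuracy fails to improve by 0.0003 for 5 consecutive epochs
stopper = EarlyStopping(monitor='val_acc', min_delta=0.0003, patience=5)

# Both are then passed to fit() along with the usual training arguments, e.g.:
# model.fit(X_train, y_train, validation_data=(X_val, y_val),
#           callbacks=[checkpoint, stopper])
```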
###Code
from sklearn.utils import shuffle
from sklearn.preprocessing import LabelBinarizer
from keras.datasets import cifar10
(X_train, y_train), (X_val, y_val) = cifar10.load_data()
# One-hot encode the labels
label_binarizer = LabelBinarizer()
y_one_hot_train = label_binarizer.fit_transform(y_train)
y_one_hot_val = label_binarizer.transform(y_val)  # transform only - the binarizer was already fit on the training labels
# Shuffle the training & validation data
X_train, y_one_hot_train = shuffle(X_train, y_one_hot_train)
X_val, y_one_hot_val = shuffle(X_val, y_one_hot_val)
# We are only going to use the first 10,000 training images for speed reasons
# And only the first 2,000 images from the validation set
X_train = X_train[:10000]
y_one_hot_train = y_one_hot_train[:10000]
X_val = X_val[:2000]
y_one_hot_val = y_one_hot_val[:2000]
###Output
_____no_output_____
###Markdown
You can check out Keras's [ImageDataGenerator documentation](https://faroit.github.io/keras-docs/2.0.9/preprocessing/image/) for more information on the below - you can also add additional image augmentation through this function, although we are skipping that step here so you can potentially explore it in the upcoming project.
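If you do want to experiment with augmentation, a lightly hedged sketch (the specific parameter values below are illustrative, not tuned) could look like:

```
from keras.preprocessing.image import ImageDataGenerator
from keras.applications.inception_v3 import preprocess_input

# ImageNet pre-processing plus a few simple augmentations
augmented_datagen = ImageDataGenerator(preprocessing_function=preprocess_input,
                                       horizontal_flip=True,    # random left-right flips
                                       width_shift_range=0.1,   # shift up to 10% horizontally
                                       height_shift_range=0.1)  # shift up to 10% vertically
```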
###Code
# Use a generator to pre-process our images for ImageNet
from keras.preprocessing.image import ImageDataGenerator
from keras.applications.inception_v3 import preprocess_input
if preprocess_flag:
datagen = ImageDataGenerator(preprocessing_function=preprocess_input)
val_datagen = ImageDataGenerator(preprocessing_function=preprocess_input)
else:
datagen = ImageDataGenerator()
val_datagen = ImageDataGenerator()
# Train the model
batch_size = 32
epochs = 5
# Note: we aren't using callbacks here since we only are using 5 epochs to conserve GPU time
model.fit_generator(datagen.flow(X_train, y_one_hot_train, batch_size=batch_size),
steps_per_epoch=len(X_train)/batch_size, epochs=epochs, verbose=1,
validation_data=val_datagen.flow(X_val, y_one_hot_val, batch_size=batch_size),
validation_steps=len(X_val)/batch_size)
###Output
Epoch 1/5
312/312 [============================>.] - ETA: 5s - loss: 1.3562 - acc: 0.5491 |
lab/Lab 03 - Polynomial Fitting.ipynb | ###Markdown
Lab 03 - Polynomial Fitting

In the previous lab we discussed linear regression and the OLS estimator for solving the minimization of the RSS. As we mentioned, regression problems are a very wide family of settings and algorithms which we use to try to estimate the relation between a set of explanatory variables and a **continuous** response (i.e. $\mathcal{Y}\in\mathbb{R}^p$). In the following lab we will discuss one such setting called "Polynomial Fitting". Sometimes, the data (and the relation between the explanatory variables and response) can be described by some polynomial of some degree. Here, we only focus on the case where it is a polynomial of a single variable. That is:

$$ p_k\left(x\right)=\sum_{i=0}^{k}\alpha_i x^i\quad\alpha_0,\ldots,\alpha_k\in\mathbb{R} $$

So our hypothesis class is of the form:

$$ \mathcal{H}^k_{poly}=\left\{p_k\,\Big|\, p_k\left(x\right)=\sum_{i=0}^{k}\alpha_i x^i\quad\alpha_0,\ldots,\alpha_k\in\mathbb{R}\right\} $$

Notice that similar to linear regression, each hypothesis in the class is defined by a coefficients vector. Below are two examples (simulated and real) for datasets where the relation between the explanatory variable and response is polynomial.
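As a tiny concrete illustration (the specific numbers are just an example), the simulated polynomial used below, $y=x^4-2x^3-0.5x^2+1$, corresponds to the coefficient vector $\left(1, 0, -0.5, -2, 1\right)$ in increasing-degree order:

```
import numpy as np

# Coefficient vector (alpha_0, ..., alpha_4) of y = x^4 - 2x^3 - 0.5x^2 + 1
alpha = np.array([1, 0, -.5, -2, 1])
# Evaluate the corresponding hypothesis p_k at x = 0.5
np.polynomial.polynomial.polyval(0.5, alpha)
```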
###Code
import sys
sys.path.append("../")
from utils import *
response = lambda x: x**4 - 2*x**3 - .5*x**2 + 1
x = np.linspace(-1.2, 2, 40)[0::2]
y_ = response(x)
df = pd.read_csv("../datasets/Position_Salaries.csv", skiprows=2, index_col=0)
x2, y2 = df.index, df.Salary
make_subplots(1, 2, subplot_titles=(r"$\text{Simulated Data: }y=x^4-2x^3-0.5x^2+1$", r"$\text{Positions Salary}$"))\
.add_traces([go.Scatter(x=x, y=y_, mode="markers", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x2, y=y2, mode="markers",marker=dict(color="black", opacity=.7), showlegend=False)],
rows=[1,1], cols=[1,2])\
.update_layout(title=r"$\text{(1) Datasets For Polynomial Fitting}$", margin=dict(t=100)).show()
###Output
_____no_output_____
###Markdown
As we have discussed in class, solving a polynomial fitting problem can be done by first manipulating the input data, such that we represent each sample $x_i\in\mathbb{R}$ as a vector $\mathbf{x}_i=\left(x^0,x^1,\ldots,x^k\right)$. Then, we treat the data as a design matrix $\mathbf{X}\in\mathbb{R}^{m\times \left(k+1\right)}$ of a linear regression problem. For the simulated dataset above, which is of a polynomial of degree 4, the design matrix looks as follows:
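Equivalently, this design matrix is a Vandermonde matrix, which NumPy can build directly (this matches the output of the `PolynomialFeatures(4)` transformation used below):

```
import numpy as np

# Columns are x^0, x^1, ..., x^4 for the 1-D sample array x defined above
X_design = np.vander(x, N=5, increasing=True)
```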
###Code
from sklearn.preprocessing import PolynomialFeatures
m, k, X = 5, 4, x.reshape(-1, 1)
pd.DataFrame(PolynomialFeatures(k).fit_transform(X[:m]),
             columns=[rf"$x^{{{i}}}$" for i in range(0, k+1)],
             index=[rf"$x_{{{i}}}$" for i in range(1, m+1)])
###Output
_____no_output_____
###Markdown
Fitting A Polynomial Of Different Degrees

Next, let us fit polynomials of different degrees and different noise properties to study how it influences the learned model. We begin with the noise-less case where we fit for different values of $k$. As we increase $k$ we manage to fit a model that describes the data in a better way, reflected by the decrease in the MSE.

*Notice that in both the `PolynomialFeatures` and `LinearRegression` functions we can add the bias/intercept parameter. As in this case it makes no difference, we will include the bias in the polynomial features transformation and fit a linear regression **without** an intercept. The bias parameter of the polynomial features (i.e. $x^0$) will in reality be the intercept of the linear regression.*
###Code
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import make_pipeline
ks = [2, 3, 4, 5]
fig = make_subplots(1, 4, subplot_titles=list(ks))
for i, k in enumerate(ks):
y_hat = make_pipeline(PolynomialFeatures(k), LinearRegression(fit_intercept=False)).fit(X, y_).predict(X)
fig.add_traces([go.Scatter(x=x, y=y_, mode="markers", name="Real Points", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y_hat, mode="markers", name="Predicted Points", marker=dict(color="blue", opacity=.7), showlegend=False)], rows=1, cols=i+1)
fig["layout"]["annotations"][i]["text"] = rf"$k={{0}}, MSE={{1}}$".format(k, round(np.mean((y_-y_hat)**2), 2))
fig.update_layout(title=r"$\text{(2) Simulated Data - Fitting Polynomials of Different Degrees}$",
margin=dict(t=60),
yaxis_title=r"$\widehat{y}$",
height=300).show()
###Output
_____no_output_____
###Markdown
Once we find the right $k$ (which in our case is 4) we manage to fit a perfect model, after which, as we increase $k$, the additional coefficients will be zero.
###Code
coefs = {}
for k in ks:
fit = make_pipeline(PolynomialFeatures(k), LinearRegression(fit_intercept=False)).fit(X, y_)
coefs[rf"$k={{{k}}}$"] = [round(c,3) for c in fit.steps[1][1].coef_]
pd.DataFrame.from_dict(coefs, orient='index', columns=[rf"$w_{{{i}}}$" for i in range(max(ks)+1)])
###Output
_____no_output_____
###Markdown
Fitting Polynomial Of Different Degrees - With Sample Noise

Still fitting for different values of $k$, let us add some standard Gaussian noise (i.e. $\mathcal{N}\left(0,1\right)$). This time we observe two things:

- Even for the correct $k=4$ model we are not able to achieve zero MSE.
- As we increase $k$ from $4$ towards $7$ we manage to decrease the error more and more.
###Code
y = y_ + np.random.normal(size=len(y_))
ks = range(2, 8)
fig = make_subplots(2, 3, subplot_titles=list(ks))
for i, k in enumerate(ks):
r,c = i//3+1, i%3+1
y_hat = make_pipeline(PolynomialFeatures(k), LinearRegression(fit_intercept=False)).fit(X, y).predict(X)
fig.add_traces([go.Scatter(x=x, y=y_, mode="markers", name="Real Points", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y, mode="markers", name="Observed Points", marker=dict(color="red", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y_hat, mode="markers", name="Predicted Points", marker=dict(color="blue", opacity=.7), showlegend=False)], rows=r, cols=c)
fig["layout"]["annotations"][i]["text"] = rf"$k={{0}}, MSE={{1}}$".format(k, round(np.mean((y-y_hat)**2), 2))
fig.update_layout(title=r"$\text{(4) Simulated Data With Noise - Fitting Polynomials of Different Degrees}$", margin=dict(t=80)).show()
###Output
_____no_output_____
###Markdown
How is it that we are able to fit "better" models for $k$s larger than the true one? As we increase $k$ we enable the model more "degrees of freedom" to try and adapt itself to the observed data. The higher $k$ the more the learner will "go after the noise" and miss the real signal of the data. In other words, what we have just observed is what is known as **overfitting**. Later in the course we will learn methods for detection and avoidance of overfitting.

Fitting Polynomial Over Different Sample Noise Levels

Next, let us set $k=4$ (the true value) and study the outputted models when training over different noise levels. Though we will only be changing the scale of the noise (i.e. its standard deviation $\sigma$, and with it the variance $\sigma^2$), changing other properties such as its distribution is interesting too. As we would expect, as we increase the scale of the noise our error increases. We can observe this also in a visual manner, where the fitted polynomial (in blue) less and less resembles the actual model (in black).
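One note on conventions before the code: in `np.random.normal` the `scale` argument is the standard deviation $\sigma$, not the variance:

```
import numpy as np

# scale is sigma (the standard deviation); the variance is scale**2
noise = np.random.normal(loc=0.0, scale=2.0, size=5)  # sigma = 2, so sigma^2 = 4
```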
###Code
scales = range(6)
fig = make_subplots(2, 3, subplot_titles=list(map(str, scales)))
for i, s in enumerate(scales):
r,c = i//3+1, i%3+1
y = y_ + np.random.normal(scale=s, size=len(y_))
y_hat = make_pipeline(PolynomialFeatures(4), LinearRegression(fit_intercept=False)).fit(X, y).predict(X)
fig.add_traces([go.Scatter(x=x, y=y_, mode="markers", name="Real Points", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y, mode="markers", name="Observed Points", marker=dict(color="red", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y_hat, mode="markers", name="Predicted Points", marker=dict(color="blue", opacity=.7), showlegend=False)], rows=r, cols=c)
fig["layout"]["annotations"][i]["text"] = rf"$\sigma^2={{0}}, MSE={{1}}$".format(s, round(np.mean((y-y_hat)**2), 2))
fig.update_layout(title=r"$\text{(5) Simulated Data - Different Noise Scales}$", margin=dict(t=80)).show()
###Output
_____no_output_____
###Markdown
The Influence Of $k$ And $\sigma^2$ On Error

Lastly, let us check how the error is influenced by both $k$ and $\sigma$. For each value of $k$ and $\sigma$ we will add noise drawn from $\mathcal{N}\left(0,\sigma^2\right)$ and then, based on the noisy data, let the learner select an hypothesis from $\mathcal{H}_{poly}^k$. We repeat the process for each set of $\left(k,\sigma\right)$ 10 times and report the mean MSE value. Results are seen in the heatmap below:
###Code
from sklearn.model_selection import ParameterGrid
df = []
for setting in ParameterGrid(dict(k=range(10), s=np.linspace(0, 5, 10), repetition=range(10))):
y = y_ + np.random.normal(scale=setting["s"], size=len(y_))
y_hat = make_pipeline(PolynomialFeatures(setting["k"]), LinearRegression(fit_intercept=False)).fit(X, y).predict(X)
df.append([setting["k"], setting["s"], np.mean((y-y_hat)**2)])
df = pd.DataFrame.from_records(df, columns=["k", "sigma","mse"]).groupby(["k","sigma"]).mean().reset_index()
go.Figure(go.Heatmap(x=df.k, y=df.sigma, z=df.mse, colorscale="amp"),
          layout=go.Layout(title=r"$\text{(6) Average Train } MSE \text{ As Function of } \left(k,\sigma\right)$",
                           xaxis_title=r"$k$ - Fitted Polynomial Degree",
                           yaxis_title=r"$\sigma$ - Noise Level (standard deviation)")).show()
###Output
_____no_output_____
###Markdown
Time To Think...

In the above figure, we observe the following trends:

- As already seen before, for the noise-free data, once we reach the correct $k$ we achieve zero MSE.
- Across all values of $k$, as we increase $\sigma^2$ we get higher MSE values.
- For all noise levels, we manage to reduce MSE values by increasing $k$.

So, by choosing a **richer** hypothesis class (i.e. larger, and one that can express more functions - polynomials of higher degree) we are able to choose a hypothesis that fits the **observed** data **better**, regardless of how noisy the data is.

Try and think how the above heatmap would look if instead of calculating the MSE over the training samples (i.e. the train error) we would have calculated it over a **new** set of test samples drawn from the same distribution.

Use the below code to create a test set. Change the code generating figure 6 such that the reported error is a test error. Do not forget to add the noise (that depends on $\sigma^2$) to the test data. What has changed between what we observe for the train error to the test error? What happens for high/low values of $\sigma^2$? What happens for high/low values of $k$?
###Code
# Generate the x values of the test set
testX = np.linspace(-1.2, 2, 40)[1::2].reshape(-1,1)
# Generate the noisy y values of the test set. Set the noise level (the scale parameter) according to the specific setting
testY = response(testX.flatten()) + np.random.normal(scale=___, size=len(testX))  # flatten avoids broadcasting (20,1)+(20,) into a (20,20) array
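# A hedged sketch of the evaluation step you will need (the variable names are hypothetical):
# after fitting a pipeline on the noisy training data, its test error is
# test_mse = np.mean((testY - fitted_pipeline.predict(testX))**2)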
###Output
_____no_output_____
###Markdown
Lab 03 - Polynomial FittingIn the previous lab we discussed linear regression and the OLS estimator for solving the minimization of the RSS. As wementioned, regression problems are a very wide family of settings and algorithms which we use to try to estimate the relation between a set of explanatory variables and a **continuous** response (i.e. $\mathcal{Y}\in\mathbb{R}^p$). In the following lab we will discuss one such setting called "Polynomial Fitting". Sometimes, the data (and the relation between the explanatory variables and response) can be described by some polynomialof some degree. Here, we only focus on the case where it is a polynomial of a single variable. That is: $$ p_k\left(x\right)=\sum_{i=0}^{k}\alpha_i x^i\quad\alpha_0,\ldots,\alpha_k\in\mathbb{R} $$So our hypothesis class is of the form:$$ \mathcal{H}^k_{poly}=\left\{p_k\,\Big|\, p_k\left(x\right)=\sum_{i=0}^{k}\alpha_i x^i\quad\alpha_0,\ldots,\alpha_k\in\mathbb{R}\right\} $$Notice that similar to linear regression, each hypothesis in the class is defined by a coefficients vector. Below are twoexamples (simulated and real) for datasets where the relation between the explanatory variable and response is polynomial.
###Code
import sys
sys.path.append("../")
from utils import *
response = lambda x: x**4 - 2*x**3 - .5*x**2 + 1
x = np.linspace(-1.2, 2, 40)[0::2]
y_ = response(x)
df = pd.read_csv("../datasets/Position_Salaries.csv", skiprows=2, index_col=0)
x2, y2 = df.index, df.Salary
make_subplots(1, 2, subplot_titles=(r"$\text{Simulated Data: }y=x^4-2x^3-0.5x^2+1$", r"$\text{Positions Salary}$"))\
.add_traces([go.Scatter(x=x, y=y_, mode="markers", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x2, y=y2, mode="markers",marker=dict(color="black", opacity=.7), showlegend=False)],
rows=[1,1], cols=[1,2])\
.update_layout(title=r"$\text{(1) Datasets For Polynomial Fitting}$", margin=dict(t=100)).show()
###Output
_____no_output_____
###Markdown
As we have discussed in class, solving a polynomial fitting problem can be done by first manipulating the input data,such that we represent each sample $x_i\in\mathbb{R}$ as a vector $\mathbf{x}_i=\left(x^0,x^1,\ldots,x^k\right)$. Then,we treat the data as a design matrix $\mathbf{X}\in\mathbb{R}^{m\times k}$ of a linear regression problem.For the simulated dataset above, which is of a polynomial of degree 4, the design matrix looks as follows:
###Code
from sklearn.preprocessing import PolynomialFeatures
m, k, X = 5, 4, x.reshape(-1, 1)
pd.DataFrame(PolynomialFeatures(k).fit_transform(X[:m]),
columns=[rf"$x^{{0}}$".format(i) for i in range(0, k+1)],
index=[rf"$x_{{0}}$".format(i) for i in range(1, m+1)])
###Output
_____no_output_____
###Markdown
Fitting A Polynomial Of Different DegreesNext, let us fit polynomials of different degrees and different noise properties to study how it influences the learned model.We begin with the noise-less case where we fit for different values of $k$. As we increase $k$ we manage to fit a modelthat describes the data in a better way, reflected by the decrease in the MSE.*Notice that in both the `PolynomialFeatures` and `LinearRegression` functions we can add the bias/intercept parameter. As in this case it makes no difference, we will include the bias in the polynomial features transformation and fit a linear regression **without** an intercept. The bias parameter of the polynomial features (i.e. $x^0$) will in reality be the intercept of the linear regression.*
###Code
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import make_pipeline
ks = [2, 3, 4, 5]
fig = make_subplots(1, 4, subplot_titles=list(ks))
for i, k in enumerate(ks):
y_hat = make_pipeline(PolynomialFeatures(k), LinearRegression(fit_intercept=False)).fit(X, y_).predict(X)
fig.add_traces([go.Scatter(x=x, y=y_, mode="markers", name="Real Points", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y_hat, mode="markers", name="Predicted Points", marker=dict(color="blue", opacity=.7), showlegend=False)], rows=1, cols=i+1)
fig["layout"]["annotations"][i]["text"] = rf"$k={{0}}, MSE={{1}}$".format(k, round(np.mean((y_-y_hat)**2), 2))
fig.update_layout(title=r"$\text{(2) Simulated Data - Fitting Polynomials of Different Degrees}$",
margin=dict(t=60),
yaxis_title=r"$\widehat{y}$",
height=300).show()
###Output
_____no_output_____
###Markdown
Once we find the right $k$ (which in our case is 4) we managed to fit a perfect model, after which, as we increase $k$, the additional coefficients will be zero.
###Code
coefs = {}
for k in ks:
fit = make_pipeline(PolynomialFeatures(k), LinearRegression(fit_intercept=False)).fit(X, y_)
coefs[rf"$k={{{k}}}$"] = [round(c,3) for c in fit.steps[1][1].coef_]
pd.DataFrame.from_dict(coefs, orient='index', columns=[rf"$w_{{{i}}}$" for i in range(max(ks)+1)])
###Output
_____no_output_____
###Markdown
Fitting Polynomial Of Different Degrees - With Sample NoiseStill fitting for different values of $k$, let us add some standard Gaussian noise (i.e. $\mathcal{N}\left(0,1\right)$).This time we observe two things:- Even for the correct $k=4$ model we are not able to achieve zero MSE.- As we increase $4<k\rightarrow 7$ we manage to decrease the error more and more.
###Code
y = y_ + np.random.normal(size=len(y_))
ks = range(2, 8)
fig = make_subplots(2, 3, subplot_titles=list(ks))
for i, k in enumerate(ks):
r,c = i//3+1, i%3+1
y_hat = make_pipeline(PolynomialFeatures(k), LinearRegression(fit_intercept=False)).fit(X, y).predict(X)
fig.add_traces([go.Scatter(x=x, y=y_, mode="markers", name="Real Points", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y, mode="markers", name="Observed Points", marker=dict(color="red", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y_hat, mode="markers", name="Predicted Points", marker=dict(color="blue", opacity=.7), showlegend=False)], rows=r, cols=c)
fig["layout"]["annotations"][i]["text"] = rf"$k={{0}}, MSE={{1}}$".format(k, round(np.mean((y-y_hat)**2), 2))
fig.update_layout(title=r"$\text{(4) Simulated Data With Noise - Fitting Polynomials of Different Degrees}$", margin=dict(t=80)).show()
###Output
_____no_output_____
###Markdown
How is it that we are able to fit "better" models for $k$s larger than the true one? As we increase $k$ we enable the modelmore "degrees of freedom" to try and adapt itself to the observed data. The higher $k$ the more the learner will "go afterthe noise" and miss the real signal of the data. In other words, what we have just observed is what is known as **overfitting**.Later in the course we will learn methods for detection and avoidance of overfitting. Fitting Polynomial Over Different Sample Noise LevelsNext, let us set $k=4$ (the true values) and study the outputted models when training over different noise levels. Thoughwe will only be changing the scale of the noise (i.e. the variance, $\sigma^2$), changing other properties such as itsdistribution is interesting too. As we would expect, as we increase the scale of the noise our error increases. We canobserve this also in a visual manner, where the fitted polynomial (in blue) less and less resembles the actual model (in black).
###Code
scales = range(6)
fig = make_subplots(2, 3, subplot_titles=list(map(str, scales)))
for i, s in enumerate(scales):
r,c = i//3+1, i%3+1
y = y_ + np.random.normal(scale=s, size=len(y_))
y_hat = make_pipeline(PolynomialFeatures(4), LinearRegression(fit_intercept=False)).fit(X, y).predict(X)
fig.add_traces([go.Scatter(x=x, y=y_, mode="markers", name="Real Points", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y, mode="markers", name="Observed Points", marker=dict(color="red", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y_hat, mode="markers", name="Predicted Points", marker=dict(color="blue", opacity=.7), showlegend=False)], rows=r, cols=c)
fig["layout"]["annotations"][i]["text"] = rf"$\sigma^2={{0}}, MSE={{1}}$".format(s, round(np.mean((y-y_hat)**2), 2))
fig.update_layout(title=r"$\text{(5) Simulated Data - Different Noise Scales}$", margin=dict(t=80)).show()
###Output
_____no_output_____
###Markdown
The Influence Of $k$ And $\sigma^2$ On Error

Lastly, let us check how the error is influenced by both $k$ and $\sigma^2$. For each value of $k$ and $\sigma^2$ we will add noise drawn from $\mathcal{N}\left(0,\sigma^2\right)$ and then, based on the noisy data, let the learner select a hypothesis from $\mathcal{H}_{poly}^k$. We repeat the process 10 times for each pair $\left(k,\sigma^2\right)$ and report the mean MSE value. Results are shown in the heatmap below:
###Code
from sklearn.model_selection import ParameterGrid
df = []
for setting in ParameterGrid(dict(k=range(10), s=np.linspace(0, 5, 10), repetition=range(10))):
y = y_ + np.random.normal(scale=setting["s"], size=len(y_))
y_hat = make_pipeline(PolynomialFeatures(setting["k"]), LinearRegression(fit_intercept=False)).fit(X, y).predict(X)
df.append([setting["k"], setting["s"], np.mean((y-y_hat)**2)])
df = pd.DataFrame.from_records(df, columns=["k", "sigma","mse"]).groupby(["k","sigma"]).mean().reset_index()
go.Figure(go.Heatmap(x=df.k, y=df.sigma, z=df.mse, colorscale="amp"),
layout=go.Layout(title=r"$\text{(6) Average Train } MSE \text{ As Function of } \left(k,\sigma^2\right)$",
xaxis_title=r"$k$ - Fitted Polynomial Degree",
yaxis_title=r"$\sigma^2$ - Noise Levels")).show()
###Output
_____no_output_____
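###Markdown
Note that each cell of the heatmap averages 10 repetitions; the spread across repetitions can itself be inspected. A small sketch for one setting ($k=4$, $\sigma=2$, our arbitrary pick for illustration):
###Code
mses = []
for _ in range(10):
    y_rep = y_ + np.random.normal(scale=2, size=len(y_))
    y_hat_rep = make_pipeline(PolynomialFeatures(4), LinearRegression(fit_intercept=False)).fit(X, y_rep).predict(X)
    mses.append(np.mean((y_rep - y_hat_rep)**2))
print(round(np.mean(mses), 2), round(np.std(mses), 2))   # mean and spread of the train MSE over repetitions
###Output
_____no_output_____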
###Markdown
Time To Think...

In the above figure, we observe the following trends:
- As already seen before, for the noise-free data, once we reach the correct $k$ we achieve zero MSE.
- Across all values of $k$, as we increase $\sigma^2$ we get higher MSE values.
- For all noise levels, we manage to reduce MSE values by increasing $k$.

So, by choosing a **richer** hypothesis class (i.e. a larger one that can express more functions - polynomials of higher degree) we are able to choose a hypothesis that fits the **observed** data **better**, regardless of how noisy the data is. Try to imagine how the above heatmap would look if, instead of calculating the MSE over the training samples (i.e. the train error), we had calculated it over a **new** set of test samples drawn from the same distribution.

Use the code below to create a test set. Change the code generating figure 6 such that the reported error is a test error. Do not forget to add the noise (which depends on $\sigma^2$) to the test data. What has changed between the train error and the test error? What happens for high/low values of $\sigma^2$? What happens for high/low values of $k$?
###Code
# Generate the x values of the test set
testX = np.linspace(-1.2, 2, 40)[1::2].reshape(-1,1)
# Generate the noisy y values of the test set. Set the noise level (the scale parameter) according to the specific setting
testY = response(testX).ravel() + np.random.normal(scale=___, size=len(testX))
###Output
_____no_output_____
###Markdown
Lab 03 - Polynomial Fitting

In the previous lab we discussed linear regression and the OLS estimator for minimizing the RSS. As we mentioned, regression problems are a very wide family of settings and algorithms used to estimate the relation between a set of explanatory variables and a **continuous** response (i.e. $\mathcal{Y}\in\mathbb{R}^p$). In the following lab we will discuss one such setting called "Polynomial Fitting". Sometimes, the data (and the relation between the explanatory variables and the response) can be described by a polynomial of some degree. Here, we only focus on the case of a polynomial in a single variable. That is:

$$ p_k\left(x\right)=\sum_{i=0}^{k}\alpha_i x^i\quad\alpha_0,\ldots,\alpha_k\in\mathbb{R} $$

So our hypothesis class is of the form:

$$ \mathcal{H}^k_{poly}=\left\{p_k\,\Big|\, p_k\left(x\right)=\sum_{i=0}^{k}\alpha_i x^i\quad\alpha_0,\ldots,\alpha_k\in\mathbb{R}\right\} $$

Notice that, similar to linear regression, each hypothesis in the class is defined by a coefficient vector. Below are two examples (one simulated, one real) of datasets where the relation between the explanatory variable and the response is polynomial.
###Code
import sys
sys.path.append("../")
from utils import *
response = lambda x: x**4 - 2*x**3 - .5*x**2 + 1
x = np.linspace(-1.2, 2, 40)[0::2]
y_ = response(x)
df = pd.read_csv("../datasets/Position_Salaries.csv", skiprows=2, index_col=0)
x2, y2 = df.index, df.Salary
make_subplots(1, 2, subplot_titles=(r"$\text{Simulated Data: }y=x^4-2x^3-0.5x^2+1$", r"$\text{Positions Salary}$"))\
.add_traces([go.Scatter(x=x, y=y_, mode="markers", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x2, y=y2, mode="markers",marker=dict(color="black", opacity=.7), showlegend=False)],
rows=[1,1], cols=[1,2])\
.update_layout(title=r"$\text{(1) Datasets For Polynomial Fitting}$", margin=dict(t=100)).show()
###Output
_____no_output_____
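###Markdown
To make the notation concrete: a single hypothesis in $\mathcal{H}^k_{poly}$ is just a coefficient vector $\left(\alpha_0,\ldots,\alpha_k\right)$. A minimal sketch (the helper `p` below is ours, not part of the lab code) evaluating the member that generated the simulated data:
###Code
alpha = np.array([1, 0, -.5, -2, 1])                 # alpha_0, ..., alpha_4 of y = x^4 - 2x^3 - 0.5x^2 + 1
p = lambda t: sum(a * t**i for i, a in enumerate(alpha))
print(np.allclose(p(x), y_))                         # the hypothesis reproduces the noiseless response
###Output
_____no_output_____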
###Markdown
As we have discussed in class, solving a polynomial fitting problem can be done by first manipulating the input data, such that we represent each sample $x_i\in\mathbb{R}$ as a vector $\mathbf{x}_i=\left(x^0,x^1,\ldots,x^k\right)$. Then, we treat the data as a design matrix $\mathbf{X}\in\mathbb{R}^{m\times (k+1)}$ of a linear regression problem. For the simulated dataset above, which comes from a polynomial of degree 4, the design matrix looks as follows:
###Code
from sklearn.preprocessing import PolynomialFeatures
m, k, X = 5, 4, x.reshape(-1, 1)
pd.DataFrame(PolynomialFeatures(k).fit_transform(X[:m]),
columns=[rf"$x^{{0}}$".format(i) for i in range(0, k+1)],
index=[rf"$x_{{0}}$".format(i) for i in range(1, m+1)])
###Output
_____no_output_____
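###Markdown
The reduction to linear regression can be verified directly: building the Vandermonde matrix by hand and solving ordinary least squares recovers the true coefficients. A minimal sketch (using `np.vander` and `np.linalg.lstsq`, our choice for illustration):
###Code
V = np.vander(x, N=5, increasing=True)       # columns x^0, x^1, ..., x^4 - the design matrix shown above
w, *_ = np.linalg.lstsq(V, y_, rcond=None)   # OLS on the transformed features
print(np.round(w, 3))                        # approximately [1, 0, -0.5, -2, 1]
###Output
_____no_output_____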
###Markdown
Fitting A Polynomial Of Different Degrees

Next, let us fit polynomials of different degrees under different noise properties to study how they influence the learned model. We begin with the noise-less case, where we fit for different values of $k$. As we increase $k$ we manage to fit a model that describes the data better, reflected by the decrease in the MSE.

*Notice that both the `PolynomialFeatures` and `LinearRegression` functions can add the bias/intercept parameter. As in this case it makes no difference, we include the bias in the polynomial features transformation and fit a linear regression **without** an intercept. The bias parameter of the polynomial features (i.e. $x^0$) is in reality the intercept of the linear regression.*
###Code
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import make_pipeline
ks = [2, 3, 4, 5]
fig = make_subplots(1, 4, subplot_titles=list(ks))
for i, k in enumerate(ks):
y_hat = make_pipeline(PolynomialFeatures(k), LinearRegression(fit_intercept=False)).fit(X, y_).predict(X)
fig.add_traces([go.Scatter(x=x, y=y_, mode="markers", name="Real Points", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y_hat, mode="markers", name="Predicted Points", marker=dict(color="blue", opacity=.7), showlegend=False)], rows=1, cols=i+1)
fig["layout"]["annotations"][i]["text"] = rf"$k={{0}}, MSE={{1}}$".format(k, round(np.mean((y_-y_hat)**2), 2))
fig.update_layout(title=r"$\text{(2) Simulated Data - Fitting Polynomials of Different Degrees}$",
margin=dict(t=60),
yaxis_title=r"$\widehat{y}$",
height=300).show()
###Output
_____no_output_____
###Markdown
Once we reach the right $k$ (which in our case is 4) we manage to fit a perfect model; as we increase $k$ further, the additional coefficients are fitted as zero.
###Code
coefs = {}
for k in ks:
fit = make_pipeline(PolynomialFeatures(k), LinearRegression(fit_intercept=False)).fit(X, y_)
coefs[rf"$k={{{k}}}$"] = [round(c,3) for c in fit.steps[1][1].coef_]
pd.DataFrame.from_dict(coefs, orient='index', columns=[rf"$w_{{{i}}}$" for i in range(max(ks)+1)])
###Output
_____no_output_____
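###Markdown
As a sanity check on the table above, `np.polyfit` (which solves the same least-squares problem) returns identical degree-4 coefficients; note it orders them from the highest power down, so we reverse:
###Code
print(np.round(np.polyfit(x, y_, deg=4)[::-1], 3))   # matches the k=4 row: [1, 0, -0.5, -2, 1]
###Output
_____no_output_____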
###Markdown
Fitting Polynomial Of Different Degrees - With Sample Noise

Still fitting for different values of $k$, let us add some standard Gaussian noise (i.e. $\mathcal{N}\left(0,1\right)$). This time we observe two things:
- Even for the correct $k=4$ model we are not able to achieve zero MSE.
- As we increase $k$ beyond 4 and up to 7, we manage to decrease the error more and more.
###Code
y = y_ + np.random.normal(size=len(y_))
ks = range(2, 8)
fig = make_subplots(2, 3, subplot_titles=list(ks))
for i, k in enumerate(ks):
r,c = i//3+1, i%3+1
y_hat = make_pipeline(PolynomialFeatures(k), LinearRegression(fit_intercept=False)).fit(X, y).predict(X)
fig.add_traces([go.Scatter(x=x, y=y_, mode="markers", name="Real Points", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y, mode="markers", name="Observed Points", marker=dict(color="red", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y_hat, mode="markers", name="Predicted Points", marker=dict(color="blue", opacity=.7), showlegend=False)], rows=r, cols=c)
fig["layout"]["annotations"][i]["text"] = rf"$k={{0}}, MSE={{1}}$".format(k, round(np.mean((y-y_hat)**2), 2))
fig.update_layout(title=r"$\text{(4) Simulated Data With Noise - Fitting Polynomials of Different Degrees}$", margin=dict(t=80)).show()
###Output
_____no_output_____
###Markdown
How is it that we are able to fit "better" models for values of $k$ larger than the true one? As we increase $k$ we grant the model more "degrees of freedom" with which to adapt itself to the observed data. The higher $k$, the more the learner will "go after the noise" and miss the real signal of the data. In other words, what we have just observed is known as **overfitting**. Later in the course we will learn methods for the detection and avoidance of overfitting.

Fitting Polynomial Over Different Sample Noise Levels

Next, let us set $k=4$ (the true value) and study the resulting models when training over different noise levels. Though we will only be changing the scale of the noise (i.e. the variance $\sigma^2$), changing other properties such as its distribution is interesting too. As we would expect, as we increase the scale of the noise our error increases. We can also observe this visually: the fitted polynomial (in blue) resembles the actual model (in black) less and less.
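Before running the noise-scale experiment, here is a quick numerical sketch of the overfitting effect just described (the even/odd split into train and held-out points is our assumption for illustration, not part of the lab): the train MSE keeps shrinking as $k$ grows, while the held-out MSE bottoms out near the true degree and then climbs.
###Code
grid = np.linspace(-1.2, 2, 40)
x_tr, x_te = grid[0::2].reshape(-1, 1), grid[1::2].reshape(-1, 1)
y_tr = response(x_tr).ravel() + np.random.normal(size=len(x_tr))
y_te = response(x_te).ravel() + np.random.normal(size=len(x_te))
for k_ in range(2, 10):
    model = make_pipeline(PolynomialFeatures(k_), LinearRegression(fit_intercept=False)).fit(x_tr, y_tr)
    print(f"k={k_}: train MSE={np.mean((y_tr - model.predict(x_tr))**2):.2f}, "
          f"held-out MSE={np.mean((y_te - model.predict(x_te))**2):.2f}")
###Output
_____no_output_____
###Markdown
With that in mind, the noise-scale experiment itself: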
###Code
scales = range(6)
fig = make_subplots(2, 3, subplot_titles=list(map(str, scales)))
for i, s in enumerate(scales):
r,c = i//3+1, i%3+1
y = y_ + np.random.normal(scale=s, size=len(y_))
y_hat = make_pipeline(PolynomialFeatures(4), LinearRegression(fit_intercept=False)).fit(X, y).predict(X)
fig.add_traces([go.Scatter(x=x, y=y_, mode="markers", name="Real Points", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y, mode="markers", name="Observed Points", marker=dict(color="red", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y_hat, mode="markers", name="Predicted Points", marker=dict(color="blue", opacity=.7), showlegend=False)], rows=r, cols=c)
fig["layout"]["annotations"][i]["text"] = rf"$\sigma^2={{0}}, MSE={{1}}$".format(s, round(np.mean((y-y_hat)**2), 2))
fig.update_layout(title=r"$\text{(5) Simulated Data - Different Noise Scales}$", margin=dict(t=80)).show()
###Output
_____no_output_____
###Markdown
The Influence Of $k$ And $\sigma^2$ On Error

Lastly, let us check how the error is influenced by both $k$ and $\sigma^2$. For each value of $k$ and $\sigma^2$ we will add noise drawn from $\mathcal{N}\left(0,\sigma^2\right)$ and then, based on the noisy data, let the learner select a hypothesis from $\mathcal{H}_{poly}^k$. We repeat the process 10 times for each pair $\left(k,\sigma^2\right)$ and report the mean MSE value. Results are shown in the heatmap below:
###Code
from sklearn.model_selection import ParameterGrid
# Generate the x values of the test set
testX = np.linspace(-1.2, 2, 40)[1::2].reshape(-1,1)
# The noisy y values of the test set are generated inside the second loop below,
# so that the noise level matches each specific setting
df = []
for setting in ParameterGrid(dict(k=range(10), s=np.linspace(0, 5, 10), repetition=range(10))):
y = y_ + np.random.normal(scale=setting["s"], size=len(y_))
y_hat = make_pipeline(PolynomialFeatures(setting["k"]), LinearRegression(fit_intercept=False)).fit(X, y).predict(X)
df.append([setting["k"], setting["s"], np.mean((y-y_hat)**2)])
df = pd.DataFrame.from_records(df, columns=["k", "sigma","mse"]).groupby(["k","sigma"]).mean().reset_index()
go.Figure(go.Heatmap(x=df.k, y=df.sigma, z=df.mse, colorscale="amp"),
layout=go.Layout(title=r"$\text{(6) Average Train } MSE \text{ As Function of } \left(k,\sigma^2\right)$",
xaxis_title=r"$k$ - Fitted Polynomial Degree",
yaxis_title=r"$\sigma^2$ - Noise Levels")).show()
df = []
for setting in ParameterGrid(dict(k=range(10), s=np.linspace(0, 5, 10), repetition=range(10))):
    y = y_ + np.random.normal(scale=setting["s"], size=len(y_))
    testY = response(testX).ravel() + np.random.normal(scale=setting["s"], size=len(testX))
    y_hat = make_pipeline(PolynomialFeatures(setting["k"]), LinearRegression(fit_intercept=False)).fit(X, y).predict(testX)
    df.append([setting["k"], setting["s"], np.mean((testY-y_hat)**2)])
df = pd.DataFrame.from_records(df, columns=["k", "sigma","mse"]).groupby(["k","sigma"]).mean().reset_index()
go.Figure(go.Heatmap(x=df.k, y=df.sigma, z=df.mse, colorscale="amp"),
          layout=go.Layout(title=r"$\text{(7) Average Test } MSE \text{ As Function of } \left(k,\sigma^2\right)$",
                           xaxis_title=r"$k$ - Fitted Polynomial Degree",
                           yaxis_title=r"$\sigma^2$ - Noise Levels")).show()
###Output
_____no_output_____
###Markdown
Time To Think...

In the above figure, we observe the following trends:
- As already seen before, for the noise-free data, once we reach the correct $k$ we achieve zero MSE.
- Across all values of $k$, as we increase $\sigma^2$ we get higher MSE values.
- For all noise levels, we manage to reduce MSE values by increasing $k$.

So, by choosing a **richer** hypothesis class (i.e. a larger one that can express more functions - polynomials of higher degree) we are able to choose a hypothesis that fits the **observed** data **better**, regardless of how noisy the data is. Try to imagine how the above heatmap would look if, instead of calculating the MSE over the training samples (i.e. the train error), we had calculated it over a **new** set of test samples drawn from the same distribution.

Use the code below to create a test set. Change the code generating figure 6 such that the reported error is a test error. Do not forget to add the noise (which depends on $\sigma^2$) to the test data. What has changed between the train error and the test error? What happens for high/low values of $\sigma^2$? What happens for high/low values of $k$?
###Code
# Generate the x values of the test set
testX = np.linspace(-1.2, 2, 40)[1::2].reshape(-1,1)
# Generate the noisy y values of the test set. Set the noise level (the scale parameter) according to the specific setting
testY = response(testX).ravel() + np.random.normal(scale=10, size=len(testX))
###Output
_____no_output_____
###Markdown
Lab 03 - Polynomial Fitting

In the previous lab we discussed linear regression and the OLS estimator for minimizing the RSS. As we mentioned, regression problems are a very wide family of settings and algorithms used to estimate the relation between a set of explanatory variables and a **continuous** response (i.e. $\mathcal{Y}\in\mathbb{R}^p$). In the following lab we will discuss one such setting called "Polynomial Fitting". Sometimes, the data (and the relation between the explanatory variables and the response) can be described by a polynomial of some degree. Here, we only focus on the case of a polynomial in a single variable. That is:

$$ p_k\left(x\right)=\sum_{i=0}^{k}\alpha_i x^i\quad\alpha_0,\ldots,\alpha_k\in\mathbb{R} $$

So our hypothesis class is of the form:

$$ \mathcal{H}^k_{poly}=\left\{p_k\,\Big|\, p_k\left(x\right)=\sum_{i=0}^{k}\alpha_i x^i\quad\alpha_0,\ldots,\alpha_k\in\mathbb{R}\right\} $$

Notice that, similar to linear regression, each hypothesis in the class is defined by a coefficient vector. Below are two examples (one simulated, one real) of datasets where the relation between the explanatory variable and the response is polynomial.
###Code
import sys
sys.path.append("../")
from utils import *
response = lambda x: x**4 - 2*x**3 - .5*x**2 + 1
x = np.linspace(-1.2, 2, 40)[0::2]
y_ = response(x)
df = pd.read_csv("../datasets/Position_Salaries.csv", skiprows=2, index_col=0)
x2, y2 = df.index, df.Salary
make_subplots(1, 2, subplot_titles=(r"$\text{Simulated Data: }y=x^4-2x^3-0.5x^2+1$", r"$\text{Positions Salary}$"))\
.add_traces([go.Scatter(x=x, y=y_, mode="markers", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x2, y=y2, mode="markers",marker=dict(color="black", opacity=.7), showlegend=False)],
rows=[1,1], cols=[1,2])\
.update_layout(title=r"$\text{(1) Datasets For Polynomial Fitting}$", margin=dict(t=100)).show()
###Output
_____no_output_____
###Markdown
As we have discussed in class, solving a polynomial fitting problem can be done by first manipulating the input data, such that we represent each sample $x_i\in\mathbb{R}$ as a vector $\mathbf{x}_i=\left(x^0,x^1,\ldots,x^k\right)$. Then, we treat the data as a design matrix $\mathbf{X}\in\mathbb{R}^{m\times (k+1)}$ of a linear regression problem. For the simulated dataset above, which comes from a polynomial of degree 4, the design matrix looks as follows:
###Code
from sklearn.preprocessing import PolynomialFeatures
m, k, X = 5, 4, x.reshape(-1, 1)
pd.DataFrame(PolynomialFeatures(k).fit_transform(X[:m]),
columns=[rf"$x^{{0}}$".format(i) for i in range(0, k+1)],
index=[rf"$x_{{0}}$".format(i) for i in range(1, m+1)])
###Output
_____no_output_____
###Markdown
Fitting A Polynomial Of Different Degrees

Next, let us fit polynomials of different degrees under different noise properties to study how they influence the learned model. We begin with the noise-less case, where we fit for different values of $k$. As we increase $k$ we manage to fit a model that describes the data better, reflected by the decrease in the MSE.

*Notice that both the `PolynomialFeatures` and `LinearRegression` functions can add the bias/intercept parameter. As in this case it makes no difference, we include the bias in the polynomial features transformation and fit a linear regression **without** an intercept. The bias parameter of the polynomial features (i.e. $x^0$) is in reality the intercept of the linear regression.*
###Code
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import make_pipeline
ks = [2, 3, 4, 5]
fig = make_subplots(1, 4, subplot_titles=list(ks))
for i, k in enumerate(ks):
y_hat = make_pipeline(PolynomialFeatures(k), LinearRegression(fit_intercept=False)).fit(X, y_).predict(X)
fig.add_traces([go.Scatter(x=x, y=y_, mode="markers", name="Real Points", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y_hat, mode="markers", name="Predicted Points", marker=dict(color="blue", opacity=.7), showlegend=False)], rows=1, cols=i+1)
fig["layout"]["annotations"][i]["text"] = rf"$k={{0}}, MSE={{1}}$".format(k, round(np.mean((y_-y_hat)**2), 2))
fig.update_layout(title=r"$\text{(2) Simulated Data - Fitting Polynomials of Different Degrees}$",
margin=dict(t=60),
yaxis_title=r"$\widehat{y}$",
height=300).show()
###Output
_____no_output_____
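###Markdown
The note above about bias handling can be checked directly. A small sketch comparing the two equivalent parameterizations (bias inside the features vs. an explicit intercept):
###Code
p1 = make_pipeline(PolynomialFeatures(4), LinearRegression(fit_intercept=False)).fit(X, y_)
p2 = make_pipeline(PolynomialFeatures(4, include_bias=False), LinearRegression(fit_intercept=True)).fit(X, y_)
print(np.allclose(p1.predict(X), p2.predict(X)))                               # identical predictions
print(round(p1.steps[1][1].coef_[0], 3), round(p2.steps[1][1].intercept_, 3))  # x^0 coefficient equals the intercept
###Output
_____no_output_____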
###Markdown
Once we reach the right $k$ (which in our case is 4) we manage to fit a perfect model; as we increase $k$ further, the additional coefficients are fitted as zero.
###Code
coefs = {}
for k in ks:
fit = make_pipeline(PolynomialFeatures(k), LinearRegression(fit_intercept=False)).fit(X, y_)
coefs[rf"$k={{{k}}}$"] = [round(c,3) for c in fit.steps[1][1].coef_]
pd.DataFrame.from_dict(coefs, orient='index', columns=[rf"$w_{{{i}}}$" for i in range(max(ks)+1)])
###Output
_____no_output_____
###Markdown
Fitting Polynomial Of Different Degrees - With Sample Noise

Still fitting for different values of $k$, let us add some standard Gaussian noise (i.e. $\mathcal{N}\left(0,1\right)$). This time we observe two things:
- Even for the correct $k=4$ model we are not able to achieve zero MSE.
- As we increase $k$ beyond 4 and up to 7, we manage to decrease the error more and more.
###Code
y = y_ + np.random.normal(size=len(y_))
ks = range(2, 8)
fig = make_subplots(2, 3, subplot_titles=list(ks))
for i, k in enumerate(ks):
r,c = i//3+1, i%3+1
y_hat = make_pipeline(PolynomialFeatures(k), LinearRegression(fit_intercept=False)).fit(X, y).predict(X)
fig.add_traces([go.Scatter(x=x, y=y_, mode="markers", name="Real Points", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y, mode="markers", name="Observed Points", marker=dict(color="red", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y_hat, mode="markers", name="Predicted Points", marker=dict(color="blue", opacity=.7), showlegend=False)], rows=r, cols=c)
fig["layout"]["annotations"][i]["text"] = rf"$k={{0}}, MSE={{1}}$".format(k, round(np.mean((y-y_hat)**2), 2))
fig.update_layout(title=r"$\text{(4) Simulated Data With Noise - Fitting Polynomials of Different Degrees}$", margin=dict(t=80)).show()
###Output
_____no_output_____
###Markdown
How is it that we are able to fit "better" models for values of $k$ larger than the true one? As we increase $k$ we grant the model more "degrees of freedom" with which to adapt itself to the observed data. The higher $k$, the more the learner will "go after the noise" and miss the real signal of the data. In other words, what we have just observed is known as **overfitting**. Later in the course we will learn methods for the detection and avoidance of overfitting.

Fitting Polynomial Over Different Sample Noise Levels

Next, let us set $k=4$ (the true value) and study the resulting models when training over different noise levels. Though we will only be changing the scale of the noise (i.e. the variance $\sigma^2$), changing other properties such as its distribution is interesting too. As we would expect, as we increase the scale of the noise our error increases. We can also observe this visually: the fitted polynomial (in blue) resembles the actual model (in black) less and less.
###Code
scales = range(6)
fig = make_subplots(2, 3, subplot_titles=list(map(str, scales)))
for i, s in enumerate(scales):
r,c = i//3+1, i%3+1
y = y_ + np.random.normal(scale=s, size=len(y_))
y_hat = make_pipeline(PolynomialFeatures(4), LinearRegression(fit_intercept=False)).fit(X, y).predict(X)
fig.add_traces([go.Scatter(x=x, y=y_, mode="markers", name="Real Points", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y, mode="markers", name="Observed Points", marker=dict(color="red", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y_hat, mode="markers", name="Predicted Points", marker=dict(color="blue", opacity=.7), showlegend=False)], rows=r, cols=c)
fig["layout"]["annotations"][i]["text"] = rf"$\sigma^2={{0}}, MSE={{1}}$".format(s, round(np.mean((y-y_hat)**2), 2))
fig.update_layout(title=r"$\text{(5) Simulated Data - Different Noise Scales}$", margin=dict(t=80)).show()
###Output
_____no_output_____
###Markdown
The Influence Of $k$ And $\sigma^2$ On Error

Lastly, let us check how the error is influenced by both $k$ and $\sigma^2$. For each value of $k$ and $\sigma^2$ we will add noise drawn from $\mathcal{N}\left(0,\sigma^2\right)$ and then, based on the noisy data, let the learner select a hypothesis from $\mathcal{H}_{poly}^k$. We repeat the process 10 times for each pair $\left(k,\sigma^2\right)$ and report the mean MSE value. Results are shown in the heatmap below:
###Code
from sklearn.model_selection import ParameterGrid
df = []
for setting in ParameterGrid(dict(k=range(10), s=np.linspace(0, 5, 10), repetition=range(10))):
y = y_ + np.random.normal(scale=setting["s"], size=len(y_))
y_hat = make_pipeline(PolynomialFeatures(setting["k"]), LinearRegression(fit_intercept=False)).fit(X, y).predict(X)
df.append([setting["k"], setting["s"], np.mean((y-y_hat)**2)])
df = pd.DataFrame.from_records(df, columns=["k", "sigma","mse"]).groupby(["k","sigma"]).mean().reset_index()
# go.Figure(go.Heatmap(x=df.k, y=df.sigma, z=df.mse, colorscale="amp"),
# layout=go.Layout(title=r"$\text{(6) Average Train } MSE \text{ As Function of } \left(k,\sigma^2\right)$",
# xaxis_title=r"$k$ - Fitted Polynomial Degree",
# yaxis_title=r"$\sigma^2$ - Noise Levels")).show()
###Output
_____no_output_____
###Markdown
Time To Think...

In the above figure, we observe the following trends:
- As already seen before, for the noise-free data, once we reach the correct $k$ we achieve zero MSE.
- Across all values of $k$, as we increase $\sigma^2$ we get higher MSE values.
- For all noise levels, we manage to reduce MSE values by increasing $k$.

So, by choosing a **richer** hypothesis class (i.e. a larger one that can express more functions - polynomials of higher degree) we are able to choose a hypothesis that fits the **observed** data **better**, regardless of how noisy the data is. Try to imagine how the above heatmap would look if, instead of calculating the MSE over the training samples (i.e. the train error), we had calculated it over a **new** set of test samples drawn from the same distribution.

Use the code below to create a test set. Change the code generating figure 6 such that the reported error is a test error. Do not forget to add the noise (which depends on $\sigma^2$) to the test data. What has changed between the train error and the test error? What happens for high/low values of $\sigma^2$? What happens for high/low values of $k$?
###Code
# Generate the x values of the test set
testX = np.linspace(-1.2, 2, 40)[1::2].reshape(-1,1)
# Generate the noisy y values of the test set. Set the noise level (the scale parameter) according to the specific setting
from sklearn.model_selection import ParameterGrid
df = []
for setting in ParameterGrid(dict(k=range(10), s=np.linspace(0, 5, 10), repetition=range(10))):
y = y_ + np.random.normal(scale=setting["s"], size=len(y_))
    testY = response(testX).ravel() + np.random.normal(scale=setting["s"], size=len(testX))
y_hat = make_pipeline(PolynomialFeatures(setting["k"]), LinearRegression(fit_intercept=False)).fit(X, y).predict(testX)
df.append([setting["k"], setting["s"], np.mean((testY-y_hat)**2)])
df = pd.DataFrame.from_records(df, columns=["k", "sigma","mse"]).groupby(["k","sigma"]).mean().reset_index()
go.Figure(go.Heatmap(x=df.k, y=df.sigma, z=df.mse, colorscale="amp"),
          layout=go.Layout(title=r"$\text{(6) Average Test } MSE \text{ As Function of } \left(k,\sigma^2\right)$",
                           xaxis_title=r"$k$ - Fitted Polynomial Degree",
                           yaxis_title=r"$\sigma^2$ - Noise Levels")).show()
###Output
_____no_output_____
###Markdown
Lab 03 - Polynomial Fitting

In the previous lab we discussed linear regression and the OLS estimator for minimizing the RSS. As we mentioned, regression problems are a very wide family of settings and algorithms used to estimate the relation between a set of explanatory variables and a **continuous** response (i.e. $\mathcal{Y}\in\mathbb{R}^p$). In the following lab we will discuss one such setting called "Polynomial Fitting". Sometimes, the data (and the relation between the explanatory variables and the response) can be described by a polynomial of some degree. Here, we only focus on the case of a polynomial in a single variable. That is:

$$ p_k\left(x\right)=\sum_{i=0}^{k}\alpha_i x^i\quad\alpha_0,\ldots,\alpha_k\in\mathbb{R} $$

So our hypothesis class is of the form:

$$ \mathcal{H}^k_{poly}=\left\{p_k\,\Big|\, p_k\left(x\right)=\sum_{i=0}^{k}\alpha_i x^i\quad\alpha_0,\ldots,\alpha_k\in\mathbb{R}\right\} $$

Notice that, similar to linear regression, each hypothesis in the class is defined by a coefficient vector. Below are two examples (one simulated, one real) of datasets where the relation between the explanatory variable and the response is polynomial.
###Code
import sys
sys.path.append("../")
from utils import *
response = lambda x: x**4 - 2*x**3 - .5*x**2 + 1
x = np.linspace(-1.2, 2, 40)[0::2]
y_ = response(x)
df = pd.read_csv("../datasets/Position_Salaries.csv", skiprows=2, index_col=0)
x2, y2 = df.index, df.Salary
make_subplots(1, 2, subplot_titles=(r"$\text{Simulated Data: }y=x^4-2x^3-0.5x^2+1$", r"$\text{Positions Salary}$"))\
.add_traces([go.Scatter(x=x, y=y_, mode="markers", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x2, y=y2, mode="markers",marker=dict(color="black", opacity=.7), showlegend=False)],
rows=[1,1], cols=[1,2])\
.update_layout(title=r"$\text{(1) Datasets For Polynomial Fitting}$", margin=dict(t=100)).show()
###Output
_____no_output_____
###Markdown
As we have discussed in class, solving a polynomial fitting problem can be done by first manipulating the input data, such that we represent each sample $x_i\in\mathbb{R}$ as a vector $\mathbf{x}_i=\left(x^0,x^1,\ldots,x^k\right)$. Then, we treat the data as a design matrix $\mathbf{X}\in\mathbb{R}^{m\times (k+1)}$ of a linear regression problem. For the simulated dataset above, which comes from a polynomial of degree 4, the design matrix looks as follows:
###Code
from sklearn.preprocessing import PolynomialFeatures
m, k, X = 5, 4, x.reshape(-1, 1)
pd.DataFrame(PolynomialFeatures(k).fit_transform(X[:m]),
columns=[rf"$x^{{0}}$".format(i) for i in range(0, k+1)],
index=[rf"$x_{{0}}$".format(i) for i in range(1, m+1)])
###Output
_____no_output_____
###Markdown
Fitting A Polynomial Of Different Degrees

Next, let us fit polynomials of different degrees under different noise properties to study how they influence the learned model. We begin with the noise-less case, where we fit for different values of $k$. As we increase $k$ we manage to fit a model that describes the data better, reflected by the decrease in the MSE.

*Notice that both the `PolynomialFeatures` and `LinearRegression` functions can add the bias/intercept parameter. As in this case it makes no difference, we include the bias in the polynomial features transformation and fit a linear regression **without** an intercept. The bias parameter of the polynomial features (i.e. $x^0$) is in reality the intercept of the linear regression.*
###Code
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import make_pipeline
ks = [2, 3, 4, 5]
fig = make_subplots(1, 4, subplot_titles=list(ks))
for i, k in enumerate(ks):
y_hat = make_pipeline(PolynomialFeatures(k), LinearRegression(fit_intercept=False)).fit(X, y_).predict(X)
fig.add_traces([go.Scatter(x=x, y=y_, mode="markers", name="Real Points", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y_hat, mode="markers", name="Predicted Points", marker=dict(color="blue", opacity=.7), showlegend=False)], rows=1, cols=i+1)
fig["layout"]["annotations"][i]["text"] = rf"$k={{0}}, MSE={{1}}$".format(k, round(np.mean((y_-y_hat)**2), 2))
fig.update_layout(title=r"$\text{(2) Simulated Data - Fitting Polynomials of Different Degrees}$",
margin=dict(t=60),
yaxis_title=r"$\widehat{y}$",
height=300).show()
###Output
_____no_output_____
###Markdown
Once we reach the right $k$ (which in our case is 4) we manage to fit a perfect model; as we increase $k$ further, the additional coefficients are fitted as zero.
###Code
coefs = {}
for k in ks:
fit = make_pipeline(PolynomialFeatures(k), LinearRegression(fit_intercept=False)).fit(X, y_)
coefs[rf"$k={{{k}}}$"] = [round(c,3) for c in fit.steps[1][1].coef_]
pd.DataFrame.from_dict(coefs, orient='index', columns=[rf"$w_{{{i}}}$" for i in range(max(ks)+1)])
###Output
_____no_output_____
###Markdown
Fitting Polynomial Of Different Degrees - With Sample Noise

Still fitting for different values of $k$, let us add some standard Gaussian noise (i.e. $\mathcal{N}\left(0,1\right)$). This time we observe two things:
- Even for the correct $k=4$ model we are not able to achieve zero MSE.
- As we increase $k$ beyond 4 and up to 7, we manage to decrease the error more and more.
###Code
y = y_ + np.random.normal(size=len(y_))
ks = range(2, 8)
fig = make_subplots(2, 3, subplot_titles=list(ks))
for i, k in enumerate(ks):
    r,c = i//3+1, i%3+1
    y_hat = make_pipeline(PolynomialFeatures(k), LinearRegression(fit_intercept=False)).fit(X, y).predict(X)
    fig.add_traces([go.Scatter(x=x, y=y_, mode="markers", name="Real Points", marker=dict(color="black", opacity=.7), showlegend=False),
                    go.Scatter(x=x, y=y, mode="markers", name="Observed Points", marker=dict(color="red", opacity=.7), showlegend=False),
                    go.Scatter(x=x, y=y_hat, mode="markers", name="Predicted Points", marker=dict(color="blue", opacity=.7), showlegend=False)], rows=r, cols=c)
    fig["layout"]["annotations"][i]["text"] = rf"$k={{0}}, MSE={{1}}$".format(k, round(np.mean((y-y_hat)**2), 2))
fig.update_layout(title=r"$\text{(4) Simulated Data With Noise - Fitting Polynomials of Different Degrees}$", margin=dict(t=80)).show()
###Output
_____no_output_____
###Markdown
How is it that we are able to fit "better" models for values of $k$ larger than the true one? As we increase $k$ we grant the model more "degrees of freedom" with which to adapt itself to the observed data. The higher $k$, the more the learner will "go after the noise" and miss the real signal of the data. In other words, what we have just observed is known as **overfitting**. Later in the course we will learn methods for the detection and avoidance of overfitting.

Fitting Polynomial Over Different Sample Noise Levels

Next, let us set $k=4$ (the true value) and study the resulting models when training over different noise levels. Though we will only be changing the scale of the noise (i.e. the variance $\sigma^2$), changing other properties such as its distribution is interesting too. As we would expect, as we increase the scale of the noise our error increases. We can also observe this visually: the fitted polynomial (in blue) resembles the actual model (in black) less and less.
###Code
scales = range(6)
fig = make_subplots(2, 3, subplot_titles=list(map(str, scales)))
for i, s in enumerate(scales):
r,c = i//3+1, i%3+1
y = y_ + np.random.normal(scale=s, size=len(y_))
y_hat = make_pipeline(PolynomialFeatures(4), LinearRegression(fit_intercept=False)).fit(X, y).predict(X)
fig.add_traces([go.Scatter(x=x, y=y_, mode="markers", name="Real Points", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y, mode="markers", name="Observed Points", marker=dict(color="red", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y_hat, mode="markers", name="Predicted Points", marker=dict(color="blue", opacity=.7), showlegend=False)], rows=r, cols=c)
fig["layout"]["annotations"][i]["text"] = rf"$\sigma^2={{0}}, MSE={{1}}$".format(s, round(np.mean((y-y_hat)**2), 2))
fig.update_layout(title=r"$\text{(5) Simulated Data - Different Noise Scales}$", margin=dict(t=80)).show()
###Output
_____no_output_____
###Markdown
The Influence Of $k$ And $\sigma^2$ On Error

Lastly, let us check how the error is influenced by both $k$ and $\sigma^2$. For each value of $k$ and $\sigma^2$ we will add noise drawn from $\mathcal{N}\left(0,\sigma^2\right)$ and then, based on the noisy data, let the learner select a hypothesis from $\mathcal{H}_{poly}^k$. We repeat the process 10 times for each pair $\left(k,\sigma^2\right)$ and report the mean MSE value. Results are shown in the heatmap below:
###Code
from sklearn.model_selection import ParameterGrid
df = []
for setting in ParameterGrid(dict(k=range(10), s=np.linspace(0, 5, 10), repetition=range(10))):
y = y_ + np.random.normal(scale=setting["s"], size=len(y_))
y_hat = make_pipeline(PolynomialFeatures(setting["k"]), LinearRegression(fit_intercept=False)).fit(X, y).predict(X)
df.append([setting["k"], setting["s"], np.mean((y-y_hat)**2)])
df = pd.DataFrame.from_records(df, columns=["k", "sigma","mse"]).groupby(["k","sigma"]).mean().reset_index()
go.Figure(go.Heatmap(x=df.k, y=df.sigma, z=df.mse, colorscale="amp"),
layout=go.Layout(title=r"$\text{(6) Average Train } MSE \text{ As Function of } \left(k,\sigma^2\right)$",
xaxis_title=r"$k$ - Fitted Polynomial Degree",
yaxis_title=r"$\sigma^2$ - Noise Levels")).show()
###Output
_____no_output_____
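###Markdown
If a tabular view is easier to read than the heatmap, the same averaged results can be pivoted into a $\sigma \times k$ table (a small sketch reusing the `df` computed above):
###Code
print(df.pivot(index="sigma", columns="k", values="mse").round(2))  # rows: noise level, columns: degree
###Output
_____no_output_____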
###Markdown
Time To Think...

In the above figure, we observe the following trends:
- As already seen before, for the noise-free data, once we reach the correct $k$ we achieve zero MSE.
- Across all values of $k$, as we increase $\sigma^2$ we get higher MSE values.
- For all noise levels, we manage to reduce MSE values by increasing $k$.

So, by choosing a **richer** hypothesis class (i.e. a larger one that can express more functions - polynomials of higher degree) we are able to choose a hypothesis that fits the **observed** data **better**, regardless of how noisy the data is. Try to imagine how the above heatmap would look if, instead of calculating the MSE over the training samples (i.e. the train error), we had calculated it over a **new** set of test samples drawn from the same distribution.

Use the code below to create a test set. Change the code generating figure 6 such that the reported error is a test error. Do not forget to add the noise (which depends on $\sigma^2$) to the test data. What has changed between the train error and the test error? What happens for high/low values of $\sigma^2$? What happens for high/low values of $k$?
###Code
# Generate the x values of the test set
testX = np.linspace(-1.2, 2, 40)[1::2].reshape(-1,1)
# Generate the noisy y values of the test set. Set the noise level (the scale parameter) according to the specific setting
testY = response(testX).ravel() + np.random.normal(scale=___, size=len(testX))
###Output
_____no_output_____
###Markdown
Lab 03 - Polynomial Fitting

In the previous lab we discussed linear regression and the OLS estimator for minimizing the RSS. As we mentioned, regression problems are a very wide family of settings and algorithms used to estimate the relation between a set of explanatory variables and a **continuous** response (i.e. $\mathcal{Y}\in\mathbb{R}^p$). In the following lab we will discuss one such setting called "Polynomial Fitting". Sometimes, the data (and the relation between the explanatory variables and the response) can be described by a polynomial of some degree. Here, we only focus on the case of a polynomial in a single variable. That is:

$$ p_k\left(x\right)=\sum_{i=0}^{k}\alpha_i x^i\quad\alpha_0,\ldots,\alpha_k\in\mathbb{R} $$

So our hypothesis class is of the form:

$$ \mathcal{H}^k_{poly}=\left\{p_k\,\Big|\, p_k\left(x\right)=\sum_{i=0}^{k}\alpha_i x^i\quad\alpha_0,\ldots,\alpha_k\in\mathbb{R}\right\} $$

Notice that, similar to linear regression, each hypothesis in the class is defined by a coefficient vector. Below are two examples (one simulated, one real) of datasets where the relation between the explanatory variable and the response is polynomial.
###Code
import sys
sys.path.append("../")
from utils import *
response = lambda x: x**4 - 2*x**3 - .5*x**2 + 1
x = np.linspace(-1.2, 2, 40)[0::2]
y_ = response(x)
df = pd.read_csv("../datasets/Position_Salaries.csv", skiprows=2, index_col=0)
x2, y2 = df.index, df.Salary
make_subplots(1, 2, subplot_titles=(r"$\text{Simulated Data: }y=x^4-2x^3-0.5x^2+1$", r"$\text{Positions Salary}$"))\
.add_traces([go.Scatter(x=x, y=y_, mode="markers", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x2, y=y2, mode="markers",marker=dict(color="black", opacity=.7), showlegend=False)],
rows=[1,1], cols=[1,2])\
.update_layout(title=r"$\text{(1) Datasets For Polynomial Fitting}$", margin=dict(t=100)).show()
###Output
_____no_output_____
###Markdown
As we have discussed in class, solving a polynomial fitting problem can be done by first manipulating the input data, such that we represent each sample $x_i\in\mathbb{R}$ as a vector $\mathbf{x}_i=\left(x^0,x^1,\ldots,x^k\right)$. Then, we treat the data as a design matrix $\mathbf{X}\in\mathbb{R}^{m\times (k+1)}$ of a linear regression problem. For the simulated dataset above, which comes from a polynomial of degree 4, the design matrix looks as follows:
###Code
from sklearn.preprocessing import PolynomialFeatures
m, k, X = 5, 4, x.reshape(-1, 1)
pd.DataFrame(PolynomialFeatures(k).fit_transform(X[:m]),
columns=[rf"$x^{{0}}$".format(i) for i in range(0, k+1)],
index=[rf"$x_{{0}}$".format(i) for i in range(1, m+1)])
###Output
_____no_output_____
###Markdown
Fitting A Polynomial Of Different Degrees

Next, let us fit polynomials of different degrees under different noise properties to study how they influence the learned model. We begin with the noise-less case, where we fit for different values of $k$. As we increase $k$ we manage to fit a model that describes the data better, reflected by the decrease in the MSE.

*Notice that both the `PolynomialFeatures` and `LinearRegression` functions can add the bias/intercept parameter. As in this case it makes no difference, we include the bias in the polynomial features transformation and fit a linear regression **without** an intercept. The bias parameter of the polynomial features (i.e. $x^0$) is in reality the intercept of the linear regression.*
###Code
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import make_pipeline
ks = [2, 3, 4, 5]
fig = make_subplots(1, 4, subplot_titles=list(ks))
for i, k in enumerate(ks):
y_hat = make_pipeline(PolynomialFeatures(k), LinearRegression(fit_intercept=False)).fit(X, y_).predict(X)
fig.add_traces([go.Scatter(x=x, y=y_, mode="markers", name="Real Points", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y_hat, mode="markers", name="Predicted Points", marker=dict(color="blue", opacity=.7), showlegend=False)], rows=1, cols=i+1)
fig["layout"]["annotations"][i]["text"] = rf"$k={{0}}, MSE={{1}}$".format(k, round(np.mean((y_-y_hat)**2), 2))
fig.update_layout(title=r"$\text{(2) Simulated Data - Fitting Polynomials of Different Degrees}$",
margin=dict(t=60),
yaxis_title=r"$\widehat{y}$",
height=300).show()
###Output
_____no_output_____
###Markdown
Once we reach the right $k$ (which in our case is 4) we manage to fit a perfect model; as we increase $k$ further, the additional coefficients are fitted as zero.
###Code
coefs = {}
for k in ks:
fit = make_pipeline(PolynomialFeatures(k), LinearRegression(fit_intercept=False)).fit(X, y_)
coefs[rf"$k={{{k}}}$"] = [round(c,3) for c in fit.steps[1][1].coef_]
pd.DataFrame.from_dict(coefs, orient='index', columns=[rf"$w_{{{i}}}$" for i in range(max(ks)+1)])
###Output
_____no_output_____
###Markdown
Fitting Polynomial Of Different Degrees - With Sample Noise

Still fitting for different values of $k$, let us add some standard Gaussian noise (i.e. $\mathcal{N}\left(0,1\right)$). This time we observe two things:
- Even for the correct $k=4$ model we are not able to achieve zero MSE.
- As we increase $k$ beyond 4 and up to 7, we manage to decrease the error more and more.
###Code
y = y_ + np.random.normal(size=len(y_))
ks = range(2, 8)
fig = make_subplots(2, 3, subplot_titles=list(ks))
for i, k in enumerate(ks):
r,c = i//3+1, i%3+1
y_hat = make_pipeline(PolynomialFeatures(k), LinearRegression(fit_intercept=False)).fit(X, y).predict(X)
fig.add_traces([go.Scatter(x=x, y=y_, mode="markers", name="Real Points", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y, mode="markers", name="Observed Points", marker=dict(color="red", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y_hat, mode="markers", name="Predicted Points", marker=dict(color="blue", opacity=.7), showlegend=False)], rows=r, cols=c)
fig["layout"]["annotations"][i]["text"] = rf"$k={{0}}, MSE={{1}}$".format(k, round(np.mean((y-y_hat)**2), 2))
fig.update_layout(title=r"$\text{(4) Simulated Data With Noise - Fitting Polynomials of Different Degrees}$", margin=dict(t=80)).show()
###Output
_____no_output_____
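###Markdown
Since the noise is drawn at random, re-running the cell above yields different MSE values. A common practice (a sketch; the lab code itself is unseeded) is to fix a seed for reproducibility:
###Code
rng = np.random.default_rng(0)                   # reproducible noise draws
y_seeded = y_ + rng.normal(size=len(y_))
print(round(np.mean((y_seeded - y_)**2), 2))     # empirical noise variance, close to 1
###Output
_____no_output_____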
###Markdown
How is it that we are able to fit "better" models for values of $k$ larger than the true one? As we increase $k$ we grant the model more "degrees of freedom" with which to adapt itself to the observed data. The higher $k$, the more the learner will "go after the noise" and miss the real signal of the data. In other words, what we have just observed is known as **overfitting**. Later in the course we will learn methods for the detection and avoidance of overfitting.

Fitting Polynomial Over Different Sample Noise Levels

Next, let us set $k=4$ (the true value) and study the resulting models when training over different noise levels. Though we will only be changing the scale of the noise (i.e. the variance $\sigma^2$), changing other properties such as its distribution is interesting too. As we would expect, as we increase the scale of the noise our error increases. We can also observe this visually: the fitted polynomial (in blue) resembles the actual model (in black) less and less.
###Code
scales = range(6)
fig = make_subplots(2, 3, subplot_titles=list(map(str, scales)))
for i, s in enumerate(scales):
r,c = i//3+1, i%3+1
y = y_ + np.random.normal(scale=s, size=len(y_))
y_hat = make_pipeline(PolynomialFeatures(4), LinearRegression(fit_intercept=False)).fit(X, y).predict(X)
fig.add_traces([go.Scatter(x=x, y=y_, mode="markers", name="Real Points", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y, mode="markers", name="Observed Points", marker=dict(color="red", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y_hat, mode="markers", name="Predicted Points", marker=dict(color="blue", opacity=.7), showlegend=False)], rows=r, cols=c)
fig["layout"]["annotations"][i]["text"] = rf"$\sigma^2={{0}}, MSE={{1}}$".format(s, round(np.mean((y-y_hat)**2), 2))
fig.update_layout(title=r"$\text{(5) Simulated Data - Different Noise Scales}$", margin=dict(t=80)).show()
###Output
_____no_output_____
###Markdown
The Influence Of $k$ And $\sigma^2$ On Error

Lastly, let us check how the error is influenced by both $k$ and $\sigma^2$. For each value of $k$ and $\sigma^2$ we will add noise drawn from $\mathcal{N}\left(0,\sigma^2\right)$ and then, based on the noisy data, let the learner select a hypothesis from $\mathcal{H}_{poly}^k$. We repeat the process 10 times for each pair $\left(k,\sigma^2\right)$ and report the mean MSE value. Results are shown in the heatmap below:
###Code
from sklearn.model_selection import ParameterGrid
df = []
for setting in ParameterGrid(dict(k=range(10), s=np.linspace(0, 5, 10), repetition=range(10))):
y = y_ + np.random.normal(scale=setting["s"], size=len(y_))
y_hat = make_pipeline(PolynomialFeatures(setting["k"]), LinearRegression(fit_intercept=False)).fit(X, y).predict(X)
df.append([setting["k"], setting["s"], np.mean((y-y_hat)**2)])
df = pd.DataFrame.from_records(df, columns=["k", "sigma","mse"]).groupby(["k","sigma"]).mean().reset_index()
go.Figure(go.Heatmap(x=df.k, y=df.sigma, z=df.mse, colorscale="amp"),
layout=go.Layout(title=r"$\text{(6) Average Train } MSE \text{ As Function of } \left(k,\sigma^2\right)$",
xaxis_title=r"$k$ - Fitted Polynomial Degree",
yaxis_title=r"$\sigma^2$ - Noise Levels")).show()
###Output
_____no_output_____
###Markdown
Time To Think...

In the above figure, we observe the following trends:
- As already seen before, for the noise-free data, once we reach the correct $k$ we achieve zero MSE.
- Across all values of $k$, as we increase $\sigma^2$ we get higher MSE values.
- For all noise levels, we manage to reduce MSE values by increasing $k$.

So, by choosing a **richer** hypothesis class (i.e. a larger one that can express more functions - polynomials of higher degree) we are able to choose a hypothesis that fits the **observed** data **better**, regardless of how noisy the data is. Try to imagine how the above heatmap would look if, instead of calculating the MSE over the training samples (i.e. the train error), we had calculated it over a **new** set of test samples drawn from the same distribution.

Use the code below to create a test set. Change the code generating figure 6 such that the reported error is a test error. Do not forget to add the noise (which depends on $\sigma^2$) to the test data. What has changed between the train error and the test error? What happens for high/low values of $\sigma^2$? What happens for high/low values of $k$?
###Code
from sklearn.model_selection import ParameterGrid
# Generate the x values of the test set
testX = np.linspace(-1.2, 2, 40)[1::2].reshape(-1,1)
df = []
for setting in ParameterGrid(dict(k=range(10), s=np.linspace(0, 5, 10), repetition=range(10))):
    # Fit on the noisy training data, then evaluate on noisy test data drawn with the same sigma
    y = y_ + np.random.normal(scale=setting["s"], size=len(y_))
    testY = response(testX).ravel() + np.random.normal(scale=setting["s"], size=len(testX))
    y_hat = make_pipeline(PolynomialFeatures(setting["k"]), LinearRegression(fit_intercept=False)).fit(X, y).predict(testX)
    df.append([setting["k"], setting["s"], np.mean((testY-y_hat)**2)])
df = pd.DataFrame.from_records(df, columns=["k", "sigma","mse"]).groupby(["k","sigma"]).mean().reset_index()
go.Figure(go.Heatmap(x=df.k, y=df.sigma, z=df.mse, colorscale="amp"),
          layout=go.Layout(title=r"$\text{(6) Average Test } MSE \text{ As Function of } \left(k,\sigma^2\right)$",
                           xaxis_title=r"$k$ - Fitted Polynomial Degree",
                           yaxis_title=r"$\sigma^2$ - Noise Levels")).show()
###Output
_____no_output_____
###Markdown
Lab 03 - Polynomial Fitting

In the previous lab we discussed linear regression and the OLS estimator for minimizing the RSS. As we mentioned, regression problems are a very wide family of settings and algorithms used to estimate the relation between a set of explanatory variables and a **continuous** response (i.e. $\mathcal{Y}\in\mathbb{R}^p$). In the following lab we will discuss one such setting called "Polynomial Fitting". Sometimes, the data (and the relation between the explanatory variables and the response) can be described by a polynomial of some degree. Here, we only focus on the case of a polynomial in a single variable. That is:

$$ p_k\left(x\right)=\sum_{i=0}^{k}\alpha_i x^i\quad\alpha_0,\ldots,\alpha_k\in\mathbb{R} $$

So our hypothesis class is of the form:

$$ \mathcal{H}^k_{poly}=\left\{p_k\,\Big|\, p_k\left(x\right)=\sum_{i=0}^{k}\alpha_i x^i\quad\alpha_0,\ldots,\alpha_k\in\mathbb{R}\right\} $$

Notice that, similar to linear regression, each hypothesis in the class is defined by a coefficient vector. Below are two examples (one simulated, one real) of datasets where the relation between the explanatory variable and the response is polynomial.
###Code
import sys
sys.path.append("../")
from utils import *
response = lambda x: x**4 - 2*x**3 - .5*x**2 + 1
x = np.linspace(-1.2, 2, 40)[0::2]  # take every second of 40 evenly spaced points, so x holds 20 samples
y_ = response(x)  # evaluate the true polynomial
df = pd.read_csv("../datasets/Position_Salaries.csv", skiprows=2, index_col=0)  # skip the first 2 rows; the first column serves as the index
x2, y2 = df.index, df.Salary
make_subplots(1, 2, subplot_titles=(r"$\text{Simulated Data: }y=x^4-2x^3-0.5x^2+1$", r"$\text{Positions Salary}$"))\
.add_traces([go.Scatter(x=x, y=y_, mode="markers", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x2, y=y2, mode="markers",marker=dict(color="black", opacity=.7), showlegend=False)],
rows=[1,1], cols=[1,2])\
.update_layout(title=r"$\text{(1) Datasets For Polynomial Fitting}$", margin=dict(t=100)).show()
###Output
_____no_output_____
###Markdown
As we have discussed in class, solving a polynomial fitting problem can be done by first manipulating the input data, such that we represent each sample $x_i\in\mathbb{R}$ as a vector $\mathbf{x}_i=\left(x^0,x^1,\ldots,x^k\right)$. Then, we treat the data as a design matrix $\mathbf{X}\in\mathbb{R}^{m\times (k+1)}$ of a linear regression problem. For the simulated dataset above, which comes from a polynomial of degree 4, the design matrix looks as follows:
###Code
from sklearn.preprocessing import PolynomialFeatures
m, k, X = 5, 4, x.reshape(-1, 1)  # show m=5 samples of the degree-4 polynomial features
pd.DataFrame(PolynomialFeatures(k).fit_transform(X[:m]),
columns=[rf"$x^{{0}}$".format(i) for i in range(0, k+1)],
index=[rf"$x_{{0}}$".format(i) for i in range(1, m+1)])
###Output
_____no_output_____
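###Markdown
A quick sanity check on the transform (a sketch): `PolynomialFeatures` maps the $m$ scalar samples to an $m\times(k+1)$ matrix of powers.
###Code
Phi = PolynomialFeatures(4).fit_transform(X)
print(Phi.shape)                  # (20, 5): 20 samples, powers x^0 ... x^4
print(np.allclose(Phi[:, 1], x))  # the second column is x itself
###Output
_____no_output_____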
###Markdown
Fitting A Polynomial Of Different Degrees

Next, let us fit polynomials of different degrees under different noise properties to study how they influence the learned model. We begin with the noise-less case, where we fit for different values of $k$. As we increase $k$ we manage to fit a model that describes the data better, reflected by the decrease in the MSE.

*Notice that both the `PolynomialFeatures` and `LinearRegression` functions can add the bias/intercept parameter. As in this case it makes no difference, we include the bias in the polynomial features transformation and fit a linear regression **without** an intercept. The bias parameter of the polynomial features (i.e. $x^0$) is in reality the intercept of the linear regression.*
###Code
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import make_pipeline
ks = [2, 3, 4, 5]
fig = make_subplots(1, 4, subplot_titles=list(ks))
for i, k in enumerate(ks):
y_hat = make_pipeline(PolynomialFeatures(k), LinearRegression(fit_intercept=False)).fit(X, y_).predict(X)
fig.add_traces([go.Scatter(x=x, y=y_, mode="markers", name="Real Points", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y_hat, mode="markers", name="Predicted Points", marker=dict(color="blue", opacity=.7), showlegend=False)], rows=1, cols=i+1)
fig["layout"]["annotations"][i]["text"] = rf"$k={{0}}, MSE={{1}}$".format(k, round(np.mean((y_-y_hat)**2), 2))
fig.update_layout(title=r"$\text{(2) Simulated Data - Fitting Polynomials of Different Degrees}$",
margin=dict(t=60),
yaxis_title=r"$\widehat{y}$",
height=300).show()
###Output
_____no_output_____
###Markdown
Once we reach the right $k$ (which in our case is 4) we manage to fit a perfect model; as we increase $k$ further, the additional coefficients are fitted as zero.
###Code
coefs = {}
for k in ks:
fit = make_pipeline(PolynomialFeatures(k), LinearRegression(fit_intercept=False)).fit(X, y_)
coefs[rf"$k={{{k}}}$"] = [round(c,3) for c in fit.steps[1][1].coef_]
pd.DataFrame.from_dict(coefs, orient='index', columns=[rf"$w_{{{i}}}$" for i in range(max(ks)+1)])
###Output
_____no_output_____
###Markdown
Fitting Polynomial Of Different Degrees - With Sample Noise

Still fitting for different values of $k$, let us add some standard Gaussian noise (i.e. $\mathcal{N}\left(0,1\right)$). This time we observe two things:
- Even for the correct $k=4$ model we are not able to achieve zero MSE.
- As we increase $k$ beyond 4 and up to 7, we manage to decrease the error more and more.
###Code
y = y_ + np.random.normal(size=len(y_))  # add standard Gaussian noise to the clean responses
ks = range(2, 8)
fig = make_subplots(2, 3, subplot_titles=list(ks))
for i, k in enumerate(ks):
r,c = i//3+1, i%3+1
y_hat = make_pipeline(PolynomialFeatures(k), LinearRegression(fit_intercept=False)).fit(X, y).predict(X)
fig.add_traces([go.Scatter(x=x, y=y_, mode="markers", name="Real Points", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y, mode="markers", name="Observed Points", marker=dict(color="red", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y_hat, mode="markers", name="Predicted Points", marker=dict(color="blue", opacity=.7), showlegend=False)], rows=r, cols=c)
fig["layout"]["annotations"][i]["text"] = rf"$k={{0}}, MSE={{1}}$".format(k, round(np.mean((y-y_hat)**2), 2))
fig.update_layout(title=r"$\text{(4) Simulated Data With Noise - Fitting Polynomials of Different Degrees}$", margin=dict(t=80)).show()
###Output
_____no_output_____
###Markdown
How is it that we are able to fit "better" models for values of $k$ larger than the true one? As we increase $k$ we grant the model more "degrees of freedom" with which to adapt itself to the observed data. The higher $k$, the more the learner will "go after the noise" and miss the real signal of the data. In other words, what we have just observed is known as **overfitting**. Later in the course we will learn methods for the detection and avoidance of overfitting.

Fitting Polynomial Over Different Sample Noise Levels

Next, let us set $k=4$ (the true value) and study the resulting models when training over different noise levels. Though we will only be changing the scale of the noise (i.e. the variance $\sigma^2$), changing other properties such as its distribution is interesting too. As we would expect, as we increase the scale of the noise our error increases. We can also observe this visually: the fitted polynomial (in blue) resembles the actual model (in black) less and less.
###Code
scales = range(6)
fig = make_subplots(2, 3, subplot_titles=list(map(str, scales)))
for i, s in enumerate(scales):
r,c = i//3+1, i%3+1
y = y_ + np.random.normal(scale=s, size=len(y_))
y_hat = make_pipeline(PolynomialFeatures(4), LinearRegression(fit_intercept=False)).fit(X, y).predict(X)
fig.add_traces([go.Scatter(x=x, y=y_, mode="markers", name="Real Points", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y, mode="markers", name="Observed Points", marker=dict(color="red", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y_hat, mode="markers", name="Predicted Points", marker=dict(color="blue", opacity=.7), showlegend=False)], rows=r, cols=c)
fig["layout"]["annotations"][i]["text"] = rf"$\sigma^2={{0}}, MSE={{1}}$".format(s, round(np.mean((y-y_hat)**2), 2))
fig.update_layout(title=r"$\text{(5) Simulated Data - Different Noise Scales}$", margin=dict(t=80)).show()
###Output
_____no_output_____
###Markdown
The Influence Of $k$ And $\sigma^2$ On Error: Lastly, let us check how the error is influenced by both $k$ and $\sigma^2$. For each value of $k$ and $\sigma^2$ we will add noise drawn from $\mathcal{N}\left(0,\sigma^2\right)$ and then, based on the noisy data, let the learner select a hypothesis from $\mathcal{H}_{poly}^k$. We repeat the process for each pair $\left(k,\sigma^2\right)$ 10 times and report the mean MSE value. Results are shown in the heatmap below:
###Code
from sklearn.model_selection import ParameterGrid
df = []
for setting in ParameterGrid(dict(k=range(10), s=np.linspace(0, 5, 10), repetition=range(10))):
y = y_ + np.random.normal(scale=setting["s"], size=len(y_))
y_hat = make_pipeline(PolynomialFeatures(setting["k"]), LinearRegression(fit_intercept=False)).fit(X, y).predict(X)
df.append([setting["k"], setting["s"], np.mean((y-y_hat)**2)])
df = pd.DataFrame.from_records(df, columns=["k", "sigma","mse"]).groupby(["k","sigma"]).mean().reset_index()  # average the MSE over repetitions for each (k, sigma)
go.Figure(go.Heatmap(x=df.k, y=df.sigma, z=df.mse, colorscale="amp"),
layout=go.Layout(title=r"$\text{(6) Average Train } MSE \text{ As Function of } \left(k,\sigma^2\right)$",
xaxis_title=r"$k$ - Fitted Polynomial Degree",
yaxis_title=r"$\sigma^2$ - Noise Levels")).show()
###Output
_____no_output_____
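###Markdown
Before reading the takeaways below, it can help to inspect the noise-free slice of the grid directly (a minimal sketch using the aggregated `df` from the cell above):
###Code
# MSE along the sigma = 0 row: it should drop to (numerically) zero once k >= 4
print(df[df.sigma == 0].set_index("k").mse.round(3))
###Output
_____no_output_____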
###Markdown
Time To Think... In the above figure, we observe the following trends: - As already seen before, for the noise-free data, once we reach the correct $k$ we achieve zero MSE. - Across all values of $k$, as we increase $\sigma^2$ we get higher MSE values. - For all noise levels, we manage to reduce MSE values by increasing $k$. So, by choosing a **richer** hypothesis class (i.e. one that is larger and can express more functions - polynomials of higher degree) we are able to choose a hypothesis that fits the **observed** data **better**, regardless of how noisy the data is. Try to think how the above heatmap would look if, instead of calculating the MSE over the training samples (i.e. the train error), we had calculated it over a **new** set of test samples drawn from the same distribution. Use the code below to create a test set. Change the code generating figure 6 such that the reported error is a test error. Do not forget to add the noise (that depends on $\sigma^2$) to the test data. What has changed between the train error and the test error? What happens for high/low values of $\sigma^2$? What happens for high/low values of $k$?
###Code
# Generate the x values of the test set
testX = np.linspace(-1.2, 2, 40)[1::2].reshape(-1,1)
# Generate the noisy y values of the test set. Set the noise level (the scale parameter) according to the specific setting
df = []
for setting in ParameterGrid(dict(k=range(10), s=np.linspace(0, 5, 10), repetition=range(10))):
    y = y_ + np.random.normal(scale=setting["s"], size=len(y_))  # noisy train responses
    # Noisy test responses; flatten response(testX) so shapes broadcast correctly
    testY = response(testX).ravel() + np.random.normal(scale=setting["s"], size=len(testX))
    y_hat = make_pipeline(PolynomialFeatures(setting["k"]), LinearRegression(fit_intercept=False)).fit(X, y).predict(testX)
    df.append([setting["k"], setting["s"], np.mean((testY-y_hat)**2)])
df = pd.DataFrame.from_records(df, columns=["k", "sigma","mse"]).groupby(["k","sigma"]).mean().reset_index()  # average the MSE over repetitions for each (k, sigma)
go.Figure(go.Heatmap(x=df.k, y=df.sigma, z=df.mse, colorscale="amp"),
          layout=go.Layout(title=r"$\text{(6) Average Test } MSE \text{ As Function of } \left(k,\sigma^2\right)$",
                           xaxis_title=r"$k$ - Fitted Polynomial Degree",
                           yaxis_title=r"$\sigma^2$ - Noise Levels")).show()
###Output
_____no_output_____
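###Markdown
One way to approach the questions above is to fix a single noise level and plot the train and test MSE side by side as a function of $k$. The following is a minimal sketch (the choice $\sigma=2$ is arbitrary, and the exact curves vary between random draws):
###Code
# Train vs. test MSE as a function of k, for one fixed noise level
sigma, train_mse, test_mse = 2, [], []
y_train = y_ + np.random.normal(scale=sigma, size=len(y_))
y_test = response(testX).ravel() + np.random.normal(scale=sigma, size=len(testX))
for k in range(10):
    model = make_pipeline(PolynomialFeatures(k), LinearRegression(fit_intercept=False)).fit(X, y_train)
    train_mse.append(np.mean((y_train - model.predict(X))**2))
    test_mse.append(np.mean((y_test - model.predict(testX))**2))
go.Figure([go.Scatter(x=list(range(10)), y=train_mse, mode="lines+markers", name="Train MSE"),
           go.Scatter(x=list(range(10)), y=test_mse, mode="lines+markers", name="Test MSE")],
          layout=go.Layout(title="Train vs. Test MSE as a function of k",
                           xaxis_title="k", yaxis_title="MSE")).show()
###Output
_____no_output_____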
###Markdown
Lab 03 - Polynomial Fitting: In the previous lab we discussed linear regression and the OLS estimator for minimizing the RSS. As we mentioned, regression problems are a very wide family of settings and algorithms which we use to estimate the relation between a set of explanatory variables and a **continuous** response (i.e. $\mathcal{Y}\subseteq\mathbb{R}^p$). In the following lab we will discuss one such setting called "Polynomial Fitting". Sometimes, the data (and the relation between the explanatory variables and the response) can be described by some polynomial of some degree. Here, we focus only on the case of a polynomial of a single variable. That is: $$ p_k\left(x\right)=\sum_{i=0}^{k}\alpha_i x^i\quad\alpha_0,\ldots,\alpha_k\in\mathbb{R} $$ So our hypothesis class is of the form: $$ \mathcal{H}^k_{poly}=\left\{p_k\,\Big|\, p_k\left(x\right)=\sum_{i=0}^{k}\alpha_i x^i\quad\alpha_0,\ldots,\alpha_k\in\mathbb{R}\right\} $$ Notice that, similar to linear regression, each hypothesis in the class is defined by a coefficients vector. Below are two examples (simulated and real) of datasets where the relation between the explanatory variable and the response is polynomial.
###Code
import sys
sys.path.append("../")
from utils import *
import numpy as np
import pandas as pd
response = lambda x: x**4 - 2*x**3 - .5*x**2 + 1
x = np.linspace(-1.2, 2, 40)[0::2]
y_ = response(x)
df = pd.read_csv("../datasets/Position_Salaries.csv", skiprows=2, index_col=0)
x2, y2 = df.index, df.Salary
make_subplots(1, 2, subplot_titles=(r"$\text{Simulated Data: }y=x^4-2x^3-0.5x^2+1$", r"$\text{Positions Salary}$"))\
.add_traces([go.Scatter(x=x, y=y_, mode="markers", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x2, y=y2, mode="markers",marker=dict(color="black", opacity=.7), showlegend=False)],
rows=[1,1], cols=[1,2])\
.update_layout(title=r"$\text{(1) Datasets For Polynomial Fitting}$", margin=dict(t=100)).show()
###Output
_____no_output_____
###Markdown
As we have discussed in class, solving a polynomial fitting problem can be done by first manipulating the input data, such that we represent each sample $x_i\in\mathbb{R}$ as a vector $\mathbf{x}_i=\left(x_i^0,x_i^1,\ldots,x_i^k\right)$. Then, we treat the data as a design matrix $\mathbf{X}\in\mathbb{R}^{m\times\left(k+1\right)}$ of a linear regression problem. For the simulated dataset above, which is generated by a polynomial of degree 4, the design matrix looks as follows:
###Code
from sklearn.preprocessing import PolynomialFeatures
m, k, X = 5, 4, x.reshape(-1, 1)
pd.DataFrame(PolynomialFeatures(k).fit_transform(X[:m]),
columns=[rf"$x^{{0}}$".format(i) for i in range(0, k+1)],
index=[rf"$x_{{0}}$".format(i) for i in range(1, m+1)])
###Output
_____no_output_____
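###Markdown
Since polynomial fitting is just linear regression over the transformed features, the same coefficients can be recovered with a direct least-squares solve. Below is a minimal sketch using `np.linalg.lstsq` on the noise-free responses `y_` from above:
###Code
# Build the design matrix [x^0, x^1, ..., x^4] and solve the OLS problem directly
Xd = PolynomialFeatures(4).fit_transform(X)
w, *_ = np.linalg.lstsq(Xd, y_, rcond=None)
# Should match the coefficients of x^4 - 2x^3 - 0.5x^2 + 1, i.e. [1, 0, -0.5, -2, 1]
print(np.round(w, 3))
###Output
_____no_output_____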
###Markdown
Fitting A Polynomial Of Different Degrees: Next, let us fit polynomials of different degrees and different noise properties to study how these choices influence the learned model. We begin with the noise-less case, where we fit for different values of $k$. As we increase $k$ we manage to fit a model that describes the data better, reflected by the decrease in the MSE. *Notice that both `PolynomialFeatures` and `LinearRegression` can add the bias/intercept parameter. As in this case it makes no difference, we include the bias in the polynomial features transformation and fit a linear regression **without** an intercept. The bias term of the polynomial features (i.e. $x^0$) is then, in effect, the intercept of the linear regression.*
###Code
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import make_pipeline
ks = [2, 3, 4, 5]
fig = make_subplots(1, 4, subplot_titles=list(ks))
for i, k in enumerate(ks):
y_hat = make_pipeline(PolynomialFeatures(k), LinearRegression(fit_intercept=False)).fit(X, y_).predict(X)
fig.add_traces([go.Scatter(x=x, y=y_, mode="markers", name="Real Points", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y_hat, mode="markers", name="Predicted Points", marker=dict(color="blue", opacity=.7), showlegend=False)], rows=1, cols=i+1)
fig["layout"]["annotations"][i]["text"] = rf"$k={{0}}, MSE={{1}}$".format(k, round(np.mean((y_-y_hat)**2), 2))
fig.update_layout(title=r"$\text{(2) Simulated Data - Fitting Polynomials of Different Degrees}$",
margin=dict(t=60),
yaxis_title=r"$\widehat{y}$",
height=300).show()
###Output
_____no_output_____
###Markdown
Once we find the right $k$ (which in our case is 4) we manage to fit a perfect model; as we increase $k$ further, the additional coefficients come out as zero.
###Code
coefs = {}
for k in ks:
fit = make_pipeline(PolynomialFeatures(k), LinearRegression(fit_intercept=False)).fit(X, y_)
coefs[rf"$k={{{k}}}$"] = [round(c,3) for c in fit.steps[1][1].coef_]
pd.DataFrame.from_dict(coefs, orient='index', columns=[rf"$w_{{{i}}}$" for i in range(max(ks)+1)])
###Output
_____no_output_____
###Markdown
Fitting Polynomial Of Different Degrees - With Sample Noise: Still fitting for different values of $k$, let us now add standard Gaussian noise (i.e. $\mathcal{N}\left(0,1\right)$). This time we observe two things: - Even for the correct $k=4$ model we are no longer able to achieve zero MSE. - As we increase $k$ from 4 towards 7 we manage to decrease the error more and more.
###Code
y = y_ + np.random.normal(size=len(y_))
ks = range(2, 8)
fig = make_subplots(2, 3, subplot_titles=list(ks))
for i, k in enumerate(ks):
r,c = i//3+1, i%3+1
y_hat = make_pipeline(PolynomialFeatures(k), LinearRegression(fit_intercept=False)).fit(X, y).predict(X)
fig.add_traces([go.Scatter(x=x, y=y_, mode="markers", name="Real Points", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y, mode="markers", name="Observed Points", marker=dict(color="red", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y_hat, mode="markers", name="Predicted Points", marker=dict(color="blue", opacity=.7), showlegend=False)], rows=r, cols=c)
fig["layout"]["annotations"][i]["text"] = rf"$k={{0}}, MSE={{1}}$".format(k, round(np.mean((y-y_hat)**2), 2))
fig.update_layout(title=r"$\text{(4) Simulated Data With Noise - Fitting Polynomials of Different Degrees}$", margin=dict(t=80)).show()
###Output
_____no_output_____
###Markdown
How is it that we are able to fit "better" models for values of $k$ larger than the true one? As we increase $k$ we give the model more "degrees of freedom" to adapt itself to the observed data. The higher $k$, the more the learner will "go after the noise" and miss the real signal of the data. In other words, what we have just observed is known as **overfitting**. Later in the course we will learn methods for detecting and avoiding overfitting. Fitting Polynomial Over Different Sample Noise Levels: Next, let us fix $k=4$ (the true value) and study the fitted models when training over different noise levels. Though we will only be changing the scale of the noise (i.e. the variance, $\sigma^2$), changing other properties such as its distribution is interesting too. As we would expect, as we increase the scale of the noise our error increases. We can also observe this visually: the fitted polynomial (in blue) resembles the actual model (in black) less and less.
###Code
scales = range(6)
fig = make_subplots(2, 3, subplot_titles=list(map(str, scales)))
for i, s in enumerate(scales):
r,c = i//3+1, i%3+1
y = y_ + np.random.normal(scale=s, size=len(y_))
y_hat = make_pipeline(PolynomialFeatures(4), LinearRegression(fit_intercept=False)).fit(X, y).predict(X)
fig.add_traces([go.Scatter(x=x, y=y_, mode="markers", name="Real Points", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y, mode="markers", name="Observed Points", marker=dict(color="red", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y_hat, mode="markers", name="Predicted Points", marker=dict(color="blue", opacity=.7), showlegend=False)], rows=r, cols=c)
fig["layout"]["annotations"][i]["text"] = rf"$\sigma^2={{0}}, MSE={{1}}$".format(s, round(np.mean((y-y_hat)**2), 2))
fig.update_layout(title=r"$\text{(5) Simulated Data - Different Noise Scales}$", margin=dict(t=80)).show()
###Output
_____no_output_____
###Markdown
The Influence Of $k$ And $\sigma^2$ On Error: Lastly, let us check how the error is influenced by both $k$ and $\sigma^2$. For each value of $k$ and $\sigma^2$ we will add noise drawn from $\mathcal{N}\left(0,\sigma^2\right)$ and then, based on the noisy data, let the learner select a hypothesis from $\mathcal{H}_{poly}^k$. We repeat the process for each pair $\left(k,\sigma^2\right)$ 10 times and report the mean MSE value. Results are shown in the heatmap below:
###Code
from sklearn.model_selection import ParameterGrid
df = []
for setting in ParameterGrid(dict(k=range(10), s=np.linspace(0, 5, 10), repetition=range(10))):
y = y_ + np.random.normal(scale=setting["s"], size=len(y_))
y_hat = make_pipeline(PolynomialFeatures(setting["k"]), LinearRegression(fit_intercept=False)).fit(X, y).predict(X)
df.append([setting["k"], setting["s"], np.mean((y-y_hat)**2)])
df = pd.DataFrame.from_records(df, columns=["k", "sigma","mse"]).groupby(["k","sigma"]).mean().reset_index()
go.Figure(go.Heatmap(x=df.k, y=df.sigma, z=df.mse, colorscale="amp"),
layout=go.Layout(title=r"$\text{(6) Average Train } MSE \text{ As Function of } \left(k,\sigma^2\right)$",
xaxis_title=r"$k$ - Fitted Polynomial Degree",
yaxis_title=r"$\sigma^2$ - Noise Levels")).show()
###Output
_____no_output_____
###Markdown
Time To Think... In the above figure, we observe the following trends: - As already seen before, for the noise-free data, once we reach the correct $k$ we achieve zero MSE. - Across all values of $k$, as we increase $\sigma^2$ we get higher MSE values. - For all noise levels, we manage to reduce MSE values by increasing $k$. So, by choosing a **richer** hypothesis class (i.e. one that is larger and can express more functions - polynomials of higher degree) we are able to choose a hypothesis that fits the **observed** data **better**, regardless of how noisy the data is. Try to think how the above heatmap would look if, instead of calculating the MSE over the training samples (i.e. the train error), we had calculated it over a **new** set of test samples drawn from the same distribution. Use the code below to create a test set. Change the code generating figure 6 such that the reported error is a test error. Do not forget to add the noise (that depends on $\sigma^2$) to the test data. What has changed between the train error and the test error? What happens for high/low values of $\sigma^2$? What happens for high/low values of $k$?
###Code
# Generate the x values of the test set
testX = np.linspace(-1.2, 2, 40)[1::2].reshape(-1,1)
# Generate the noisy y values of the test set. Set the noise level (the scale parameter) according to the specific setting
testY = response(testX).ravel() + np.random.normal(scale=___, size=len(testX))
###Output
_____no_output_____
###Markdown
Lab 03 - Polynomial Fitting: In the previous lab we discussed linear regression and the OLS estimator for minimizing the RSS. As we mentioned, regression problems are a very wide family of settings and algorithms which we use to estimate the relation between a set of explanatory variables and a **continuous** response (i.e. $\mathcal{Y}\subseteq\mathbb{R}^p$). In the following lab we will discuss one such setting called "Polynomial Fitting". Sometimes, the data (and the relation between the explanatory variables and the response) can be described by some polynomial of some degree. Here, we focus only on the case of a polynomial of a single variable. That is: $$ p_k\left(x\right)=\sum_{i=0}^{k}\alpha_i x^i\quad\alpha_0,\ldots,\alpha_k\in\mathbb{R} $$ So our hypothesis class is of the form: $$ \mathcal{H}^k_{poly}=\left\{p_k\,\Big|\, p_k\left(x\right)=\sum_{i=0}^{k}\alpha_i x^i\quad\alpha_0,\ldots,\alpha_k\in\mathbb{R}\right\} $$ Notice that, similar to linear regression, each hypothesis in the class is defined by a coefficients vector. Below are two examples (simulated and real) of datasets where the relation between the explanatory variable and the response is polynomial.
###Code
import sys
sys.path.append("../")
from utils import *
response = lambda x: x**4 - 2*x**3 - .5*x**2 + 1
x = np.linspace(-1.2, 2, 40)[0::2]
y_ = response(x)
df = pd.read_csv("../datasets/Position_Salaries.csv", skiprows=2, index_col=0)
x2, y2 = df.index, df.Salary
make_subplots(1, 2, subplot_titles=(r"$\text{Simulated Data: }y=x^4-2x^3-0.5x^2+1$", r"$\text{Positions Salary}$"))\
.add_traces([go.Scatter(x=x, y=y_, mode="markers", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x2, y=y2, mode="markers",marker=dict(color="black", opacity=.7), showlegend=False)],
rows=[1,1], cols=[1,2])\
.update_layout(title=r"$\text{(1) Datasets For Polynomial Fitting}$", margin=dict(t=100)).show()
###Output
_____no_output_____
###Markdown
As we have discussed in class, solving a polynomial fitting problem can be done by first manipulating the input data, such that we represent each sample $x_i\in\mathbb{R}$ as a vector $\mathbf{x}_i=\left(x_i^0,x_i^1,\ldots,x_i^k\right)$. Then, we treat the data as a design matrix $\mathbf{X}\in\mathbb{R}^{m\times\left(k+1\right)}$ of a linear regression problem. For the simulated dataset above, which is generated by a polynomial of degree 4, the design matrix looks as follows:
###Code
from sklearn.preprocessing import PolynomialFeatures
m, k, X = 5, 4, x.reshape(-1, 1)
pd.DataFrame(PolynomialFeatures(k).fit_transform(X[:m]),
columns=[rf"$x^{{0}}$".format(i) for i in range(0, k+1)],
index=[rf"$x_{{0}}$".format(i) for i in range(1, m+1)])
###Output
_____no_output_____
###Markdown
Fitting A Polynomial Of Different Degrees: Next, let us fit polynomials of different degrees and different noise properties to study how these choices influence the learned model. We begin with the noise-less case, where we fit for different values of $k$. As we increase $k$ we manage to fit a model that describes the data better, reflected by the decrease in the MSE. *Notice that both `PolynomialFeatures` and `LinearRegression` can add the bias/intercept parameter. As in this case it makes no difference, we include the bias in the polynomial features transformation and fit a linear regression **without** an intercept. The bias term of the polynomial features (i.e. $x^0$) is then, in effect, the intercept of the linear regression.*
###Code
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import make_pipeline
ks = [2, 3, 4, 5]
fig = make_subplots(1, 4, subplot_titles=list(ks))
for i, k in enumerate(ks):
y_hat = make_pipeline(PolynomialFeatures(k), LinearRegression(fit_intercept=False)).fit(X, y_).predict(X)
fig.add_traces([go.Scatter(x=x, y=y_, mode="markers", name="Real Points", marker=dict(color="black", opacity=.7), showlegend=False),
                    go.Scatter(x=x, y=y_hat, mode="markers", name="Predicted Points", marker=dict(color="blue", opacity=.7), showlegend=False)], rows=1, cols=i+1)
fig["layout"]["annotations"][i]["text"] = rf"$k={{0}}, MSE={{1}}$".format(k, round(np.mean((y_-y_hat)**2), 2))
fig.update_layout(title=r"$\text{(2) Simulated Data - Fitting Polynomials of Different Degrees}$",
margin=dict(t=60),
yaxis_title=r"$\widehat{y}$",
height=300).show()
###Output
_____no_output_____
###Markdown
Once we find the right $k$ (which in our case is 4) we manage to fit a perfect model; as we increase $k$ further, the additional coefficients come out as zero.
###Code
coefs = {}
for k in ks:
fit = make_pipeline(PolynomialFeatures(k), LinearRegression(fit_intercept=False)).fit(X, y_)
coefs[rf"$k={{{k}}}$"] = [round(c,3) for c in fit.steps[1][1].coef_]
pd.DataFrame.from_dict(coefs, orient='index', columns=[rf"$w_{{{i}}}$" for i in range(max(ks)+1)])
###Output
_____no_output_____
###Markdown
Fitting Polynomial Of Different Degrees - With Sample Noise: Still fitting for different values of $k$, let us now add standard Gaussian noise (i.e. $\mathcal{N}\left(0,1\right)$). This time we observe two things: - Even for the correct $k=4$ model we are no longer able to achieve zero MSE. - As we increase $k$ from 4 towards 7 we manage to decrease the error more and more.
###Code
y = y_ + np.random.normal(size=len(y_))
ks = range(2, 8)
fig = make_subplots(2, 3, subplot_titles=list(ks))
for i, k in enumerate(ks):
r,c = i//3+1, i%3+1
y_hat = make_pipeline(PolynomialFeatures(k), LinearRegression(fit_intercept=False)).fit(X, y).predict(X)
fig.add_traces([go.Scatter(x=x, y=y_, mode="markers", name="Real Points", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y, mode="markers", name="Observed Points", marker=dict(color="red", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y_hat, mode="markers", name="Predicted Points", marker=dict(color="blue", opacity=.7), showlegend=False)], rows=r, cols=c)
fig["layout"]["annotations"][i]["text"] = rf"$k={{0}}, MSE={{1}}$".format(k, round(np.mean((y-y_hat)**2), 2))
fig.update_layout(title=r"$\text{(4) Simulated Data With Noise - Fitting Polynomials of Different Degrees}$", margin=dict(t=80)).show()
###Output
_____no_output_____
###Markdown
How is it that we are able to fit "better" models for values of $k$ larger than the true one? As we increase $k$ we give the model more "degrees of freedom" to adapt itself to the observed data. The higher $k$, the more the learner will "go after the noise" and miss the real signal of the data. In other words, what we have just observed is known as **overfitting**. Later in the course we will learn methods for detecting and avoiding overfitting. Fitting Polynomial Over Different Sample Noise Levels: Next, let us fix $k=4$ (the true value) and study the fitted models when training over different noise levels. Though we will only be changing the scale of the noise (i.e. the variance, $\sigma^2$), changing other properties such as its distribution is interesting too. As we would expect, as we increase the scale of the noise our error increases. We can also observe this visually: the fitted polynomial (in blue) resembles the actual model (in black) less and less.
###Code
scales = range(6)
fig = make_subplots(2, 3, subplot_titles=list(map(str, scales)))
for i, s in enumerate(scales):
r,c = i//3+1, i%3+1
y = y_ + np.random.normal(scale=s, size=len(y_))
y_hat = make_pipeline(PolynomialFeatures(4), LinearRegression(fit_intercept=False)).fit(X, y).predict(X)
fig.add_traces([go.Scatter(x=x, y=y_, mode="markers", name="Real Points", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y, mode="markers", name="Observed Points", marker=dict(color="red", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y_hat, mode="markers", name="Predicted Points", marker=dict(color="blue", opacity=.7), showlegend=False)], rows=r, cols=c)
fig["layout"]["annotations"][i]["text"] = rf"$\sigma^2={{0}}, MSE={{1}}$".format(s, round(np.mean((y-y_hat)**2), 2))
fig.update_layout(title=r"$\text{(5) Simulated Data - Different Noise Scales}$", margin=dict(t=80)).show()
###Output
_____no_output_____
###Markdown
The Influence Of $k$ And $\sigma^2$ On Error: Lastly, let us check how the error is influenced by both $k$ and $\sigma^2$. For each value of $k$ and $\sigma^2$ we will add noise drawn from $\mathcal{N}\left(0,\sigma^2\right)$ and then, based on the noisy data, let the learner select a hypothesis from $\mathcal{H}_{poly}^k$. We repeat the process for each pair $\left(k,\sigma^2\right)$ 10 times and report the mean MSE value. Results are shown in the heatmap below:
###Code
from sklearn.model_selection import ParameterGrid
df = []
for setting in ParameterGrid(dict(k=range(10), s=np.linspace(0, 5, 10), repetition=range(10))):
y = y_ + np.random.normal(scale=setting["s"], size=len(y_))
y_hat = make_pipeline(PolynomialFeatures(setting["k"]), LinearRegression(fit_intercept=False)).fit(X, y).predict(X)
df.append([setting["k"], setting["s"], np.mean((y-y_hat)**2)])
df = pd.DataFrame.from_records(df, columns=["k", "sigma","mse"]).groupby(["k","sigma"]).mean().reset_index()
go.Figure(go.Heatmap(x=df.k, y=df.sigma, z=df.mse, colorscale="amp"),
layout=go.Layout(title=r"$\text{(6) Average Train } MSE \text{ As Function of } \left(k,\sigma^2\right)$",
xaxis_title=r"$k$ - Fitted Polynomial Degree",
yaxis_title=r"$\sigma^2$ - Noise Levels")).show()
###Output
_____no_output_____
###Markdown
Time To Think... In the above figure, we observe the following trends: - As already seen before, for the noise-free data, once we reach the correct $k$ we achieve zero MSE. - Across all values of $k$, as we increase $\sigma^2$ we get higher MSE values. - For all noise levels, we manage to reduce MSE values by increasing $k$. So, by choosing a **richer** hypothesis class (i.e. one that is larger and can express more functions - polynomials of higher degree) we are able to choose a hypothesis that fits the **observed** data **better**, regardless of how noisy the data is. Try to think how the above heatmap would look if, instead of calculating the MSE over the training samples (i.e. the train error), we had calculated it over a **new** set of test samples drawn from the same distribution. Use the code below to create a test set. Change the code generating figure 6 such that the reported error is a test error. Do not forget to add the noise (that depends on $\sigma^2$) to the test data. What has changed between the train error and the test error? What happens for high/low values of $\sigma^2$? What happens for high/low values of $k$?
###Code
# Generate the x values of the test set
testX = np.linspace(-1.2, 2, 40)[1::2].reshape(-1,1)
# Generate the noisy y values of the test set. Set the noise level (the scale parameter) according to the specific setting
df = []
for setting in ParameterGrid(dict(k=range(10), s=np.linspace(0, 5, 10), repetition=range(10))):
    testY = response(testX).ravel() + np.random.normal(scale=setting["s"], size=len(testX))  # flatten so shapes broadcast correctly
y = y_ + np.random.normal(scale=setting["s"], size=len(y_))
y_hat = make_pipeline(PolynomialFeatures(setting["k"]), LinearRegression(fit_intercept=False)).fit(X, y).predict(testX)
df.append([setting["k"], setting["s"], np.mean((testY-y_hat)**2)])
df = pd.DataFrame.from_records(df, columns=["k", "sigma","mse"]).groupby(["k","sigma"]).mean().reset_index()
go.Figure(go.Heatmap(x=df.k, y=df.sigma, z=df.mse, colorscale="amp"),
          layout=go.Layout(title=r"$\text{(6) Average Test } MSE \text{ As Function of } \left(k,\sigma^2\right)$",
xaxis_title=r"$k$ - Fitted Polynomial Degree",
yaxis_title=r"$\sigma^2$ - Noise Levels")).show()
###Output
_____no_output_____
###Markdown
Lab 03 - Polynomial Fitting: In the previous lab we discussed linear regression and the OLS estimator for minimizing the RSS. As we mentioned, regression problems are a very wide family of settings and algorithms which we use to estimate the relation between a set of explanatory variables and a **continuous** response (i.e. $\mathcal{Y}\subseteq\mathbb{R}^p$). In the following lab we will discuss one such setting called "Polynomial Fitting". Sometimes, the data (and the relation between the explanatory variables and the response) can be described by some polynomial of some degree. Here, we focus only on the case of a polynomial of a single variable. That is: $$ p_k\left(x\right)=\sum_{i=0}^{k}\alpha_i x^i\quad\alpha_0,\ldots,\alpha_k\in\mathbb{R} $$ So our hypothesis class is of the form: $$ \mathcal{H}^k_{poly}=\left\{p_k\,\Big|\, p_k\left(x\right)=\sum_{i=0}^{k}\alpha_i x^i\quad\alpha_0,\ldots,\alpha_k\in\mathbb{R}\right\} $$ Notice that, similar to linear regression, each hypothesis in the class is defined by a coefficients vector. Below are two examples (simulated and real) of datasets where the relation between the explanatory variable and the response is polynomial.
###Code
import sys
sys.path.append("../")
from utils import *
response = lambda x: x**4 - 2*x**3 - .5*x**2 + 1
x = np.linspace(-1.2, 2, 40)[0::2]
y_ = response(x)
df = pd.read_csv("../datasets/Position_Salaries.csv", skiprows=2, index_col=0)
x2, y2 = df.index, df.Salary
make_subplots(1, 2, subplot_titles=(r"$\text{Simulated Data: }y=x^4-2x^3-0.5x^2+1$", r"$\text{Positions Salary}$"))\
.add_traces([go.Scatter(x=x, y=y_, mode="markers", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x2, y=y2, mode="markers",marker=dict(color="black", opacity=.7), showlegend=False)],
rows=[1,1], cols=[1,2])\
.update_layout(title=r"$\text{(1) Datasets For Polynomial Fitting}$", margin=dict(t=100)).show()
###Output
_____no_output_____
###Markdown
As we have discussed in class, solving a polynomial fitting problem can be done by first manipulating the input data, such that we represent each sample $x_i\in\mathbb{R}$ as a vector $\mathbf{x}_i=\left(x_i^0,x_i^1,\ldots,x_i^k\right)$. Then, we treat the data as a design matrix $\mathbf{X}\in\mathbb{R}^{m\times\left(k+1\right)}$ of a linear regression problem. For the simulated dataset above, which is generated by a polynomial of degree 4, the design matrix looks as follows:
###Code
from sklearn.preprocessing import PolynomialFeatures
m, k, X = 5, 4, x.reshape(-1, 1)
pd.DataFrame(PolynomialFeatures(k).fit_transform(X[:m]),
columns=[rf"$x^{{0}}$".format(i) for i in range(0, k+1)],
index=[rf"$x_{{0}}$".format(i) for i in range(1, m+1)])
###Output
_____no_output_____
###Markdown
Fitting A Polynomial Of Different Degrees: Next, let us fit polynomials of different degrees and different noise properties to study how these choices influence the learned model. We begin with the noise-less case, where we fit for different values of $k$. As we increase $k$ we manage to fit a model that describes the data better, reflected by the decrease in the MSE. *Notice that both `PolynomialFeatures` and `LinearRegression` can add the bias/intercept parameter. As in this case it makes no difference, we include the bias in the polynomial features transformation and fit a linear regression **without** an intercept. The bias term of the polynomial features (i.e. $x^0$) is then, in effect, the intercept of the linear regression.*
###Code
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import make_pipeline
ks = [2, 3, 4, 5]
fig = make_subplots(1, 4, subplot_titles=list(ks))
for i, k in enumerate(ks):
y_hat = make_pipeline(PolynomialFeatures(k), LinearRegression(fit_intercept=False)).fit(X, y_).predict(X)
fig.add_traces([go.Scatter(x=x, y=y_, mode="markers", name="Real Points", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y_hat, mode="markers", name="Predicted Points", marker=dict(color="blue", opacity=.7), showlegend=False)], rows=1, cols=i+1)
fig["layout"]["annotations"][i]["text"] = rf"$k={{0}}, MSE={{1}}$".format(k, round(np.mean((y_-y_hat)**2), 2))
fig.update_layout(title=r"$\text{(2) Simulated Data - Fitting Polynomials of Different Degrees}$",
margin=dict(t=60),
yaxis_title=r"$\widehat{y}$",
height=300).show()
###Output
_____no_output_____
###Markdown
Once we find the right $k$ (which in our case is 4) we manage to fit a perfect model; as we increase $k$ further, the additional coefficients come out as zero.
###Code
coefs = {}
for k in ks:
fit = make_pipeline(PolynomialFeatures(k), LinearRegression(fit_intercept=False)).fit(X, y_)
coefs[rf"$k={{{k}}}$"] = [round(c,3) for c in fit.steps[1][1].coef_]
pd.DataFrame.from_dict(coefs, orient='index', columns=[rf"$w_{{{i}}}$" for i in range(max(ks)+1)])
###Output
_____no_output_____
###Markdown
Fitting Polynomial Of Different Degrees - With Sample Noise: Still fitting for different values of $k$, let us now add standard Gaussian noise (i.e. $\mathcal{N}\left(0,1\right)$). This time we observe two things: - Even for the correct $k=4$ model we are no longer able to achieve zero MSE. - As we increase $k$ from 4 towards 7 we manage to decrease the error more and more.
###Code
y = y_ + np.random.normal(size=len(y_))
ks = range(2, 8)
fig = make_subplots(2, 3, subplot_titles=list(ks))
for i, k in enumerate(ks):
r,c = i//3+1, i%3+1
y_hat = make_pipeline(PolynomialFeatures(k), LinearRegression(fit_intercept=False)).fit(X, y).predict(X)
fig.add_traces([go.Scatter(x=x, y=y_, mode="markers", name="Real Points", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y, mode="markers", name="Observed Points", marker=dict(color="red", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y_hat, mode="markers", name="Predicted Points", marker=dict(color="blue", opacity=.7), showlegend=False)], rows=r, cols=c)
fig["layout"]["annotations"][i]["text"] = rf"$k={{0}}, MSE={{1}}$".format(k, round(np.mean((y-y_hat)**2), 2))
fig.update_layout(title=r"$\text{(4) Simulated Data With Noise - Fitting Polynomials of Different Degrees}$", margin=dict(t=80)).show()
###Output
_____no_output_____
###Markdown
How is it that we are able to fit "better" models for values of $k$ larger than the true one? As we increase $k$ we give the model more "degrees of freedom" to adapt itself to the observed data. The higher $k$, the more the learner will "go after the noise" and miss the real signal of the data. In other words, what we have just observed is known as **overfitting**. Later in the course we will learn methods for detecting and avoiding overfitting. Fitting Polynomial Over Different Sample Noise Levels: Next, let us fix $k=4$ (the true value) and study the fitted models when training over different noise levels. Though we will only be changing the scale of the noise (i.e. the variance, $\sigma^2$), changing other properties such as its distribution is interesting too. As we would expect, as we increase the scale of the noise our error increases. We can also observe this visually: the fitted polynomial (in blue) resembles the actual model (in black) less and less.
###Code
scales = range(6)
fig = make_subplots(2, 3, subplot_titles=list(map(str, scales)))
for i, s in enumerate(scales):
r,c = i//3+1, i%3+1
y = y_ + np.random.normal(scale=s, size=len(y_))
y_hat = make_pipeline(PolynomialFeatures(4), LinearRegression(fit_intercept=False)).fit(X, y).predict(X)
fig.add_traces([go.Scatter(x=x, y=y_, mode="markers", name="Real Points", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y, mode="markers", name="Observed Points", marker=dict(color="red", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y_hat, mode="markers", name="Predicted Points", marker=dict(color="blue", opacity=.7), showlegend=False)], rows=r, cols=c)
fig["layout"]["annotations"][i]["text"] = rf"$\sigma^2={{0}}, MSE={{1}}$".format(s, round(np.mean((y-y_hat)**2), 2))
fig.update_layout(title=r"$\text{(5) Simulated Data - Different Noise Scales}$", margin=dict(t=80)).show()
###Output
_____no_output_____
###Markdown
The Influence Of $k$ And $\sigma^2$ On Error: Lastly, let us check how the error is influenced by both $k$ and $\sigma^2$. For each value of $k$ and $\sigma^2$ we will add noise drawn from $\mathcal{N}\left(0,\sigma^2\right)$ and then, based on the noisy data, let the learner select a hypothesis from $\mathcal{H}_{poly}^k$. We repeat the process for each pair $\left(k,\sigma^2\right)$ 10 times and report the mean MSE value. Results are shown in the heatmap below:
###Code
from sklearn.model_selection import ParameterGrid
df = []
for setting in ParameterGrid(dict(k=range(10), s=np.linspace(0, 5, 10), repetition=range(10))):
y = y_ + np.random.normal(scale=setting["s"], size=len(y_))
y_hat = make_pipeline(PolynomialFeatures(setting["k"]), LinearRegression(fit_intercept=False)).fit(X, y).predict(X)
df.append([setting["k"], setting["s"], np.mean((y-y_hat)**2)])
df = pd.DataFrame.from_records(df, columns=["k", "sigma","mse"]).groupby(["k","sigma"]).mean().reset_index()
go.Figure(go.Heatmap(x=df.k, y=df.sigma, z=df.mse, colorscale="amp"),
layout=go.Layout(title=r"$\text{(6) Average Train } MSE \text{ As Function of } \left(k,\sigma^2\right)$",
xaxis_title=r"$k$ - Fitted Polynomial Degree",
yaxis_title=r"$\sigma^2$ - Noise Levels")).show()
###Output
_____no_output_____
###Markdown
Time To Think... In the above figure, we observe the following trends: - As already seen before, for the noise-free data, once we reach the correct $k$ we achieve zero MSE. - Across all values of $k$, as we increase $\sigma^2$ we get higher MSE values. - For all noise levels, we manage to reduce MSE values by increasing $k$. So, by choosing a **richer** hypothesis class (i.e. one that is larger and can express more functions - polynomials of higher degree) we are able to choose a hypothesis that fits the **observed** data **better**, regardless of how noisy the data is. Try to think how the above heatmap would look if, instead of calculating the MSE over the training samples (i.e. the train error), we had calculated it over a **new** set of test samples drawn from the same distribution. Use the code below to create a test set. Change the code generating figure 6 such that the reported error is a test error. Do not forget to add the noise (that depends on $\sigma^2$) to the test data. What has changed between the train error and the test error? What happens for high/low values of $\sigma^2$? What happens for high/low values of $k$?
###Code
from sklearn.model_selection import ParameterGrid
# Generate the x values of the test set
testX = np.linspace(-1.2, 2, 40)[1::2].reshape(-1,1)
# Generate the noisy y values of the test set. Set the noise level (the scale parameter) according to the specific setting
# testY = response(testX) + np.random.normal(scale=___, size=len(y_))
df = []
for setting in ParameterGrid(dict(k=range(10), s=np.linspace(0, 5, 10), repetition=range(10))):
    # Noisy test responses; flatten response(testX) so shapes broadcast correctly
    testY = response(testX).ravel() + np.random.normal(scale=setting["s"], size=len(testX))
    y = y_ + np.random.normal(scale=setting["s"], size=len(y_))  # noisy train responses
    y_hat = make_pipeline(PolynomialFeatures(setting["k"]), LinearRegression(fit_intercept=False)).fit(X, y).predict(testX)
    df.append([setting["k"], setting["s"], np.mean((testY - y_hat)**2)])
df = pd.DataFrame.from_records(df, columns=["k", "sigma","mse"]).groupby(["k","sigma"]).mean().reset_index()
go.Figure(go.Heatmap(x=df.k, y=df.sigma, z=df.mse, colorscale="amp"),
          layout=go.Layout(title=r"$\text{(6) Average Test } MSE \text{ As Function of } \left(k,\sigma^2\right)$",
                           xaxis_title=r"$k$ - Fitted Polynomial Degree",
                           yaxis_title=r"$\sigma^2$ - Noise Levels")).show()
###Output
_____no_output_____
###Markdown
Lab 03 - Polynomial Fitting: In the previous lab we discussed linear regression and the OLS estimator for minimizing the RSS. As we mentioned, regression problems are a very wide family of settings and algorithms which we use to estimate the relation between a set of explanatory variables and a **continuous** response (i.e. $\mathcal{Y}\subseteq\mathbb{R}^p$). In the following lab we will discuss one such setting called "Polynomial Fitting". Sometimes, the data (and the relation between the explanatory variables and the response) can be described by some polynomial of some degree. Here, we focus only on the case of a polynomial of a single variable. That is: $$ p_k\left(x\right)=\sum_{i=0}^{k}\alpha_i x^i\quad\alpha_0,\ldots,\alpha_k\in\mathbb{R} $$ So our hypothesis class is of the form: $$ \mathcal{H}^k_{poly}=\left\{p_k\,\Big|\, p_k\left(x\right)=\sum_{i=0}^{k}\alpha_i x^i\quad\alpha_0,\ldots,\alpha_k\in\mathbb{R}\right\} $$ Notice that, similar to linear regression, each hypothesis in the class is defined by a coefficients vector. Below are two examples (simulated and real) of datasets where the relation between the explanatory variable and the response is polynomial.
###Code
import sys
sys.path.append("../")
from utils import *
response = lambda x: x**4 - 2*x**3 - .5*x**2 + 1
x = np.linspace(-1.2, 2, 40)[0::2]
y_ = response(x)
df = pd.read_csv("../datasets/Position_Salaries.csv", skiprows=2, index_col=0)
x2, y2 = df.index, df.Salary
make_subplots(1, 2, subplot_titles=(r"$\text{Simulated Data: }y=x^4-2x^3-0.5x^2+1$", r"$\text{Positions Salary}$"))\
.add_traces([go.Scatter(x=x, y=y_, mode="markers", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x2, y=y2, mode="markers",marker=dict(color="black", opacity=.7), showlegend=False)],
rows=[1,1], cols=[1,2])\
.update_layout(title=r"$\text{(1) Datasets For Polynomial Fitting}$", margin=dict(t=100)).show()
###Output
_____no_output_____
###Markdown
As we have discussed in class, solving a polynomial fitting problem can be done by first manipulating the input data, such that we represent each sample $x_i\in\mathbb{R}$ as a vector $\mathbf{x}_i=\left(x_i^0,x_i^1,\ldots,x_i^k\right)$. Then, we treat the data as a design matrix $\mathbf{X}\in\mathbb{R}^{m\times\left(k+1\right)}$ of a linear regression problem. For the simulated dataset above, which is generated by a polynomial of degree 4, the design matrix looks as follows:
###Code
from sklearn.preprocessing import PolynomialFeatures
m, k, X = 5, 4, x.reshape(-1, 1)
pd.DataFrame(PolynomialFeatures(k).fit_transform(X[:m]),
columns=[rf"$x^{{0}}$".format(i) for i in range(0, k+1)],
index=[rf"$x_{{0}}$".format(i) for i in range(1, m+1)])
###Output
[[ 1. -1.2 1.44 -1.728 2.0736 ]
[ 1. -1.03589744 1.0730835 -1.11160444 1.15150819]
[ 1. -0.87179487 0.7600263 -0.66258703 0.57763997]
[ 1. -0.70769231 0.5008284 -0.35443241 0.25082909]
[ 1. -0.54358974 0.29548981 -0.16062523 0.08731423]]
###Markdown
Fitting A Polynomial Of Different Degrees: Next, let us fit polynomials of different degrees and different noise properties to study how these choices influence the learned model. We begin with the noise-less case, where we fit for different values of $k$. As we increase $k$ we manage to fit a model that describes the data better, reflected by the decrease in the MSE. *Notice that both `PolynomialFeatures` and `LinearRegression` can add the bias/intercept parameter. As in this case it makes no difference, we include the bias in the polynomial features transformation and fit a linear regression **without** an intercept. The bias term of the polynomial features (i.e. $x^0$) is then, in effect, the intercept of the linear regression.*
###Code
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import make_pipeline
ks = [2, 3, 4, 5]
fig = make_subplots(1, 4, subplot_titles=list(ks))
for i, k in enumerate(ks):
y_hat = make_pipeline(PolynomialFeatures(k), LinearRegression(fit_intercept=False)).fit(X, y_).predict(X)
fig.add_traces([go.Scatter(x=x, y=y_, mode="markers", name="Real Points", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y_hat, mode="markers", name="Predicted Points", marker=dict(color="blue", opacity=.7), showlegend=False)], rows=1, cols=i+1)
fig["layout"]["annotations"][i]["text"] = rf"$k={{0}}, MSE={{1}}$".format(k, round(np.mean((y_-y_hat)**2), 2))
fig.update_layout(title=r"$\text{(2) Simulated Data - Fitting Polynomials of Different Degrees}$",
margin=dict(t=60),
yaxis_title=r"$\widehat{y}$",
height=300).show()
###Output
_____no_output_____
###Markdown
Once we find the right $k$ (which in our case is 4) we manage to fit a perfect model; as we increase $k$ further, the additional coefficients come out as zero.
###Code
coefs = {}
for k in ks:
fit = make_pipeline(PolynomialFeatures(k), LinearRegression(fit_intercept=False)).fit(X, y_)
coefs[rf"$k={{{k}}}$"] = [round(c,3) for c in fit.steps[1][1].coef_]
pd.DataFrame.from_dict(coefs, orient='index', columns=[rf"$w_{{{i}}}$" for i in range(max(ks)+1)])
###Output
_____no_output_____
###Markdown
Fitting Polynomial Of Different Degrees - With Sample Noise: Still fitting for different values of $k$, let us now add standard Gaussian noise (i.e. $\mathcal{N}\left(0,1\right)$). This time we observe two things: - Even for the correct $k=4$ model we are no longer able to achieve zero MSE. - As we increase $k$ from 4 towards 7 we manage to decrease the error more and more.
###Code
y = y_ + np.random.normal(size=len(y_))
ks = range(2, 8)
fig = make_subplots(2, 3, subplot_titles=list(ks))
for i, k in enumerate(ks):
r,c = i//3+1, i%3+1
y_hat = make_pipeline(PolynomialFeatures(k), LinearRegression(fit_intercept=False)).fit(X, y).predict(X)
fig.add_traces([go.Scatter(x=x, y=y_, mode="markers", name="Real Points", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y, mode="markers", name="Observed Points", marker=dict(color="red", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y_hat, mode="markers", name="Predicted Points", marker=dict(color="blue", opacity=.7), showlegend=False)], rows=r, cols=c)
fig["layout"]["annotations"][i]["text"] = rf"$k={{0}}, MSE={{1}}$".format(k, round(np.mean((y-y_hat)**2), 2))
fig.update_layout(title=r"$\text{(4) Simulated Data With Noise - Fitting Polynomials of Different Degrees}$", margin=dict(t=80)).show()
###Output
_____no_output_____
###Markdown
How is it that we are able to fit "better" models for values of $k$ larger than the true one? As we increase $k$ we give the model more "degrees of freedom" to adapt itself to the observed data. The higher $k$, the more the learner will "go after the noise" and miss the real signal of the data. In other words, what we have just observed is known as **overfitting**. Later in the course we will learn methods for detecting and avoiding overfitting. Fitting Polynomial Over Different Sample Noise Levels: Next, let us fix $k=4$ (the true value) and study the fitted models when training over different noise levels. Though we will only be changing the scale of the noise (i.e. the variance, $\sigma^2$), changing other properties such as its distribution is interesting too. As we would expect, as we increase the scale of the noise our error increases. We can also observe this visually: the fitted polynomial (in blue) resembles the actual model (in black) less and less.
###Code
scales = range(6)
fig = make_subplots(2, 3, subplot_titles=list(map(str, scales)))
for i, s in enumerate(scales):
r,c = i//3+1, i%3+1
y = y_ + np.random.normal(scale=s, size=len(y_))
y_hat = make_pipeline(PolynomialFeatures(4), LinearRegression(fit_intercept=False)).fit(X, y).predict(X)
fig.add_traces([go.Scatter(x=x, y=y_, mode="markers", name="Real Points", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y, mode="markers", name="Observed Points", marker=dict(color="red", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y_hat, mode="markers", name="Predicted Points", marker=dict(color="blue", opacity=.7), showlegend=False)], rows=r, cols=c)
fig["layout"]["annotations"][i]["text"] = rf"$\sigma^2={{0}}, MSE={{1}}$".format(s, round(np.mean((y-y_hat)**2), 2))
fig.update_layout(title=r"$\text{(5) Simulated Data - Different Noise Scales}$", margin=dict(t=80)).show()
###Output
_____no_output_____
###Markdown
The Influence Of $k$ And $\sigma^2$ On Error: Lastly, let us check how the error is influenced by both $k$ and $\sigma^2$. For each value of $k$ and $\sigma^2$ we will add noise drawn from $\mathcal{N}\left(0,\sigma^2\right)$ and then, based on the noisy data, let the learner select a hypothesis from $\mathcal{H}_{poly}^k$. We repeat the process for each pair $\left(k,\sigma^2\right)$ 10 times and report the mean MSE value. Results are shown in the heatmap below:
###Code
from sklearn.model_selection import ParameterGrid
df = []
for setting in ParameterGrid(dict(k=range(10), s=np.linspace(0, 5, 10), repetition=range(10))):
y = y_ + np.random.normal(scale=setting["s"], size=len(y_))
y_hat = make_pipeline(PolynomialFeatures(setting["k"]), LinearRegression(fit_intercept=False)).fit(X, y).predict(X)
df.append([setting["k"], setting["s"], np.mean((y-y_hat)**2)])
df = pd.DataFrame.from_records(df, columns=["k", "sigma","mse"]).groupby(["k","sigma"]).mean().reset_index()
go.Figure(go.Heatmap(x=df.k, y=df.sigma, z=df.mse, colorscale="amp"),
layout=go.Layout(title=r"$\text{(6) Average Train } MSE \text{ As Function of } \left(k,\sigma^2\right)$",
xaxis_title=r"$k$ - Fitted Polynomial Degree",
yaxis_title=r"$\sigma^2$ - Noise Levels")).show()
###Output
_____no_output_____
###Markdown
Time To Think... In the above figure, we observe the following trends: - As already seen before, for the noise-free data, once we reach the correct $k$ we achieve zero MSE. - Across all values of $k$, as we increase $\sigma^2$ we get higher MSE values. - For all noise levels, we manage to reduce MSE values by increasing $k$. So, by choosing a **richer** hypothesis class (i.e. one that is larger and can express more functions - polynomials of higher degree) we are able to choose a hypothesis that fits the **observed** data **better**, regardless of how noisy the data is. Try to think how the above heatmap would look if, instead of calculating the MSE over the training samples (i.e. the train error), we had calculated it over a **new** set of test samples drawn from the same distribution. Use the code below to create a test set. Change the code generating figure 6 such that the reported error is a test error. Do not forget to add the noise (that depends on $\sigma^2$) to the test data. What has changed between the train error and the test error? What happens for high/low values of $\sigma^2$? What happens for high/low values of $k$?
###Code
# Generate the x values of the test set
testX = np.linspace(-1.2, 2, 40)[1::2].reshape(-1,1)
df = []
for setting in ParameterGrid(dict(k=range(10), s=np.linspace(0, 5, 10), repetition=range(10))):
    # Generate the noisy y values of the test set, with the noise level of the specific setting
    testY = response(testX).ravel() + np.random.normal(scale=setting["s"], size=len(testX))
    y = y_ + np.random.normal(scale=setting["s"], size=len(y_))  # noisy train responses
    # Fit on the (noisy) train set and evaluate on the test set
    y_hat = make_pipeline(PolynomialFeatures(setting["k"]), LinearRegression(fit_intercept=False)).fit(X, y).predict(testX)
    df.append([setting["k"], setting["s"], np.mean((testY - y_hat)**2)])
df = pd.DataFrame.from_records(df, columns=["k", "sigma","mse"]).groupby(["k","sigma"]).mean().reset_index()
go.Figure(go.Heatmap(x=df.k, y=df.sigma, z=df.mse, colorscale="amp"),
          layout=go.Layout(title=r"$\text{(6) Average Test } MSE \text{ As Function of } \left(k,\sigma^2\right)$",
                           xaxis_title=r"$k$ - Fitted Polynomial Degree",
                           yaxis_title=r"$\sigma^2$ - Noise Levels")).show()
###Output
_____no_output_____
###Markdown
Lab 03 - Polynomial Fitting: In the previous lab we discussed linear regression and the OLS estimator for minimizing the RSS. As we mentioned, regression problems are a very wide family of settings and algorithms which we use to estimate the relation between a set of explanatory variables and a **continuous** response (i.e. $\mathcal{Y}\subseteq\mathbb{R}^p$). In the following lab we will discuss one such setting called "Polynomial Fitting". Sometimes, the data (and the relation between the explanatory variables and the response) can be described by some polynomial of some degree. Here, we focus only on the case of a polynomial of a single variable. That is: $$ p_k\left(x\right)=\sum_{i=0}^{k}\alpha_i x^i\quad\alpha_0,\ldots,\alpha_k\in\mathbb{R} $$ So our hypothesis class is of the form: $$ \mathcal{H}^k_{poly}=\left\{p_k\,\Big|\, p_k\left(x\right)=\sum_{i=0}^{k}\alpha_i x^i\quad\alpha_0,\ldots,\alpha_k\in\mathbb{R}\right\} $$ Notice that, similar to linear regression, each hypothesis in the class is defined by a coefficients vector. Below are two examples (simulated and real) of datasets where the relation between the explanatory variable and the response is polynomial.
###Code
import sys
sys.path.append("../")
from utils import *
response = lambda x: x**4 - 2*x**3 - .5*x**2 + 1
x = np.linspace(-1.2, 2, 40)[0::2]
y_ = response(x)
df = pd.read_csv("../datasets/Position_Salaries.csv", skiprows=2, index_col=0)
x2, y2 = df.index, df.Salary
make_subplots(1, 2, subplot_titles=(r"$\text{Simulated Data: }y=x^4-2x^3-0.5x^2+1$", r"$\text{Positions Salary}$"))\
.add_traces([go.Scatter(x=x, y=y_, mode="markers", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x2, y=y2, mode="markers",marker=dict(color="black", opacity=.7), showlegend=False)],
rows=[1,1], cols=[1,2])\
.update_layout(title=r"$\text{(1) Datasets For Polynomial Fitting}$", margin=dict(t=100)).show()
###Output
_____no_output_____
###Markdown
As we have discussed in class, solving a polynomial fitting problem can be done by first manipulating the input data, such that we represent each sample $x_i\in\mathbb{R}$ as a vector $\mathbf{x}_i=\left(x_i^0,x_i^1,\ldots,x_i^k\right)$. Then, we treat the data as a design matrix $\mathbf{X}\in\mathbb{R}^{m\times\left(k+1\right)}$ of a linear regression problem. For the simulated dataset above, which is generated by a polynomial of degree 4, the design matrix looks as follows:
###Code
from sklearn.preprocessing import PolynomialFeatures
m, k, X = 5, 4, x.reshape(-1, 1)
pd.DataFrame(PolynomialFeatures(k).fit_transform(X[:m]),
columns=[rf"$x^{{0}}$".format(i) for i in range(0, k+1)],
index=[rf"$x_{{0}}$".format(i) for i in range(1, m+1)])
###Output
_____no_output_____
###Markdown
Fitting A Polynomial Of Different Degrees: Next, let us fit polynomials of different degrees and different noise properties to study how these choices influence the learned model. We begin with the noise-less case, where we fit for different values of $k$. As we increase $k$ we manage to fit a model that describes the data better, reflected by the decrease in the MSE. *Notice that both `PolynomialFeatures` and `LinearRegression` can add the bias/intercept parameter. As in this case it makes no difference, we include the bias in the polynomial features transformation and fit a linear regression **without** an intercept. The bias term of the polynomial features (i.e. $x^0$) is then, in effect, the intercept of the linear regression.*
###Code
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import make_pipeline
ks = [2, 3, 4, 5]
fig = make_subplots(1, 4, subplot_titles=list(ks))
for i, k in enumerate(ks):
y_hat = make_pipeline(PolynomialFeatures(k), LinearRegression(fit_intercept=False)).fit(X, y_).predict(X)
fig.add_traces([go.Scatter(x=x, y=y_, mode="markers", name="Real Points", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y_hat, mode="markers", name="Predicted Points", marker=dict(color="blue", opacity=.7), showlegend=False)], rows=1, cols=i+1)
fig["layout"]["annotations"][i]["text"] = rf"$k={{0}}, MSE={{1}}$".format(k, round(np.mean((y_-y_hat)**2), 2))
fig.update_layout(title=r"$\text{(2) Simulated Data - Fitting Polynomials of Different Degrees}$",
margin=dict(t=60),
yaxis_title=r"$\widehat{y}$",
height=300).show()
###Output
_____no_output_____
###Markdown
Once we find the right $k$ (which in our case is 4) we manage to fit a perfect model; as we increase $k$ further, the additional coefficients come out as zero.
###Code
coefs = {}
for k in ks:
fit = make_pipeline(PolynomialFeatures(k), LinearRegression(fit_intercept=False)).fit(X, y_)
coefs[rf"$k={{{k}}}$"] = [round(c,3) for c in fit.steps[1][1].coef_]
pd.DataFrame.from_dict(coefs, orient='index', columns=[rf"$w_{{{i}}}$" for i in range(max(ks)+1)])
###Output
_____no_output_____
###Markdown
Fitting Polynomial Of Different Degrees - With Sample Noise: Still fitting for different values of $k$, let us now add standard Gaussian noise (i.e. $\mathcal{N}\left(0,1\right)$). This time we observe two things: - Even for the correct $k=4$ model we are no longer able to achieve zero MSE. - As we increase $k$ from 4 towards 7 we manage to decrease the error more and more.
###Code
y = y_ + np.random.normal(size=len(y_))
ks = range(2, 8)
fig = make_subplots(2, 3, subplot_titles=list(ks))
for i, k in enumerate(ks):
r,c = i//3+1, i%3+1
y_hat = make_pipeline(PolynomialFeatures(k), LinearRegression(fit_intercept=False)).fit(X, y).predict(X)
fig.add_traces([go.Scatter(x=x, y=y_, mode="markers", name="Real Points", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y, mode="markers", name="Observed Points", marker=dict(color="red", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y_hat, mode="markers", name="Predicted Points", marker=dict(color="blue", opacity=.7), showlegend=False)], rows=r, cols=c)
fig["layout"]["annotations"][i]["text"] = rf"$k={{0}}, MSE={{1}}$".format(k, round(np.mean((y-y_hat)**2), 2))
fig.update_layout(title=r"$\text{(4) Simulated Data With Noise - Fitting Polynomials of Different Degrees}$", margin=dict(t=80)).show()
###Output
_____no_output_____
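###Markdown
*A small peek (an addition, not part of the original lab) at the coefficients behind the noisy fits above: for $k=7$ the coefficients beyond degree 4 are no longer zero; they are spent on the noise, which is exactly the phenomenon discussed next.*
###Code
# Inspect the fitted coefficient vectors on the noisy data (y from the cell above)
for k in (4, 7):
    w = make_pipeline(PolynomialFeatures(k), LinearRegression(fit_intercept=False)).fit(X, y).steps[1][1].coef_
    print(f"k={k}:", np.round(w, 2))
###Output
_____no_output_____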
###Markdown
How is it that we are able to fit "better" models for values of $k$ larger than the true one? As we increase $k$ we give the model more "degrees of freedom" to adapt itself to the observed data. The higher $k$ is, the more the learner will "go after the noise" and miss the real signal of the data. In other words, what we have just observed is known as **overfitting**. Later in the course we will learn methods for detecting and avoiding overfitting. Fitting Polynomial Over Different Sample Noise Levels: Next, let us set $k=4$ (the true value) and study the fitted models when training over different noise levels. Though we will only be changing the scale (standard deviation) of the noise, changing other properties such as its distribution would be interesting too. As we would expect, the error increases as we increase the noise scale. We can also observe this visually: the fitted polynomial (in blue) resembles the actual model (in black) less and less. A sanity check of the expected train error under the true model follows the figure.
###Code
scales = range(6)
fig = make_subplots(2, 3, subplot_titles=list(map(str, scales)))
for i, s in enumerate(scales):
r,c = i//3+1, i%3+1
y = y_ + np.random.normal(scale=s, size=len(y_))
y_hat = make_pipeline(PolynomialFeatures(4), LinearRegression(fit_intercept=False)).fit(X, y).predict(X)
fig.add_traces([go.Scatter(x=x, y=y_, mode="markers", name="Real Points", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y, mode="markers", name="Observed Points", marker=dict(color="red", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y_hat, mode="markers", name="Predicted Points", marker=dict(color="blue", opacity=.7), showlegend=False)], rows=r, cols=c)
fig["layout"]["annotations"][i]["text"] = rf"$\sigma^2={{0}}, MSE={{1}}$".format(s, round(np.mean((y-y_hat)**2), 2))
fig.update_layout(title=r"$\text{(5) Simulated Data - Different Noise Scales}$", margin=dict(t=80)).show()
###Output
_____no_output_____
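###Markdown
*A sanity check (an addition, not part of the original lab): under the true $k=4$ model, OLS theory gives an expected train MSE of $\sigma^2\frac{n-p}{n}$ with $n$ samples and $p=k+1$ parameters, so the error floors seen above scale with the noise. We verify this empirically for one noise level.*
###Code
# Compare the empirical mean train MSE to the theoretical sigma^2 * (n - p) / n
n, p, sigma = len(y_), 5, 3.0
mses = []
for _ in range(500):
    yn = y_ + np.random.normal(scale=sigma, size=n)
    yh = make_pipeline(PolynomialFeatures(4), LinearRegression(fit_intercept=False)).fit(X, yn).predict(X)
    mses.append(np.mean((yn - yh) ** 2))
print(round(np.mean(mses), 2), sigma ** 2 * (n - p) / n)  # the two should be close (~6.75)
###Output
_____no_output_____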
###Markdown
The Influence Of $k$ And $\sigma^2$ On Error: Lastly, let us check how the error is influenced by both $k$ and $\sigma^2$. For each value of $k$ and $\sigma^2$ we will add noise drawn from $\mathcal{N}\left(0,\sigma^2\right)$ and then, based on the noisy data, let the learner select a hypothesis from $\mathcal{H}_{poly}^k$. We repeat the process 10 times for each pair $\left(k,\sigma^2\right)$ and report the mean MSE value. Results are shown in the heatmap below:
###Code
from sklearn.model_selection import ParameterGrid
df = []
for setting in ParameterGrid(dict(k=range(10), s=np.linspace(0, 5, 10), repetition=range(10))):
y = y_ + np.random.normal(scale=setting["s"], size=len(y_))
y_hat = make_pipeline(PolynomialFeatures(setting["k"]), LinearRegression(fit_intercept=False)).fit(X, y).predict(X)
df.append([setting["k"], setting["s"], np.mean((y-y_hat)**2)])
df = pd.DataFrame.from_records(df, columns=["k", "sigma","mse"]).groupby(["k","sigma"]).mean().reset_index()
go.Figure(go.Heatmap(x=df.k, y=df.sigma, z=df.mse, colorscale="amp"),
layout=go.Layout(title=r"$\text{(6) Average Train } MSE \text{ As Function of } \left(k,\sigma^2\right)$",
xaxis_title=r"$k$ - Fitted Polynomial Degree",
yaxis_title=r"$\sigma^2$ - Noise Levels")).show()
###Output
_____no_output_____
###Markdown
Time To Think... In the above figure, we observe the following trends: (i) as already seen before, for the noise-free data, once we reach the correct $k$ we achieve zero MSE; (ii) across all values of $k$, as we increase $\sigma^2$ we get higher MSE values; (iii) for all noise levels, we manage to reduce MSE values by increasing $k$. So, by choosing a **richer** hypothesis class (i.e. one that is larger and can express more functions, such as polynomials of higher degree) we are able to choose a hypothesis that fits the **observed** data **better**, regardless of how noisy the data is. Try to think how the above heatmap would look if, instead of calculating the MSE over the training samples (i.e. the train error), we calculated it over a **new** set of test samples drawn from the same distribution. The code below creates such a test set and adapts the code generating figure (6) so that the reported error is a test error; note that the noise (which depends on $\sigma^2$) is added to the test data as well. What changes between the train error and the test error? What happens for high/low values of $\sigma^2$? What happens for high/low values of $k$?
###Code
# Generate the x values of the test set
testX = np.linspace(-1.2, 2, 40)[1::2].reshape(-1, 1)
df = []
for setting in ParameterGrid(dict(k=range(10), s=np.linspace(0, 5, 10), repetition=range(10))):
    # Draw fresh noisy responses for both the train and the test set at the current noise level
    y_train = y_ + np.random.normal(scale=setting["s"], size=len(y_))
    testY = response(testX).flatten() + np.random.normal(scale=setting["s"], size=len(testX))
    # Fit on the noisy train set, evaluate on the held-out test set
    y_hat = make_pipeline(PolynomialFeatures(setting["k"]), LinearRegression(fit_intercept=False)).fit(X, y_train).predict(testX)
    df.append([setting["k"], setting["s"], np.mean((testY - y_hat) ** 2)])
df = pd.DataFrame.from_records(df, columns=["k", "sigma", "mse"]).groupby(["k", "sigma"]).mean().reset_index()
go.Figure(go.Heatmap(x=df.k, y=df.sigma, z=df.mse, colorscale="amp"),
          layout=go.Layout(title=r"$\text{(7) Average Test } MSE \text{ As Function of } \left(k,\sigma^2\right)$",
                           xaxis_title=r"$k$ - Fitted Polynomial Degree",
                           yaxis_title=r"$\sigma^2$ - Noise Levels")).show()
###Output
_____no_output_____
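###Markdown
*A note on what to expect (an addition, not part of the original lab): with the test error, the monotone decrease in $k$ disappears. Up to the true degree ($k=4$) the test MSE drops much like the train MSE, but beyond it the extra coefficients fit the train noise and the test MSE typically rises again, more sharply the larger $\sigma^2$ is. For $\sigma^2=0$, train and test errors essentially coincide once $k\geq 4$.*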
###Markdown
Lab 03 - Polynomial FittingIn the previous lab we discussed linear regression and the OLS estimator for solving the minimization of the RSS. As wementioned, regression problems are a very wide family of settings and algorithms which we use to try to estimate the relation between a set of explanatory variables and a **continuous** response (i.e. $\mathcal{Y}\in\mathbb{R}^p$). In the following lab we will discuss one such setting called "Polynomial Fitting". Sometimes, the data (and the relation between the explanatory variables and response) can be described by some polynomialof some degree. Here, we only focus on the case where it is a polynomial of a single variable. That is: $$ p_k\left(x\right)=\sum_{i=0}^{k}\alpha_i x^i\quad\alpha_0,\ldots,\alpha_k\in\mathbb{R} $$So our hypothesis class is of the form:$$ \mathcal{H}^k_{poly}=\left\{p_k\,\Big|\, p_k\left(x\right)=\sum_{i=0}^{k}\alpha_i x^i\quad\alpha_0,\ldots,\alpha_k\in\mathbb{R}\right\} $$Notice that similar to linear regression, each hypothesis in the class is defined by a coefficients vector. Below are twoexamples (simulated and real) for datasets where the relation between the explanatory variable and response is polynomial.
###Code
import sys
sys.path.append("../")
from utils import *
response = lambda x: x**4 - 2*x**3 - .5*x**2 + 1
x = np.linspace(-1.2, 2, 40)[0::2]
y_ = response(x)
df = pd.read_csv("../datasets/Position_Salaries.csv", skiprows=2, index_col=0)
x2, y2 = df.index, df.Salary
make_subplots(1, 2, subplot_titles=(r"$\text{Simulated Data: }y=x^4-2x^3-0.5x^2+1$", r"$\text{Positions Salary}$"))\
.add_traces([go.Scatter(x=x, y=y_, mode="markers", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x2, y=y2, mode="markers",marker=dict(color="black", opacity=.7), showlegend=False)],
rows=[1,1], cols=[1,2])\
.update_layout(title=r"$\text{(1) Datasets For Polynomial Fitting}$", margin=dict(t=100)).show()
###Output
_____no_output_____
###Markdown
As we have discussed in class, solving a polynomial fitting problem can be done by first manipulating the input data,such that we represent each sample $x_i\in\mathbb{R}$ as a vector $\mathbf{x}_i=\left(x^0,x^1,\ldots,x^k\right)$. Then,we treat the data as a design matrix $\mathbf{X}\in\mathbb{R}^{m\times k}$ of a linear regression problem.For the simulated dataset above, which is of a polynomial of degree 4, the design matrix looks as follows:
###Code
from sklearn.preprocessing import PolynomialFeatures
m, k, X = 5, 4, x.reshape(-1, 1)
pd.DataFrame(PolynomialFeatures(k).fit_transform(X[:m]),
columns=[rf"$x^{{0}}$".format(i) for i in range(0, k+1)],
index=[rf"$x_{{0}}$".format(i) for i in range(1, m+1)])
###Output
_____no_output_____
###Markdown
Fitting A Polynomial Of Different DegreesNext, let us fit polynomials of different degrees and different noise properties to study how it influences the learned model.We begin with the noise-less case where we fit for different values of $k$. As we increase $k$ we manage to fit a modelthat describes the data in a better way, reflected by the decrease in the MSE.*Notice that in both the `PolynomialFeatures` and `LinearRegression` functions we can add the bias/intercept parameter. As in this case it makes no difference, we will include the bias in the polynomial features transformation and fit a linear regression **without** an intercept. The bias parameter of the polynomial features (i.e. $x^0$) will in reality be the intercept of the linear regression.*
###Code
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import make_pipeline
ks = [2, 3, 4, 5]
fig = make_subplots(1, 4, subplot_titles=list(ks))
for i, k in enumerate(ks):
y_hat = make_pipeline(PolynomialFeatures(k), LinearRegression(fit_intercept=False)).fit(X, y_).predict(X)
fig.add_traces([go.Scatter(x=x, y=y_, mode="markers", name="Real Points", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y_hat, mode="markers", name="Predicted Points", marker=dict(color="blue", opacity=.7), showlegend=False)], rows=1, cols=i+1)
fig["layout"]["annotations"][i]["text"] = rf"$k={{0}}, MSE={{1}}$".format(k, round(np.mean((y_-y_hat)**2), 2))
fig.update_layout(title=r"$\text{(2) Simulated Data - Fitting Polynomials of Different Degrees}$",
margin=dict(t=60),
yaxis_title=r"$\widehat{y}$",
height=300).show()
###Output
_____no_output_____
###Markdown
Once we find the right $k$ (which in our case is 4) we managed to fit a perfect model, after which, as we increase $k$, the additional coefficients will be zero.
###Code
coefs = {}
for k in ks:
fit = make_pipeline(PolynomialFeatures(k), LinearRegression(fit_intercept=False)).fit(X, y_)
coefs[rf"$k={{{k}}}$"] = [round(c,3) for c in fit.steps[1][1].coef_]
pd.DataFrame.from_dict(coefs, orient='index', columns=[rf"$w_{{{i}}}$" for i in range(max(ks)+1)])
###Output
_____no_output_____
###Markdown
Fitting Polynomial Of Different Degrees - With Sample NoiseStill fitting for different values of $k$, let us add some standard Gaussian noise (i.e. $\mathcal{N}\left(0,1\right)$).This time we observe two things:- Even for the correct $k=4$ model we are not able to achieve zero MSE.- As we increase $4<k\rightarrow 7$ we manage to decrease the error more and more.
###Code
y = y_ + np.random.normal(size=len(y_))
ks = range(2, 8)
fig = make_subplots(2, 3, subplot_titles=list(ks))
for i, k in enumerate(ks):
r,c = i//3+1, i%3+1
y_hat = make_pipeline(PolynomialFeatures(k), LinearRegression(fit_intercept=False)).fit(X, y).predict(X)
fig.add_traces([go.Scatter(x=x, y=y_, mode="markers", name="Real Points", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y, mode="markers", name="Observed Points", marker=dict(color="red", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y_hat, mode="markers", name="Predicted Points", marker=dict(color="blue", opacity=.7), showlegend=False)], rows=r, cols=c)
fig["layout"]["annotations"][i]["text"] = rf"$k={{0}}, MSE={{1}}$".format(k, round(np.mean((y-y_hat)**2), 2))
fig.update_layout(title=r"$\text{(4) Simulated Data With Noise - Fitting Polynomials of Different Degrees}$", margin=dict(t=80)).show()
###Output
_____no_output_____
###Markdown
How is it that we are able to fit "better" models for $k$s larger than the true one? As we increase $k$ we enable the modelmore "degrees of freedom" to try and adapt itself to the observed data. The higher $k$ the more the learner will "go afterthe noise" and miss the real signal of the data. In other words, what we have just observed is what is known as **overfitting**.Later in the course we will learn methods for detection and avoidance of overfitting. Fitting Polynomial Over Different Sample Noise LevelsNext, let us set $k=4$ (the true values) and study the outputted models when training over different noise levels. Thoughwe will only be changing the scale of the noise (i.e. the variance, $\sigma^2$), changing other properties such as itsdistribution is interesting too. As we would expect, as we increase the scale of the noise our error increases. We canobserve this also in a visual manner, where the fitted polynomial (in blue) less and less resembles the actual model (in black).
###Code
scales = range(6)
fig = make_subplots(2, 3, subplot_titles=list(map(str, scales)))
for i, s in enumerate(scales):
r,c = i//3+1, i%3+1
y = y_ + np.random.normal(scale=s, size=len(y_))
y_hat = make_pipeline(PolynomialFeatures(4), LinearRegression(fit_intercept=False)).fit(X, y).predict(X)
fig.add_traces([go.Scatter(x=x, y=y_, mode="markers", name="Real Points", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y, mode="markers", name="Observed Points", marker=dict(color="red", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y_hat, mode="markers", name="Predicted Points", marker=dict(color="blue", opacity=.7), showlegend=False)], rows=r, cols=c)
fig["layout"]["annotations"][i]["text"] = rf"$\sigma^2={{0}}, MSE={{1}}$".format(s, round(np.mean((y-y_hat)**2), 2))
fig.update_layout(title=r"$\text{(5) Simulated Data - Different Noise Scales}$", margin=dict(t=80)).show()
###Output
_____no_output_____
###Markdown
The Influence Of $k$ And $\sigma^2$ On ErrorLastly, let us check how the error is influenced by both $k$ and $\sigma^2$. For each value of $k$ and $\sigma^2$ we willadd noise drawn from $\mathcal{N}\left(0,\sigma^2\right)$ and then, based on the noisy data, let the learner select anhypothesis from $\mathcal{H}_{poly}^k$. We repeat the process for each set of $\left(k,\sigma^2\right)$ 10 times and reportthe mean MSE value. Results are seen in heatmap below:
###Code
from sklearn.model_selection import ParameterGrid
testX = np.linspace(-1.2, 2, 40)[1::2].reshape(-1,1)
# Generate the noisy y values of the test set. Set the noise level (the scale parameter) according to the specific setting
df = []
for setting in ParameterGrid(dict(k=range(10), s=np.linspace(0, 5, 10), repetition=range(10))):
y = y_ + np.random.normal(scale=setting["s"], size=len(y_))
y_hat = make_pipeline(PolynomialFeatures(setting["k"]), LinearRegression(fit_intercept=False)).fit(X, y).predict(testX)
testY = response(testX) + np.random.normal(scale=s, size=len(y_))
df.append([setting["k"], setting["s"], np.mean((testY-y_hat)**2)])
df = pd.DataFrame.from_records(df, columns=["k", "sigma","mse"]).groupby(["k","sigma"]).mean().reset_index()
go.Figure(go.Heatmap(x=df.k, y=df.sigma, z=df.mse, colorscale="amp"),
layout=go.Layout(title=r"$\text{(6) Average Train } MSE \text{ As Function of } \left(k,\sigma^2\right)$",
xaxis_title=r"$k$ - Fitted Polynomial Degree",
yaxis_title=r"$\sigma^2$ - Noise Levels")).show()
###Output
_____no_output_____
###Markdown
Time To Think...In the above figure, we observe the following trends:- As already seen before, for the noise-free data, once we reach the correct $k$ we achieve zero MSE.- Across all values of $k$, as we increase $\sigma^2$ we get higher MSE values.- For all noise levels, we manage to reduce MSE values by increasing $k$.So, by choosing a **richer** hypothesis class (i.e. larger and that can express more functions - polynomials of higherdegree) we are able to choose an hypothesis that fits the **observed** data **better**, regardless to how noisy the data is.Try and think how the above heatmap would look if instead of calculating the MSE over the training samples (i.e train error)we would have calculated it over a **new** set of test samples drawn from the same distribution.Use the below code to create a test set. Change the code generating figure 6 such that the reported error is a test error. Do not forget to add the noise (that depends on $\sigma^2$) to the test data. What has changed between what we observe for the train error to the test error? What happens for high/low values of $\sigma^2$? What happens for high/low values of $k$?
###Code
# Generate the x values of the test set
testX = np.linspace(-1.2, 2, 40)[1::2].reshape(-1,1)
# Generate the noisy y values of the test set. Set the noise level (the scale parameter) according to the specific setting
testY = response(testX) + np.random.normal(scale=___, size=len(y_))
###Output
_____no_output_____
###Markdown
Lab 03 - Polynomial FittingIn the previous lab we discussed linear regression and the OLS estimator for solving the minimization of the RSS. As wementioned, regression problems are a very wide family of settings and algorithms which we use to try to estimate the relation between a set of explanatory variables and a **continuous** response (i.e. $\mathcal{Y}\in\mathbb{R}^p$). In the following lab we will discuss one such setting called "Polynomial Fitting". Sometimes, the data (and the relation between the explanatory variables and response) can be described by some polynomialof some degree. Here, we only focus on the case where it is a polynomial of a single variable. That is: $$ p_k\left(x\right)=\sum_{i=0}^{k}\alpha_i x^i\quad\alpha_0,\ldots,\alpha_k\in\mathbb{R} $$So our hypothesis class is of the form:$$ \mathcal{H}^k_{poly}=\left\{p_k\,\Big|\, p_k\left(x\right)=\sum_{i=0}^{k}\alpha_i x^i\quad\alpha_0,\ldots,\alpha_k\in\mathbb{R}\right\} $$Notice that similar to linear regression, each hypothesis in the class is defined by a coefficients vector. Below are twoexamples (simulated and real) for datasets where the relation between the explanatory variable and response is polynomial.
###Code
import sys
sys.path.append("../")
from utils import *
response = lambda x: x**4 - 2*x**3 - .5*x**2 + 1
x = np.linspace(-1.2, 2, 40)[0::2]
y_ = response(x)
df = pd.read_csv("../datasets/Position_Salaries.csv", skiprows=2, index_col=0)
x2, y2 = df.index, df.Salary
make_subplots(1, 2, subplot_titles=(r"$\text{Simulated Data: }y=x^4-2x^3-0.5x^2+1$", r"$\text{Positions Salary}$"))\
.add_traces([go.Scatter(x=x, y=y_, mode="markers", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x2, y=y2, mode="markers",marker=dict(color="black", opacity=.7), showlegend=False)],
rows=[1,1], cols=[1,2])\
.update_layout(title=r"$\text{(1) Datasets For Polynomial Fitting}$", margin=dict(t=100)).show()
###Output
_____no_output_____
###Markdown
As we have discussed in class, solving a polynomial fitting problem can be done by first manipulating the input data,such that we represent each sample $x_i\in\mathbb{R}$ as a vector $\mathbf{x}_i=\left(x^0,x^1,\ldots,x^k\right)$. Then,we treat the data as a design matrix $\mathbf{X}\in\mathbb{R}^{m\times k}$ of a linear regression problem.For the simulated dataset above, which is of a polynomial of degree 4, the design matrix looks as follows:
###Code
from sklearn.preprocessing import PolynomialFeatures
m, k, X = 5, 4, x.reshape(-1, 1)
pd.DataFrame(PolynomialFeatures(k).fit_transform(X[:m]),
columns=[rf"$x^{{0}}$".format(i) for i in range(0, k+1)],
index=[rf"$x_{{0}}$".format(i) for i in range(1, m+1)])
###Output
_____no_output_____
###Markdown
Fitting A Polynomial Of Different DegreesNext, let us fit polynomials of different degrees and different noise properties to study how it influences the learned model.We begin with the noise-less case where we fit for different values of $k$. As we increase $k$ we manage to fit a modelthat describes the data in a better way, reflected by the decrease in the MSE.*Notice that in both the `PolynomialFeatures` and `LinearRegression` functions we can add the bias/intercept parameter. As in this case it makes no difference, we will include the bias in the polynomial features transformation and fit a linear regression **without** an intercept. The bias parameter of the polynomial features (i.e. $x^0$) will in reality be the intercept of the linear regression.*
###Code
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import make_pipeline
ks = [2, 3, 4, 5]
fig = make_subplots(1, 4, subplot_titles=list(ks))
for i, k in enumerate(ks):
y_hat = make_pipeline(PolynomialFeatures(k), LinearRegression(fit_intercept=False)).fit(X, y_).predict(X)
fig.add_traces([go.Scatter(x=x, y=y_, mode="markers", name="Real Points", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y_hat, mode="markers", name="Predicted Points", marker=dict(color="blue", opacity=.7), showlegend=False)], rows=1, cols=i+1)
fig["layout"]["annotations"][i]["text"] = rf"$k={{0}}, MSE={{1}}$".format(k, round(np.mean((y_-y_hat)**2), 2))
fig.update_layout(title=r"$\text{(2) Simulated Data - Fitting Polynomials of Different Degrees}$",
margin=dict(t=60),
yaxis_title=r"$\widehat{y}$",
height=300).show()
###Output
_____no_output_____
###Markdown
Once we find the right $k$ (which in our case is 4) we managed to fit a perfect model, after which, as we increase $k$, the additional coefficients will be zero.
###Code
coefs = {}
for k in ks:
fit = make_pipeline(PolynomialFeatures(k), LinearRegression(fit_intercept=False)).fit(X, y_)
coefs[rf"$k={{{k}}}$"] = [round(c,3) for c in fit.steps[1][1].coef_]
pd.DataFrame.from_dict(coefs, orient='index', columns=[rf"$w_{{{i}}}$" for i in range(max(ks)+1)])
###Output
_____no_output_____
###Markdown
Fitting Polynomial Of Different Degrees - With Sample NoiseStill fitting for different values of $k$, let us add some standard Gaussian noise (i.e. $\mathcal{N}\left(0,1\right)$).This time we observe two things:- Even for the correct $k=4$ model we are not able to achieve zero MSE.- As we increase $4<k\rightarrow 7$ we manage to decrease the error more and more.
###Code
y = y_ + np.random.normal(size=len(y_))
ks = range(2, 8)
fig = make_subplots(2, 3, subplot_titles=list(ks))
for i, k in enumerate(ks):
r,c = i//3+1, i%3+1
y_hat = make_pipeline(PolynomialFeatures(k), LinearRegression(fit_intercept=False)).fit(X, y).predict(X)
fig.add_traces([go.Scatter(x=x, y=y_, mode="markers", name="Real Points", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y, mode="markers", name="Observed Points", marker=dict(color="red", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y_hat, mode="markers", name="Predicted Points", marker=dict(color="blue", opacity=.7), showlegend=False)], rows=r, cols=c)
fig["layout"]["annotations"][i]["text"] = rf"$k={{0}}, MSE={{1}}$".format(k, round(np.mean((y-y_hat)**2), 2))
fig.update_layout(title=r"$\text{(4) Simulated Data With Noise - Fitting Polynomials of Different Degrees}$", margin=dict(t=80)).show()
###Output
_____no_output_____
###Markdown
How is it that we are able to fit "better" models for $k$s larger than the true one? As we increase $k$ we enable the modelmore "degrees of freedom" to try and adapt itself to the observed data. The higher $k$ the more the learner will "go afterthe noise" and miss the real signal of the data. In other words, what we have just observed is what is known as **overfitting**.Later in the course we will learn methods for detection and avoidance of overfitting. Fitting Polynomial Over Different Sample Noise LevelsNext, let us set $k=4$ (the true values) and study the outputted models when training over different noise levels. Thoughwe will only be changing the scale of the noise (i.e. the variance, $\sigma^2$), changing other properties such as itsdistribution is interesting too. As we would expect, as we increase the scale of the noise our error increases. We canobserve this also in a visual manner, where the fitted polynomial (in blue) less and less resembles the actual model (in black).
###Code
scales = range(6)
fig = make_subplots(2, 3, subplot_titles=list(map(str, scales)))
for i, s in enumerate(scales):
r,c = i//3+1, i%3+1
y = y_ + np.random.normal(scale=s, size=len(y_))
y_hat = make_pipeline(PolynomialFeatures(4), LinearRegression(fit_intercept=False)).fit(X, y).predict(X)
fig.add_traces([go.Scatter(x=x, y=y_, mode="markers", name="Real Points", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y, mode="markers", name="Observed Points", marker=dict(color="red", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y_hat, mode="markers", name="Predicted Points", marker=dict(color="blue", opacity=.7), showlegend=False)], rows=r, cols=c)
fig["layout"]["annotations"][i]["text"] = rf"$\sigma^2={{0}}, MSE={{1}}$".format(s, round(np.mean((y-y_hat)**2), 2))
fig.update_layout(title=r"$\text{(5) Simulated Data - Different Noise Scales}$", margin=dict(t=80)).show()
###Output
_____no_output_____
###Markdown
The Influence Of $k$ And $\sigma^2$ On ErrorLastly, let us check how the error is influenced by both $k$ and $\sigma^2$. For each value of $k$ and $\sigma^2$ we willadd noise drawn from $\mathcal{N}\left(0,\sigma^2\right)$ and then, based on the noisy data, let the learner select anhypothesis from $\mathcal{H}_{poly}^k$. We repeat the process for each set of $\left(k,\sigma^2\right)$ 10 times and reportthe mean MSE value. Results are seen in heatmap below:
###Code
from sklearn.model_selection import ParameterGrid
df = []
for setting in ParameterGrid(dict(k=range(10), s=np.linspace(0, 5, 10), repetition=range(10))):
y = y_ + np.random.normal(scale=setting["s"], size=len(y_))
y_hat = make_pipeline(PolynomialFeatures(setting["k"]), LinearRegression(fit_intercept=False)).fit(X, y).predict(X)
df.append([setting["k"], setting["s"], np.mean((y-y_hat)**2)])
df = pd.DataFrame.from_records(df, columns=["k", "sigma","mse"]).groupby(["k","sigma"]).mean().reset_index()
go.Figure(go.Heatmap(x=df.k, y=df.sigma, z=df.mse, colorscale="amp"),
layout=go.Layout(title=r"$\text{(6) Average Train } MSE \text{ As Function of } \left(k,\sigma^2\right)$",
xaxis_title=r"$k$ - Fitted Polynomial Degree",
yaxis_title=r"$\sigma^2$ - Noise Levels")).show()
###Output
_____no_output_____
###Markdown
Time To Think...In the above figure, we observe the following trends:- As already seen before, for the noise-free data, once we reach the correct $k$ we achieve zero MSE.- Across all values of $k$, as we increase $\sigma^2$ we get higher MSE values.- For all noise levels, we manage to reduce MSE values by increasing $k$.So, by choosing a **richer** hypothesis class (i.e. larger and that can express more functions - polynomials of higherdegree) we are able to choose an hypothesis that fits the **observed** data **better**, regardless to how noisy the data is.Try and think how the above heatmap would look if instead of calculating the MSE over the training samples (i.e train error)we would have calculated it over a **new** set of test samples drawn from the same distribution.Use the below code to create a test set. Change the code generating figure 6 such that the reported error is a test error. Do not forget to add the noise (that depends on $\sigma^2$) to the test data. What has changed between what we observe for the train error to the test error? What happens for high/low values of $\sigma^2$? What happens for high/low values of $k$?
###Code
# Generate the x values of the test set
testX = np.linspace(-1.2, 2, 40)[1::2].reshape(-1,1)
# Generate the noisy y values of the test set. Set the noise level (the scale parameter) according to the specific setting
#testY = response(testX) + np.random.normal(scale=___, size=len(y_))
df = []
for setting in ParameterGrid(dict(k=range(10), s=np.linspace(0, 5, 10), repetition=range(10))):
testY = response(testX) + np.random.normal(scale=setting["s"], size=len(y_))
normal_y = response(X) + np.random.normal(scale=setting["s"], size=len(y_))
y_hat = make_pipeline(PolynomialFeatures(setting["k"]), LinearRegression(fit_intercept=False)).fit(X, normal_y).predict(testX)
df.append([setting["k"], setting["s"], np.mean((testY-y_hat)**2)])
df = pd.DataFrame.from_records(df, columns=["k", "sigma","mse"]).groupby(["k","sigma"]).mean().reset_index()
go.Figure(go.Heatmap(x=df.k, y=df.sigma, z=df.mse, colorscale="amp"),
layout=go.Layout(title=r"$\text{(6) Average Train } MSE \text{ As Function of } \left(k,\sigma^2\right)$",
xaxis_title=r"$k$ - Fitted Polynomial Degree",
yaxis_title=r"$\sigma^2$ - Noise Levels")).show()
###Output
_____no_output_____
###Markdown
Lab 03 - Polynomial FittingIn the previous lab we discussed linear regression and the OLS estimator for solving the minimization of the RSS. As wementioned, regression problems are a very wide family of settings and algorithms which we use to try to estimate the relation between a set of explanatory variables and a **continuous** response (i.e. $\mathcal{Y}\in\mathbb{R}^p$). In the following lab we will discuss one such setting called "Polynomial Fitting". Sometimes, the data (and the relation between the explanatory variables and response) can be described by some polynomialof some degree. Here, we only focus on the case where it is a polynomial of a single variable. That is: $$ p_k\left(x\right)=\sum_{i=0}^{k}\alpha_i x^i\quad\alpha_0,\ldots,\alpha_k\in\mathbb{R} $$So our hypothesis class is of the form:$$ \mathcal{H}^k_{poly}=\left\{p_k\,\Big|\, p_k\left(x\right)=\sum_{i=0}^{k}\alpha_i x^i\quad\alpha_0,\ldots,\alpha_k\in\mathbb{R}\right\} $$Notice that similar to linear regression, each hypothesis in the class is defined by a coefficients vector. Below are twoexamples (simulated and real) for datasets where the relation between the explanatory variable and response is polynomial.
###Code
import sys
sys.path.append("../")
from utils import *
response = lambda x: x**4 - 2*x**3 - .5*x**2 + 1
x = np.linspace(-1.2, 2, 40)[0::2]
y_ = response(x)
df = pd.read_csv("../datasets/Position_Salaries.csv", skiprows=2, index_col=0)
x2, y2 = df.index, df.Salary
make_subplots(1, 2, subplot_titles=(r"$\text{Simulated Data: }y=x^4-2x^3-0.5x^2+1$", r"$\text{Positions Salary}$"))\
.add_traces([go.Scatter(x=x, y=y_, mode="markers", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x2, y=y2, mode="markers",marker=dict(color="black", opacity=.7), showlegend=False)],
rows=[1,1], cols=[1,2])\
.update_layout(title=r"$\text{(1) Datasets For Polynomial Fitting}$", margin=dict(t=100)).show()
###Output
_____no_output_____
###Markdown
As we have discussed in class, solving a polynomial fitting problem can be done by first manipulating the input data,such that we represent each sample $x_i\in\mathbb{R}$ as a vector $\mathbf{x}_i=\left(x^0,x^1,\ldots,x^k\right)$. Then,we treat the data as a design matrix $\mathbf{X}\in\mathbb{R}^{m\times k}$ of a linear regression problem.For the simulated dataset above, which is of a polynomial of degree 4, the design matrix looks as follows:
###Code
from sklearn.preprocessing import PolynomialFeatures
m, k, X = 5, 4, x.reshape(-1, 1)
pd.DataFrame(PolynomialFeatures(k).fit_transform(X[:m]),
columns=[rf"$x^{{0}}$".format(i) for i in range(0, k+1)],
index=[rf"$x_{{0}}$".format(i) for i in range(1, m+1)])
###Output
_____no_output_____
###Markdown
Fitting A Polynomial Of Different DegreesNext, let us fit polynomials of different degrees and different noise properties to study how it influences the learned model.We begin with the noise-less case where we fit for different values of $k$. As we increase $k$ we manage to fit a modelthat describes the data in a better way, reflected by the decrease in the MSE.*Notice that in both the `PolynomialFeatures` and `LinearRegression` functions we can add the bias/intercept parameter. As in this case it makes no difference, we will include the bias in the polynomial features transformation and fit a linear regression **without** an intercept. The bias parameter of the polynomial features (i.e. $x^0$) will in reality be the intercept of the linear regression.*
###Code
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import make_pipeline
ks = [2, 3, 4, 5]
fig = make_subplots(1, 4, subplot_titles=list(ks))
for i, k in enumerate(ks):
y_hat = make_pipeline(PolynomialFeatures(k), LinearRegression(fit_intercept=False)).fit(X, y_).predict(X)
fig.add_traces([go.Scatter(x=x, y=y_, mode="markers", name="Real Points", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y_hat, mode="markers", name="Predicted Points", marker=dict(color="blue", opacity=.7), showlegend=False)], rows=1, cols=i+1)
fig["layout"]["annotations"][i]["text"] = rf"$k={{0}}, MSE={{1}}$".format(k, round(np.mean((y_-y_hat)**2), 2))
fig.update_layout(title=r"$\text{(2) Simulated Data - Fitting Polynomials of Different Degrees}$",
margin=dict(t=60),
yaxis_title=r"$\widehat{y}$",
height=300).show()
###Output
_____no_output_____
###Markdown
Once we find the right $k$ (which in our case is 4) we managed to fit a perfect model, after which, as we increase $k$, the additional coefficients will be zero.
###Code
coefs = {}
for k in ks:
fit = make_pipeline(PolynomialFeatures(k), LinearRegression(fit_intercept=False)).fit(X, y_)
coefs[rf"$k={{{k}}}$"] = [round(c,3) for c in fit.steps[1][1].coef_]
pd.DataFrame.from_dict(coefs, orient='index', columns=[rf"$w_{{{i}}}$" for i in range(max(ks)+1)])
###Output
_____no_output_____
###Markdown
Fitting Polynomial Of Different Degrees - With Sample NoiseStill fitting for different values of $k$, let us add some standard Gaussian noise (i.e. $\mathcal{N}\left(0,1\right)$).This time we observe two things:- Even for the correct $k=4$ model we are not able to achieve zero MSE.- As we increase $4<k\rightarrow 7$ we manage to decrease the error more and more.
###Code
y = y_ + np.random.normal(size=len(y_))
ks = range(2, 8)
fig = make_subplots(2, 3, subplot_titles=list(ks))
for i, k in enumerate(ks):
r,c = i//3+1, i%3+1
y_hat = make_pipeline(PolynomialFeatures(k), LinearRegression(fit_intercept=False)).fit(X, y).predict(X)
fig.add_traces([go.Scatter(x=x, y=y_, mode="markers", name="Real Points", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y, mode="markers", name="Observed Points", marker=dict(color="red", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y_hat, mode="markers", name="Predicted Points", marker=dict(color="blue", opacity=.7), showlegend=False)], rows=r, cols=c)
fig["layout"]["annotations"][i]["text"] = rf"$k={{0}}, MSE={{1}}$".format(k, round(np.mean((y-y_hat)**2), 2))
fig.update_layout(title=r"$\text{(4) Simulated Data With Noise - Fitting Polynomials of Different Degrees}$", margin=dict(t=80)).show()
###Output
_____no_output_____
###Markdown
How is it that we are able to fit "better" models for $k$s larger than the true one? As we increase $k$ we enable the modelmore "degrees of freedom" to try and adapt itself to the observed data. The higher $k$ the more the learner will "go afterthe noise" and miss the real signal of the data. In other words, what we have just observed is what is known as **overfitting**.Later in the course we will learn methods for detection and avoidance of overfitting. Fitting Polynomial Over Different Sample Noise LevelsNext, let us set $k=4$ (the true values) and study the outputted models when training over different noise levels. Thoughwe will only be changing the scale of the noise (i.e. the variance, $\sigma^2$), changing other properties such as itsdistribution is interesting too. As we would expect, as we increase the scale of the noise our error increases. We canobserve this also in a visual manner, where the fitted polynomial (in blue) less and less resembles the actual model (in black).
###Code
scales = range(6)
fig = make_subplots(2, 3, subplot_titles=list(map(str, scales)))
for i, s in enumerate(scales):
r,c = i//3+1, i%3+1
y = y_ + np.random.normal(scale=s, size=len(y_))
y_hat = make_pipeline(PolynomialFeatures(4), LinearRegression(fit_intercept=False)).fit(X, y).predict(X)
fig.add_traces([go.Scatter(x=x, y=y_, mode="markers", name="Real Points", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y, mode="markers", name="Observed Points", marker=dict(color="red", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y_hat, mode="markers", name="Predicted Points", marker=dict(color="blue", opacity=.7), showlegend=False)], rows=r, cols=c)
fig["layout"]["annotations"][i]["text"] = rf"$\sigma^2={{0}}, MSE={{1}}$".format(s, round(np.mean((y-y_hat)**2), 2))
fig.update_layout(title=r"$\text{(5) Simulated Data - Different Noise Scales}$", margin=dict(t=80)).show()
###Output
_____no_output_____
###Markdown
The Influence Of $k$ And $\sigma^2$ On ErrorLastly, let us check how the error is influenced by both $k$ and $\sigma^2$. For each value of $k$ and $\sigma^2$ we willadd noise drawn from $\mathcal{N}\left(0,\sigma^2\right)$ and then, based on the noisy data, let the learner select anhypothesis from $\mathcal{H}_{poly}^k$. We repeat the process for each set of $\left(k,\sigma^2\right)$ 10 times and reportthe mean MSE value. Results are seen in heatmap below:
###Code
from sklearn.model_selection import ParameterGrid
df = []
for setting in ParameterGrid(dict(k=range(10), s=np.linspace(0, 5, 10), repetition=range(10))):
y = y_ + np.random.normal(scale=setting["s"], size=len(y_))
y_hat = make_pipeline(PolynomialFeatures(setting["k"]), LinearRegression(fit_intercept=False)).fit(X, y).predict(X)
df.append([setting["k"], setting["s"], np.mean((y-y_hat)**2)])
df = pd.DataFrame.from_records(df, columns=["k", "sigma","mse"]).groupby(["k","sigma"]).mean().reset_index()
go.Figure(go.Heatmap(x=df.k, y=df.sigma, z=df.mse, colorscale="amp"),
layout=go.Layout(title=r"$\text{(6) Average Train } MSE \text{ As Function of } \left(k,\sigma^2\right)$",
xaxis_title=r"$k$ - Fitted Polynomial Degree",
yaxis_title=r"$\sigma^2$ - Noise Levels")).show()
###Output
_____no_output_____
###Markdown
Time To Think...In the above figure, we observe the following trends:- As already seen before, for the noise-free data, once we reach the correct $k$ we achieve zero MSE.- Across all values of $k$, as we increase $\sigma^2$ we get higher MSE values.- For all noise levels, we manage to reduce MSE values by increasing $k$.So, by choosing a **richer** hypothesis class (i.e. larger and that can express more functions - polynomials of higherdegree) we are able to choose an hypothesis that fits the **observed** data **better**, regardless to how noisy the data is.Try and think how the above heatmap would look if instead of calculating the MSE over the training samples (i.e train error)we would have calculated it over a **new** set of test samples drawn from the same distribution.Use the below code to create a test set. Change the code generating figure 6 such that the reported error is a test error. Do not forget to add the noise (that depends on $\sigma^2$) to the test data. What has changed between what we observe for the train error to the test error? What happens for high/low values of $\sigma^2$? What happens for high/low values of $k$?
###Code
# Generate the x values of the test set
testX = np.linspace(-1.2, 2, 40)[1::2].reshape(-1,1)
# Generate the noisy y values of the test set. Set the noise level (the scale parameter) according to the specific setting
testY = response(testX) + np.random.normal(scale=___, size=len(y_))
###Output
_____no_output_____
###Markdown
Lab 03 - Polynomial FittingIn the previous lab we discussed linear regression and the OLS estimator for solving the minimization of the RSS. As wementioned, regression problems are a very wide family of settings and algorithms which we use to try to estimate the relation between a set of explanatory variables and a **continuous** response (i.e. $\mathcal{Y}\in\mathbb{R}^p$). In the following lab we will discuss one such setting called "Polynomial Fitting". Sometimes, the data (and the relation between the explanatory variables and response) can be described by some polynomialof some degree. Here, we only focus on the case where it is a polynomial of a single variable. That is: $$ p_k\left(x\right)=\sum_{i=0}^{k}\alpha_i x^i\quad\alpha_0,\ldots,\alpha_k\in\mathbb{R} $$So our hypothesis class is of the form:$$ \mathcal{H}^k_{poly}=\left\{p_k\,\Big|\, p_k\left(x\right)=\sum_{i=0}^{k}\alpha_i x^i\quad\alpha_0,\ldots,\alpha_k\in\mathbb{R}\right\} $$Notice that similar to linear regression, each hypothesis in the class is defined by a coefficients vector. Below are twoexamples (simulated and real) for datasets where the relation between the explanatory variable and response is polynomial.
###Code
import sys
sys.path.append("../")
from utils import *
response = lambda x: x**4 - 2*x**3 - .5*x**2 + 1
x = np.linspace(-1.2, 2, 40)[0::2]
y_ = response(x)
df = pd.read_csv("../datasets/Position_Salaries.csv", skiprows=2, index_col=0)
x2, y2 = df.index, df.Salary
make_subplots(1, 2, subplot_titles=(r"$\text{Simulated Data: }y=x^4-2x^3-0.5x^2+1$", r"$\text{Positions Salary}$"))\
.add_traces([go.Scatter(x=x, y=y_, mode="markers", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x2, y=y2, mode="markers",marker=dict(color="black", opacity=.7), showlegend=False)],
rows=[1,1], cols=[1,2])\
.update_layout(title=r"$\text{(1) Datasets For Polynomial Fitting}$", margin=dict(t=100)).show()
###Output
_____no_output_____
###Markdown
As we have discussed in class, solving a polynomial fitting problem can be done by first manipulating the input data,such that we represent each sample $x_i\in\mathbb{R}$ as a vector $\mathbf{x}_i=\left(x^0,x^1,\ldots,x^k\right)$. Then,we treat the data as a design matrix $\mathbf{X}\in\mathbb{R}^{m\times k}$ of a linear regression problem.For the simulated dataset above, which is of a polynomial of degree 4, the design matrix looks as follows:
###Code
from sklearn.preprocessing import PolynomialFeatures
m, k, X = 5, 4, x.reshape(-1, 1)
pd.DataFrame(PolynomialFeatures(k).fit_transform(X[:m]),
columns=[rf"$x^{{0}}$".format(i) for i in range(0, k+1)],
index=[rf"$x_{{0}}$".format(i) for i in range(1, m+1)])
###Output
_____no_output_____
###Markdown
Fitting A Polynomial Of Different DegreesNext, let us fit polynomials of different degrees and different noise properties to study how it influences the learned model.We begin with the noise-less case where we fit for different values of $k$. As we increase $k$ we manage to fit a modelthat describes the data in a better way, reflected by the decrease in the MSE.*Notice that in both the `PolynomialFeatures` and `LinearRegression` functions we can add the bias/intercept parameter. As in this case it makes no difference, we will include the bias in the polynomial features transformation and fit a linear regression **without** an intercept. The bias parameter of the polynomial features (i.e. $x^0$) will in reality be the intercept of the linear regression.*
###Code
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import make_pipeline
ks = [2, 3, 4, 5]
fig = make_subplots(1, 4, subplot_titles=list(ks))
for i, k in enumerate(ks):
y_hat = make_pipeline(PolynomialFeatures(k), LinearRegression(fit_intercept=False)).fit(X, y_).predict(X)
fig.add_traces([go.Scatter(x=x, y=y_, mode="markers", name="Real Points", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y_hat, mode="markers", name="Predicted Points", marker=dict(color="blue", opacity=.7), showlegend=False)], rows=1, cols=i+1)
fig["layout"]["annotations"][i]["text"] = rf"$k={{0}}, MSE={{1}}$".format(k, round(np.mean((y_-y_hat)**2), 2))
fig.update_layout(title=r"$\text{(2) Simulated Data - Fitting Polynomials of Different Degrees}$",
margin=dict(t=60),
yaxis_title=r"$\widehat{y}$",
height=300).show()
###Output
_____no_output_____
###Markdown
Once we find the right $k$ (which in our case is 4) we managed to fit a perfect model, after which, as we increase $k$, the additional coefficients will be zero.
###Code
coefs = {}
for k in ks:
fit = make_pipeline(PolynomialFeatures(k), LinearRegression(fit_intercept=False)).fit(X, y_)
coefs[rf"$k={{{k}}}$"] = [round(c,3) for c in fit.steps[1][1].coef_]
pd.DataFrame.from_dict(coefs, orient='index', columns=[rf"$w_{{{i}}}$" for i in range(max(ks)+1)])
###Output
_____no_output_____
###Markdown
Fitting Polynomial Of Different Degrees - With Sample NoiseStill fitting for different values of $k$, let us add some standard Gaussian noise (i.e. $\mathcal{N}\left(0,1\right)$).This time we observe two things:- Even for the correct $k=4$ model we are not able to achieve zero MSE.- As we increase $4<k\rightarrow 7$ we manage to decrease the error more and more.
###Code
y = y_ + np.random.normal(size=len(y_))
ks = range(2, 8)
fig = make_subplots(2, 3, subplot_titles=list(ks))
for i, k in enumerate(ks):
r,c = i//3+1, i%3+1
y_hat = make_pipeline(PolynomialFeatures(k), LinearRegression(fit_intercept=False)).fit(X, y).predict(X)
fig.add_traces([go.Scatter(x=x, y=y_, mode="markers", name="Real Points", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y, mode="markers", name="Observed Points", marker=dict(color="red", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y_hat, mode="markers", name="Predicted Points", marker=dict(color="blue", opacity=.7), showlegend=False)], rows=r, cols=c)
fig["layout"]["annotations"][i]["text"] = rf"$k={{0}}, MSE={{1}}$".format(k, round(np.mean((y-y_hat)**2), 2))
fig.update_layout(title=r"$\text{(4) Simulated Data With Noise - Fitting Polynomials of Different Degrees}$", margin=dict(t=80)).show()
###Output
_____no_output_____
###Markdown
How is it that we are able to fit "better" models for $k$s larger than the true one? As we increase $k$ we enable the modelmore "degrees of freedom" to try and adapt itself to the observed data. The higher $k$ the more the learner will "go afterthe noise" and miss the real signal of the data. In other words, what we have just observed is what is known as **overfitting**.Later in the course we will learn methods for detection and avoidance of overfitting. Fitting Polynomial Over Different Sample Noise LevelsNext, let us set $k=4$ (the true values) and study the outputted models when training over different noise levels. Thoughwe will only be changing the scale of the noise (i.e. the variance, $\sigma^2$), changing other properties such as itsdistribution is interesting too. As we would expect, as we increase the scale of the noise our error increases. We canobserve this also in a visual manner, where the fitted polynomial (in blue) less and less resembles the actual model (in black).
###Code
scales = range(6)
fig = make_subplots(2, 3, subplot_titles=list(map(str, scales)))
for i, s in enumerate(scales):
r,c = i//3+1, i%3+1
y = y_ + np.random.normal(scale=s, size=len(y_))
y_hat = make_pipeline(PolynomialFeatures(4), LinearRegression(fit_intercept=False)).fit(X, y).predict(X)
fig.add_traces([go.Scatter(x=x, y=y_, mode="markers", name="Real Points", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y, mode="markers", name="Observed Points", marker=dict(color="red", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y_hat, mode="markers", name="Predicted Points", marker=dict(color="blue", opacity=.7), showlegend=False)], rows=r, cols=c)
fig["layout"]["annotations"][i]["text"] = rf"$\sigma^2={{0}}, MSE={{1}}$".format(s, round(np.mean((y-y_hat)**2), 2))
fig.update_layout(title=r"$\text{(5) Simulated Data - Different Noise Scales}$", margin=dict(t=80)).show()
###Output
_____no_output_____
###Markdown
The Influence Of $k$ And $\sigma^2$ On ErrorLastly, let us check how the error is influenced by both $k$ and $\sigma^2$. For each value of $k$ and $\sigma^2$ we willadd noise drawn from $\mathcal{N}\left(0,\sigma^2\right)$ and then, based on the noisy data, let the learner select anhypothesis from $\mathcal{H}_{poly}^k$. We repeat the process for each set of $\left(k,\sigma^2\right)$ 10 times and reportthe mean MSE value. Results are seen in heatmap below:
###Code
from sklearn.model_selection import ParameterGrid
df = []
for setting in ParameterGrid(dict(k=range(10), s=np.linspace(0, 5, 10), repetition=range(10))):
y = y_ + np.random.normal(scale=setting["s"], size=len(y_))
y_hat = make_pipeline(PolynomialFeatures(setting["k"]), LinearRegression(fit_intercept=False)).fit(X, y).predict(X)
df.append([setting["k"], setting["s"], np.mean((y-y_hat)**2)])
df = pd.DataFrame.from_records(df, columns=["k", "sigma","mse"]).groupby(["k","sigma"]).mean().reset_index()
go.Figure(go.Heatmap(x=df.k, y=df.sigma, z=df.mse, colorscale="amp"),
layout=go.Layout(title=r"$\text{(6) Average Train } MSE \text{ As Function of } \left(k,\sigma^2\right)$",
xaxis_title=r"$k$ - Fitted Polynomial Degree",
yaxis_title=r"$\sigma^2$ - Noise Levels")).show()
###Output
_____no_output_____
###Markdown
Time To Think...In the above figure, we observe the following trends:- As already seen before, for the noise-free data, once we reach the correct $k$ we achieve zero MSE.- Across all values of $k$, as we increase $\sigma^2$ we get higher MSE values.- For all noise levels, we manage to reduce MSE values by increasing $k$.So, by choosing a **richer** hypothesis class (i.e. larger and that can express more functions - polynomials of higherdegree) we are able to choose an hypothesis that fits the **observed** data **better**, regardless to how noisy the data is.Try and think how the above heatmap would look if instead of calculating the MSE over the training samples (i.e train error)we would have calculated it over a **new** set of test samples drawn from the same distribution.Use the below code to create a test set. Change the code generating figure 6 such that the reported error is a test error. Do not forget to add the noise (that depends on $\sigma^2$) to the test data. What has changed between what we observe for the train error to the test error? What happens for high/low values of $\sigma^2$? What happens for high/low values of $k$?
###Code
# Generate the x values of the test set
testX = np.linspace(-1.2, 2, 40)[1::2].reshape(-1,1)
# Generate the noisy y values of the test set. Set the noise level (the scale parameter) according to the specific setting
testY = response(testX) + np.random.normal(scale=___, size=len(y_))
###Output
_____no_output_____
###Markdown
Lab 03 - Polynomial FittingIn the previous lab we discussed linear regression and the OLS estimator for solving the minimization of the RSS. As wementioned, regression problems are a very wide family of settings and algorithms which we use to try to estimate the relation between a set of explanatory variables and a **continuous** response (i.e. $\mathcal{Y}\in\mathbb{R}^p$). In the following lab we will discuss one such setting called "Polynomial Fitting". Sometimes, the data (and the relation between the explanatory variables and response) can be described by some polynomialof some degree. Here, we only focus on the case where it is a polynomial of a single variable. That is: $$ p_k\left(x\right)=\sum_{i=0}^{k}\alpha_i x^i\quad\alpha_0,\ldots,\alpha_k\in\mathbb{R} $$So our hypothesis class is of the form:$$ \mathcal{H}^k_{poly}=\left\{p_k\,\Big|\, p_k\left(x\right)=\sum_{i=0}^{k}\alpha_i x^i\quad\alpha_0,\ldots,\alpha_k\in\mathbb{R}\right\} $$Notice that similar to linear regression, each hypothesis in the class is defined by a coefficients vector. Below are twoexamples (simulated and real) for datasets where the relation between the explanatory variable and response is polynomial.
###Code
import sys
sys.path.append("../")
from utils import *
response = lambda x: x**4 - 2*x**3 - .5*x**2 + 1
x = np.linspace(-1.2, 2, 40)[0::2]
y_ = response(x)
df = pd.read_csv("../datasets/Position_Salaries.csv", skiprows=2, index_col=0)
x2, y2 = df.index, df.Salary
make_subplots(1, 2, subplot_titles=(r"$\text{Simulated Data: }y=x^4-2x^3-0.5x^2+1$", r"$\text{Positions Salary}$"))\
.add_traces([go.Scatter(x=x, y=y_, mode="markers", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x2, y=y2, mode="markers",marker=dict(color="black", opacity=.7), showlegend=False)],
rows=[1,1], cols=[1,2])\
.update_layout(title=r"$\text{(1) Datasets For Polynomial Fitting}$", margin=dict(t=100)).show()
###Output
_____no_output_____
###Markdown
As we have discussed in class, solving a polynomial fitting problem can be done by first manipulating the input data,such that we represent each sample $x_i\in\mathbb{R}$ as a vector $\mathbf{x}_i=\left(x^0,x^1,\ldots,x^k\right)$. Then,we treat the data as a design matrix $\mathbf{X}\in\mathbb{R}^{m\times k}$ of a linear regression problem.For the simulated dataset above, which is of a polynomial of degree 4, the design matrix looks as follows:
###Code
from sklearn.preprocessing import PolynomialFeatures
m, k, X = 5, 4, x.reshape(-1, 1)
pd.DataFrame(PolynomialFeatures(k).fit_transform(X[:m]),
columns=[rf"$x^{{0}}$".format(i) for i in range(0, k+1)],
index=[rf"$x_{{0}}$".format(i) for i in range(1, m+1)])
###Output
_____no_output_____
###Markdown
Fitting A Polynomial Of Different DegreesNext, let us fit polynomials of different degrees and different noise properties to study how it influences the learned model.We begin with the noise-less case where we fit for different values of $k$. As we increase $k$ we manage to fit a modelthat describes the data in a better way, reflected by the decrease in the MSE.*Notice that in both the `PolynomialFeatures` and `LinearRegression` functions we can add the bias/intercept parameter. As in this case it makes no difference, we will include the bias in the polynomial features transformation and fit a linear regression **without** an intercept. The bias parameter of the polynomial features (i.e. $x^0$) will in reality be the intercept of the linear regression.*
###Code
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import make_pipeline
ks = [2, 3, 4, 5]
fig = make_subplots(1, 4, subplot_titles=list(ks))
for i, k in enumerate(ks):
y_hat = make_pipeline(PolynomialFeatures(k), LinearRegression(fit_intercept=False)).fit(X, y_).predict(X)
fig.add_traces([go.Scatter(x=x, y=y_, mode="markers", name="Real Points", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y_hat, mode="markers", name="Predicted Points", marker=dict(color="blue", opacity=.7), showlegend=False)], rows=1, cols=i+1)
fig["layout"]["annotations"][i]["text"] = rf"$k={{0}}, MSE={{1}}$".format(k, round(np.mean((y_-y_hat)**2), 2))
fig.update_layout(title=r"$\text{(2) Simulated Data - Fitting Polynomials of Different Degrees}$",
margin=dict(t=60),
yaxis_title=r"$\widehat{y}$",
height=300).show()
###Output
_____no_output_____
###Markdown
Once we find the right $k$ (which in our case is 4) we managed to fit a perfect model, after which, as we increase $k$, the additional coefficients will be zero.
###Code
coefs = {}
for k in ks:
fit = make_pipeline(PolynomialFeatures(k), LinearRegression(fit_intercept=False)).fit(X, y_)
coefs[rf"$k={{{k}}}$"] = [round(c,3) for c in fit.steps[1][1].coef_]
pd.DataFrame.from_dict(coefs, orient='index', columns=[rf"$w_{{{i}}}$" for i in range(max(ks)+1)])
###Output
_____no_output_____
###Markdown
Fitting Polynomial Of Different Degrees - With Sample NoiseStill fitting for different values of $k$, let us add some standard Gaussian noise (i.e. $\mathcal{N}\left(0,1\right)$).This time we observe two things:- Even for the correct $k=4$ model we are not able to achieve zero MSE.- As we increase $4<k\rightarrow 7$ we manage to decrease the error more and more.
###Code
y = y_ + np.random.normal(size=len(y_))
ks = range(2, 8)
fig = make_subplots(2, 3, subplot_titles=list(ks))
for i, k in enumerate(ks):
r,c = i//3+1, i%3+1
y_hat = make_pipeline(PolynomialFeatures(k), LinearRegression(fit_intercept=False)).fit(X, y).predict(X)
fig.add_traces([go.Scatter(x=x, y=y_, mode="markers", name="Real Points", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y, mode="markers", name="Observed Points", marker=dict(color="red", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y_hat, mode="markers", name="Predicted Points", marker=dict(color="blue", opacity=.7), showlegend=False)], rows=r, cols=c)
fig["layout"]["annotations"][i]["text"] = rf"$k={{0}}, MSE={{1}}$".format(k, round(np.mean((y-y_hat)**2), 2))
fig.update_layout(title=r"$\text{(4) Simulated Data With Noise - Fitting Polynomials of Different Degrees}$", margin=dict(t=80)).show()
###Output
_____no_output_____
###Markdown
How is it that we are able to fit "better" models for $k$s larger than the true one? As we increase $k$ we enable the modelmore "degrees of freedom" to try and adapt itself to the observed data. The higher $k$ the more the learner will "go afterthe noise" and miss the real signal of the data. In other words, what we have just observed is what is known as **overfitting**.Later in the course we will learn methods for detection and avoidance of overfitting. Fitting Polynomial Over Different Sample Noise LevelsNext, let us set $k=4$ (the true values) and study the outputted models when training over different noise levels. Thoughwe will only be changing the scale of the noise (i.e. the variance, $\sigma^2$), changing other properties such as itsdistribution is interesting too. As we would expect, as we increase the scale of the noise our error increases. We canobserve this also in a visual manner, where the fitted polynomial (in blue) less and less resembles the actual model (in black).
###Code
scales = range(6)
fig = make_subplots(2, 3, subplot_titles=list(map(str, scales)))
for i, s in enumerate(scales):
r,c = i//3+1, i%3+1
y = y_ + np.random.normal(scale=s, size=len(y_))
y_hat = make_pipeline(PolynomialFeatures(4), LinearRegression(fit_intercept=False)).fit(X, y).predict(X)
fig.add_traces([go.Scatter(x=x, y=y_, mode="markers", name="Real Points", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y, mode="markers", name="Observed Points", marker=dict(color="red", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y_hat, mode="markers", name="Predicted Points", marker=dict(color="blue", opacity=.7), showlegend=False)], rows=r, cols=c)
fig["layout"]["annotations"][i]["text"] = rf"$\sigma^2={{0}}, MSE={{1}}$".format(s, round(np.mean((y-y_hat)**2), 2))
fig.update_layout(title=r"$\text{(5) Simulated Data - Different Noise Scales}$", margin=dict(t=80)).show()
###Output
_____no_output_____
###Markdown
The Influence Of $k$ And $\sigma^2$ On Error. Lastly, let us check how the error is influenced by both $k$ and $\sigma^2$. For each value of $k$ and $\sigma^2$ we will add noise drawn from $\mathcal{N}\left(0,\sigma^2\right)$ and then, based on the noisy data, let the learner select an hypothesis from $\mathcal{H}_{poly}^k$. We repeat the process 10 times for each $\left(k,\sigma^2\right)$ pair and report the mean MSE value. Results are shown in the heatmap below:
###Code
from sklearn.model_selection import ParameterGrid
df = []
for setting in ParameterGrid(dict(k=range(10), s=np.linspace(0, 5, 10), repetition=range(10))):
y = y_ + np.random.normal(scale=setting["s"], size=len(y_))
y_hat = make_pipeline(PolynomialFeatures(setting["k"]), LinearRegression(fit_intercept=False)).fit(X, y).predict(X)
df.append([setting["k"], setting["s"], np.mean((y-y_hat)**2)])
df = pd.DataFrame.from_records(df, columns=["k", "sigma","mse"]).groupby(["k","sigma"]).mean().reset_index()
go.Figure(go.Heatmap(x=df.k, y=df.sigma, z=df.mse, colorscale="amp"),
layout=go.Layout(title=r"$\text{(6) Average Train } MSE \text{ As Function of } \left(k,\sigma^2\right)$",
xaxis_title=r"$k$ - Fitted Polynomial Degree",
yaxis_title=r"$\sigma^2$ - Noise Levels")).show()
###Output
_____no_output_____
###Markdown
Time To Think... In the above figure, we observe the following trends:- As already seen before, for the noise-free data, once we reach the correct $k$ we achieve zero MSE.- Across all values of $k$, as we increase $\sigma^2$ we get higher MSE values.- For all noise levels, we manage to reduce MSE values by increasing $k$. So, by choosing a **richer** hypothesis class (i.e. a larger one that can express more functions - polynomials of higher degree) we are able to choose an hypothesis that fits the **observed** data **better**, regardless of how noisy the data is. Try to think how the above heatmap would look if, instead of calculating the MSE over the training samples (i.e. the train error), we calculated it over a **new** set of test samples drawn from the same distribution. Use the code below to create a test set, and change the code generating figure 6 such that the reported error is a test error (one possible solution follows the stub). Do not forget to add the noise (that depends on $\sigma^2$) to the test data. What changes between the train error and the test error? What happens for high/low values of $\sigma^2$? What happens for high/low values of $k$?
###Code
# Generate the x values of the test set
testX = np.linspace(-1.2, 2, 40)[1::2].reshape(-1,1)
# Generate the noisy y values of the test set. Set the noise level (the scale parameter) according to the specific setting
testY = response(testX).ravel() + np.random.normal(scale=___, size=len(testX))
###Output
_____no_output_____
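###Markdown
One possible solution is sketched below (other valid variants exist): we keep the training procedure of figure 6, but for each setting we also generate noisy test responses and evaluate the MSE of the fitted model on them. Since `response(testX)` returns a column vector, it is flattened before the noise vector is added.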
###Code
# Generate the x values of the test set
testX = np.linspace(-1.2, 2, 40)[1::2].reshape(-1, 1)
# Generate the noisy y values of the test set inside the loop, setting the noise level
# (the `scale` parameter) according to the specific setting
df2 = []
for setting in ParameterGrid(dict(k=range(10), s=np.linspace(0, 5, 10), repetition=range(10))):
    # response(testX) is a column vector, so flatten it before adding the noise vector
    testY = response(testX).ravel() + np.random.normal(scale=setting["s"], size=len(testX))
    y = y_ + np.random.normal(scale=setting["s"], size=len(y_))
    y_hat = make_pipeline(PolynomialFeatures(setting["k"]), LinearRegression(fit_intercept=False)).fit(X, y).predict(testX)
    df2.append([setting["k"], setting["s"], np.mean((testY - y_hat)**2)])
df2 = pd.DataFrame.from_records(df2, columns=["k", "sigma", "mse"]).groupby(["k", "sigma"]).mean().reset_index()
go.Figure(go.Heatmap(x=df2.k, y=df2.sigma, z=df2.mse, colorscale="amp"),
          layout=go.Layout(title=r"$\text{(7) Average Test } MSE \text{ As Function of } \left(k,\sigma^2\right)$",
                           xaxis_title=r"$k$ - Fitted Polynomial Degree",
                           yaxis_title=r"$\sigma^2$ - Noise Levels")).show()
###Output
_____no_output_____
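###Markdown
A qualitative sketch of what we would expect to see (try it yourself): unlike the train error, the test error is no longer monotonically decreasing in $k$. For the noise-free data it still drops to (near) zero at the true $k=4$, but for $\sigma^2>0$ the test MSE typically follows a U-shape in $k$ - small $k$ underfits, while large $k$ overfits the training noise and generalizes poorly - and the entire surface shifts upwards as $\sigma^2$ grows, since the irreducible noise lower-bounds the achievable test error.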
###Markdown
Lab 03 - Polynomial FittingIn the previous lab we discussed linear regression and the OLS estimator for solving the minimization of the RSS. As wementioned, regression problems are a very wide family of settings and algorithms which we use to try to estimate the relation between a set of explanatory variables and a **continuous** response (i.e. $\mathcal{Y}\in\mathbb{R}^p$). In the following lab we will discuss one such setting called "Polynomial Fitting". Sometimes, the data (and the relation between the explanatory variables and response) can be described by some polynomialof some degree. Here, we only focus on the case where it is a polynomial of a single variable. That is: $$ p_k\left(x\right)=\sum_{i=0}^{k}\alpha_i x^i\quad\alpha_0,\ldots,\alpha_k\in\mathbb{R} $$So our hypothesis class is of the form:$$ \mathcal{H}^k_{poly}=\left\{p_k\,\Big|\, p_k\left(x\right)=\sum_{i=0}^{k}\alpha_i x^i\quad\alpha_0,\ldots,\alpha_k\in\mathbb{R}\right\} $$Notice that similar to linear regression, each hypothesis in the class is defined by a coefficients vector. Below are twoexamples (simulated and real) for datasets where the relation between the explanatory variable and response is polynomial.
###Code
import sys
sys.path.append("../")
from utils import *
response = lambda x: x**4 - 2*x**3 - .5*x**2 + 1
x = np.linspace(-1.2, 2, 40)[0::2]
y_ = response(x)
df = pd.read_csv("../datasets/Position_Salaries.csv", skiprows=2, index_col=0)
x2, y2 = df.index, df.Salary
make_subplots(1, 2, subplot_titles=(r"$\text{Simulated Data: }y=x^4-2x^3-0.5x^2+1$", r"$\text{Positions Salary}$"))\
.add_traces([go.Scatter(x=x, y=y_, mode="markers", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x2, y=y2, mode="markers",marker=dict(color="black", opacity=.7), showlegend=False)],
rows=[1,1], cols=[1,2])\
.update_layout(title=r"$\text{(1) Datasets For Polynomial Fitting}$", margin=dict(t=100)).show()
###Output
_____no_output_____
###Markdown
As we have discussed in class, solving a polynomial fitting problem can be done by first manipulating the input data,such that we represent each sample $x_i\in\mathbb{R}$ as a vector $\mathbf{x}_i=\left(x^0,x^1,\ldots,x^k\right)$. Then,we treat the data as a design matrix $\mathbf{X}\in\mathbb{R}^{m\times k}$ of a linear regression problem.For the simulated dataset above, which is of a polynomial of degree 4, the design matrix looks as follows:
###Code
from sklearn.preprocessing import PolynomialFeatures
m, k, X = 5, 4, x.reshape(-1, 1)
pd.DataFrame(PolynomialFeatures(k).fit_transform(X[:m]),
columns=[rf"$x^{{0}}$".format(i) for i in range(0, k+1)],
index=[rf"$x_{{0}}$".format(i) for i in range(1, m+1)])
###Output
_____no_output_____
###Markdown
Fitting A Polynomial Of Different DegreesNext, let us fit polynomials of different degrees and different noise properties to study how it influences the learned model.We begin with the noise-less case where we fit for different values of $k$. As we increase $k$ we manage to fit a modelthat describes the data in a better way, reflected by the decrease in the MSE.*Notice that in both the `PolynomialFeatures` and `LinearRegression` functions we can add the bias/intercept parameter. As in this case it makes no difference, we will include the bias in the polynomial features transformation and fit a linear regression **without** an intercept. The bias parameter of the polynomial features (i.e. $x^0$) will in reality be the intercept of the linear regression.*
###Code
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import make_pipeline
ks = [2, 3, 4, 5]
fig = make_subplots(1, 4, subplot_titles=list(ks))
for i, k in enumerate(ks):
y_hat = make_pipeline(PolynomialFeatures(k), LinearRegression(fit_intercept=False)).fit(X, y_).predict(X)
fig.add_traces([go.Scatter(x=x, y=y_, mode="markers", name="Real Points", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y_hat, mode="markers", name="Predicted Points", marker=dict(color="blue", opacity=.7), showlegend=False)], rows=1, cols=i+1)
fig["layout"]["annotations"][i]["text"] = rf"$k={{0}}, MSE={{1}}$".format(k, round(np.mean((y_-y_hat)**2), 2))
fig.update_layout(title=r"$\text{(2) Simulated Data - Fitting Polynomials of Different Degrees}$",
margin=dict(t=60),
yaxis_title=r"$\widehat{y}$",
height=300).show()
###Output
_____no_output_____
###Markdown
Once we find the right $k$ (which in our case is 4) we managed to fit a perfect model, after which, as we increase $k$, the additional coefficients will be zero.
###Code
coefs = {}
for k in ks:
fit = make_pipeline(PolynomialFeatures(k), LinearRegression(fit_intercept=False)).fit(X, y_)
coefs[rf"$k={{{k}}}$"] = [round(c,3) for c in fit.steps[1][1].coef_]
pd.DataFrame.from_dict(coefs, orient='index', columns=[rf"$w_{{{i}}}$" for i in range(max(ks)+1)])
###Output
_____no_output_____
###Markdown
Fitting Polynomial Of Different Degrees - With Sample NoiseStill fitting for different values of $k$, let us add some standard Gaussian noise (i.e. $\mathcal{N}\left(0,1\right)$).This time we observe two things:- Even for the correct $k=4$ model we are not able to achieve zero MSE.- As we increase $4<k\rightarrow 7$ we manage to decrease the error more and more.
###Code
y = y_ + np.random.normal(size=len(y_))
ks = range(2, 8)
fig = make_subplots(2, 3, subplot_titles=list(ks))
for i, k in enumerate(ks):
r,c = i//3+1, i%3+1
y_hat = make_pipeline(PolynomialFeatures(k), LinearRegression(fit_intercept=False)).fit(X, y).predict(X)
fig.add_traces([go.Scatter(x=x, y=y_, mode="markers", name="Real Points", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y, mode="markers", name="Observed Points", marker=dict(color="red", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y_hat, mode="markers", name="Predicted Points", marker=dict(color="blue", opacity=.7), showlegend=False)], rows=r, cols=c)
fig["layout"]["annotations"][i]["text"] = rf"$k={{0}}, MSE={{1}}$".format(k, round(np.mean((y-y_hat)**2), 2))
fig.update_layout(title=r"$\text{(4) Simulated Data With Noise - Fitting Polynomials of Different Degrees}$", margin=dict(t=80)).show()
###Output
_____no_output_____
###Markdown
How is it that we are able to fit "better" models for $k$s larger than the true one? As we increase $k$ we enable the modelmore "degrees of freedom" to try and adapt itself to the observed data. The higher $k$ the more the learner will "go afterthe noise" and miss the real signal of the data. In other words, what we have just observed is what is known as **overfitting**.Later in the course we will learn methods for detection and avoidance of overfitting. Fitting Polynomial Over Different Sample Noise LevelsNext, let us set $k=4$ (the true values) and study the outputted models when training over different noise levels. Thoughwe will only be changing the scale of the noise (i.e. the variance, $\sigma^2$), changing other properties such as itsdistribution is interesting too. As we would expect, as we increase the scale of the noise our error increases. We canobserve this also in a visual manner, where the fitted polynomial (in blue) less and less resembles the actual model (in black).
###Code
scales = range(6)
fig = make_subplots(2, 3, subplot_titles=list(map(str, scales)))
for i, s in enumerate(scales):
r,c = i//3+1, i%3+1
y = y_ + np.random.normal(scale=s, size=len(y_))
y_hat = make_pipeline(PolynomialFeatures(4), LinearRegression(fit_intercept=False)).fit(X, y).predict(X)
fig.add_traces([go.Scatter(x=x, y=y_, mode="markers", name="Real Points", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y, mode="markers", name="Observed Points", marker=dict(color="red", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y_hat, mode="markers", name="Predicted Points", marker=dict(color="blue", opacity=.7), showlegend=False)], rows=r, cols=c)
fig["layout"]["annotations"][i]["text"] = rf"$\sigma^2={{0}}, MSE={{1}}$".format(s, round(np.mean((y-y_hat)**2), 2))
fig.update_layout(title=r"$\text{(5) Simulated Data - Different Noise Scales}$", margin=dict(t=80)).show()
###Output
_____no_output_____
###Markdown
The Influence Of $k$ And $\sigma^2$ On ErrorLastly, let us check how the error is influenced by both $k$ and $\sigma^2$. For each value of $k$ and $\sigma^2$ we willadd noise drawn from $\mathcal{N}\left(0,\sigma^2\right)$ and then, based on the noisy data, let the learner select anhypothesis from $\mathcal{H}_{poly}^k$. We repeat the process for each set of $\left(k,\sigma^2\right)$ 10 times and reportthe mean MSE value. Results are seen in heatmap below:
###Code
from sklearn.model_selection import ParameterGrid
df = []
for setting in ParameterGrid(dict(k=range(10), s=np.linspace(0, 5, 10), repetition=range(10))):
y = y_ + np.random.normal(scale=setting["s"], size=len(y_))
y_hat = make_pipeline(PolynomialFeatures(setting["k"]), LinearRegression(fit_intercept=False)).fit(X, y).predict(X)
df.append([setting["k"], setting["s"], np.mean((y-y_hat)**2)])
df = pd.DataFrame.from_records(df, columns=["k", "sigma","mse"]).groupby(["k","sigma"]).mean().reset_index()
go.Figure(go.Heatmap(x=df.k, y=df.sigma, z=df.mse, colorscale="amp"),
layout=go.Layout(title=r"$\text{(6) Average Train } MSE \text{ As Function of } \left(k,\sigma^2\right)$",
xaxis_title=r"$k$ - Fitted Polynomial Degree",
yaxis_title=r"$\sigma^2$ - Noise Levels")).show()
###Output
_____no_output_____
###Markdown
Time To Think...In the above figure, we observe the following trends:- As already seen before, for the noise-free data, once we reach the correct $k$ we achieve zero MSE.- Across all values of $k$, as we increase $\sigma^2$ we get higher MSE values.- For all noise levels, we manage to reduce MSE values by increasing $k$.So, by choosing a **richer** hypothesis class (i.e. larger and that can express more functions - polynomials of higherdegree) we are able to choose an hypothesis that fits the **observed** data **better**, regardless to how noisy the data is.Try and think how the above heatmap would look if instead of calculating the MSE over the training samples (i.e train error)we would have calculated it over a **new** set of test samples drawn from the same distribution.Use the below code to create a test set. Change the code generating figure 6 such that the reported error is a test error. Do not forget to add the noise (that depends on $\sigma^2$) to the test data. What has changed between what we observe for the train error to the test error? What happens for high/low values of $\sigma^2$? What happens for high/low values of $k$?
###Code
# Generate the x values of the test set
testX = np.linspace(-1.2, 2, 40)[1::2].reshape(-1,1)
# Generate the noisy y values of the test set. Set the noise level (the scale parameter) according to the specific setting
testY = response(testX) + np.random.normal(scale=___, size=len(y_))
###Output
_____no_output_____
###Markdown
Lab 03 - Polynomial FittingIn the previous lab we discussed linear regression and the OLS estimator for solving the minimization of the RSS. As wementioned, regression problems are a very wide family of settings and algorithms which we use to try to estimate the relation between a set of explanatory variables and a **continuous** response (i.e. $\mathcal{Y}\in\mathbb{R}^p$). In the following lab we will discuss one such setting called "Polynomial Fitting". Sometimes, the data (and the relation between the explanatory variables and response) can be described by some polynomialof some degree. Here, we only focus on the case where it is a polynomial of a single variable. That is: $$ p_k\left(x\right)=\sum_{i=0}^{k}\alpha_i x^i\quad\alpha_0,\ldots,\alpha_k\in\mathbb{R} $$So our hypothesis class is of the form:$$ \mathcal{H}^k_{poly}=\left\{p_k\,\Big|\, p_k\left(x\right)=\sum_{i=0}^{k}\alpha_i x^i\quad\alpha_0,\ldots,\alpha_k\in\mathbb{R}\right\} $$Notice that similar to linear regression, each hypothesis in the class is defined by a coefficients vector. Below are twoexamples (simulated and real) for datasets where the relation between the explanatory variable and response is polynomial.
###Code
import sys
sys.path.append("../")
from utils import *
response = lambda x: x**4 - 2*x**3 - .5*x**2 + 1
x = np.linspace(-1.2, 2, 40)[0::2]
y_ = response(x)
df = pd.read_csv("../datasets/Position_Salaries.csv", skiprows=2, index_col=0)
x2, y2 = df.index, df.Salary
make_subplots(1, 2, subplot_titles=(r"$\text{Simulated Data: }y=x^4-2x^3-0.5x^2+1$", r"$\text{Positions Salary}$"))\
.add_traces([go.Scatter(x=x, y=y_, mode="markers", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x2, y=y2, mode="markers",marker=dict(color="black", opacity=.7), showlegend=False)],
rows=[1,1], cols=[1,2])\
.update_layout(title=r"$\text{(1) Datasets For Polynomial Fitting}$", margin=dict(t=100)).show()
###Output
_____no_output_____
###Markdown
As we have discussed in class, solving a polynomial fitting problem can be done by first manipulating the input data,such that we represent each sample $x_i\in\mathbb{R}$ as a vector $\mathbf{x}_i=\left(x^0,x^1,\ldots,x^k\right)$. Then,we treat the data as a design matrix $\mathbf{X}\in\mathbb{R}^{m\times k}$ of a linear regression problem.For the simulated dataset above, which is of a polynomial of degree 4, the design matrix looks as follows:
###Code
from sklearn.preprocessing import PolynomialFeatures
m, k, X = 5, 4, x.reshape(-1, 1)
pd.DataFrame(PolynomialFeatures(k).fit_transform(X[:m]),
columns=[rf"$x^{{0}}$".format(i) for i in range(0, k+1)],
index=[rf"$x_{{0}}$".format(i) for i in range(1, m+1)])
###Output
_____no_output_____
###Markdown
Fitting A Polynomial Of Different DegreesNext, let us fit polynomials of different degrees and different noise properties to study how it influences the learned model.We begin with the noise-less case where we fit for different values of $k$. As we increase $k$ we manage to fit a modelthat describes the data in a better way, reflected by the decrease in the MSE.*Notice that in both the `PolynomialFeatures` and `LinearRegression` functions we can add the bias/intercept parameter. As in this case it makes no difference, we will include the bias in the polynomial features transformation and fit a linear regression **without** an intercept. The bias parameter of the polynomial features (i.e. $x^0$) will in reality be the intercept of the linear regression.*
###Code
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import make_pipeline
ks = [2, 3, 4, 5]
fig = make_subplots(1, 4, subplot_titles=list(ks))
for i, k in enumerate(ks):
y_hat = make_pipeline(PolynomialFeatures(k), LinearRegression(fit_intercept=False)).fit(X, y_).predict(X)
fig.add_traces([go.Scatter(x=x, y=y_, mode="markers", name="Real Points", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y_hat, mode="markers", name="Predicted Points", marker=dict(color="blue", opacity=.7), showlegend=False)], rows=1, cols=i+1)
fig["layout"]["annotations"][i]["text"] = rf"$k={{0}}, MSE={{1}}$".format(k, round(np.mean((y_-y_hat)**2), 2))
fig.update_layout(title=r"$\text{(2) Simulated Data - Fitting Polynomials of Different Degrees}$",
margin=dict(t=60),
yaxis_title=r"$\widehat{y}$",
height=300).show()
###Output
_____no_output_____
###Markdown
Once we find the right $k$ (which in our case is 4) we managed to fit a perfect model, after which, as we increase $k$, the additional coefficients will be zero.
###Code
coefs = {}
for k in ks:
fit = make_pipeline(PolynomialFeatures(k), LinearRegression(fit_intercept=False)).fit(X, y_)
coefs[rf"$k={{{k}}}$"] = [round(c,3) for c in fit.steps[1][1].coef_]
pd.DataFrame.from_dict(coefs, orient='index', columns=[rf"$w_{{{i}}}$" for i in range(max(ks)+1)])
###Output
_____no_output_____
###Markdown
Fitting Polynomial Of Different Degrees - With Sample NoiseStill fitting for different values of $k$, let us add some standard Gaussian noise (i.e. $\mathcal{N}\left(0,1\right)$).This time we observe two things:- Even for the correct $k=4$ model we are not able to achieve zero MSE.- As we increase $4<k\rightarrow 7$ we manage to decrease the error more and more.
###Code
y = y_ + np.random.normal(size=len(y_))
ks = range(2, 8)
fig = make_subplots(2, 3, subplot_titles=list(ks))
for i, k in enumerate(ks):
r,c = i//3+1, i%3+1
y_hat = make_pipeline(PolynomialFeatures(k), LinearRegression(fit_intercept=False)).fit(X, y).predict(X)
fig.add_traces([go.Scatter(x=x, y=y_, mode="markers", name="Real Points", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y, mode="markers", name="Observed Points", marker=dict(color="red", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y_hat, mode="markers", name="Predicted Points", marker=dict(color="blue", opacity=.7), showlegend=False)], rows=r, cols=c)
fig["layout"]["annotations"][i]["text"] = rf"$k={{0}}, MSE={{1}}$".format(k, round(np.mean((y-y_hat)**2), 2))
fig.update_layout(title=r"$\text{(4) Simulated Data With Noise - Fitting Polynomials of Different Degrees}$", margin=dict(t=80)).show()
###Output
_____no_output_____
###Markdown
How is it that we are able to fit "better" models for $k$s larger than the true one? As we increase $k$ we enable the modelmore "degrees of freedom" to try and adapt itself to the observed data. The higher $k$ the more the learner will "go afterthe noise" and miss the real signal of the data. In other words, what we have just observed is what is known as **overfitting**.Later in the course we will learn methods for detection and avoidance of overfitting. Fitting Polynomial Over Different Sample Noise LevelsNext, let us set $k=4$ (the true values) and study the outputted models when training over different noise levels. Thoughwe will only be changing the scale of the noise (i.e. the variance, $\sigma^2$), changing other properties such as itsdistribution is interesting too. As we would expect, as we increase the scale of the noise our error increases. We canobserve this also in a visual manner, where the fitted polynomial (in blue) less and less resembles the actual model (in black).
###Code
scales = range(6)
fig = make_subplots(2, 3, subplot_titles=list(map(str, scales)))
for i, s in enumerate(scales):
r,c = i//3+1, i%3+1
y = y_ + np.random.normal(scale=s, size=len(y_))
y_hat = make_pipeline(PolynomialFeatures(4), LinearRegression(fit_intercept=False)).fit(X, y).predict(X)
fig.add_traces([go.Scatter(x=x, y=y_, mode="markers", name="Real Points", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y, mode="markers", name="Observed Points", marker=dict(color="red", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y_hat, mode="markers", name="Predicted Points", marker=dict(color="blue", opacity=.7), showlegend=False)], rows=r, cols=c)
fig["layout"]["annotations"][i]["text"] = rf"$\sigma^2={{0}}, MSE={{1}}$".format(s, round(np.mean((y-y_hat)**2), 2))
fig.update_layout(title=r"$\text{(5) Simulated Data - Different Noise Scales}$", margin=dict(t=80)).show()
###Output
_____no_output_____
###Markdown
The Influence Of $k$ And $\sigma^2$ On Error

Lastly, let us check how the error is influenced by both $k$ and $\sigma^2$. For each value of $k$ and $\sigma^2$ we will add noise drawn from $\mathcal{N}\left(0,\sigma^2\right)$ and then, based on the noisy data, let the learner select a hypothesis from $\mathcal{H}_{poly}^k$. We repeat the process for each pair $\left(k,\sigma^2\right)$ 10 times and report the mean MSE value. Results are shown in the heatmap below:
###Code
from sklearn.model_selection import ParameterGrid
df = []
for setting in ParameterGrid(dict(k=range(10), s=np.linspace(0, 5, 10), repetition=range(10))):
y = y_ + np.random.normal(scale=setting["s"], size=len(y_))
y_hat = make_pipeline(PolynomialFeatures(setting["k"]), LinearRegression(fit_intercept=False)).fit(X, y).predict(X)
df.append([setting["k"], setting["s"], np.mean((y-y_hat)**2)])
df = pd.DataFrame.from_records(df, columns=["k", "sigma","mse"]).groupby(["k","sigma"]).mean().reset_index()
go.Figure(go.Heatmap(x=df.k, y=df.sigma, z=df.mse, colorscale="amp"),
layout=go.Layout(title=r"$\text{(6) Average Train } MSE \text{ As Function of } \left(k,\sigma^2\right)$",
xaxis_title=r"$k$ - Fitted Polynomial Degree",
yaxis_title=r"$\sigma^2$ - Noise Levels")).show()
###Output
_____no_output_____
###Markdown
Time To Think...

In the above figure, we observe the following trends:
- As already seen before, for the noise-free data, once we reach the correct $k$ we achieve zero MSE.
- Across all values of $k$, as we increase $\sigma^2$ we get higher MSE values.
- For all noise levels, we manage to reduce MSE values by increasing $k$.

So, by choosing a **richer** hypothesis class (i.e. a larger one, able to express more functions - polynomials of higher degree) we are able to choose a hypothesis that fits the **observed** data **better**, regardless of how noisy the data is. Try to think how the above heatmap would look if, instead of calculating the MSE over the training samples (i.e. the train error), we had calculated it over a **new** set of test samples drawn from the same distribution.

Use the code below to create a test set. Change the code generating figure 6 such that the reported error is a test error. Do not forget to add the noise (which depends on $\sigma^2$) to the test data. What has changed between what we observe for the train error and for the test error? What happens for high/low values of $\sigma^2$? What happens for high/low values of $k$?
###Code
# Generate the x values of the test set
testX = np.linspace(-1.2, 2, 40)[1::2].reshape(-1,1)
# Generate the noisy y values of the test set. Set the noise level (the scale parameter) according to the specific setting
testY = response(testX).ravel() + np.random.normal(scale=___, size=len(testX))
###Output
_____no_output_____
###Markdown
Lab 03 - Polynomial Fitting

In the previous lab we discussed linear regression and the OLS estimator for solving the minimization of the RSS. As we mentioned, regression problems are a very wide family of settings and algorithms which we use to try to estimate the relation between a set of explanatory variables and a **continuous** response (i.e. $\mathcal{Y}\in\mathbb{R}^p$). In the following lab we will discuss one such setting called "Polynomial Fitting". Sometimes, the data (and the relation between the explanatory variables and response) can be described by some polynomial of some degree. Here, we only focus on the case where it is a polynomial of a single variable. That is:

$$ p_k\left(x\right)=\sum_{i=0}^{k}\alpha_i x^i\quad\alpha_0,\ldots,\alpha_k\in\mathbb{R} $$

So our hypothesis class is of the form:

$$ \mathcal{H}^k_{poly}=\left\{p_k\,\Big|\, p_k\left(x\right)=\sum_{i=0}^{k}\alpha_i x^i\quad\alpha_0,\ldots,\alpha_k\in\mathbb{R}\right\} $$

Notice that similar to linear regression, each hypothesis in the class is defined by a coefficients vector. Below are two examples (simulated and real) for datasets where the relation between the explanatory variable and response is polynomial.
###Code
import sys
sys.path.append("../")
from utils import *
response = lambda x: x**4 - 2*x**3 - .5*x**2 + 1
x = np.linspace(-1.2, 2, 40)[0::2]
y_ = response(x)
df = pd.read_csv("../datasets/Position_Salaries.csv", skiprows=2, index_col=0)
x2, y2 = df.index, df.Salary
make_subplots(1, 2, subplot_titles=(r"$\text{Simulated Data: }y=x^4-2x^3-0.5x^2+1$", r"$\text{Positions Salary}$"))\
.add_traces([go.Scatter(x=x, y=y_, mode="markers", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x2, y=y2, mode="markers",marker=dict(color="black", opacity=.7), showlegend=False)],
rows=[1,1], cols=[1,2])\
.update_layout(title=r"$\text{(1) Datasets For Polynomial Fitting}$", margin=dict(t=100)).show()
from sklearn.preprocessing import PolynomialFeatures
m, k, X = 5, 4, x.reshape(-1, 1)
pd.DataFrame(PolynomialFeatures(k).fit_transform(X[:m]),
columns=[rf"$x^{{0}}$".format(i) for i in range(0, k+1)],
index=[rf"$x_{{0}}$".format(i) for i in range(1, m+1)])
###Output
_____no_output_____
###Markdown
As we have discussed in class, solving a polynomial fitting problem can be done by first manipulating the input data, such that we represent each sample $x_i\in\mathbb{R}$ as a vector $\mathbf{x}_i=\left(x^0,x^1,\ldots,x^k\right)$. Then, we treat the data as a design matrix $\mathbf{X}\in\mathbb{R}^{m\times\left(k+1\right)}$ of a linear regression problem. For the simulated dataset above, which is of a polynomial of degree 4, the design matrix is shown in the output above.
###Code
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import make_pipeline
ks = [2, 3, 4, 5]
fig = make_subplots(1, 4, subplot_titles=list(ks))
for i, k in enumerate(ks):
y_hat = make_pipeline(PolynomialFeatures(k), LinearRegression(fit_intercept=False)).fit(X, y_).predict(X)
fig.add_traces([go.Scatter(x=x, y=y_, mode="markers", name="Real Points", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y_hat, mode="markers", name="Predicted Points", marker=dict(color="blue", opacity=.7), showlegend=False)], rows=1, cols=i+1)
fig["layout"]["annotations"][i]["text"] = rf"$k={{0}}, MSE={{1}}$".format(k, round(np.mean((y_-y_hat)**2), 2))
fig.update_layout(title=r"$\text{(2) Simulated Data - Fitting Polynomials of Different Degrees}$",
margin=dict(t=60),
yaxis_title=r"$\widehat{y}$",
height=300).show()
###Output
_____no_output_____
###Markdown
Fitting A Polynomial Of Different Degrees

Next, let us fit polynomials of different degrees and different noise properties to study how it influences the learned model. We begin with the noise-less case where we fit for different values of $k$. As we increase $k$ we manage to fit a model that describes the data in a better way, reflected by the decrease in the MSE.

*Notice that in both the `PolynomialFeatures` and `LinearRegression` functions we can add the bias/intercept parameter. As in this case it makes no difference, we will include the bias in the polynomial features transformation and fit a linear regression **without** an intercept. The bias parameter of the polynomial features (i.e. $x^0$) will in reality be the intercept of the linear regression.*

Once we find the right $k$ (which in our case is 4) we manage to fit a perfect model, after which, as we increase $k$, the additional coefficients are zero.
###Code
coefs = {}
for k in ks:
fit = make_pipeline(PolynomialFeatures(k), LinearRegression(fit_intercept=False)).fit(X, y_)
coefs[rf"$k={{{k}}}$"] = [round(c,3) for c in fit.steps[1][1].coef_]
pd.DataFrame.from_dict(coefs, orient='index', columns=[rf"$w_{{{i}}}$" for i in range(max(ks)+1)])
###Output
_____no_output_____
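###Markdown
Under the hood, the pipeline above solves an ordinary least-squares problem on the expanded features. A minimal verification sketch (an addition, not part of the original lab) using `np.linalg.lstsq` on the degree-4 design matrix:
###Code
import numpy as np

response = lambda x: x**4 - 2*x**3 - .5*x**2 + 1
x = np.linspace(-1.2, 2, 40)[0::2]
y_ = response(x)

# columns x^0 ... x^4, i.e. the same design matrix as PolynomialFeatures(4)
V = np.vander(x, 5, increasing=True)
w, *_ = np.linalg.lstsq(V, y_, rcond=None)
print(np.round(w, 3))  # expected: [1, 0, -0.5, -2, 1]
###Output
_____no_output_____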
###Markdown
Fitting Polynomial Of Different Degrees - With Sample Noise

Still fitting for different values of $k$, let us add some standard Gaussian noise (i.e. $\mathcal{N}\left(0,1\right)$). This time we observe two things:
- Even for the correct $k=4$ model we are not able to achieve zero MSE.
- As we increase $k$ from $4$ towards $7$ we manage to decrease the error more and more.
###Code
y = y_ + np.random.normal(size=len(y_))
ks = range(2, 8)
fig = make_subplots(2, 3, subplot_titles=list(ks))
for i, k in enumerate(ks):
r,c = i//3+1, i%3+1
y_hat = make_pipeline(PolynomialFeatures(k), LinearRegression(fit_intercept=False)).fit(X, y).predict(X)
fig.add_traces([go.Scatter(x=x, y=y_, mode="markers", name="Real Points", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y, mode="markers", name="Observed Points", marker=dict(color="red", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y_hat, mode="markers", name="Predicted Points", marker=dict(color="blue", opacity=.7), showlegend=False)], rows=r, cols=c)
fig["layout"]["annotations"][i]["text"] = rf"$k={{0}}, MSE={{1}}$".format(k, round(np.mean((y-y_hat)**2), 2))
fig.update_layout(title=r"$\text{(4) Simulated Data With Noise - Fitting Polynomials of Different Degrees}$", margin=dict(t=80)).show()
###Output
_____no_output_____
###Markdown
How is it that we are able to fit "better" models for values of $k$ larger than the true one? As we increase $k$ we enable the model more "degrees of freedom" to try and adapt itself to the observed data. The higher $k$, the more the learner will "go after the noise" and miss the real signal of the data. In other words, what we have just observed is what is known as **overfitting**. Later in the course we will learn methods for the detection and avoidance of overfitting.

Fitting Polynomial Over Different Sample Noise Levels

Next, let us set $k=4$ (the true value) and study the resulting models when training over different noise levels. Though we will only be changing the scale of the noise (note that the `scale` argument of `np.random.normal` is the standard deviation $\sigma$, even though the figure annotations write $\sigma^2$), changing other properties, such as its distribution, is interesting too. As we would expect, as we increase the scale of the noise our error increases. We can observe this also in a visual manner: the fitted polynomial (in blue) resembles the actual model (in black) less and less.
###Code
scales = range(6)
fig = make_subplots(2, 3, subplot_titles=list(map(str, scales)))
for i, s in enumerate(scales):
r,c = i//3+1, i%3+1
y = y_ + np.random.normal(scale=s, size=len(y_))
y_hat = make_pipeline(PolynomialFeatures(4), LinearRegression(fit_intercept=False)).fit(X, y).predict(X)
fig.add_traces([go.Scatter(x=x, y=y_, mode="markers", name="Real Points", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y, mode="markers", name="Observed Points", marker=dict(color="red", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y_hat, mode="markers", name="Predicted Points", marker=dict(color="blue", opacity=.7), showlegend=False)], rows=r, cols=c)
fig["layout"]["annotations"][i]["text"] = rf"$\sigma^2={{0}}, MSE={{1}}$".format(s, round(np.mean((y-y_hat)**2), 2))
fig.update_layout(title=r"$\text{(5) Simulated Data - Different Noise Scales}$", margin=dict(t=80)).show()
###Output
_____no_output_____
###Markdown
The Influence Of $k$ And $\sigma^2$ On Error

Lastly, let us check how the error is influenced by both $k$ and $\sigma^2$. For each value of $k$ and $\sigma^2$ we will add noise drawn from $\mathcal{N}\left(0,\sigma^2\right)$ and then, based on the noisy data, let the learner select a hypothesis from $\mathcal{H}_{poly}^k$. We repeat the process for each pair $\left(k,\sigma^2\right)$ 10 times and report the mean MSE value. Results are shown in the heatmap below:
###Code
from sklearn.model_selection import ParameterGrid
df = []
for setting in ParameterGrid(dict(k=range(10), s=np.linspace(0, 5, 10), repetition=range(10))):
y = y_ + np.random.normal(scale=setting["s"], size=len(y_))
y_hat = make_pipeline(PolynomialFeatures(setting["k"]), LinearRegression(fit_intercept=False)).fit(X, y).predict(X)
df.append([setting["k"], setting["s"], np.mean((y-y_hat)**2)])
df = pd.DataFrame.from_records(df, columns=["k", "sigma","mse"]).groupby(["k","sigma"]).mean().reset_index()
go.Figure(go.Heatmap(x=df.k, y=df.sigma, z=df.mse, colorscale="amp"),
layout=go.Layout(title=r"$\text{(6) Average Train } MSE \text{ As Function of } \left(k,\sigma^2\right)$",
xaxis_title=r"$k$ - Fitted Polynomial Degree",
yaxis_title=r"$\sigma^2$ - Noise Levels")).show()
###Output
_____no_output_____
###Markdown
Time To Think...

In the above figure, we observe the following trends:
- As already seen before, for the noise-free data, once we reach the correct $k$ we achieve zero MSE.
- Across all values of $k$, as we increase $\sigma^2$ we get higher MSE values.
- For all noise levels, we manage to reduce MSE values by increasing $k$.

So, by choosing a **richer** hypothesis class (i.e. a larger one, able to express more functions - polynomials of higher degree) we are able to choose a hypothesis that fits the **observed** data **better**, regardless of how noisy the data is. Try to think how the above heatmap would look if, instead of calculating the MSE over the training samples (i.e. the train error), we had calculated it over a **new** set of test samples drawn from the same distribution.

Use the code below to create a test set. Change the code generating figure 6 such that the reported error is a test error. Do not forget to add the noise (which depends on $\sigma^2$) to the test data. What has changed between what we observe for the train error and for the test error? What happens for high/low values of $\sigma^2$? What happens for high/low values of $k$?
###Code
# Generate the x values of the test set
testX = np.linspace(-1.2, 2, 40)[1::2].reshape(-1,1)
df = []
for setting in ParameterGrid(dict(k=range(10), s=np.linspace(0, 5, 10), repetition=range(10))):
    y = y_ + np.random.normal(scale=setting["s"], size=len(y_))
    y_hat = make_pipeline(PolynomialFeatures(setting["k"]), LinearRegression(fit_intercept=False)).fit(X, y).predict(testX)
    # Generate the noisy y values of the test set, with the noise level of the current setting
    testY = response(testX).ravel() + np.random.normal(scale=setting["s"], size=len(testX))
    df.append([setting["k"], setting["s"], np.mean((testY-y_hat)**2)])
df = pd.DataFrame.from_records(df, columns=["k", "sigma","mse"]).groupby(["k","sigma"]).mean().reset_index()
go.Figure(go.Heatmap(x=df.k, y=df.sigma, z=df.mse, colorscale="amp"),
          layout=go.Layout(title=r"$\text{(6) Average Test } MSE \text{ As Function of } \left(k,\sigma^2\right)$",
                           xaxis_title=r"$k$ - Fitted Polynomial Degree",
                           yaxis_title=r"$\sigma^2$ - Noise Levels")).show()
###Output
_____no_output_____
###Markdown
Lab 03 - Polynomial Fitting

In the previous lab we discussed linear regression and the OLS estimator for solving the minimization of the RSS. As we mentioned, regression problems are a very wide family of settings and algorithms which we use to try to estimate the relation between a set of explanatory variables and a **continuous** response (i.e. $\mathcal{Y}\in\mathbb{R}^p$). In the following lab we will discuss one such setting called "Polynomial Fitting". Sometimes, the data (and the relation between the explanatory variables and response) can be described by some polynomial of some degree. Here, we only focus on the case where it is a polynomial of a single variable. That is:

$$ p_k\left(x\right)=\sum_{i=0}^{k}\alpha_i x^i\quad\alpha_0,\ldots,\alpha_k\in\mathbb{R} $$

So our hypothesis class is of the form:

$$ \mathcal{H}^k_{poly}=\left\{p_k\,\Big|\, p_k\left(x\right)=\sum_{i=0}^{k}\alpha_i x^i\quad\alpha_0,\ldots,\alpha_k\in\mathbb{R}\right\} $$

Notice that similar to linear regression, each hypothesis in the class is defined by a coefficients vector. Below are two examples (simulated and real) for datasets where the relation between the explanatory variable and response is polynomial.
###Code
import sys
sys.path.append("../")
from utils import *
response = lambda x: x**4 - 2*x**3 - .5*x**2 + 1
x = np.linspace(-1.2, 2, 40)[0::2]
y_ = response(x)
df = pd.read_csv("../datasets/Position_Salaries.csv", skiprows=2, index_col=0)
x2, y2 = df.index, df.Salary
make_subplots(1, 2, subplot_titles=(r"$\text{Simulated Data: }y=x^4-2x^3-0.5x^2+1$", r"$\text{Positions Salary}$"))\
.add_traces([go.Scatter(x=x, y=y_, mode="markers", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x2, y=y2, mode="markers",marker=dict(color="black", opacity=.7), showlegend=False)],
rows=[1,1], cols=[1,2])\
.update_layout(title=r"$\text{(1) Datasets For Polynomial Fitting}$", margin=dict(t=100)).show()
###Output
_____no_output_____
###Markdown
As we have discussed in class, solving a polynomial fitting problem can be done by first manipulating the input data, such that we represent each sample $x_i\in\mathbb{R}$ as a vector $\mathbf{x}_i=\left(x^0,x^1,\ldots,x^k\right)$. Then, we treat the data as a design matrix $\mathbf{X}\in\mathbb{R}^{m\times\left(k+1\right)}$ of a linear regression problem. For the simulated dataset above, which is of a polynomial of degree 4, the design matrix looks as follows:
###Code
from sklearn.preprocessing import PolynomialFeatures
m, k, X = 5, 4, x.reshape(-1, 1)
pd.DataFrame(PolynomialFeatures(k).fit_transform(X[:m]),
columns=[rf"$x^{{0}}$".format(i) for i in range(0, k+1)],
index=[rf"$x_{{0}}$".format(i) for i in range(1, m+1)])
###Output
_____no_output_____
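###Markdown
The same matrix can also be built directly in `numpy` as a Vandermonde matrix - a minimal illustrative sketch (an addition, not part of the original lab):
###Code
import numpy as np

x_demo = np.linspace(-1.2, 2, 40)[0::2][:5]
# increasing=True orders the columns x^0, x^1, ..., x^k,
# matching the PolynomialFeatures output shown above
print(np.vander(x_demo, 4 + 1, increasing=True).round(3))
###Output
_____no_output_____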
###Markdown
Fitting A Polynomial Of Different Degrees

Next, let us fit polynomials of different degrees and different noise properties to study how it influences the learned model. We begin with the noise-less case where we fit for different values of $k$. As we increase $k$ we manage to fit a model that describes the data in a better way, reflected by the decrease in the MSE.

*Notice that in both the `PolynomialFeatures` and `LinearRegression` functions we can add the bias/intercept parameter. As in this case it makes no difference, we will include the bias in the polynomial features transformation and fit a linear regression **without** an intercept. The bias parameter of the polynomial features (i.e. $x^0$) will in reality be the intercept of the linear regression.*
###Code
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import make_pipeline
ks = [2, 3, 4, 5]
fig = make_subplots(1, 4, subplot_titles=list(ks))
for i, k in enumerate(ks):
y_hat = make_pipeline(PolynomialFeatures(k), LinearRegression(fit_intercept=False)).fit(X, y_).predict(X)
fig.add_traces([go.Scatter(x=x, y=y_, mode="markers", name="Real Points", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y_hat, mode="markers", name="Predicted Points", marker=dict(color="blue", opacity=.7), showlegend=False)], rows=1, cols=i+1)
fig["layout"]["annotations"][i]["text"] = rf"$k={{0}}, MSE={{1}}$".format(k, round(np.mean((y_-y_hat)**2), 2))
fig.update_layout(title=r"$\text{(2) Simulated Data - Fitting Polynomials of Different Degrees}$",
margin=dict(t=60),
yaxis_title=r"$\widehat{y}$",
height=300).show()
###Output
_____no_output_____
###Markdown
Once we find the right $k$ (which in our case is 4) we manage to fit a perfect model, after which, as we increase $k$, the additional coefficients are zero.
###Code
coefs = {}
for k in ks:
fit = make_pipeline(PolynomialFeatures(k), LinearRegression(fit_intercept=False)).fit(X, y_)
coefs[rf"$k={{{k}}}$"] = [round(c,3) for c in fit.steps[1][1].coef_]
pd.DataFrame.from_dict(coefs, orient='index', columns=[rf"$w_{{{i}}}$" for i in range(max(ks)+1)])
###Output
_____no_output_____
###Markdown
Fitting Polynomial Of Different Degrees - With Sample Noise

Still fitting for different values of $k$, let us add some standard Gaussian noise (i.e. $\mathcal{N}\left(0,1\right)$). This time we observe two things:
- Even for the correct $k=4$ model we are not able to achieve zero MSE.
- As we increase $k$ from $4$ towards $7$ we manage to decrease the error more and more.
###Code
y = y_ + np.random.normal(size=len(y_))
ks = range(2, 8)
fig = make_subplots(2, 3, subplot_titles=list(ks))
for i, k in enumerate(ks):
r,c = i//3+1, i%3+1
y_hat = make_pipeline(PolynomialFeatures(k), LinearRegression(fit_intercept=False)).fit(X, y).predict(X)
fig.add_traces([go.Scatter(x=x, y=y_, mode="markers", name="Real Points", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y, mode="markers", name="Observed Points", marker=dict(color="red", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y_hat, mode="markers", name="Predicted Points", marker=dict(color="blue", opacity=.7), showlegend=False)], rows=r, cols=c)
fig["layout"]["annotations"][i]["text"] = rf"$k={{0}}, MSE={{1}}$".format(k, round(np.mean((y-y_hat)**2), 2))
fig.update_layout(title=r"$\text{(4) Simulated Data With Noise - Fitting Polynomials of Different Degrees}$", margin=dict(t=80)).show()
###Output
_____no_output_____
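###Markdown
The figure above already hints at the danger discussed next: high-degree fits chase the noise. A small added sketch (not part of the original lab) scoring the same fits on fresh noisy points from the same curve makes this concrete - the error on new points typically stops improving as $k$ grows past 4:
###Code
import numpy as np
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import make_pipeline

response = lambda x: x**4 - 2*x**3 - .5*x**2 + 1
X = np.linspace(-1.2, 2, 40)[0::2].reshape(-1, 1)
X_new = np.linspace(-1.2, 2, 40)[1::2].reshape(-1, 1)
y_train = response(X).ravel() + np.random.normal(size=len(X))
y_new = response(X_new).ravel() + np.random.normal(size=len(X_new))

for k in range(2, 8):
    model = make_pipeline(PolynomialFeatures(k), LinearRegression(fit_intercept=False)).fit(X, y_train)
    print(k,
          round(np.mean((y_train - model.predict(X)) ** 2), 2),    # train MSE
          round(np.mean((y_new - model.predict(X_new)) ** 2), 2))  # MSE on fresh points
###Output
_____no_output_____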
###Markdown
How is it that we are able to fit "better" models for values of $k$ larger than the true one? As we increase $k$ we enable the model more "degrees of freedom" to try and adapt itself to the observed data. The higher $k$, the more the learner will "go after the noise" and miss the real signal of the data. In other words, what we have just observed is what is known as **overfitting**. Later in the course we will learn methods for the detection and avoidance of overfitting.

Fitting Polynomial Over Different Sample Noise Levels

Next, let us set $k=4$ (the true value) and study the resulting models when training over different noise levels. Though we will only be changing the scale of the noise (note that the `scale` argument of `np.random.normal` is the standard deviation $\sigma$, even though the figure annotations write $\sigma^2$), changing other properties, such as its distribution, is interesting too. As we would expect, as we increase the scale of the noise our error increases. We can observe this also in a visual manner: the fitted polynomial (in blue) resembles the actual model (in black) less and less.
###Code
scales = range(6)
fig = make_subplots(2, 3, subplot_titles=list(map(str, scales)))
for i, s in enumerate(scales):
r,c = i//3+1, i%3+1
y = y_ + np.random.normal(scale=s, size=len(y_))
y_hat = make_pipeline(PolynomialFeatures(4), LinearRegression(fit_intercept=False)).fit(X, y).predict(X)
fig.add_traces([go.Scatter(x=x, y=y_, mode="markers", name="Real Points", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y, mode="markers", name="Observed Points", marker=dict(color="red", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y_hat, mode="markers", name="Predicted Points", marker=dict(color="blue", opacity=.7), showlegend=False)], rows=r, cols=c)
fig["layout"]["annotations"][i]["text"] = rf"$\sigma^2={{0}}, MSE={{1}}$".format(s, round(np.mean((y-y_hat)**2), 2))
fig.update_layout(title=r"$\text{(5) Simulated Data - Different Noise Scales}$", margin=dict(t=80)).show()
###Output
_____no_output_____
###Markdown
The Influence Of $k$ And $\sigma^2$ On Error

Lastly, let us check how the error is influenced by both $k$ and $\sigma^2$. For each value of $k$ and $\sigma^2$ we will add noise drawn from $\mathcal{N}\left(0,\sigma^2\right)$ and then, based on the noisy data, let the learner select a hypothesis from $\mathcal{H}_{poly}^k$. We repeat the process for each pair $\left(k,\sigma^2\right)$ 10 times and report the mean MSE value. Results are shown in the heatmap below:
###Code
from sklearn.model_selection import ParameterGrid
df = []
for setting in ParameterGrid(dict(k=range(10), s=np.linspace(0, 5, 10), repetition=range(10))):
y = y_ + np.random.normal(scale=setting["s"], size=len(y_))
y_hat = make_pipeline(PolynomialFeatures(setting["k"]), LinearRegression(fit_intercept=False)).fit(X, y).predict(X)
df.append([setting["k"], setting["s"], np.mean((y-y_hat)**2)])
df = pd.DataFrame.from_records(df, columns=["k", "sigma","mse"]).groupby(["k","sigma"]).mean().reset_index()
go.Figure(go.Heatmap(x=df.k, y=df.sigma, z=df.mse, colorscale="amp"),
layout=go.Layout(title=r"$\text{(6) Average Train } MSE \text{ As Function of } \left(k,\sigma^2\right)$",
xaxis_title=r"$k$ - Fitted Polynomial Degree",
yaxis_title=r"$\sigma^2$ - Noise Levels")).show()
###Output
_____no_output_____
###Markdown
Time To Think...

In the above figure, we observe the following trends:
- As already seen before, for the noise-free data, once we reach the correct $k$ we achieve zero MSE.
- Across all values of $k$, as we increase $\sigma^2$ we get higher MSE values.
- For all noise levels, we manage to reduce MSE values by increasing $k$.

So, by choosing a **richer** hypothesis class (i.e. a larger one, able to express more functions - polynomials of higher degree) we are able to choose a hypothesis that fits the **observed** data **better**, regardless of how noisy the data is. Try to think how the above heatmap would look if, instead of calculating the MSE over the training samples (i.e. the train error), we had calculated it over a **new** set of test samples drawn from the same distribution.

Use the code below to create a test set. Change the code generating figure 6 such that the reported error is a test error. Do not forget to add the noise (which depends on $\sigma^2$) to the test data. What has changed between what we observe for the train error and for the test error? What happens for high/low values of $\sigma^2$? What happens for high/low values of $k$?
###Code
from sklearn.model_selection import ParameterGrid
# Generate the x values of the test set
testX = np.linspace(-1.2, 2, 40)[1::2].reshape(-1,1)
df = []
for setting in ParameterGrid(dict(k=range(10), s=np.linspace(0, 5, 10), repetition=range(10))):
# Generate the noisy y values of the test set. Set the noise level (the scale parameter) according to the specific setting
y = y_ + np.random.normal(scale=setting["s"], size=len(y_))
y_hat = make_pipeline(PolynomialFeatures(setting["k"]), LinearRegression(fit_intercept=False)).fit(X, y).predict(testX)
    testY = response(testX).ravel() + np.random.normal(scale=setting["s"], size=len(testX))
df.append([setting["k"], setting["s"], np.mean((testY-y_hat)**2)])
df = pd.DataFrame.from_records(df, columns=["k", "sigma","mse"]).groupby(["k","sigma"]).mean().reset_index()
go.Figure(go.Heatmap(x=df.k, y=df.sigma, z=df.mse, colorscale="amp"),
layout=go.Layout(title=r"$\text{(6) Average Test } MSE \text{ As Function of } \left(k,\sigma^2\right)$",
xaxis_title=r"$k$ - Fitted Polynomial Degree",
yaxis_title=r"$\sigma^2$ - Noise Levels")).show()
###Output
_____no_output_____
###Markdown
Lab 03 - Polynomial Fitting

In the previous lab we discussed linear regression and the OLS estimator for solving the minimization of the RSS. As we mentioned, regression problems are a very wide family of settings and algorithms which we use to try to estimate the relation between a set of explanatory variables and a **continuous** response (i.e. $\mathcal{Y}\in\mathbb{R}^p$). In the following lab we will discuss one such setting called "Polynomial Fitting". Sometimes, the data (and the relation between the explanatory variables and response) can be described by some polynomial of some degree. Here, we only focus on the case where it is a polynomial of a single variable. That is:

$$ p_k\left(x\right)=\sum_{i=0}^{k}\alpha_i x^i\quad\alpha_0,\ldots,\alpha_k\in\mathbb{R} $$

So our hypothesis class is of the form:

$$ \mathcal{H}^k_{poly}=\left\{p_k\,\Big|\, p_k\left(x\right)=\sum_{i=0}^{k}\alpha_i x^i\quad\alpha_0,\ldots,\alpha_k\in\mathbb{R}\right\} $$

Notice that similar to linear regression, each hypothesis in the class is defined by a coefficients vector. Below are two examples (simulated and real) for datasets where the relation between the explanatory variable and response is polynomial.
###Code
import sys
sys.path.append("../")
from utils import *
response = lambda x: x**4 - 2*x**3 - .5*x**2 + 1
x = np.linspace(-1.2, 2, 40)[0::2]
y_ = response(x)
df = pd.read_csv("../datasets/Position_Salaries.csv", skiprows=2, index_col=0)
x2, y2 = df.index, df.Salary
make_subplots(1, 2, subplot_titles=(r"$\text{Simulated Data: }y=x^4-2x^3-0.5x^2+1$", r"$\text{Positions Salary}$"))\
.add_traces([go.Scatter(x=x, y=y_, mode="markers", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x2, y=y2, mode="markers",marker=dict(color="black", opacity=.7), showlegend=False)],
rows=[1,1], cols=[1,2])\
.update_layout(title=r"$\text{(1) Datasets For Polynomial Fitting}$", margin=dict(t=100)).show()
###Output
_____no_output_____
###Markdown
As we have discussed in class, solving a polynomial fitting problem can be done by first manipulating the input data, such that we represent each sample $x_i\in\mathbb{R}$ as a vector $\mathbf{x}_i=\left(x^0,x^1,\ldots,x^k\right)$. Then, we treat the data as a design matrix $\mathbf{X}\in\mathbb{R}^{m\times\left(k+1\right)}$ of a linear regression problem. For the simulated dataset above, which is of a polynomial of degree 4, the design matrix looks as follows:
###Code
from sklearn.preprocessing import PolynomialFeatures
m, k, X = 5, 4, x.reshape(-1, 1)
pd.DataFrame(PolynomialFeatures(k).fit_transform(X[:m]),
columns=[rf"$x^{{0}}$".format(i) for i in range(0, k+1)],
index=[rf"$x_{{0}}$".format(i) for i in range(1, m+1)])
###Output
_____no_output_____
###Markdown
Fitting A Polynomial Of Different Degrees

Next, let us fit polynomials of different degrees and different noise properties to study how it influences the learned model. We begin with the noise-less case where we fit for different values of $k$. As we increase $k$ we manage to fit a model that describes the data in a better way, reflected by the decrease in the MSE.

*Notice that in both the `PolynomialFeatures` and `LinearRegression` functions we can add the bias/intercept parameter. As in this case it makes no difference, we will include the bias in the polynomial features transformation and fit a linear regression **without** an intercept. The bias parameter of the polynomial features (i.e. $x^0$) will in reality be the intercept of the linear regression.*
###Code
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import make_pipeline
ks = [2, 3, 4, 5]
fig = make_subplots(1, 4, subplot_titles=list(ks))
for i, k in enumerate(ks):
y_hat = make_pipeline(PolynomialFeatures(k), LinearRegression(fit_intercept=False)).fit(X, y_).predict(X)
fig.add_traces([go.Scatter(x=x, y=y_, mode="markers", name="Real Points", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y_hat, mode="markers", name="Predicted Points", marker=dict(color="blue", opacity=.7), showlegend=False)], rows=1, cols=i+1)
fig["layout"]["annotations"][i]["text"] = rf"$k={{0}}, MSE={{1}}$".format(k, round(np.mean((y_-y_hat)**2), 2))
fig.update_layout(title=r"$\text{(2) Simulated Data - Fitting Polynomials of Different Degrees}$",
margin=dict(t=60),
yaxis_title=r"$\widehat{y}$",
height=300).show()
###Output
_____no_output_____
###Markdown
Once we find the right $k$ (which in our case is 4) we manage to fit a perfect model, after which, as we increase $k$, the additional coefficients are zero.
###Code
coefs = {}
for k in ks:
fit = make_pipeline(PolynomialFeatures(k), LinearRegression(fit_intercept=False)).fit(X, y_)
coefs[rf"$k={{{k}}}$"] = [round(c,3) for c in fit.steps[1][1].coef_]
pd.DataFrame.from_dict(coefs, orient='index', columns=[rf"$w_{{{i}}}$" for i in range(max(ks)+1)])
###Output
_____no_output_____
###Markdown
Fitting Polynomial Of Different Degrees - With Sample Noise

Still fitting for different values of $k$, let us add some standard Gaussian noise (i.e. $\mathcal{N}\left(0,1\right)$). This time we observe two things:
- Even for the correct $k=4$ model we are not able to achieve zero MSE.
- As we increase $k$ from $4$ towards $7$ we manage to decrease the error more and more.
###Code
y = y_ + np.random.normal(size=len(y_))
ks = range(2, 8)
fig = make_subplots(2, 3, subplot_titles=list(ks))
for i, k in enumerate(ks):
r,c = i//3+1, i%3+1
y_hat = make_pipeline(PolynomialFeatures(k), LinearRegression(fit_intercept=False)).fit(X, y).predict(X)
fig.add_traces([go.Scatter(x=x, y=y_, mode="markers", name="Real Points", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y, mode="markers", name="Observed Points", marker=dict(color="red", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y_hat, mode="markers", name="Predicted Points", marker=dict(color="blue", opacity=.7), showlegend=False)], rows=r, cols=c)
fig["layout"]["annotations"][i]["text"] = rf"$k={{0}}, MSE={{1}}$".format(k, round(np.mean((y-y_hat)**2), 2))
fig.update_layout(title=r"$\text{(4) Simulated Data With Noise - Fitting Polynomials of Different Degrees}$", margin=dict(t=80)).show()
###Output
_____no_output_____
###Markdown
How is it that we are able to fit "better" models for values of $k$ larger than the true one? As we increase $k$ we enable the model more "degrees of freedom" to try and adapt itself to the observed data. The higher $k$, the more the learner will "go after the noise" and miss the real signal of the data. In other words, what we have just observed is what is known as **overfitting**. Later in the course we will learn methods for the detection and avoidance of overfitting.

Fitting Polynomial Over Different Sample Noise Levels

Next, let us set $k=4$ (the true value) and study the resulting models when training over different noise levels. Though we will only be changing the scale of the noise (note that the `scale` argument of `np.random.normal` is the standard deviation $\sigma$, even though the figure annotations write $\sigma^2$), changing other properties, such as its distribution, is interesting too. As we would expect, as we increase the scale of the noise our error increases. We can observe this also in a visual manner: the fitted polynomial (in blue) resembles the actual model (in black) less and less.
###Code
scales = range(6)
fig = make_subplots(2, 3, subplot_titles=list(map(str, scales)))
for i, s in enumerate(scales):
r,c = i//3+1, i%3+1
y = y_ + np.random.normal(scale=s, size=len(y_))
y_hat = make_pipeline(PolynomialFeatures(4), LinearRegression(fit_intercept=False)).fit(X, y).predict(X)
fig.add_traces([go.Scatter(x=x, y=y_, mode="markers", name="Real Points", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y, mode="markers", name="Observed Points", marker=dict(color="red", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y_hat, mode="markers", name="Predicted Points", marker=dict(color="blue", opacity=.7), showlegend=False)], rows=r, cols=c)
fig["layout"]["annotations"][i]["text"] = rf"$\sigma^2={{0}}, MSE={{1}}$".format(s, round(np.mean((y-y_hat)**2), 2))
fig.update_layout(title=r"$\text{(5) Simulated Data - Different Noise Scales}$", margin=dict(t=80)).show()
###Output
_____no_output_____
###Markdown
The Influence Of $k$ And $\sigma^2$ On Error

Lastly, let us check how the error is influenced by both $k$ and $\sigma^2$. For each value of $k$ and $\sigma^2$ we will add noise drawn from $\mathcal{N}\left(0,\sigma^2\right)$ and then, based on the noisy data, let the learner select a hypothesis from $\mathcal{H}_{poly}^k$. We repeat the process for each pair $\left(k,\sigma^2\right)$ 10 times and report the mean MSE value. Results are shown in the heatmap below:
###Code
from sklearn.model_selection import ParameterGrid
df = []
for setting in ParameterGrid(dict(k=range(10), s=np.linspace(0, 5, 10), repetition=range(10))):
y = y_ + np.random.normal(scale=setting["s"], size=len(y_))
y_hat = make_pipeline(PolynomialFeatures(setting["k"]), LinearRegression(fit_intercept=False)).fit(X, y).predict(X)
df.append([setting["k"], setting["s"], np.mean((y-y_hat)**2)])
df = pd.DataFrame.from_records(df, columns=["k", "sigma","mse"]).groupby(["k","sigma"]).mean().reset_index()
go.Figure(go.Heatmap(x=df.k, y=df.sigma, z=df.mse, colorscale="amp"),
layout=go.Layout(title=r"$\text{(6) Average Train } MSE \text{ As Function of } \left(k,\sigma^2\right)$",
xaxis_title=r"$k$ - Fitted Polynomial Degree",
yaxis_title=r"$\sigma^2$ - Noise Levels")).show()
###Output
_____no_output_____
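###Markdown
For readers unfamiliar with `ParameterGrid` (used above): it simply expands a dict of value lists into the cartesian product of settings, yielding one dict per combination. A tiny illustrative sketch (an addition, not part of the original lab):
###Code
from sklearn.model_selection import ParameterGrid

for setting in ParameterGrid(dict(k=[0, 1], s=[0.0, 5.0])):
    print(setting)  # {'k': 0, 's': 0.0}, {'k': 0, 's': 5.0}, ...
###Output
_____no_output_____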
###Markdown
Time To Think...

In the above figure, we observe the following trends:
- As already seen before, for the noise-free data, once we reach the correct $k$ we achieve zero MSE.
- Across all values of $k$, as we increase $\sigma^2$ we get higher MSE values.
- For all noise levels, we manage to reduce MSE values by increasing $k$.

So, by choosing a **richer** hypothesis class (i.e. a larger one, able to express more functions - polynomials of higher degree) we are able to choose a hypothesis that fits the **observed** data **better**, regardless of how noisy the data is. Try to think how the above heatmap would look if, instead of calculating the MSE over the training samples (i.e. the train error), we had calculated it over a **new** set of test samples drawn from the same distribution.

Use the code below to create a test set. Change the code generating figure 6 such that the reported error is a test error. Do not forget to add the noise (which depends on $\sigma^2$) to the test data. What has changed between what we observe for the train error and for the test error? What happens for high/low values of $\sigma^2$? What happens for high/low values of $k$?
###Code
from sklearn.model_selection import ParameterGrid
df = []
testX = np.linspace(-1.2, 2, 40)[1::2].reshape(-1,1)
for setting in ParameterGrid(dict(k=range(10), s=np.linspace(0, 5, 10), repetition=range(10))):
    # Generate the noisy y values of the test set, with the noise level of the current setting
    testY = response(testX).ravel() + np.random.normal(scale=setting["s"], size=len(testX))
    y = y_ + np.random.normal(scale=setting["s"], size=len(y_))
    y_hat = make_pipeline(PolynomialFeatures(setting["k"]), LinearRegression(fit_intercept=False)).fit(X, y).predict(testX)
    df.append([setting["k"], setting["s"], np.mean((testY-y_hat)**2)])
df = pd.DataFrame.from_records(df, columns=["k", "sigma","mse"]).groupby(["k","sigma"]).mean().reset_index()
go.Figure(go.Heatmap(x=df.k, y=df.sigma, z=df.mse, colorscale="amp"),
          layout=go.Layout(title=r"$\text{(6) Average Test } MSE \text{ As Function of } \left(k,\sigma^2\right)$",
                           xaxis_title=r"$k$ - Fitted Polynomial Degree",
                           yaxis_title=r"$\sigma^2$ - Noise Levels")).show()
###Output
_____no_output_____
###Markdown
Lab 03 - Polynomial Fitting

In the previous lab we discussed linear regression and the OLS estimator for solving the minimization of the RSS. As we mentioned, regression problems are a very wide family of settings and algorithms which we use to try to estimate the relation between a set of explanatory variables and a **continuous** response (i.e. $\mathcal{Y}\in\mathbb{R}^p$). In the following lab we will discuss one such setting called "Polynomial Fitting". Sometimes, the data (and the relation between the explanatory variables and response) can be described by some polynomial of some degree. Here, we only focus on the case where it is a polynomial of a single variable. That is:

$$ p_k\left(x\right)=\sum_{i=0}^{k}\alpha_i x^i\quad\alpha_0,\ldots,\alpha_k\in\mathbb{R} $$

So our hypothesis class is of the form:

$$ \mathcal{H}^k_{poly}=\left\{p_k\,\Big|\, p_k\left(x\right)=\sum_{i=0}^{k}\alpha_i x^i\quad\alpha_0,\ldots,\alpha_k\in\mathbb{R}\right\} $$

Notice that similar to linear regression, each hypothesis in the class is defined by a coefficients vector. Below are two examples (simulated and real) for datasets where the relation between the explanatory variable and response is polynomial.
###Code
import sys
sys.path.append("../")
from utils import *
response = lambda x: x**4 - 2*x**3 - .5*x**2 + 1
x = np.linspace(-1.2, 2, 40)[0::2]
y_ = response(x)
df = pd.read_csv("../datasets/Position_Salaries.csv", skiprows=2, index_col=0)
x2, y2 = df.index, df.Salary
make_subplots(1, 2, subplot_titles=(r"$\text{Simulated Data: }y=x^4-2x^3-0.5x^2+1$", r"$\text{Positions Salary}$"))\
.add_traces([go.Scatter(x=x, y=y_, mode="markers", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x2, y=y2, mode="markers",marker=dict(color="black", opacity=.7), showlegend=False)],
rows=[1,1], cols=[1,2])\
.update_layout(title=r"$\text{(1) Datasets For Polynomial Fitting}$", margin=dict(t=100)).show()
###Output
_____no_output_____
###Markdown
As we have discussed in class, solving a polynomial fitting problem can be done by first manipulating the input data, such that we represent each sample $x_i\in\mathbb{R}$ as a vector $\mathbf{x}_i=\left(x^0,x^1,\ldots,x^k\right)$. Then, we treat the data as a design matrix $\mathbf{X}\in\mathbb{R}^{m\times\left(k+1\right)}$ of a linear regression problem. For the simulated dataset above, which is of a polynomial of degree 4, the design matrix looks as follows:
###Code
from sklearn.preprocessing import PolynomialFeatures
m, k, X = 5, 4, x.reshape(-1, 1)
pd.DataFrame(PolynomialFeatures(k).fit_transform(X[:m]),
columns=[rf"$x^{{0}}$".format(i) for i in range(0, k+1)],
index=[rf"$x_{{0}}$".format(i) for i in range(1, m+1)])
###Output
_____no_output_____
###Markdown
Fitting A Polynomial Of Different Degrees

Next, let us fit polynomials of different degrees and different noise properties to study how it influences the learned model. We begin with the noise-less case where we fit for different values of $k$. As we increase $k$ we manage to fit a model that describes the data in a better way, reflected by the decrease in the MSE.

*Notice that in both the `PolynomialFeatures` and `LinearRegression` functions we can add the bias/intercept parameter. As in this case it makes no difference, we will include the bias in the polynomial features transformation and fit a linear regression **without** an intercept. The bias parameter of the polynomial features (i.e. $x^0$) will in reality be the intercept of the linear regression.*
###Code
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import make_pipeline
ks = [2, 3, 4, 5]
fig = make_subplots(1, 4, subplot_titles=list(ks))
for i, k in enumerate(ks):
y_hat = make_pipeline(PolynomialFeatures(k), LinearRegression(fit_intercept=False)).fit(X, y_).predict(X)
fig.add_traces([go.Scatter(x=x, y=y_, mode="markers", name="Real Points", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y_hat, mode="markers", name="Predicted Points", marker=dict(color="blue", opacity=.7), showlegend=False)], rows=1, cols=i+1)
fig["layout"]["annotations"][i]["text"] = rf"$k={{0}}, MSE={{1}}$".format(k, round(np.mean((y_-y_hat)**2), 2))
fig.update_layout(title=r"$\text{(2) Simulated Data - Fitting Polynomials of Different Degrees}$",
margin=dict(t=60),
yaxis_title=r"$\widehat{y}$",
height=300).show()
###Output
_____no_output_____
###Markdown
Once we find the right $k$ (which in our case is 4) we manage to fit a perfect model, after which, as we increase $k$, the additional coefficients are zero.
###Code
coefs = {}
for k in ks:
fit = make_pipeline(PolynomialFeatures(k), LinearRegression(fit_intercept=False)).fit(X, y_)
coefs[rf"$k={{{k}}}$"] = [round(c,3) for c in fit.steps[1][1].coef_]
pd.DataFrame.from_dict(coefs, orient='index', columns=[rf"$w_{{{i}}}$" for i in range(max(ks)+1)])
###Output
_____no_output_____
###Markdown
Fitting Polynomial Of Different Degrees - With Sample Noise

Still fitting for different values of $k$, let us add some standard Gaussian noise (i.e. $\mathcal{N}\left(0,1\right)$). This time we observe two things:
- Even for the correct $k=4$ model we are not able to achieve zero MSE.
- As we increase $k$ from $4$ towards $7$ we manage to decrease the error more and more.
###Code
y = y_ + np.random.normal(size=len(y_))
ks = range(2, 8)
fig = make_subplots(2, 3, subplot_titles=list(ks))
for i, k in enumerate(ks):
r,c = i//3+1, i%3+1
y_hat = make_pipeline(PolynomialFeatures(k), LinearRegression(fit_intercept=False)).fit(X, y).predict(X)
fig.add_traces([go.Scatter(x=x, y=y_, mode="markers", name="Real Points", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y, mode="markers", name="Observed Points", marker=dict(color="red", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y_hat, mode="markers", name="Predicted Points", marker=dict(color="blue", opacity=.7), showlegend=False)], rows=r, cols=c)
fig["layout"]["annotations"][i]["text"] = rf"$k={{0}}, MSE={{1}}$".format(k, round(np.mean((y-y_hat)**2), 2))
fig.update_layout(title=r"$\text{(4) Simulated Data With Noise - Fitting Polynomials of Different Degrees}$", margin=dict(t=80)).show()
###Output
_____no_output_____
###Markdown
How is it that we are able to fit "better" models for values of $k$ larger than the true one? As we increase $k$ we enable the model more "degrees of freedom" to try and adapt itself to the observed data. The higher $k$, the more the learner will "go after the noise" and miss the real signal of the data. In other words, what we have just observed is what is known as **overfitting**. Later in the course we will learn methods for the detection and avoidance of overfitting.

Fitting Polynomial Over Different Sample Noise Levels

Next, let us set $k=4$ (the true value) and study the resulting models when training over different noise levels. Though we will only be changing the scale of the noise (note that the `scale` argument of `np.random.normal` is the standard deviation $\sigma$, even though the figure annotations write $\sigma^2$), changing other properties, such as its distribution, is interesting too. As we would expect, as we increase the scale of the noise our error increases. We can observe this also in a visual manner: the fitted polynomial (in blue) resembles the actual model (in black) less and less.
###Code
scales = range(6)
fig = make_subplots(2, 3, subplot_titles=list(map(str, scales)))
for i, s in enumerate(scales):
r,c = i//3+1, i%3+1
y = y_ + np.random.normal(scale=s, size=len(y_))
y_hat = make_pipeline(PolynomialFeatures(4), LinearRegression(fit_intercept=False)).fit(X, y).predict(X)
fig.add_traces([go.Scatter(x=x, y=y_, mode="markers", name="Real Points", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y, mode="markers", name="Observed Points", marker=dict(color="red", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y_hat, mode="markers", name="Predicted Points", marker=dict(color="blue", opacity=.7), showlegend=False)], rows=r, cols=c)
fig["layout"]["annotations"][i]["text"] = rf"$\sigma^2={{0}}, MSE={{1}}$".format(s, round(np.mean((y-y_hat)**2), 2))
fig.update_layout(title=r"$\text{(5) Simulated Data - Different Noise Scales}$", margin=dict(t=80)).show()
###Output
_____no_output_____
###Markdown
The Influence Of $k$ And $\sigma^2$ On Error

Lastly, let us check how the error is influenced by both $k$ and $\sigma^2$. For each value of $k$ and $\sigma^2$ we will add noise drawn from $\mathcal{N}\left(0,\sigma^2\right)$ and then, based on the noisy data, let the learner select a hypothesis from $\mathcal{H}_{poly}^k$. We repeat the process for each pair $\left(k,\sigma^2\right)$ 10 times and report the mean MSE value. Results are shown in the heatmap below:
###Code
from sklearn.model_selection import ParameterGrid
df = []
for setting in ParameterGrid(dict(k=range(10), s=np.linspace(0, 5, 10), repetition=range(10))):
y = y_ + np.random.normal(scale=setting["s"], size=len(y_))
y_hat = make_pipeline(PolynomialFeatures(setting["k"]), LinearRegression(fit_intercept=False)).fit(X, y).predict(X)
df.append([setting["k"], setting["s"], np.mean((y-y_hat)**2)])
df = pd.DataFrame.from_records(df, columns=["k", "sigma","mse"]).groupby(["k","sigma"]).mean().reset_index()
go.Figure(go.Heatmap(x=df.k, y=df.sigma, z=df.mse, colorscale="amp"),
layout=go.Layout(title=r"$\text{(6) Average Train } MSE \text{ As Function of } \left(k,\sigma^2\right)$",
xaxis_title=r"$k$ - Fitted Polynomial Degree",
yaxis_title=r"$\sigma^2$ - Noise Levels")).show()
###Output
_____no_output_____
###Markdown
Time To Think...

In the above figure, we observe the following trends:
- As already seen before, for the noise-free data, once we reach the correct $k$ we achieve zero MSE.
- Across all values of $k$, as we increase $\sigma^2$ we get higher MSE values.
- For all noise levels, we manage to reduce MSE values by increasing $k$.

So, by choosing a **richer** hypothesis class (i.e. a larger one, able to express more functions - polynomials of higher degree) we are able to choose a hypothesis that fits the **observed** data **better**, regardless of how noisy the data is. Try to think how the above heatmap would look if, instead of calculating the MSE over the training samples (i.e. the train error), we had calculated it over a **new** set of test samples drawn from the same distribution.

Use the code below to create a test set. Change the code generating figure 6 such that the reported error is a test error. Do not forget to add the noise (which depends on $\sigma^2$) to the test data. What has changed between what we observe for the train error and for the test error? What happens for high/low values of $\sigma^2$? What happens for high/low values of $k$?
###Code
# Generate the x values of the test set
testX = np.linspace(-1.2, 2, 40)[1::2].reshape(-1,1)
# Generate the noisy y values of the test set. Set the noise level (the scale parameter) according to the specific setting
testY = response(testX).ravel() + np.random.normal(scale=___, size=len(testX))
###Output
_____no_output_____
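###Markdown
One possible completion of the exercise, mirroring the test-error variants that appear elsewhere in this document (a hedged sketch: the `.ravel()` call and drawing the test noise at the same per-setting level are choices made here, not prescribed by the lab; `ParameterGrid`, `X`, `y_`, `response` and `testX` are assumed defined as in the cells above):
###Code
df = []
for setting in ParameterGrid(dict(k=range(10), s=np.linspace(0, 5, 10), repetition=range(10))):
    y = y_ + np.random.normal(scale=setting["s"], size=len(y_))
    fit = make_pipeline(PolynomialFeatures(setting["k"]), LinearRegression(fit_intercept=False)).fit(X, y)
    # evaluate on the held-out points, whose noise is drawn at the same level
    testY = response(testX).ravel() + np.random.normal(scale=setting["s"], size=len(testX))
    df.append([setting["k"], setting["s"], np.mean((testY - fit.predict(testX)) ** 2)])
df = pd.DataFrame.from_records(df, columns=["k", "sigma", "mse"]).groupby(["k", "sigma"]).mean().reset_index()
go.Figure(go.Heatmap(x=df.k, y=df.sigma, z=df.mse, colorscale="amp"),
          layout=go.Layout(title=r"$\text{(6) Average Test } MSE \text{ As Function of } \left(k,\sigma^2\right)$",
                           xaxis_title=r"$k$ - Fitted Polynomial Degree",
                           yaxis_title=r"$\sigma^2$ - Noise Levels")).show()
###Output
_____no_output_____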
###Markdown
Lab 03 - Polynomial Fitting

In the previous lab we discussed linear regression and the OLS estimator for solving the minimization of the RSS. As we mentioned, regression problems are a very wide family of settings and algorithms which we use to try to estimate the relation between a set of explanatory variables and a **continuous** response (i.e. $\mathcal{Y}\in\mathbb{R}^p$). In the following lab we will discuss one such setting called "Polynomial Fitting". Sometimes, the data (and the relation between the explanatory variables and response) can be described by some polynomial of some degree. Here, we only focus on the case where it is a polynomial of a single variable. That is:

$$ p_k\left(x\right)=\sum_{i=0}^{k}\alpha_i x^i\quad\alpha_0,\ldots,\alpha_k\in\mathbb{R} $$

So our hypothesis class is of the form:

$$ \mathcal{H}^k_{poly}=\left\{p_k\,\Big|\, p_k\left(x\right)=\sum_{i=0}^{k}\alpha_i x^i\quad\alpha_0,\ldots,\alpha_k\in\mathbb{R}\right\} $$

Notice that similar to linear regression, each hypothesis in the class is defined by a coefficients vector. Below are two examples (simulated and real) for datasets where the relation between the explanatory variable and response is polynomial.
###Code
import sys
sys.path.append("../")
from utils import *
response = lambda x: x**4 - 2*x**3 - .5*x**2 + 1
x = np.linspace(-1.2, 2, 40)[0::2] #choose only 20 samples
y_ = response(x) #apply the response function
df = pd.read_csv("../datasets/Position_Salaries.csv", skiprows=2, index_col=0)
x2, y2 = df.index, df.Salary
make_subplots(1, 2, subplot_titles=(r"$\text{Simulated Data: }y=x^4-2x^3-0.5x^2+1$", r"$\text{Positions Salary}$"))\
.add_traces([go.Scatter(x=x, y=y_, mode="markers", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x2, y=y2, mode="markers",marker=dict(color="black", opacity=.7), showlegend=False)],
rows=[1,1], cols=[1,2])\
.update_layout(title=r"$\text{(1) Datasets For Polynomial Fitting}$", margin=dict(t=100)).show()
###Output
_____no_output_____
###Markdown
As we have discussed in class, solving a polynomial fitting problem can be done by first manipulating the input data, such that we represent each sample $x_i\in\mathbb{R}$ as a vector $\mathbf{x}_i=\left(x^0,x^1,\ldots,x^k\right)$. Then, we treat the data as a design matrix $\mathbf{X}\in\mathbb{R}^{m\times\left(k+1\right)}$ of a linear regression problem. For the simulated dataset above, which is of a polynomial of degree 4, the design matrix looks as follows:
###Code
from sklearn.preprocessing import PolynomialFeatures  # expands each sample into its polynomial powers
m, k, X = 5, 4, x.reshape(-1, 1)  # reshape x into a column vector
pd.DataFrame(PolynomialFeatures(k).fit_transform(X[:m]),
columns=[rf"$x^{{0}}$".format(i) for i in range(0, k+1)],
index=[rf"$x_{{0}}$".format(i) for i in range(1, m+1)])
###Output
_____no_output_____
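###Markdown
To make the link to the previous lab explicit (a standard identity added here for reference, not part of the original lab text): once each sample is expanded into its powers, fitting is plain least squares on the design matrix, and for a full-column-rank $\mathbf{X}$ the fitted coefficient vector is

$$ \hat{\mathbf{w}}=\arg\min_{\mathbf{w}}\left\Vert \mathbf{X}\mathbf{w}-\mathbf{y}\right\Vert _2^2=\left(\mathbf{X}^\top\mathbf{X}\right)^{-1}\mathbf{X}^\top\mathbf{y} $$

This is the problem `LinearRegression` solves on the transformed features (internally via a numerically stabler routine than the explicit inverse).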
###Markdown
Fitting A Polynomial Of Different Degrees

Next, let us fit polynomials of different degrees and different noise properties to study how it influences the learned model. We begin with the noise-less case where we fit for different values of $k$. As we increase $k$ we manage to fit a model that describes the data in a better way, reflected by the decrease in the MSE.

*Notice that in both the `PolynomialFeatures` and `LinearRegression` functions we can add the bias/intercept parameter. As in this case it makes no difference, we will include the bias in the polynomial features transformation and fit a linear regression **without** an intercept. The bias parameter of the polynomial features (i.e. $x^0$) will in reality be the intercept of the linear regression.*
###Code
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import make_pipeline
ks = [2, 3, 4, 5]
fig = make_subplots(1, 4, subplot_titles=list(ks))
for i, k in enumerate(ks):
y_hat = make_pipeline(PolynomialFeatures(k), LinearRegression(fit_intercept=False)).fit(X, y_).predict(X)
fig.add_traces([go.Scatter(x=x, y=y_, mode="markers", name="Real Points", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y_hat, mode="markers", name="Predicted Points", marker=dict(color="blue", opacity=.7), showlegend=False)], rows=1, cols=i+1)
fig["layout"]["annotations"][i]["text"] = rf"$k={{0}}, MSE={{1}}$".format(k, round(np.mean((y_-y_hat)**2), 2))
fig.update_layout(title=r"$\text{(2) Simulated Data - Fitting Polynomials of Different Degrees}$",
margin=dict(t=60),
yaxis_title=r"$\widehat{y}$",
height=300).show()
###Output
_____no_output_____
###Markdown
Once we find the right $k$ (which in our case is 4) we manage to fit a perfect model, after which, as we increase $k$, the additional coefficients are zero.
###Code
coefs = {}
for k in ks:
fit = make_pipeline(PolynomialFeatures(k), LinearRegression(fit_intercept=False)).fit(X, y_)
coefs[rf"$k={{{k}}}$"] = [round(c,3) for c in fit.steps[1][1].coef_]
pd.DataFrame.from_dict(coefs, orient='index', columns=[rf"$w_{{{i}}}$" for i in range(max(ks)+1)])
###Output
_____no_output_____
###Markdown
Fitting Polynomial Of Different Degrees - With Sample Noise

Still fitting for different values of $k$, let us add some standard Gaussian noise (i.e. $\mathcal{N}\left(0,1\right)$). This time we observe two things:
- Even for the correct $k=4$ model we are not able to achieve zero MSE.
- As we increase $k$ from $4$ towards $7$ we manage to decrease the error more and more.
###Code
y = y_ + np.random.normal(size=len(y_)) #add noise
ks = range(2, 8)
fig = make_subplots(2, 3, subplot_titles=list(ks))
for i, k in enumerate(ks):
r,c = i//3+1, i%3+1
y_hat = make_pipeline(PolynomialFeatures(k), LinearRegression(fit_intercept=False)).fit(X, y).predict(X)
fig.add_traces([go.Scatter(x=x, y=y_, mode="markers", name="Real Points", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y, mode="markers", name="Observed Points", marker=dict(color="red", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y_hat, mode="markers", name="Predicted Points", marker=dict(color="blue", opacity=.7), showlegend=False)], rows=r, cols=c)
fig["layout"]["annotations"][i]["text"] = rf"$k={{0}}, MSE={{1}}$".format(k, round(np.mean((y-y_hat)**2), 2))
fig.update_layout(title=r"$\text{(4) Simulated Data With Noise - Fitting Polynomials of Different Degrees}$", margin=dict(t=80)).show()
###Output
_____no_output_____
###Markdown
How is it that we are able to fit "better" models for values of $k$ larger than the true one? As we increase $k$ we enable the model more "degrees of freedom" to try and adapt itself to the observed data. The higher $k$, the more the learner will "go after the noise" and miss the real signal of the data. In other words, what we have just observed is what is known as **overfitting**. Later in the course we will learn methods for the detection and avoidance of overfitting.

Fitting Polynomial Over Different Sample Noise Levels

Next, let us set $k=4$ (the true value) and study the resulting models when training over different noise levels. Though we will only be changing the scale of the noise (note that the `scale` argument of `np.random.normal` is the standard deviation $\sigma$, even though the figure annotations write $\sigma^2$), changing other properties, such as its distribution, is interesting too. As we would expect, as we increase the scale of the noise our error increases. We can observe this also in a visual manner: the fitted polynomial (in blue) resembles the actual model (in black) less and less.
###Code
scales = range(6)  # noise scales (standard deviations) 0..5; more noise degrades the k=4 fit
fig = make_subplots(2, 3, subplot_titles=list(map(str, scales)))
for i, s in enumerate(scales):
r,c = i//3+1, i%3+1
y = y_ + np.random.normal(scale=s, size=len(y_))
y_hat = make_pipeline(PolynomialFeatures(4), LinearRegression(fit_intercept=False)).fit(X, y).predict(X)
fig.add_traces([go.Scatter(x=x, y=y_, mode="markers", name="Real Points", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y, mode="markers", name="Observed Points", marker=dict(color="red", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y_hat, mode="markers", name="Predicted Points", marker=dict(color="blue", opacity=.7), showlegend=False)], rows=r, cols=c)
fig["layout"]["annotations"][i]["text"] = rf"$\sigma^2={{0}}, MSE={{1}}$".format(s, round(np.mean((y-y_hat)**2), 2))
fig.update_layout(title=r"$\text{(5) Simulated Data - Different Noise Scales}$", margin=dict(t=80)).show()
###Output
_____no_output_____
###Markdown
The Influence Of $k$ And $\sigma^2$ On Error
Lastly, let us check how the error is influenced by both $k$ and $\sigma^2$. For each value of $k$ and $\sigma^2$ we will add noise drawn from $\mathcal{N}\left(0,\sigma^2\right)$ and then, based on the noisy data, let the learner select a hypothesis from $\mathcal{H}_{poly}^k$. We repeat the process for each pair $\left(k,\sigma^2\right)$ 10 times and report the mean MSE value. The results are shown in the heatmap below:
###Code
from sklearn.model_selection import ParameterGrid
df = []
for setting in ParameterGrid(dict(k=range(10), s=np.linspace(0, 5, 10), repetition=range(10))):
y = y_ + np.random.normal(scale=setting["s"], size=len(y_))
y_hat = make_pipeline(PolynomialFeatures(setting["k"]), LinearRegression(fit_intercept=False)).fit(X, y).predict(X)
df.append([setting["k"], setting["s"], np.mean((y-y_hat)**2)])
df = pd.DataFrame.from_records(df, columns=["k", "sigma","mse"]).groupby(["k","sigma"]).mean().reset_index()
go.Figure(go.Heatmap(x=df.k, y=df.sigma, z=df.mse, colorscale="amp"),
layout=go.Layout(title=r"$\text{(6) Average Train } MSE \text{ As Function of } \left(k,\sigma^2\right)$",
xaxis_title=r"$k$ - Fitted Polynomial Degree",
yaxis_title=r"$\sigma^2$ - Noise Levels")).show()
###Output
_____no_output_____
###Markdown
Time To Think...
In the above figure, we observe the following trends:
- As already seen before, for the noise-free data, once we reach the correct $k$ we achieve zero MSE.
- Across all values of $k$, as we increase $\sigma^2$ we get higher MSE values.
- For all noise levels, we manage to reduce MSE values by increasing $k$.
So, by choosing a **richer** hypothesis class (i.e. a larger one, able to express more functions - polynomials of higher degree) we are able to choose a hypothesis that fits the **observed** data **better**, regardless of how noisy the data is.
Try to think how the above heatmap would look if, instead of calculating the MSE over the training samples (i.e. the train error), we calculated it over a **new** set of test samples drawn from the same distribution.
Use the code below to create a test set. Change the code generating figure 6 such that the reported error is a test error. Do not forget to add the noise (which depends on $\sigma^2$) to the test data. What has changed between what we observe for the train error and for the test error? What happens for high/low values of $\sigma^2$? What happens for high/low values of $k$?
###Code
# Generate the x values of the test set
testX = np.linspace(-1.2, 2, 40)[1::2].reshape(-1,1)
# Generate the noisy y values of the test set. Set the noise level (the scale parameter) according to the specific setting
df = []
for setting in ParameterGrid(dict(k=range(10), s=np.linspace(0, 5, 10), repetition=range(10))):
y = y_ + np.random.normal(scale=setting["s"], size=len(y_))
    testY = response(testX).flatten() + np.random.normal(scale=setting["s"], size=len(testX))  # noise level must match the current setting; flatten avoids (n,1)+(n,) broadcasting
y_hat = make_pipeline(PolynomialFeatures(setting["k"]), LinearRegression(fit_intercept=False)).fit(X, y).predict(testX)
df.append([setting["k"], setting["s"], np.mean((testY-y_hat)**2)])
df = pd.DataFrame.from_records(df, columns=["k", "sigma","mse"]).groupby(["k","sigma"]).mean().reset_index()
go.Figure(go.Heatmap(x=df.k, y=df.sigma, z=df.mse, colorscale="amp"),
          layout=go.Layout(title=r"$\text{(6) Average Test } MSE \text{ As Function of } \left(k,\sigma^2\right)$",
xaxis_title=r"$k$ - Fitted Polynomial Degree",
yaxis_title=r"$\sigma^2$ - Noise Levels")).show()
###Output
_____no_output_____
###Markdown
Lab 03 - Polynomial Fitting
In the previous lab we discussed linear regression and the OLS estimator for solving the minimization of the RSS. As we mentioned, regression problems are a very wide family of settings and algorithms which we use to try to estimate the relation between a set of explanatory variables and a **continuous** response (i.e. $\mathcal{Y}\in\mathbb{R}^p$). In the following lab we will discuss one such setting called "Polynomial Fitting". Sometimes, the data (and the relation between the explanatory variables and the response) can be described by some polynomial of some degree. Here, we only focus on the case of a polynomial of a single variable. That is:
$$ p_k\left(x\right)=\sum_{i=0}^{k}\alpha_i x^i\quad\alpha_0,\ldots,\alpha_k\in\mathbb{R} $$
So our hypothesis class is of the form:
$$ \mathcal{H}^k_{poly}=\left\{p_k\,\Big|\, p_k\left(x\right)=\sum_{i=0}^{k}\alpha_i x^i\quad\alpha_0,\ldots,\alpha_k\in\mathbb{R}\right\} $$
Notice that, similar to linear regression, each hypothesis in the class is defined by a coefficient vector. Below are two examples (simulated and real) of datasets where the relation between the explanatory variable and the response is polynomial.
###Code
import sys
sys.path.append("../")
from utils import *
response = lambda x: x**4 - 2*x**3 - .5*x**2 + 1
x = np.linspace(-1.2, 2, 40)[0::2]
y_ = response(x)
df = pd.read_csv("../datasets/Position_Salaries.csv", skiprows=2, index_col=0)
x2, y2 = df.index, df.Salary
make_subplots(1, 2, subplot_titles=(r"$\text{Simulated Data: }y=x^4-2x^3-0.5x^2+1$", r"$\text{Positions Salary}$"))\
.add_traces([go.Scatter(x=x, y=y_, mode="markers", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x2, y=y2, mode="markers",marker=dict(color="black", opacity=.7), showlegend=False)],
rows=[1,1], cols=[1,2])\
.update_layout(title=r"$\text{(1) Datasets For Polynomial Fitting}$", margin=dict(t=100)).show()
###Output
_____no_output_____
###Markdown
As we have discussed in class, solving a polynomial fitting problem can be done by first manipulating the input data, such that we represent each sample $x_i\in\mathbb{R}$ as a vector $\mathbf{x}_i=\left(x_i^0,x_i^1,\ldots,x_i^k\right)$. Then, we treat the data as a design matrix $\mathbf{X}\in\mathbb{R}^{m\times\left(k+1\right)}$ of a linear regression problem (the $+1$ accounts for the $x^0$ bias column). For the simulated dataset above, which comes from a polynomial of degree 4, the design matrix looks as follows:
###Code
from sklearn.preprocessing import PolynomialFeatures
m, k, X = 5, 4, x.reshape(-1, 1)
pd.DataFrame(PolynomialFeatures(k).fit_transform(X[:m]),
columns=[rf"$x^{{0}}$".format(i) for i in range(0, k+1)],
index=[rf"$x_{{0}}$".format(i) for i in range(1, m+1)])
###Output
_____no_output_____
###Markdown
Fitting A Polynomial Of Different Degrees
Next, let us fit polynomials of different degrees under different noise properties to study how they influence the learned model. We begin with the noise-less case, fitting for different values of $k$. As we increase $k$ we manage to fit a model that describes the data better, reflected by the decrease in the MSE.
*Notice that in both the `PolynomialFeatures` and `LinearRegression` functions we can add the bias/intercept parameter. As in this case it makes no difference, we will include the bias in the polynomial features transformation and fit a linear regression **without** an intercept. The bias parameter of the polynomial features (i.e. $x^0$) will in reality be the intercept of the linear regression.*
###Code
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import make_pipeline
ks = [2, 3, 4, 5]
fig = make_subplots(1, 4, subplot_titles=list(ks))
for i, k in enumerate(ks):
y_hat = make_pipeline(PolynomialFeatures(k), LinearRegression(fit_intercept=False)).fit(X, y_).predict(X)
fig.add_traces([go.Scatter(x=x, y=y_, mode="markers", name="Real Points", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y_hat, mode="markers", name="Predicted Points", marker=dict(color="blue", opacity=.7), showlegend=False)], rows=1, cols=i+1)
fig["layout"]["annotations"][i]["text"] = rf"$k={{0}}, MSE={{1}}$".format(k, round(np.mean((y_-y_hat)**2), 2))
fig.update_layout(title=r"$\text{(2) Simulated Data - Fitting Polynomials of Different Degrees}$",
margin=dict(t=60),
yaxis_title=r"$\widehat{y}$",
height=300).show()
###Output
_____no_output_____
###Markdown
Once we find the right $k$ (which in our case is 4) we manage to fit a perfect model; as we increase $k$ beyond it, the additional coefficients are fitted as zero.
###Code
coefs = {}
for k in ks:
fit = make_pipeline(PolynomialFeatures(k), LinearRegression(fit_intercept=False)).fit(X, y_)
coefs[rf"$k={{{k}}}$"] = [round(c,3) for c in fit.steps[1][1].coef_]
pd.DataFrame.from_dict(coefs, orient='index', columns=[rf"$w_{{{i}}}$" for i in range(max(ks)+1)])
###Output
_____no_output_____
###Markdown
Fitting Polynomial Of Different Degrees - With Sample Noise
Still fitting for different values of $k$, let us add some standard Gaussian noise (i.e. $\mathcal{N}\left(0,1\right)$). This time we observe two things:
- Even for the correct $k=4$ model we are not able to achieve zero MSE.
- As we increase $k$ from $4$ towards $7$ we manage to decrease the error more and more.
###Code
y = y_ + np.random.normal(size=len(y_))
ks = range(2, 8)
fig = make_subplots(2, 3, subplot_titles=list(ks))
for i, k in enumerate(ks):
r,c = i//3+1, i%3+1
y_hat = make_pipeline(PolynomialFeatures(k), LinearRegression(fit_intercept=False)).fit(X, y).predict(X)
fig.add_traces([go.Scatter(x=x, y=y_, mode="markers", name="Real Points", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y, mode="markers", name="Observed Points", marker=dict(color="red", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y_hat, mode="markers", name="Predicted Points", marker=dict(color="blue", opacity=.7), showlegend=False)], rows=r, cols=c)
fig["layout"]["annotations"][i]["text"] = rf"$k={{0}}, MSE={{1}}$".format(k, round(np.mean((y-y_hat)**2), 2))
fig.update_layout(title=r"$\text{(4) Simulated Data With Noise - Fitting Polynomials of Different Degrees}$", margin=dict(t=80)).show()
###Output
_____no_output_____
###Markdown
How is it that we are able to fit "better" models for $k$s larger than the true one? As we increase $k$ we give the model more "degrees of freedom" to adapt itself to the observed data. The higher $k$ is, the more the learner will "go after the noise" and miss the real signal of the data. In other words, what we have just observed is known as **overfitting**. Later in the course we will learn methods for the detection and avoidance of overfitting.
Fitting Polynomial Over Different Sample Noise Levels
Next, let us set $k=4$ (the true value) and study the fitted models when training over different noise levels. Though we will only be changing the scale of the noise (note that NumPy's `scale` argument is the standard deviation $\sigma$, not the variance $\sigma^2$), changing other properties such as its distribution would be interesting too. As we would expect, as we increase the scale of the noise our error increases. We can also observe this visually: the fitted polynomial (in blue) resembles the actual model (in black) less and less.
###Code
scales = range(6)
fig = make_subplots(2, 3, subplot_titles=list(map(str, scales)))
for i, s in enumerate(scales):
r,c = i//3+1, i%3+1
y = y_ + np.random.normal(scale=s, size=len(y_))
y_hat = make_pipeline(PolynomialFeatures(4), LinearRegression(fit_intercept=False)).fit(X, y).predict(X)
fig.add_traces([go.Scatter(x=x, y=y_, mode="markers", name="Real Points", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y, mode="markers", name="Observed Points", marker=dict(color="red", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y_hat, mode="markers", name="Predicted Points", marker=dict(color="blue", opacity=.7), showlegend=False)], rows=r, cols=c)
fig["layout"]["annotations"][i]["text"] = rf"$\sigma^2={{0}}, MSE={{1}}$".format(s, round(np.mean((y-y_hat)**2), 2))
fig.update_layout(title=r"$\text{(5) Simulated Data - Different Noise Scales}$", margin=dict(t=80)).show()
###Output
_____no_output_____
###Markdown
The Influence Of $k$ And $\sigma^2$ On Error
Lastly, let us check how the error is influenced by both $k$ and $\sigma^2$. For each value of $k$ and $\sigma^2$ we will add noise drawn from $\mathcal{N}\left(0,\sigma^2\right)$ and then, based on the noisy data, let the learner select a hypothesis from $\mathcal{H}_{poly}^k$. We repeat the process for each pair $\left(k,\sigma^2\right)$ 10 times and report the mean MSE value. The results are shown in the heatmap below:
###Code
from sklearn.model_selection import ParameterGrid
df = []
for setting in ParameterGrid(dict(k=range(10), s=np.linspace(0, 5, 10), repetition=range(10))):
y = y_ + np.random.normal(scale=setting["s"], size=len(y_))
y_hat = make_pipeline(PolynomialFeatures(setting["k"]), LinearRegression(fit_intercept=False)).fit(X, y).predict(X)
df.append([setting["k"], setting["s"], np.mean((y-y_hat)**2)])
df = pd.DataFrame.from_records(df, columns=["k", "sigma","mse"]).groupby(["k","sigma"]).mean().reset_index()
go.Figure(go.Heatmap(x=df.k, y=df.sigma, z=df.mse, colorscale="amp"),
layout=go.Layout(title=r"$\text{(6) Average Train } MSE \text{ As Function of } \left(k,\sigma^2\right)$",
xaxis_title=r"$k$ - Fitted Polynomial Degree",
yaxis_title=r"$\sigma^2$ - Noise Levels")).show()
###Output
_____no_output_____
###Markdown
Time To Think...
In the above figure, we observe the following trends:
- As already seen before, for the noise-free data, once we reach the correct $k$ we achieve zero MSE.
- Across all values of $k$, as we increase $\sigma^2$ we get higher MSE values.
- For all noise levels, we manage to reduce MSE values by increasing $k$.
So, by choosing a **richer** hypothesis class (i.e. a larger one, able to express more functions - polynomials of higher degree) we are able to choose a hypothesis that fits the **observed** data **better**, regardless of how noisy the data is.
Try to think how the above heatmap would look if, instead of calculating the MSE over the training samples (i.e. the train error), we calculated it over a **new** set of test samples drawn from the same distribution.
Use the code below to create a test set. Change the code generating figure 6 such that the reported error is a test error. Do not forget to add the noise (which depends on $\sigma^2$) to the test data. What has changed between what we observe for the train error and for the test error? What happens for high/low values of $\sigma^2$? What happens for high/low values of $k$?
###Code
# Generate the x values of the test set
testX = np.linspace(-1.2, 2, 40)[1::2].reshape(-1,1)
# Generate the noisy y values of the test set. Set the noise level (the scale parameter) according to the specific setting
testY = response(testX).flatten() + np.random.normal(scale=___, size=len(testX))  # fill in the noise scale for the current setting; flatten avoids broadcasting to a matrix
###Output
_____no_output_____
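###Markdown
One possible completion of the exercise above (an editorial sketch, not the official solution; it mirrors the figure 6 loop, and drawing fresh test noise for every setting is an assumption):
###Code
# Sketch: report the test error instead of the train error. Assumes response, X, y_,
# testX, ParameterGrid, make_pipeline, PolynomialFeatures and LinearRegression from above.
df = []
for setting in ParameterGrid(dict(k=range(10), s=np.linspace(0, 5, 10), repetition=range(10))):
    y = y_ + np.random.normal(scale=setting["s"], size=len(y_))  # noisy training responses
    testY = response(testX).flatten() + np.random.normal(scale=setting["s"], size=len(testX))  # noisy test responses
    y_hat = make_pipeline(PolynomialFeatures(setting["k"]), LinearRegression(fit_intercept=False)).fit(X, y).predict(testX)
    df.append([setting["k"], setting["s"], np.mean((testY - y_hat)**2)])
df = pd.DataFrame.from_records(df, columns=["k", "sigma", "mse"]).groupby(["k", "sigma"]).mean().reset_index()
go.Figure(go.Heatmap(x=df.k, y=df.sigma, z=df.mse, colorscale="amp"),
          layout=go.Layout(title=r"$\text{(6) Average Test } MSE \text{ As Function of } \left(k,\sigma^2\right)$",
                           xaxis_title=r"$k$ - Fitted Polynomial Degree",
                           yaxis_title=r"$\sigma^2$ - Noise Levels")).show()
###Output
_____no_output_____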
###Markdown
Lab 03 - Polynomial Fitting
In the previous lab we discussed linear regression and the OLS estimator for solving the minimization of the RSS. As we mentioned, regression problems are a very wide family of settings and algorithms which we use to try to estimate the relation between a set of explanatory variables and a **continuous** response (i.e. $\mathcal{Y}\in\mathbb{R}^p$). In the following lab we will discuss one such setting called "Polynomial Fitting". Sometimes, the data (and the relation between the explanatory variables and the response) can be described by some polynomial of some degree. Here, we only focus on the case of a polynomial of a single variable. That is:
$$ p_k\left(x\right)=\sum_{i=0}^{k}\alpha_i x^i\quad\alpha_0,\ldots,\alpha_k\in\mathbb{R} $$
So our hypothesis class is of the form:
$$ \mathcal{H}^k_{poly}=\left\{p_k\,\Big|\, p_k\left(x\right)=\sum_{i=0}^{k}\alpha_i x^i\quad\alpha_0,\ldots,\alpha_k\in\mathbb{R}\right\} $$
Notice that, similar to linear regression, each hypothesis in the class is defined by a coefficient vector. Below are two examples (simulated and real) of datasets where the relation between the explanatory variable and the response is polynomial.
###Code
import sys
sys.path.append("../")
from utils import *
response = lambda x: x**4 - 2*x**3 - .5*x**2 + 1
x = np.linspace(-1.2, 2, 40)[0::2]
y_ = response(x)
df = pd.read_csv("../datasets/Position_Salaries.csv", skiprows=2, index_col=0)
x2, y2 = df.index, df.Salary
make_subplots(1, 2, subplot_titles=(r"$\text{Simulated Data: }y=x^4-2x^3-0.5x^2+1$", r"$\text{Positions Salary}$"))\
.add_traces([go.Scatter(x=x, y=y_, mode="markers", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x2, y=y2, mode="markers",marker=dict(color="black", opacity=.7), showlegend=False)],
rows=[1,1], cols=[1,2])\
.update_layout(title=r"$\text{(1) Datasets For Polynomial Fitting}$", margin=dict(t=100)).show()
###Output
_____no_output_____
###Markdown
As we have discussed in class, solving a polynomial fitting problem can be done by first manipulating the input data, such that we represent each sample $x_i\in\mathbb{R}$ as a vector $\mathbf{x}_i=\left(x_i^0,x_i^1,\ldots,x_i^k\right)$. Then, we treat the data as a design matrix $\mathbf{X}\in\mathbb{R}^{m\times\left(k+1\right)}$ of a linear regression problem (the $+1$ accounts for the $x^0$ bias column). For the simulated dataset above, which comes from a polynomial of degree 4, the design matrix looks as follows:
###Code
from sklearn.preprocessing import PolynomialFeatures
m, k, X = 5, 4, x.reshape(-1, 1)
pd.DataFrame(PolynomialFeatures(k).fit_transform(X[:m]),
columns=[rf"$x^{{0}}$".format(i) for i in range(0, k+1)],
index=[rf"$x_{{0}}$".format(i) for i in range(1, m+1)])
###Output
_____no_output_____
###Markdown
Fitting A Polynomial Of Different Degrees
Next, let us fit polynomials of different degrees under different noise properties to study how they influence the learned model. We begin with the noise-less case, fitting for different values of $k$. As we increase $k$ we manage to fit a model that describes the data better, reflected by the decrease in the MSE.
*Notice that in both the `PolynomialFeatures` and `LinearRegression` functions we can add the bias/intercept parameter. As in this case it makes no difference, we will include the bias in the polynomial features transformation and fit a linear regression **without** an intercept. The bias parameter of the polynomial features (i.e. $x^0$) will in reality be the intercept of the linear regression.*
###Code
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import make_pipeline
ks = [2, 3, 4, 5]
fig = make_subplots(1, 4, subplot_titles=list(ks))
for i, k in enumerate(ks):
y_hat = make_pipeline(PolynomialFeatures(k), LinearRegression(fit_intercept=False)).fit(X, y_).predict(X)
fig.add_traces([go.Scatter(x=x, y=y_, mode="markers", name="Real Points", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y_hat, mode="markers", name="Predicted Points", marker=dict(color="blue", opacity=.7), showlegend=False)], rows=1, cols=i+1)
fig["layout"]["annotations"][i]["text"] = rf"$k={{0}}, MSE={{1}}$".format(k, round(np.mean((y_-y_hat)**2), 2))
fig.update_layout(title=r"$\text{(2) Simulated Data - Fitting Polynomials of Different Degrees}$",
margin=dict(t=60),
yaxis_title=r"$\widehat{y}$",
height=300).show()
###Output
_____no_output_____
###Markdown
Once we find the right $k$ (which in our case is 4) we manage to fit a perfect model; as we increase $k$ beyond it, the additional coefficients are fitted as zero.
###Code
coefs = {}
for k in ks:
fit = make_pipeline(PolynomialFeatures(k), LinearRegression(fit_intercept=False)).fit(X, y_)
coefs[rf"$k={{{k}}}$"] = [round(c,3) for c in fit.steps[1][1].coef_]
pd.DataFrame.from_dict(coefs, orient='index', columns=[rf"$w_{{{i}}}$" for i in range(max(ks)+1)])
###Output
_____no_output_____
###Markdown
Fitting Polynomial Of Different Degrees - With Sample Noise
Still fitting for different values of $k$, let us add some standard Gaussian noise (i.e. $\mathcal{N}\left(0,1\right)$). This time we observe two things:
- Even for the correct $k=4$ model we are not able to achieve zero MSE.
- As we increase $k$ from $4$ towards $7$ we manage to decrease the error more and more.
###Code
y = y_ + np.random.normal(size=len(y_))
ks = range(2, 8)
fig = make_subplots(2, 3, subplot_titles=list(ks))
for i, k in enumerate(ks):
r,c = i//3+1, i%3+1
y_hat = make_pipeline(PolynomialFeatures(k), LinearRegression(fit_intercept=False)).fit(X, y).predict(X)
fig.add_traces([go.Scatter(x=x, y=y_, mode="markers", name="Real Points", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y, mode="markers", name="Observed Points", marker=dict(color="red", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y_hat, mode="markers", name="Predicted Points", marker=dict(color="blue", opacity=.7), showlegend=False)], rows=r, cols=c)
fig["layout"]["annotations"][i]["text"] = rf"$k={{0}}, MSE={{1}}$".format(k, round(np.mean((y-y_hat)**2), 2))
fig.update_layout(title=r"$\text{(4) Simulated Data With Noise - Fitting Polynomials of Different Degrees}$", margin=dict(t=80)).show()
###Output
_____no_output_____
###Markdown
How is it that we are able to fit "better" models for $k$s larger than the true one? As we increase $k$ we give the model more "degrees of freedom" to adapt itself to the observed data. The higher $k$ is, the more the learner will "go after the noise" and miss the real signal of the data. In other words, what we have just observed is known as **overfitting**. Later in the course we will learn methods for the detection and avoidance of overfitting.
Fitting Polynomial Over Different Sample Noise Levels
Next, let us set $k=4$ (the true value) and study the fitted models when training over different noise levels. Though we will only be changing the scale of the noise (note that NumPy's `scale` argument is the standard deviation $\sigma$, not the variance $\sigma^2$), changing other properties such as its distribution would be interesting too. As we would expect, as we increase the scale of the noise our error increases. We can also observe this visually: the fitted polynomial (in blue) resembles the actual model (in black) less and less.
###Code
scales = range(6)
fig = make_subplots(2, 3, subplot_titles=list(map(str, scales)))
for i, s in enumerate(scales):
r,c = i//3+1, i%3+1
y = y_ + np.random.normal(scale=s, size=len(y_))
y_hat = make_pipeline(PolynomialFeatures(4), LinearRegression(fit_intercept=False)).fit(X, y).predict(X)
fig.add_traces([go.Scatter(x=x, y=y_, mode="markers", name="Real Points", marker=dict(color="black", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y, mode="markers", name="Observed Points", marker=dict(color="red", opacity=.7), showlegend=False),
go.Scatter(x=x, y=y_hat, mode="markers", name="Predicted Points", marker=dict(color="blue", opacity=.7), showlegend=False)], rows=r, cols=c)
fig["layout"]["annotations"][i]["text"] = rf"$\sigma^2={{0}}, MSE={{1}}$".format(s, rou=nd(np.mean((y-y_hat)**2), 2))
fig.update_layout(title=r"$\text{(5) Simulated Data - Different Noise Scales}$", margin=dict(t=80)).show()
###Output
_____no_output_____
###Markdown
The Influence Of $k$ And $\sigma^2$ On Error
Lastly, let us check how the error is influenced by both $k$ and $\sigma^2$. For each value of $k$ and $\sigma^2$ we will add noise drawn from $\mathcal{N}\left(0,\sigma^2\right)$ and then, based on the noisy data, let the learner select a hypothesis from $\mathcal{H}_{poly}^k$. We repeat the process for each pair $\left(k,\sigma^2\right)$ 10 times and report the mean MSE value. The results are shown in the heatmap below:
###Code
from sklearn.model_selection import ParameterGrid
df = []
for setting in ParameterGrid(dict(k=range(10), s=np.linspace(0, 5, 10), repetition=range(10))):
y = y_ + np.random.normal(scale=setting["s"], size=len(y_))
y_hat = make_pipeline(PolynomialFeatures(setting["k"]), LinearRegression(fit_intercept=False)).fit(X, y).predict(X)
df.append([setting["k"], setting["s"], np.mean((y-y_hat)**2)])
df = pd.DataFrame.from_records(df, columns=["k", "sigma","mse"]).groupby(["k","sigma"]).mean().reset_index()
go.Figure(go.Heatmap(x=df.k, y=df.sigma, z=df.mse, colorscale="amp"),
layout=go.Layout(title=r"$\text{(6) Average Train } MSE \text{ As Function of } \left(k,\sigma^2\right)$",
xaxis_title=r"$k$ - Fitted Polynomial Degree",
yaxis_title=r"$\sigma^2$ - Noise Levels")).show()
###Output
_____no_output_____
###Markdown
Time To Think...
In the above figure, we observe the following trends:
- As already seen before, for the noise-free data, once we reach the correct $k$ we achieve zero MSE.
- Across all values of $k$, as we increase $\sigma^2$ we get higher MSE values.
- For all noise levels, we manage to reduce MSE values by increasing $k$.
So, by choosing a **richer** hypothesis class (i.e. a larger one, able to express more functions - polynomials of higher degree) we are able to choose a hypothesis that fits the **observed** data **better**, regardless of how noisy the data is.
Try to think how the above heatmap would look if, instead of calculating the MSE over the training samples (i.e. the train error), we calculated it over a **new** set of test samples drawn from the same distribution.
Use the code below to create a test set. Change the code generating figure 6 such that the reported error is a test error. Do not forget to add the noise (which depends on $\sigma^2$) to the test data. What has changed between what we observe for the train error and for the test error? What happens for high/low values of $\sigma^2$? What happens for high/low values of $k$?
###Code
# Generate the x values of the test set
testX = np.linspace(-1.2, 2, 40)[1::2].reshape(-1,1)
# Generate the noisy y values of the test set inside the loop below, so that the noise
# level (the scale parameter) matches the specific setting
from sklearn.model_selection import ParameterGrid
df = []
for setting in ParameterGrid(dict(k=range(10), s=np.linspace(0, 5, 10), repetition=range(10))):
    y = y_ + np.random.normal(scale=setting["s"], size=len(y_))
    testY = response(testX).flatten() + np.random.normal(scale=setting["s"], size=len(testX))  # test noise drawn per setting; flatten avoids (n,1)+(n,) broadcasting
y_hat = make_pipeline(PolynomialFeatures(setting["k"]), LinearRegression(fit_intercept=False)).fit(X, y).predict(testX)
df.append([setting["k"], setting["s"], np.mean((testY-y_hat)**2)])
df = pd.DataFrame.from_records(df, columns=["k", "sigma","mse"]).groupby(["k","sigma"]).mean().reset_index()
go.Figure(go.Heatmap(x=df.k, y=df.sigma, z=df.mse, colorscale="amp"),
          layout=go.Layout(title=r"$\text{(6) Average Test } MSE \text{ As Function of } \left(k,\sigma^2\right)$",
xaxis_title=r"$k$ - Fitted Polynomial Degree",
yaxis_title=r"$\sigma^2$ - Noise Levels")).show()
###Output
_____no_output_____ |
impl/jupyter_notebooks/cicids2017_attacks/3_friday/ddos/data_exploration.ipynb | ###Markdown
0. Load the data
###Code
# imports:
import pandas as pd
import matplotlib.pyplot as plt
import os
PREFIX_PATH = '/home/sramkova/diploma_thesis_data/cicids2017/attacks'
PREFIX_PATH = PREFIX_PATH + '/' + '/'.join(os.getcwd().split('/')[-2:]) + '/'
INPUT_CSV = PREFIX_PATH + 'query_output_processing.csv'
print(INPUT_CSV)
raw_data = pd.read_csv(INPUT_CSV)
# check that all columns are correctly loaded
raw_data.head()
pd.set_option('display.max_rows', None)
raw_data.dtypes
pd.set_option('display.max_rows', 10)
# select only original connection columns (no neighbourhood):
raw_data = raw_data[['originated_ip',
'responded_ip',
'connection.uid',
'connection.ts',
'connection.resp_pkts',
'connection.resp_ip_bytes',
'connection.orig_p',
'connection.duration',
'connection.resp_bytes',
'connection.orig_pkts',
'connection.proto',
'connection.service',
'connection.orig_bytes',
'connection.conn_state',
'connection.resp_p',
'connection.orig_ip_bytes',
'dns_count',
'ssh_count',
'http_count',
'ssl_count',
'files_count',
]]
raw_data.head()
###Output
_____no_output_____
###Markdown
1. Get to know the data
1.0 Basic statistics
###Code
# types and number of non-null values in columns:
raw_data.info()
# simple statistics of numerical attributes:
raw_data.describe()
# source: https://www.geeksforgeeks.org/python-pandas-dataframe-describe-method/
# remove null values to avoid errors
nonnull_data = raw_data.dropna()
# percentile list
perc =[.20, .40, .60, .80]
# list of dtypes to include
include =['object', 'float', 'int']
# calling describe method
nonnull_data.describe(percentiles = perc, include = include)
###Output
_____no_output_____
###Markdown
1.1 Show distinct counts
###Code
distinct_counter = raw_data.apply(lambda x: len(x.unique()))
distinct_counter
###Output
_____no_output_____
###Markdown
1.2 Visualize frequencies
Get Frequency Counts of Categorical Columns:
- connection.proto
- connection.service
- connection.conn_state
(https://cmdlinetips.com/2018/02/how-to-get-frequency-counts-of-a-column-in-pandas-dataframe/)
###Code
pd.reset_option('display.max_rows')
raw_data['connection.proto'].value_counts()
raw_data['connection.service'].value_counts(dropna=False) # display null values as well
raw_data['connection.conn_state'].value_counts()
###Output
_____no_output_____
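###Markdown
Although this section is titled *Visualize frequencies*, the cells above only print the counts. A minimal visualization sketch (an editorial addition; it relies only on the columns selected earlier and on matplotlib, already imported as `plt`):
###Code
# Hypothetical sketch: bar charts of the three categorical frequency counts.
fig, axes = plt.subplots(1, 3, figsize=(15, 4))
for ax, col in zip(axes, ['connection.proto', 'connection.service', 'connection.conn_state']):
    raw_data[col].fillna('missing').value_counts().plot(kind='bar', ax=ax, title=col)
plt.tight_layout()
plt.show()
###Output
_____no_output_____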
###Markdown
Get Frequency Counts of Non-categorical Columns:
- originated_ip
- responded_ip
- connection.duration
- connection.orig_pkts
- connection.orig_ip_bytes
- connection.resp_p
- connection.orig_bytes
- connection.resp_bytes
- connection.resp_ip_bytes
- connection.orig_p
- connection.resp_pkts
###Code
raw_data['originated_ip'].value_counts()
# or print relative frequencies of unique values:
# (sometimes percentage is a better criterion then the count)
raw_data['originated_ip'].value_counts(normalize=True)
raw_data['responded_ip'].value_counts()
raw_data['connection.duration'].value_counts()
pd.set_option('display.max_rows', None)
raw_data['connection.duration'].value_counts().nlargest(100)
# raw_data['connection.orig_pkts'].value_counts()
raw_data['connection.orig_pkts'].value_counts().nlargest(40)
# raw_data['connection.orig_ip_bytes'].value_counts()
# raw_data['connection.orig_ip_bytes'].value_counts().nlargest(40)
# raw_data['connection.resp_p'].value_counts()
pd.set_option('display.max_rows', 30)
raw_data['connection.resp_p'].value_counts()
pd.set_option('display.max_rows', None)
raw_data['connection.resp_p'].value_counts().nlargest(40)
# raw_data['connection.orig_bytes'].value_counts()
raw_data['connection.orig_bytes'].value_counts().nlargest(40)
# raw_data['connection.resp_bytes'].value_counts()
raw_data['connection.resp_bytes'].value_counts().nlargest(40)
# raw_data['connection.resp_ip_bytes'].value_counts()
raw_data['connection.resp_ip_bytes'].value_counts().nlargest(40)
pd.set_option('display.max_rows', 30)
raw_data['connection.orig_p'].value_counts()
# raw_data['connection.orig_p'].value_counts().nlargest(40)
# raw_data['connection.resp_pkts'].value_counts()
pd.set_option('display.max_rows', 30)
raw_data['connection.resp_pkts'].value_counts().nlargest(40)
###Output
_____no_output_____
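###Markdown
The long count tables above are hard to digest; a quick distribution plot (an editorial sketch - the bin count and the log-scaled count axis are assumptions chosen for heavy-tailed values) can summarise a numeric column at a glance:
###Code
# Hypothetical sketch: histogram of connection durations with a log-scaled count axis.
raw_data['connection.duration'].dropna().plot(kind='hist', bins=50, logy=True,
                                              title='connection.duration distribution')
plt.xlabel('duration [s]')
plt.show()
###Output
_____no_output_____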
###Markdown
1.3 Application data counts
###Code
raw_data['dns_count'].value_counts()
raw_data['ssh_count'].value_counts()
raw_data['http_count'].value_counts()
raw_data['ssl_count'].value_counts()
raw_data['files_count'].value_counts()
###Output
_____no_output_____ |
python-baseball-simulator/Simulating baseball in Python.ipynb | ###Markdown
Simulating baseball in Python
This notebook provides the methodology and code used in the blog post, [How much does batting order matter in Major League Baseball? A simulation approach](http://www.randalolson.com/2018/07/04/does-batting-order-matter-in-major-league-baseball-a-simulation-approach). Notebook by [Randal S. Olson](http://www.randalolson.com).
Please see the [repository README file](https://github.com/rhiever/Data-Analysis-and-Machine-Learning-Projects) for the licenses and usage terms for the instructional material and code in this notebook. In general, I have licensed this material so that it is as widely useable and shareable as possible.
Required Python libraries
If you don't have Python on your computer, you can use the [Anaconda Python distribution](https://www.anaconda.com/download/) to install most of the Python packages you need. Anaconda provides a simple double-click installer for your convenience.
This code uses base Python libraries except for the `seaborn`, `tqdm`, and `joblib` packages. You can install these packages using `pip` by typing the following command into your command line:
> pip install seaborn tqdm joblib
Using the baseball simulator
Below is the Python code used to simulate baseball in my blog post, run the simulations, and generate the data visualizations shown in my blog post. When I get more time, I plan to clean up and comment this code better than it currently is.
For the data visualizations, you will need to place [this tableau10.mplstyle](https://gist.github.com/rhiever/d0a7332fe0beebfdc3d5) in your `~/.matplotlib/stylelib/` directory for the visualizations to show up as they do in my blog post. Otherwise, you will have to use [other matplotlib styles](https://matplotlib.org/users/style_sheets.html).
If you have any comments or questions about this project, I prefer that you [file an issue](https://github.com/rhiever/Data-Analysis-and-Machine-Learning-Projects/issues/new) on this GitHub repository. If you don't feel comfortable with GitHub, feel free to [contact me by email](http://www.randalolson.com/contact/).
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sb
import numpy as np
from tqdm import tqdm_notebook as tqdm
from joblib import Parallel, delayed
import time
# ParallelExecutor code taken and modified from https://gist.github.com/MInner/12f9cf961059aed1a60e72c5531a697f
def text_progessbar(seq, total=None):
step = 1
tick = time.time()
while True:
time_diff = time.time() - tick
avg_speed = time_diff / step
total_str = 'of {}'.format(total if total else '')
        print('step', step, '{}'.format(round(time_diff, 2)), 'avg: {} sec/iter'.format(round(avg_speed, 2)), total_str)  # avg_speed is seconds per iteration
step += 1
yield next(seq)
all_bar_funcs = {
'tqdm': lambda args: lambda x: tqdm(x, **args),
'txt': lambda args: lambda x: text_progessbar(x, **args),
'False': lambda args: iter,
'None': lambda args: iter,
}
def ParallelExecutor(use_bar='tqdm', **joblib_args):
def aprun(bar=use_bar, **tq_args):
def tmp(op_iter):
if str(bar) in all_bar_funcs.keys():
bar_func = all_bar_funcs[str(bar)](tq_args)
else:
raise ValueError('Value {} not supported as bar type'.format(bar))
return Parallel(**joblib_args)(bar_func(op_iter))
return tmp
return aprun
def simulate_game(batters, return_stats=False):
'''Simulates the batting side of a Major League Baseball game
This is a simplified simulation of a baseball game, where each batter performs randomly
according to their corresponding batting average. This simulation incorporates
different types of hits, such as singles, doubles, triples, and home runs, and uses
2017-2018 Major League averages for the probabilities of those hit types occurring.
This simulation leaves out other aspects of the game, such as individual-level hit type
tendencies, double plays, stolen bases, errors, and so forth.
Parameters
----------
batters: list
A list of batting averages for 9 batters in the desired batting order
Example input: [0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25]
Returns
----------
runs_scored: int
The number of runs scored by the batters in one simulated game
batting_stats: dict
Dictionary containing batting statistics for each batter
'''
runs_scored = 0
batter_num = 0
# NOTE: Earned Bases is the total number of bases that the batter advanced themselves
# AND their teammates through batting
batting_stats = {}
for batter in range(len(batters)):
batting_stats[batter] = {
'At Bat': 0, 'Single': 0, 'Double': 0, 'Triple': 0, 'Home Run': 0, 'Out': 0,
'RBI': 0, 'Earned Bases': 0, 'Players On Base': 0, 'Bases Loaded': 0, 'Grand Slam': 0
}
# Assume the game lasts for only 9 innings (no extra innings)
for inning in range(9):
bases = [
False, # First base, index 0
False, # Second base, index 1
False # Third base, index 2
]
batters_out = 0
while batters_out < 3:
batting_stats[batter_num]['At Bat'] += 1
if bases[2] and bases[1] and bases[0]:
batting_stats[batter_num]['Bases Loaded'] += 1
if bases[2]:
batting_stats[batter_num]['Players On Base'] += 1
if bases[1]:
batting_stats[batter_num]['Players On Base'] += 1
if bases[0]:
batting_stats[batter_num]['Players On Base'] += 1
if np.random.random() < batters[batter_num]:
# Batting estimates from MLB.com statistics in 2017/2018 seasons:
# Single base hit: 64% of hits
# Double base hit: 20% of hits
# Triple base hit: 2% of hits
# Home run: 14% of hits
hit_type = np.random.choice(['Single', 'Double', 'Triple', 'Home Run'], p=[.64, .2, .02, .14])
if hit_type == 'Single':
batting_stats[batter_num]['Single'] += 1
# All base runners advance 1 base
if bases[2]:
runs_scored += 1
batting_stats[batter_num]['RBI'] += 1
batting_stats[batter_num]['Earned Bases'] += 1
bases[2] = False
if bases[1]:
bases[2] = True
bases[1] = False
batting_stats[batter_num]['Earned Bases'] += 1
if bases[0]:
bases[1] = True
batting_stats[batter_num]['Earned Bases'] += 1
bases[0] = True
batting_stats[batter_num]['Earned Bases'] += 1
elif hit_type == 'Double':
batting_stats[batter_num]['Double'] += 1
# All base runners advance 2 bases
if bases[2]:
runs_scored += 1
batting_stats[batter_num]['RBI'] += 1
batting_stats[batter_num]['Earned Bases'] += 1
bases[2] = False
if bases[1]:
runs_scored += 1
batting_stats[batter_num]['RBI'] += 1
batting_stats[batter_num]['Earned Bases'] += 2
bases[1] = False
if bases[0]:
bases[2] = True
batting_stats[batter_num]['Earned Bases'] += 2
bases[0] = False
bases[1] = True
batting_stats[batter_num]['Earned Bases'] += 2
elif hit_type == 'Triple':
batting_stats[batter_num]['Triple'] += 1
# All base runners advance 3 bases
if bases[2]:
runs_scored += 1
batting_stats[batter_num]['RBI'] += 1
batting_stats[batter_num]['Earned Bases'] += 1
bases[2] = False
if bases[1]:
runs_scored += 1
batting_stats[batter_num]['RBI'] += 1
batting_stats[batter_num]['Earned Bases'] += 2
bases[1] = False
if bases[0]:
runs_scored += 1
batting_stats[batter_num]['RBI'] += 1
batting_stats[batter_num]['Earned Bases'] += 3
bases[0] = False
bases[2] = True
batting_stats[batter_num]['Earned Bases'] += 3
elif hit_type == 'Home Run':
batting_stats[batter_num]['Home Run'] += 1
# Check if a Grand Slam was scored
if bases[0] and bases[1] and bases[2]:
batting_stats[batter_num]['Grand Slam'] += 1
# All base runners and the hitter score a run
if bases[2]:
runs_scored += 1
batting_stats[batter_num]['RBI'] += 1
batting_stats[batter_num]['Earned Bases'] += 1
bases[2] = False
if bases[1]:
runs_scored += 1
batting_stats[batter_num]['RBI'] += 1
batting_stats[batter_num]['Earned Bases'] += 2
bases[1] = False
if bases[0]:
runs_scored += 1
batting_stats[batter_num]['RBI'] += 1
batting_stats[batter_num]['Earned Bases'] += 3
bases[0] = False
runs_scored += 1
batting_stats[batter_num]['RBI'] += 1
batting_stats[batter_num]['Earned Bases'] += 4
else:
# Batter struck out, flew out, or grounded out
batters_out += 1
batting_stats[batter_num]['Out'] += 1
batter_num = (batter_num + 1) % len(batters)
return runs_scored, batting_stats
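# A quick usage sketch (editorial addition): simulate one game for a league-average
# line-up and inspect the box score. The nine 0.25 batting averages are illustrative assumptions.
example_runs, example_stats = simulate_game([0.25] * 9)
print('Runs scored in one simulated game:', example_runs)
print('Lead-off batter box score:', example_stats[0])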
designated_hitter_spot_scores = {}
num_simulated_games = 1000000
for team_avg in tqdm([0.1, 0.15, 0.2, 0.25, 0.3, 0.35]):
for designated_hitter_spot in range(9):
batters = [team_avg] * 9
batters[designated_hitter_spot] = 0.35
aprun = ParallelExecutor(n_jobs=-1, use_bar=False)
designated_hitter_spot_scores[(team_avg, designated_hitter_spot)] = [runs_scored for runs_scored, _ in aprun(total=num_simulated_games)(delayed(simulate_game)(batters) for _ in range(num_simulated_games))]
dh_batting_avgs = []
for team_avg in reversed([0.1, 0.15, 0.2, 0.25, 0.3, 0.35]):
dh_spot_avgs = []
for designated_hitter_spot in range(9):
dh_spot_avgs.append(np.mean(designated_hitter_spot_scores[(team_avg, designated_hitter_spot)]))
dh_spot_avgs = np.array(dh_spot_avgs) / np.mean(dh_spot_avgs)
dh_batting_avgs.append(dh_spot_avgs)
plt.figure(figsize=(9, 9))
sb.heatmap(dh_batting_avgs, cmap='PuOr', center=1., annot=True, fmt='.3f', cbar=False)
plt.xticks([x + 0.5 for x in range(9)], [str(x) for x in range(1, 10)], fontsize=12)
plt.xlabel('DH Batting Position (BA=0.35)', fontsize=14)
plt.yticks([y + 0.5 for y in range(6)], reversed([0.1, 0.15, 0.2, 0.25, 0.3, 0.35]), fontsize=12, va='center')
plt.ylabel('Team Batting Average (BA)', fontsize=14)
plt.title('Batting order matters when one player is much\nbetter than their teammates\n\n', fontsize=20)
plt.text(4.5, -0.1, 'Measured: Relative runs scored based on the DH batting position\n>1 means more runs scored, <1 means fewer runs scored', fontsize=12, ha='center')
plt.text(-0.7, 6.8, 'Data source: League averages & custom baseball simulations\nAuthor: Randal S. Olson (randalolson.com / @randal_olson)', fontsize=10, ha='left')
plt.savefig('mlb-batting-order-dh.png', bbox_inches='tight')
;
from scipy.stats import ranksums
from itertools import product
for team_avg1, designated_hitter_spot1, team_avg2, designated_hitter_spot2 in product([0.1, 0.15, 0.2, 0.25, 0.3, 0.35], range(9), [0.1, 0.15, 0.2, 0.25, 0.3, 0.35], range(9)):
if team_avg1 != team_avg2:
continue
if designated_hitter_spot1 > designated_hitter_spot2:
continue
if team_avg1 == team_avg2 and designated_hitter_spot1 == designated_hitter_spot2:
continue
statistic, pval = ranksums(designated_hitter_spot_scores[(team_avg1, designated_hitter_spot1)], designated_hitter_spot_scores[(team_avg2, designated_hitter_spot2)])
if pval < 1e-5:
print('sig diff: team avg={}, dh pos={} vs. dh pos={} [p={}]'.format(team_avg1, designated_hitter_spot1 + 1, designated_hitter_spot2 + 1, pval))
pitcher_spot_scores = {}
num_simulated_games = 1000000
for team_avg in tqdm([0.1, 0.15, 0.2, 0.25, 0.3, 0.35]):
for pitcher_spot in range(9):
batters = [team_avg] * 9
batters[pitcher_spot] = 0.1
aprun = ParallelExecutor(n_jobs=-1, use_bar=False)
pitcher_spot_scores[(team_avg, pitcher_spot)] = [runs_scored for runs_scored, _ in aprun(total=num_simulated_games)(delayed(simulate_game)(batters) for _ in range(num_simulated_games))]
p_batting_avgs = []
for team_avg in reversed([0.1, 0.15, 0.2, 0.25, 0.3, 0.35]):
p_spot_avgs = []
for pitcher_spot in range(9):
p_spot_avgs.append(np.mean(pitcher_spot_scores[(team_avg, pitcher_spot)]))
p_spot_avgs = np.array(p_spot_avgs) / np.mean(p_spot_avgs)
p_batting_avgs.append(p_spot_avgs)
plt.figure(figsize=(9, 9))
sb.heatmap(p_batting_avgs, cmap='PuOr', center=1., annot=True, fmt='.3f', cbar=False)
plt.xticks([x + 0.5 for x in range(9)], [str(x) for x in range(1, 10)], fontsize=12)
plt.xlabel('Pitcher Batting Position (BA=0.1)', fontsize=14)
plt.yticks([y + 0.5 for y in range(6)], reversed([0.1, 0.15, 0.2, 0.25, 0.3, 0.35]), fontsize=12, va='center')
plt.ylabel('Team Batting Average (BA)', fontsize=14)
plt.title('Batting order matters when one player is much\nworse than their teammates\n\n', fontsize=20)
plt.text(4.5, -0.1, 'Measured: Relative runs scored based on the Pitcher batting position\n>1 means more runs scored, <1 means fewer runs scored', fontsize=12, ha='center')
plt.text(-0.7, 6.8, 'Data source: League averages & custom baseball simulations\nAuthor: Randal S. Olson (randalolson.com / @randal_olson)', fontsize=10, ha='left')
plt.savefig('mlb-batting-order-pitcher.png', bbox_inches='tight')
;
from scipy.stats import ranksums
from itertools import product
for team_avg1, pitcher_spot1, team_avg2, pitcher_spot2 in product([0.1, 0.15, 0.2, 0.25, 0.3, 0.35], range(9), [0.1, 0.15, 0.2, 0.25, 0.3, 0.35], range(9)):
if team_avg1 != team_avg2:
continue
if pitcher_spot1 > pitcher_spot2:
continue
if team_avg1 == team_avg2 and pitcher_spot1 == pitcher_spot2:
continue
statistic, pval = ranksums(pitcher_spot_scores[(team_avg1, pitcher_spot1)], pitcher_spot_scores[(team_avg2, pitcher_spot2)])
if pval < 1e-5:
print('sig diff: team avg={}, pitcher pos={} vs. pitcher pos={} [p={}]'.format(team_avg1, pitcher_spot1 + 1, pitcher_spot2 + 1, pval))
hitter_spot_scores = {}
num_simulated_games = 1000000
for hitter_ba in tqdm([0.1, 0.15, 0.2, 0.25, 0.3, 0.35]):
for hitter_spot in range(9):
batters = [0.25] * 9
batters[hitter_spot] = hitter_ba
aprun = ParallelExecutor(n_jobs=-1, use_bar=False)
hitter_spot_scores[(hitter_ba, hitter_spot)] = [runs_scored for runs_scored, _ in aprun(total=num_simulated_games)(delayed(simulate_game)(batters) for _ in range(num_simulated_games))]
hitter_batting_avgs = []
for hitter_ba in reversed([0.1, 0.15, 0.2, 0.25, 0.3, 0.35]):
hitter_spot_avgs = []
for hitter_spot in range(9):
hitter_spot_avgs.append(np.mean(hitter_spot_scores[(hitter_ba, hitter_spot)]))
hitter_spot_avgs = np.array(hitter_spot_avgs) / np.mean(hitter_spot_avgs)
hitter_batting_avgs.append(hitter_spot_avgs)
plt.figure(figsize=(9, 9))
sb.heatmap(hitter_batting_avgs, cmap='PuOr', center=1., annot=True, fmt='.3f', cbar=False)
plt.xticks([x + 0.5 for x in range(9)], [str(x) for x in range(1, 10)], fontsize=12)
plt.xlabel('Hitter Batting Position (Team BA=0.25)', fontsize=14)
plt.yticks([y + 0.5 for y in range(6)], reversed([0.1, 0.15, 0.2, 0.25, 0.3, 0.35]), fontsize=12, va='center')
plt.ylabel('Hitter Batting Average (BA)', fontsize=14)
plt.title('Exceptional batters should lead the batting line-up,\npoor batters should conclude the line-up\n\n', fontsize=20)
plt.text(4.5, -0.1, 'Measured: Relative runs scored based on the Hitter batting position & BA\n>1 means more runs scored, <1 means fewer runs scored', fontsize=12, ha='center')
plt.text(-0.7, 6.8, 'Data source: League averages & custom baseball simulations\nAuthor: Randal S. Olson (randalolson.com / @randal_olson)', fontsize=10, ha='left')
plt.savefig('mlb-batting-order-varying-hitter.png', bbox_inches='tight')
;
from scipy.stats import ranksums
from itertools import product
for hitter_avg1, hitter_spot1, hitter_avg2, hitter_spot2 in product([0.1, 0.15, 0.2, 0.25, 0.3, 0.35], range(9), [0.1, 0.15, 0.2, 0.25, 0.3, 0.35], range(9)):
if hitter_avg1 != hitter_avg2:
continue
if hitter_spot1 > hitter_spot2:
continue
if hitter_avg1 == hitter_avg2 and hitter_spot1 == hitter_spot2:
continue
statistic, pval = ranksums(hitter_spot_scores[(hitter_avg1, hitter_spot1)], hitter_spot_scores[(hitter_avg2, hitter_spot2)])
if pval < 1e-5:
print('sig diff: batter avg={}, batter pos={} vs. batter pos={} [p={}]'.format(hitter_avg1, hitter_spot1 + 1, hitter_spot2 + 1, pval))
batters = [0.25] * 9
num_simulated_games = 1000000
aprun = ParallelExecutor(n_jobs=-1, use_bar=False)
average_team_stats = [game_stats for _, game_stats in aprun(total=num_simulated_games)(delayed(simulate_game)(batters) for _ in range(num_simulated_games))]
batting_stat = 'At Bat'
hitter_spot_avgs = []
for hitter_spot in range(9):
hitter_spot_avgs.append(np.mean([game_stats[hitter_spot][batting_stat] for game_stats in average_team_stats]))
with plt.style.context('tableau10'):
plt.figure()
plt.bar(range(len(hitter_spot_avgs)), hitter_spot_avgs, color='#9467BD')
batting_stat += 's per Game'
plt.ylabel(batting_stat)
plt.xticks(range(9), [str(x) for x in range(1, 10)])
plt.xlabel('Batting Position')
plt.title('Earlier batters have more At Bats on average')
plt.text(-1.3, -0.75, 'Data source: League averages & custom baseball simulations\nAuthor: Randal S. Olson (randalolson.com / @randal_olson)', fontsize=10, ha='left')
plt.savefig('mlb-batting-order-stats-{}.png'.format(batting_stat.replace(' ', '-')), bbox_inches='tight')
;
batting_stat = 'At Bat'
hitter_spot_avgs = []
for hitter_spot in range(9):
hitter_spot_avgs.append(np.mean([game_stats[hitter_spot][batting_stat] for game_stats in average_team_stats]))
hitter_spot_avgs
batting_stat = 'Players On Base'
hitter_spot_avgs = []
for hitter_spot in range(9):
hitter_spot_avgs.append(np.mean([game_stats[hitter_spot][batting_stat] for game_stats in average_team_stats]))
with plt.style.context('tableau10'):
plt.figure()
plt.bar(range(len(hitter_spot_avgs)), hitter_spot_avgs, color='#9467BD')
batting_stat += ' per Game'
plt.ylabel(batting_stat)
plt.xticks(range(9), [str(x) for x in range(1, 10)])
plt.xlabel('Batting Position')
plt.title('Middle batters tend to have more players on base when batting')
plt.text(-1.5, -0.4, 'Data source: League averages & custom baseball simulations\nAuthor: Randal S. Olson (randalolson.com / @randal_olson)', fontsize=10, ha='left')
plt.savefig('mlb-batting-order-stats-{}.png'.format(batting_stat.replace(' ', '-')), bbox_inches='tight')
;
batting_stat = 'RBI'
hitter_spot_avgs = []
for hitter_spot in range(9):
hitter_spot_avgs.append(np.mean([game_stats[hitter_spot][batting_stat] for game_stats in average_team_stats]))
with plt.style.context('tableau10'):
plt.figure()
plt.bar(range(len(hitter_spot_avgs)), hitter_spot_avgs, color='#9467BD')
batting_stat += ' per Game'
plt.ylabel(batting_stat)
plt.xticks(range(9), [str(x) for x in range(1, 10)])
plt.xlabel('Batting Position')
plt.title('Middle batters tend to contribute more RBI')
plt.text(-1.6, -0.06, 'Data source: League averages & custom baseball simulations\nAuthor: Randal S. Olson (randalolson.com / @randal_olson)', fontsize=10, ha='left')
plt.savefig('mlb-batting-order-stats-{}.png'.format(batting_stat.replace(' ', '-')), bbox_inches='tight')
;
batting_stat = 'RBI'
hitter_spot_avgs = []
for hitter_spot in range(9):
hitter_spot_avgs.append(np.mean([game_stats[hitter_spot][batting_stat] for game_stats in average_team_stats]))
hitter_spot_avgs
batting_stat = 'Bases Loaded'
hitter_spot_avgs = []
for hitter_spot in range(9):
hitter_spot_avgs.append(np.mean([game_stats[hitter_spot][batting_stat] for game_stats in average_team_stats]))
with plt.style.context('tableau10'):
plt.figure()
plt.bar(range(len(hitter_spot_avgs)), hitter_spot_avgs, color='#9467BD')
batting_stat += ' per Game'
plt.ylabel(batting_stat)
plt.xticks(range(9), [str(x) for x in range(1, 10)])
plt.xlabel('Batting Position')
plt.title('The 6th batter is most likely to face a Bases Loaded situation')
plt.text(-1.6, -0.013, 'Data source: League averages & custom baseball simulations\nAuthor: Randal S. Olson (randalolson.com / @randal_olson)', fontsize=10, ha='left')
plt.savefig('mlb-batting-order-stats-{}.png'.format(batting_stat.replace(' ', '-')), bbox_inches='tight')
;
batting_stat = 'Grand Slam'
hitter_spot_avgs = []
for hitter_spot in range(9):
hitter_spot_avgs.append(np.mean([game_stats[hitter_spot][batting_stat] for game_stats in average_team_stats]))
with plt.style.context('tableau10'):
plt.figure()
plt.bar(range(len(hitter_spot_avgs)), hitter_spot_avgs, color='#9467BD')
batting_stat += 's per Game'
plt.ylabel(batting_stat)
plt.xticks(range(9), [str(x) for x in range(1, 10)])
plt.xlabel('Batting Position')
plt.title('The 6th batter is most likely to hit a Grand Slam')
plt.text(-1.85, -0.0005, 'Data source: League averages & custom baseball simulations\nAuthor: Randal S. Olson (randalolson.com / @randal_olson)', fontsize=10, ha='left')
plt.savefig('mlb-batting-order-stats-{}.png'.format(batting_stat.replace(' ', '-')), bbox_inches='tight')
;
batting_stat = 'Grand Slam'
hitter_spot_avgs = []
for hitter_spot in range(9):
hitter_spot_avgs.append(np.mean([game_stats[hitter_spot][batting_stat] for game_stats in average_team_stats]))
hitter_spot_avgs
###Output
_____no_output_____ |
MTGNN+model5 (1).ipynb | ###Markdown
vacc.append(val_acc)
vrae.append(val_rae)
vcorr.append(val_corr)
acc.append(test_acc)
rae.append(test_rae)
corr.append(test_corr)
print('\n\n')
print('10 runs average')
print('\n\n')
print("valid\trse\trae\tcorr")
print("mean\t{:5.4f}\t{:5.4f}\t{:5.4f}".format(np.mean(vacc), np.mean(vrae), np.mean(vcorr)))
print("std\t{:5.4f}\t{:5.4f}\t{:5.4f}".format(np.std(vacc), np.std(vrae), np.std(vcorr)))
print('\n\n')
print("test\trse\trae\tcorr")
print("mean\t{:5.4f}\t{:5.4f}\t{:5.4f}".format(np.mean(acc), np.mean(rae), np.mean(corr)))
print("std\t{:5.4f}\t{:5.4f}\t{:5.4f}".format(np.std(acc), np.std(rae), np.std(corr)))
###Code
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(device)
#parser = argparse.ArgumentParser(description='PyTorch Time series forecasting'
data='solar_AL.txt'
data=r'C:\0.ml\MTGNN\data\solar_AL.txt'  # raw string: in a plain literal '\0' is a NUL byte, which would corrupt the path
log_interval=2000
metavar='N'#,help='report interval'
save='model/model.pt'#,help='path to save the final model'
optim2='adam'
L1Loss=True
normalize=2
#device='cuda:1'
device='cuda:0'
gcn_true=True
buildA_true=True
gcn_depth=2
num_nodes=137
dropout=0.3#,help='dropout rate'
subgraph_size=20#,help='k'
node_dim=40#,help='dim of nodes'
dilation_exponential=2#,help='dilation exponential'
conv_channels=16#,help='convolution channels'
residual_channels=16#,help='residual channels'
skip_channels=32#,help='skip channels'
end_channels=64#,help='end channels'
in_dim=1#,help='inputs dimension'
seq_in_len=24*7#,help='input sequence length'
seq_out_len=1#,help='output sequence length'
horizon=3
layers=5#,help='number of layers'
batch_size=32#,help='batch size'
lr=0.0001#,help='learning rate'
weight_decay=0.00001#,help='weight decay rate'
clip=5#,help='clip'
propalpha=0.05#,help='prop alpha'
tanhalpha=3#,help='tanh alpha'
epochs=1
num_split=1#,help='number of splits for graphs'
step_size=100#,help='step_size'
fin = open('C:\\0.ml\\MTGNN\\data\\solar_AL.txt')
rawdat = np.loadtxt(fin, delimiter=',')
#self.rawdat=load(file_name)
dat = np.zeros(rawdat.shape)
n, m = dat.shape
print('iter')
print(rawdat.shape)
df = pd.DataFrame(dat)
df
ttt=Sampled_inputs[0]
for i in range(1,Sampled_inputs.shape[0]):
    ttt=np.concatenate( (ttt,Sampled_inputs[i] ), axis=0)  # was Sampled_inputs[1], which re-appended the same sample on every iteration
#print(ttt.shape)
print(type(rawdat))
print(type(dat))
print(type(Sampled_inputs))
print(type(df))
df
rawdat2 = rawdat  # NOTE: originally blanked to "", which breaks the .shape calls below; reusing the solar matrix loaded above is an editorial assumption
print(rawdat2.shape)
temp=rawdat2[0]
print(temp.shape)
df = pd.DataFrame(temp)
print(df.shape)
df
#util.py
import pickle
import numpy as np
import os
import scipy.sparse as sp
import torch
from scipy.sparse import linalg
from torch.autograd import Variable
def load(file_name):
with open(file_name, 'rb') as fp:
obj = pickle.load(fp)
return obj
def normal_std(x):
return x.std() * np.sqrt((len(x) - 1.)/(len(x)))
class DataLoaderS(object):
    # train and valid are the fractions of the data used for the training and validation sets; test = 1 - train - valid
def __init__(self, file_name, train, valid, device, horizon, window, normalize=2):
print('__init__111')
self.P = window
self.h = horizon
self.rawdat=rawdat2
print('self.rawdat: ',self.rawdat.shape)
#fin = open(file_name)
#self.rawdat = np.loadtxt(fin, delimiter=',')
#self.rawdat=load(file_name)
self.dat = np.zeros(self.rawdat.shape)
print('self.dat.shape: ',self.dat.shape)
self.n, self.m = self.dat.shape
self.normalize = 2
self.scale = np.ones(self.m)
self._normalized(normalize)
self._split(int(train * self.n), int((train + valid) * self.n), self.n)
self.scale = torch.from_numpy(self.scale).float()
tmp = self.test[1] * self.scale.expand(self.test[1].size(0), self.m)
self.scale = self.scale.to(device)
self.scale = Variable(self.scale)
self.rse = normal_std(tmp)
self.rae = torch.mean(torch.abs(tmp - torch.mean(tmp)))
self.device = device
def _normalized(self, normalize):
print('_normalized')
# normalized by the maximum value of entire matrix.
if (normalize == 0):
self.dat = self.rawdat
if (normalize == 1):
self.dat = self.rawdat / np.max(self.rawdat)
# normlized by the maximum value of each row(sensor).
if (normalize == 2):
for i in range(self.m):
self.scale[i] = np.max(np.abs(self.rawdat[:, i]))
self.dat[:, i] = self.rawdat[:, i] / np.max(np.abs(self.rawdat[:, i]))
def _split(self, train, valid, test):
print('_split')
#print("type(train): ",type(train))
        train_start = self.P + self.h - 1  # first index with a full look-back window plus horizon
valid_stop=self.n
print("tarin_start: ",tarin_start)
print("train: ",train)
print("valid: ",valid)
print("valid_stop: ",valid_stop)
        train_set = range(train_start, train)
valid_set = range(train, valid)
test_set = range(valid, valid_stop)
print("train_set: ",train_set)
print("valid_set: ",valid_set)
print("test_set: ",test_set)
self.train = self._batchify(train_set, self.h)
self.valid = self._batchify(valid_set, self.h)
self.test = self._batchify(test_set, self.h)
def _batchify(self, idx_set, horizon):
print('_batchify')
print("idx_set: ", idx_set)
#print("type(idx_set): ", type(idx_set))
n = len(idx_set)
X = torch.zeros((n, self.P, self.m))
Y = torch.zeros((n, self.m))
print("len(idx_set):",len(idx_set))
print("X.shape:",X.shape)
print("Y.shape:",Y.shape)
for i in range(n):
end = idx_set[i] - self.h + 1
start = end - self.P
#print("***********")
X[i, :, :] = torch.from_numpy(self.dat[start:end, :])
Y[i, :] = torch.from_numpy(self.dat[idx_set[i], :])
return [X, Y]
def get_batches(self, inputs, targets, batch_size, shuffle=True):
print('get_batches')
length = len(inputs)
if shuffle:
index = torch.randperm(length)
else:
index = torch.LongTensor(range(length))
start_idx = 0
while (start_idx < length):
end_idx = min(length, start_idx + batch_size)
excerpt = index[start_idx:end_idx]
X = inputs[excerpt]
Y = targets[excerpt]
X = X.to(self.device)
Y = Y.to(self.device)
yield Variable(X), Variable(Y)
start_idx += batch_size
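# --- Hypothetical usage sketch for DataLoaderS (not part of the original util.py) ---
# Assumes rawdat2 has already been loaded; splits the series 60/20/20 with a
# 24-step window and a horizon of 3, then iterates over training mini-batches:
#
#   loader = DataLoaderS(None, train=0.6, valid=0.2, device='cpu', horizon=3, window=24)
#   for X, Y in loader.get_batches(loader.train[0], loader.train[1], batch_size=32):
#       pass  # X: (batch, window, num_series), Y: (batch, num_series)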
class DataLoaderM(object):
def __init__(self, xs, ys, batch_size, pad_with_last_sample=True):
"""
:param xs:
:param ys:
:param batch_size:
        :param pad_with_last_sample: pad with the last sample to make the number of samples divisible by batch_size.
"""
self.batch_size = batch_size
print( 'DataLoaderM __init__')
self.current_ind = 0
if pad_with_last_sample:
num_padding = (batch_size - (len(xs) % batch_size)) % batch_size
x_padding = np.repeat(xs[-1:], num_padding, axis=0)
y_padding = np.repeat(ys[-1:], num_padding, axis=0)
xs = np.concatenate([xs, x_padding], axis=0)
ys = np.concatenate([ys, y_padding], axis=0)
self.size = len(xs)
self.num_batch = int(self.size // self.batch_size)
self.xs = xs
self.ys = ys
def shuffle(self):
print('shuffle')
permutation = np.random.permutation(self.size)
xs, ys = self.xs[permutation], self.ys[permutation]
self.xs = xs
self.ys = ys
def get_iterator(self):
print('get_iterator')
self.current_ind = 0
def _wrapper():
while self.current_ind < self.num_batch:
start_ind = self.batch_size * self.current_ind
end_ind = min(self.size, self.batch_size * (self.current_ind + 1))
x_i = self.xs[start_ind: end_ind, ...]
y_i = self.ys[start_ind: end_ind, ...]
yield (x_i, y_i)
self.current_ind += 1
return _wrapper()
class StandardScaler():
"""
    Standardize the input
"""
def __init__(self, mean, std):
self.mean = mean
self.std = std
def transform(self, data):
return (data - self.mean) / self.std
def inverse_transform(self, data):
return (data * self.std) + self.mean
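# Round-trip sanity check (hypothetical): inverse_transform undoes transform, e.g.
#   s = StandardScaler(mean=5.0, std=2.0)
#   s.inverse_transform(s.transform(9.0))  # -> 9.0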
def sym_adj(adj):
"""Symmetrically normalize adjacency matrix."""
adj = sp.coo_matrix(adj)
rowsum = np.array(adj.sum(1))
d_inv_sqrt = np.power(rowsum, -0.5).flatten()
d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.
d_mat_inv_sqrt = sp.diags(d_inv_sqrt)
return adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).astype(np.float32).todense()
def asym_adj(adj):
"""Asymmetrically normalize adjacency matrix."""
adj = sp.coo_matrix(adj)
rowsum = np.array(adj.sum(1)).flatten()
d_inv = np.power(rowsum, -1).flatten()
d_inv[np.isinf(d_inv)] = 0.
d_mat= sp.diags(d_inv)
return d_mat.dot(adj).astype(np.float32).todense()
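# Toy example (hypothetical): for adj = [[0, 1], [1, 0]] both row sums are 1,
# so sym_adj and asym_adj return the matrix unchanged; with unequal degrees,
# sym_adj scales entry (i, j) by 1/sqrt(d_i * d_j) and asym_adj by 1/d_i.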
def calculate_normalized_laplacian(adj):
"""
# L = D^-1/2 (D-A) D^-1/2 = I - D^-1/2 A D^-1/2
# D = diag(A 1)
:param adj:
:return:
"""
adj = sp.coo_matrix(adj)
d = np.array(adj.sum(1))
d_inv_sqrt = np.power(d, -0.5).flatten()
d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.
d_mat_inv_sqrt = sp.diags(d_inv_sqrt)
normalized_laplacian = sp.eye(adj.shape[0]) - adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).tocoo()
return normalized_laplacian
def calculate_scaled_laplacian(adj_mx, lambda_max=2, undirected=True):
if undirected:
adj_mx = np.maximum.reduce([adj_mx, adj_mx.T])
L = calculate_normalized_laplacian(adj_mx)
if lambda_max is None:
lambda_max, _ = linalg.eigsh(L, 1, which='LM')
lambda_max = lambda_max[0]
L = sp.csr_matrix(L)
M, _ = L.shape
I = sp.identity(M, format='csr', dtype=L.dtype)
L = (2 / lambda_max * L) - I
return L.astype(np.float32).todense()
def load_pickle(pickle_file):
try:
with open(pickle_file, 'rb') as f:
pickle_data = pickle.load(f)
except UnicodeDecodeError as e:
with open(pickle_file, 'rb') as f:
pickle_data = pickle.load(f, encoding='latin1')
except Exception as e:
print('Unable to load data ', pickle_file, ':', e)
raise
return pickle_data
def load_adj(pkl_filename):
sensor_ids, sensor_id_to_ind, adj = load_pickle(pkl_filename)
return adj
def load_dataset(dataset_dir, batch_size, valid_batch_size= None, test_batch_size=None):
data = {}
for category in ['train', 'val', 'test']:
cat_data = np.load(os.path.join(dataset_dir, category + '.npz'))
data['x_' + category] = cat_data['x']
data['y_' + category] = cat_data['y']
scaler = StandardScaler(mean=data['x_train'][..., 0].mean(), std=data['x_train'][..., 0].std())
# Data format
for category in ['train', 'val', 'test']:
data['x_' + category][..., 0] = scaler.transform(data['x_' + category][..., 0])
data['train_loader'] = DataLoaderM(data['x_train'], data['y_train'], batch_size)
data['val_loader'] = DataLoaderM(data['x_val'], data['y_val'], valid_batch_size)
data['test_loader'] = DataLoaderM(data['x_test'], data['y_test'], test_batch_size)
data['scaler'] = scaler
return data
def masked_mse(preds, labels, null_val=np.nan):
if np.isnan(null_val):
mask = ~torch.isnan(labels)
else:
mask = (labels!=null_val)
mask = mask.float()
mask /= torch.mean((mask))
mask = torch.where(torch.isnan(mask), torch.zeros_like(mask), mask)
loss = (preds-labels)**2
loss = loss * mask
loss = torch.where(torch.isnan(loss), torch.zeros_like(loss), loss)
return torch.mean(loss)
def masked_rmse(preds, labels, null_val=np.nan):
return torch.sqrt(masked_mse(preds=preds, labels=labels, null_val=null_val))
def masked_mae(preds, labels, null_val=np.nan):
if np.isnan(null_val):
mask = ~torch.isnan(labels)
else:
mask = (labels!=null_val)
mask = mask.float()
mask /= torch.mean((mask))
mask = torch.where(torch.isnan(mask), torch.zeros_like(mask), mask)
loss = torch.abs(preds-labels)
loss = loss * mask
loss = torch.where(torch.isnan(loss), torch.zeros_like(loss), loss)
return torch.mean(loss)
def masked_mape(preds, labels, null_val=np.nan):
if np.isnan(null_val):
mask = ~torch.isnan(labels)
else:
mask = (labels!=null_val)
mask = mask.float()
mask /= torch.mean((mask))
mask = torch.where(torch.isnan(mask), torch.zeros_like(mask), mask)
loss = torch.abs(preds-labels)/labels
loss = loss * mask
loss = torch.where(torch.isnan(loss), torch.zeros_like(loss), loss)
return torch.mean(loss)
def metric(pred, real):
mae = masked_mae(pred,real,0.0).item()
mape = masked_mape(pred,real,0.0).item()
rmse = masked_rmse(pred,real,0.0).item()
return mae,mape,rmse
def load_node_feature(path):
fi = open(path)
x = []
for li in fi:
li = li.strip()
li = li.split(",")
e = [float(t) for t in li[1:]]
x.append(e)
x = np.array(x)
mean = np.mean(x,axis=0)
std = np.std(x,axis=0)
z = torch.tensor((x-mean)/std,dtype=torch.float)
return z
data="C:\\0.ml\\Sampled_inputs.pck"
Sampled_inputs=load(data)
trainData=Sampled_inputs
temptrainData = np.empty([1540, 60, 33])  # (num_samples, timesteps, features); shape assumed from Sampled_inputs
n=len(trainData)
for l in range(0, n):
temp=trainData[l]
#print(temp)
#temp=np.transpose(temp)
temp=temp.T
#print(temp.shape)
#print(temp)
    temptrainData[l, :, :] = temp
#np.append(temptrainData, temp)
#print(temptrainData)
#print(temptrainData.shape)
#print(trainData.shape)
trainData=temptrainData
print("Sampled_inputs.shape: ",Sampled_inputs.shape)
print("trainData.shape: ",trainData.shape)
#print(trainData[0])
temp=trainData[0]
rawdat2=trainData[0]
print("rawdat2:",rawdat2.shape)
num_nodes= 33
print("num_nodes: ",num_nodes)
ttt=trainData[0]
rawdat2=ttt
#Sampled_inputs.shape[0]
for i in range(1, 3):
    temp = np.concatenate((temp, trainData[i]), axis=0)  # use index i, not a fixed index
#print(temp.shape)
#rawdat2=temp
print("rawdat2:",rawdat2.shape)
data='C:\\0.ml\\MTGNN\\data\\solar_AL.txt'
#data='C:\\0.ml\\MTGNN\\data\\electricity.txt'
fin = open(data)
rawdat2 = np.loadtxt(fin, delimiter=',')
print("rawdat2.shape: ",rawdat2.shape)
num_nodes= 137
print("num_nodes: ",num_nodes)
save= "C:\0.ml\MTGNN\\model-save-3.pt"
seq_in_len=1 #24*7#window size
horizon=3
batch_size=32#,help='batch size'
num_nodes= 33
device='cpu'
#device='cuda:0'
main()
###Output
__init__111
self.rawdat: (52560, 137)
self.dat.shape: (52560, 137)
_normalized
_split
tarin_start: 3
train: 31536
valid: 42048
valid_stop: 52560
train_set: range(3, 31536)
valid_set: range(31536, 42048)
test_set: range(42048, 52560)
_batchify
idx_set: range(3, 31536)
len(idx_set): 31533
X.shape: torch.Size([31533, 1, 137])
Y.shape: torch.Size([31533, 137])
_batchify
idx_set: range(31536, 42048)
len(idx_set): 10512
X.shape: torch.Size([10512, 1, 137])
Y.shape: torch.Size([10512, 137])
_batchify
idx_set: range(42048, 52560)
len(idx_set): 10512
X.shape: torch.Size([10512, 1, 137])
Y.shape: torch.Size([10512, 137])
The recpetive field size is 187
Number of model parameters is 339345
optim = Optim()
done
begin training
train()
X.shape: torch.Size([31533, 1, 137])
Y.shape: torch.Size([31533, 137])
batch_size: 32
get_batches
iter: 0 | loss: 1.328
-----------------------------------------------------------------------------------------
Exiting from training early
|
SVM/Untitled.ipynb | ###Markdown
AS THE PURCHASED COLUMN IS ALREADY ENCODED, WE DON'T HAVE TO USE AN ENCODER, AND IT IS OUR DEPENDENT VARIABLE.
###Code
X=df[['Age','EstimatedSalary']]
y=df['Purchased']
print(X.head())
print(y.head())
###Output
Age EstimatedSalary
0 19 19000
1 35 20000
2 26 43000
3 27 57000
4 19 76000
0 0
1 0
2 0
3 0
4 0
Name: Purchased, dtype: int64
###Markdown
NOW SPLIT THE DATASET INTO TRAINING SET AND TEST SET
###Code
from sklearn.model_selection import train_test_split
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.3,random_state=23)
print(X_train.head())
print(X_test.head())
print(y_train.head())
print(y_test.head())
###Output
Age EstimatedSalary
64 59 83000
16 47 25000
77 22 27000
272 60 42000
10 26 80000
Age EstimatedSalary
133 21 68000
331 48 119000
167 35 71000
335 36 54000
239 53 143000
64 0
16 1
77 0
272 1
10 0
Name: Purchased, dtype: int64
133 0
331 1
167 0
335 0
239 1
Name: Purchased, dtype: int64
###Markdown
NOW, AS OUR INDEPENDENT VARIABLE MATRIX X IS NOT ON A COMMON SCALE, WE HAVE TO APPLY FEATURE SCALING
###Code
from sklearn.preprocessing import StandardScaler
sc=StandardScaler()
X_train_sc=sc.fit_transform(X_train)
X_test_sc=sc.transform(X_test)  # transform only: fitting the scaler on the test set would leak test statistics
X_train_sc,X_test_sc
###Output
C:\Users\HP\Documents\New folder\lib\site-packages\sklearn\preprocessing\data.py:625: DataConversionWarning: Data with input dtype int64 were all converted to float64 by StandardScaler.
return self.partial_fit(X, y)
C:\Users\HP\Documents\New folder\lib\site-packages\sklearn\base.py:462: DataConversionWarning: Data with input dtype int64 were all converted to float64 by StandardScaler.
return self.fit(X, **fit_params).transform(X)
C:\Users\HP\Documents\New folder\lib\site-packages\sklearn\preprocessing\data.py:625: DataConversionWarning: Data with input dtype int64 were all converted to float64 by StandardScaler.
return self.partial_fit(X, y)
C:\Users\HP\Documents\New folder\lib\site-packages\sklearn\base.py:462: DataConversionWarning: Data with input dtype int64 were all converted to float64 by StandardScaler.
return self.fit(X, **fit_params).transform(X)
###Markdown
IMPORT THE LIBRARIES
###Code
from sklearn.svm import SVC
svc=SVC(kernel='rbf',random_state=23)
svc.fit(X_train_sc,y_train)
###Output
_____no_output_____
###Markdown
SCORE OF THE ALGORITHM
###Code
score=100.0* svc.score(X_test_sc,y_test)
print(score)
#you can check by using different kernels (linear and rbf (radial basis function)).
#the linear kernel gives an accuracy of about 84.17, while the rbf kernel gives about 90.83.
#By default it is rbf.
###Output
90.83333333333333
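###Markdown
As suggested in the comments above, the two kernels can be compared directly. This is a sketch that assumes the scaled splits from the earlier cells; the exact scores depend on the train/test split and random_state.
###Code
for kernel in ['linear', 'rbf']:
    clf = SVC(kernel=kernel, random_state=23)
    clf.fit(X_train_sc, y_train)
    print(kernel, 100.0 * clf.score(X_test_sc, y_test))
###Output
_____no_output_____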
###Markdown
CLASSIFICATION REPORT
###Code
from sklearn.metrics import classification_report
labels=['Low','High']
result=svc.predict(X_test_sc)
print(classification_report(y_test,result))
###Output
precision recall f1-score support
0 0.96 0.90 0.93 79
1 0.83 0.93 0.87 41
micro avg 0.91 0.91 0.91 120
macro avg 0.89 0.91 0.90 120
weighted avg 0.91 0.91 0.91 120
###Markdown
PERFORMANCE METRICS
###Code
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
mae=mean_absolute_error(y_test,result)  # for 0/1 labels, MAE and MSE both equal the misclassification rate
mse=mean_squared_error(y_test,result)
r2=r2_score(y_test,result)
print(mae)
print(mse)
print(r2)
###Output
0.09166666666666666
0.09166666666666666
0.5924668107440567
|
Course_1-PreLaunch_Preparatory_Content/Module_2-Python_Libraries/2-Pandas/Session4_Operations_on_Dataframes.ipynb | ###Markdown
Operations on PandasThis notebook will cover the following topics: * Filtering dataframes * Single and multiple conditions* Creating new columns* Lambda functions * Group by and aggregate functions* Pivot data* Merging data frames * Joins and concatenations Preparatory steps BackgroundAn FMCG company P&J found that the sales of their best-selling items are affected by the weather and rainfall trend. For example, the sale of tea increases when it rains, and sunscreen is sold on the days when it is least likely to rain and the sky is clear. They would like to check whether the weather patterns play a vital role in the sale of certain items. Hence, as an initial experiment, they would like you to forecast the weather trend in the upcoming days. The target region for this activity is Australia; accordingly, this exercise will be based on analysing and cleaning weather data from the Australian region available on public platforms. Read the data into a dataframe
###Code
import pandas as pd
data = pd.read_csv("weatherdata.csv", header =0)
###Output
_____no_output_____
###Markdown
Display the data
###Code
data.head(5)
###Output
_____no_output_____
###Markdown
Data Dictionary 1. Date: The date on which the recording was taken2. Location: The location of the recording3. MinTemp: Minimum temperature on the day of the recording (in C)4. MaxTemp: Maximum temperature in the day of the recording (in C)5. Rainfall: Rainfall in mm6. Evaporation: The so-called Class A pan evaporation (mm) in the 24 hours to 9am7. Sunshine: The number of hours of bright sunshine in the day.8. WindGustDir: The direction of the strongest wind gust in the 24 hours to midnight9. WindGustSpeed: The speed (km/h) of the strongest wind gust in the 24 hours to midnight Example 1.1: Filtering dataframesFind the days which had sunshine for more that 4 hours. These days will have increased sales of sunscreen.
###Code
data["Sunshine"]>4
data[data["Sunshine"]>4]
###Output
_____no_output_____
###Markdown
**Note:** High sunshine corresponds to low rainfall. Example 1.2: Filtering dataframesThe cold drink sales will most likely increase on the days which have high sunshine(>5) and high max temperature(>35). Use the filter operation to filter out these days
###Code
data[(data["MaxTemp"]>35) & (data["Sunshine"]>4)]
###Output
_____no_output_____
###Markdown
**Note:** The construction of the filter condition, it has individual filter conditions separated in parenthesis Example 2.1: Creating new columns If you noticed the filtering done in the earlier examples did not give precise information about the days, the data column simply has the dates. The date column can be split into the year, month and day of the month. **Special module of pandas** The "DatetimeIndex" is a particular module which has the capabilities to extract a day, month and year form the date.
###Code
pd.DatetimeIndex(data["Date"]).year
###Output
_____no_output_____
###Markdown
**Adding New columns** To add a new column to the dataframe, just name the column and assign it the expression that creates the new values.
###Code
data["Year"] = pd.DatetimeIndex(data["Date"]).year
data.head()
data["Month"] = pd.DatetimeIndex(data["Date"]).month
data["Dayofmonth"] = pd.DatetimeIndex(data["Date"]).day
data.head(20)
###Output
_____no_output_____
###Markdown
Example 2.2: Creating new columnsThe temperature given is in Celsius; convert it to Fahrenheit and store it in a new column.
###Code
data["Maxtemp_F"] = data["MaxTemp"] * 9/5 +32
data.head()
###Output
_____no_output_____
###Markdown
Example 3.1: Lambda FunctionsLet's create a new column which marks the days with more than 50 mm of rainfall as rainy and the rest as not rainy.
###Code
data.Rainfall.apply(lambda x: "Rainy" if x > 50 else "Not rainy")
###Output
_____no_output_____
###Markdown
**Note** 1. New way of accessing a column in a dataframe: using the dot operator.2. The "apply" function takes a lambda function as its argument.
###Code
type(data.Rainfall)
type(data["Rainfall"])
data["is_raining"] = data.Rainfall.apply(lambda x: "Rainy" if x > 50 else "Not rainy")
data[data["is_raining"] == "Rainy"]
###Output
_____no_output_____
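###Markdown
For a simple two-way split like this, a vectorized alternative to `apply` is `numpy.where`, which is usually faster on large frames. This sketch assumes the same `data` frame as above.
###Code
import numpy as np
data["is_raining"] = np.where(data["Rainfall"] > 50, "Rainy", "Not rainy")
data["is_raining"].value_counts()
###Output
_____no_output_____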
###Markdown
In Session questionYou are provided with the dataset of a company which has offices across three cities - Mumbai, Bangalore and New Delhi. The dataset contains the rating (out of 5) of all the employees from different departments (Finance, HR, Marketing and Sales). The company has come up with a new policy that any individual with a rating equal to or below 3.5 needs to attend a training. Using dataframes, load the dataset and then derive the column โTrainingโ which shows โYesโ for people who require training and โNoโ for those who do not.Find the department that has the most efficient team (the team with minimum percentage of employees who need training).
###Code
rating = pd.read_csv('rating.csv')
# Provide your answer below
rating["Training"] = rating.Rating.apply(lambda x: "Yes" if x <= 3.5 else "No")
print(rating.head())
for i in ['Finance', 'HR', 'Sales', 'Marketing']:
    # percentage of employees who do NOT need training; the most efficient team maximizes this
    print(i, len(rating[(rating['Training'] == 'No') & (rating['Department'] == i)]) / len(rating[rating['Department'] == i]) * 100)
###Output
Finance 50.0
HR 57.25190839694656
Sales 49.23076923076923
Marketing 46.3768115942029
###Markdown
Example 4.1: Grouping and Aggregate functionsFind the location which received the most amount of rain in the given data. In this place, certain promotional offers can be put in place to boost sales of tea, umbrella etc.
###Code
data_by_location = data.groupby(by=["Location"]).mean()  # on newer pandas you may need .mean(numeric_only=True)
data_by_location.head()
data_by_location.sort_values("Rainfall", ascending=False).head()
###Output
_____no_output_____
###Markdown
Example 4.2: Grouping and Aggregate functionsHot chocolate is the most sold product in the cold months. Find the month which is the coldest so that the inventory team can keep the stock of hot chocolate ready well in advance.
###Code
data_by_month = data.groupby(by=["Month"]).mean()
data_by_month.head()
data_by_month.sort_values("MinTemp")
###Output
_____no_output_____
###Markdown
Example 4.3: Grouping and Aggregate functionsSometimes feeling cold is about more than low temperatures; a windy day can also make you cold. A factor called the chill factor can be used to quantify the cold based on the wind speed and the temperature. The formula for the chill factor is given by $ WCI = (10 \sqrt{v} - v + 10.5) \cdot (33 - T_{m}) $where v is the speed of the wind and $ T_{m} $ is the minimum temperature. Add a column for WCI and find the coldest month, i.e. the month with the highest WCI.
###Code
from math import sqrt
def wci(x):
velocity = x["WindGustSpeed"]
minTemp = x["MinTemp"]
return (10* sqrt(velocity) - velocity + 10.5)*(33 - minTemp)
data["WCI"] = data.apply(wci, axis=1)
data.head()
data_by_month = data.groupby(by=["Month"]).mean()
data_by_month.sort_values("WCI", ascending=False)
###Output
_____no_output_____
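###Markdown
As a quick sanity check of the formula, for a hypothetical observation with v = 36 km/h and a minimum temperature of 8 C: $(10\sqrt{36} - 36 + 10.5)\cdot(33 - 8) = 34.5 \cdot 25 = 862.5$.
###Code
wci({"WindGustSpeed": 36, "MinTemp": 8})  # expected: 862.5
###Output
_____no_output_____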
###Markdown
Dataframe groupingGroup the dataframe 'df' by 'month' and 'day' and find the mean value for column 'rain' and 'wind'.
###Code
df = pd.read_csv("forestfires.csv")
df_1 = df[["month", "day", "rain", "wind"]]
df_1 = df_1.groupby(by=["month", "day"]).mean()
df_1.head(20)
###Output
_____no_output_____
###Markdown
Example 5.1: Merging DataframesThe merge command is used to combine dataframes. Unlike hstack and vstack, merge works by using a key to combine two dataframes. For example, the total tea sales for the Newcastle store for the month of June 2011 are given in the file named ```junesales.csv```. Read in the data from the file and join it to the weather data extracted from the original dataframe.
###Code
sales = pd.read_csv("junesales.csv", header = 0)
sales["Dayofmonth"] = pd.DatetimeIndex(sales["Date"]).day
sales.head()
# Filter the sales data for the relevant month and the appropriate location to a new dataframe.
Newcastle_data = data[(data['Location']=='Newcastle') & (data['Year']==2011) & (data['Month']==6)]
Newcastle_data.head()
merge_data = Newcastle_data.merge(sales, on = "Dayofmonth")
merge_data.head(30)
###Output
_____no_output_____
###Markdown
Example 5.2: Merging Dataframes Types of joins. * INNER JOIN* LEFT JOIN* RIGHT JOIN* FULL JOIN (a toy illustration of these follows the next cell)Each state may have different tax laws, so we might want to add the states information to the data as well.The file ```locationsandstates.csv``` contains information about the states and locations; the data in this file is **not** the same as the weather data. It is possible that a few locations in "data" (the original dataframe) are not in this file, and all the locations in the file might not be in the original dataframe. Add the state data to the original dataframe.
###Code
state = pd.read_csv("locationsandstates.csv", header = 0)
state
state_data = data.merge(state, on = "Location", how = "left")
state_data
###Output
_____no_output_____
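###Markdown
The toy example below (hypothetical data, unrelated to the weather dataset) illustrates how the `how` argument controls which keys survive a merge.
###Code
left = pd.DataFrame({"key": ["a", "b"], "l": [1, 2]})
right = pd.DataFrame({"key": ["b", "c"], "r": [3, 4]})
for how in ["inner", "left", "right", "outer"]:
    print(how, "->", list(left.merge(right, on="key", how=how)["key"]))
###Output
_____no_output_____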
###Markdown
Dataframes MergePerform an inner merge on two data frames df_1 and df_2 on 'unique_id' and print the combined dataframe.
###Code
df_1 = pd.read_csv('restaurant-1.csv')
df_2 = pd.read_csv('restaurant-2.csv')
df_3 = df_1.merge(df_2, how="inner", on="unique_id")
print(df_3.head(20))
print(df_1.columns)
print(df_2.columns)
df_3 = pd.concat([df_1, df_2])
df_3.head()
###Output
Index(['name', 'address', 'city', 'cuisine', 'unique_id'], dtype='object')
Index(['name_2', 'address_2', 'city_2', 'cuisine_2', 'unique_id'], dtype='object')
###Markdown
Given three data frames containing the number of gold, silver, and bronze Olympic medals won by some countries, determine the total number of medals won by each country. Note: All the three data frames donโt have all the same countries. So, ensure you use the โfill_valueโ argument (set it to zero), to avoid getting NaN values. Also, ensure you sort the final dataframe, according to the total medal count in descending order.
###Code
gold = pd.DataFrame({'Country': ['USA', 'France', 'Russia'],
'Medals': [15, 13, 9]}
)
silver = pd.DataFrame({'Country': ['USA', 'Germany', 'Russia'],
'Medals': [29, 20, 16]}
)
bronze = pd.DataFrame({'Country': ['France', 'USA', 'UK'],
'Medals': [40, 28, 27]}
)
final_set = pd.concat([gold, silver, bronze])
result = final_set.groupby(['Country']).agg('sum').sort_values(['Medals'], ascending=False)
result
###Output
_____no_output_____
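###Markdown
The question hints at the `fill_value` argument; an equivalent sketch that uses it directly (assuming the same three frames) indexes each frame by country and adds the medal Series pairwise, so countries missing from a frame count as zero.
###Code
total = (gold.set_index('Country')['Medals']
         .add(silver.set_index('Country')['Medals'], fill_value=0)
         .add(bronze.set_index('Country')['Medals'], fill_value=0)
         .sort_values(ascending=False))
total
###Output
_____no_output_____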
###Markdown
Example 6.1: pivot tablesUsing pivot tables find the average monthly rainfall in the year 2016 of all the locations. The information can then be used to predict the sales of tea in the year 2017.
###Code
data_2016 = data[data["Year"] ==2016]
data_2016
data_2016.pivot_table(index = "Location", columns = "Month", values = "Rainfall", aggfunc='mean')
###Output
_____no_output_____
###Markdown
Find the Pandas pivot table documentation [here](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.pivot_table.html)This information can be used to decide the stocks of tea in each of the stores. You can modify the pivot_table command to get a lot of work done quickly.
###Code
data_2016.pivot_table(index = "Location", columns = "Month", values = "Sunshine", aggfunc='mean')
###Output
_____no_output_____
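###Markdown
For instance, passing a list of aggregation functions and `margins=True` (a hypothetical variation, not required by the exercise) produces several summaries plus row/column totals in one call.
###Code
data_2016.pivot_table(index="Location", columns="Month", values="Rainfall",
                      aggfunc=["mean", "max"], margins=True)
###Output
_____no_output_____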
###Markdown
Note[Here](https://pandas.pydata.org/pandas-docs/stable/index.html) is the link to the official documentation of Pandas. Be sure to visit it in order to explore the functions available in the library. Dataframe Pivot TableGroup the data 'df' by 'month' and 'day' and find the mean value for columns 'rain' and 'wind' using the pivot table command.
###Code
df = pd.read_csv("forestfires.csv")
df_1 = df.pivot_table(index = ['month','day'],
values = ['rain','wind'],
aggfunc = 'mean')
df_1.head(20)
###Output
_____no_output_____ |
Exercises/RNN, LSTM, GRU/RNN, LSTM, GRU.ipynb | ###Markdown
Recurrent Neural NetworkThis notebook was created by Camille-Amaury JUGE, in order to better understand RNN, LSTM and GRU principles and how they work (it follows the exercises proposed by Hadelin de Ponteves on Udemy: https://www.udemy.com/course/le-deep-learning-de-a-a-z/) Imports
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from pylab import rcParams
rcParams['figure.figsize'] = 16, 10
# keras
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.layers import LSTM, SimpleRNN, GRU
# scikit learn
from sklearn.preprocessing import MinMaxScaler
###Output
_____no_output_____
###Markdown
preprocessingThe dataset represents Google's stock price over the past 5 years. It shows the basic features of the stock.The test set covers only 1 month.Our aim is to make predictions about the trends that Google's stock could follow in the future.
###Code
df_train = pd.read_csv("Google_Stock_Price_Train.csv")
df_train.head()
df_test = pd.read_csv("Google_Stock_Price_Test.csv")
df_test.head()
df_train.isna().sum()
df_test.isna().sum()
df_train.dtypes
df_test.dtypes
df_train["Close"] = np.array([i.replace(",","") for i in df_train["Close"]]).astype(float)
df_train["Volume"] = np.array([i.replace(",","") for i in df_train["Volume"]]).astype(float)
df_test["Volume"] = np.array([i.replace(",","") for i in df_test["Volume"]]).astype(float)
df_train.dtypes
df_test.dtypes
df_train.describe().transpose().round(2)
df_test.describe().transpose().round(2)
###Output
_____no_output_____
###Markdown
The data are quite centered around the mean (the median is close to the mean); there seem to be no outliers, so we can presume that the evolution is more or less linear.
###Code
plt.title("Opening/Closing Stock Price")
plt.xlabel("Date")
plt.ylabel("$ stock price")
plt.xticks(np.arange(0, len(df_train["Date"]), 200))
plt.plot()
plt.plot(df_train["Date"], df_train["Open"], color='blue', linewidth=4, label="Open")
plt.plot(df_train["Date"], df_train["Close"], color='red', linewidth=2, label="Close")
plt.legend()
plt.title("High/Low Stock Price")
plt.xlabel("Date")
plt.ylabel("$ stock price")
plt.xticks(np.arange(0, len(df_train["Date"]), 200))
plt.plot(df_train["Date"], df_train["Low"], color='blue', linewidth=4, label="Low")
plt.plot(df_train["Date"], df_train["High"], color='red', linewidth=2, label="High")
plt.legend()
###Output
_____no_output_____
###Markdown
We can observe strange behavior in the data for the Close price: it does not seem accurate until 2014-05-23. We are now going to scale the feature.
###Code
X_train = df_train[["Open"]].values
scaler = MinMaxScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_train_scaled
###Output
_____no_output_____
###Markdown
We are going to create timesteps in order to give each row the values of the previous days; here we choose two months (~60 trading days).
###Code
_days_range = 60
X_train = []
y_train = []
for i in range(_days_range, X_train_scaled.shape[0]):
X_train.append(X_train_scaled[i-_days_range:i, 0])
y_train.append(X_train_scaled[i, 0])
X_train = np.array(X_train)
y_train = np.array(y_train)
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
X_train[0]
df = pd.concat([df_train["Open"], df_test["Open"]], axis = 0)
len(df)
X_test = df[df.shape[0]-df_test.shape[0]-_days_range:].values
X_test = X_test.reshape(-1,1)
X_test_scaled = scaler.transform(X_test)
X_test = []
for i in range(_days_range, X_test_scaled.shape[0]):
X_test.append(X_test_scaled[i-_days_range:i, 0])
X_test = np.array(X_test)
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
X_test.shape
X_test[19]
###Output
_____no_output_____
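###Markdown
To visualize what the windowing above produces, here is a minimal sketch on a toy series (hypothetical values): with a window of 3, each sample holds the 3 previous points and the target is the point that follows.
###Code
toy = np.arange(10)
window = 3
X_toy = np.array([toy[i - window:i] for i in range(window, len(toy))])
y_toy = toy[window:]
print(X_toy[:3])  # [[0 1 2], [1 2 3], [2 3 4]]
print(y_toy[:3])  # [3 4 5]
###Output
_____no_output_____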
###Markdown
Model LSTM
###Code
def lstm_model(input_shape):
    model = Sequential()
    # stacked LSTMs: return_sequences=True passes the full sequence on to the next recurrent layer
    model.add(LSTM(units=256, return_sequences=True, input_shape=input_shape))
    model.add(Dropout(0.3))
    model.add(LSTM(units=256, return_sequences=True))
    model.add(Dropout(0.2))
    model.add(LSTM(units=128, return_sequences=True))
    model.add(Dropout(0.2))
    # the last recurrent layer returns only the final hidden state for the dense head
    model.add(LSTM(units=128, return_sequences=False))
    model.add(Dropout(0.1))
    model.add(Dense(units=128))
    model.add(Dense(units=64))
    model.add(Dense(units=1))
    model.compile(optimizer="adam", loss="mean_squared_error")
    return model
model = lstm_model((X_train.shape[1],1))
model.fit(X_train, y_train, epochs=50, batch_size = 32)
y_pred = model.predict(X_test)
y_pred_real = scaler.inverse_transform(y_pred)
plt.title("Predicted/True Stock Price")
plt.xlabel("Date")
plt.ylabel("$ stock price")
plt.plot()
plt.plot(df_test["Open"], color='blue', linewidth=4, label="True")
plt.plot(y_pred_real, color='red', linewidth=2, label="Predicted")
plt.legend()
###Output
_____no_output_____
###Markdown
GRU
###Code
def gru_model(input_shape):
model = Sequential()
model.add(GRU(units=256, return_sequences=True, input_shape=input_shape))
model.add(Dropout(0.3))
model.add(GRU(units=256, return_sequences=True))
model.add(Dropout(0.2))
model.add(GRU(units=128, return_sequences=True))
model.add(Dropout(0.2))
model.add(GRU(units=128, return_sequences=False))
model.add(Dropout(0.1))
model.add(Dense(units=128))
model.add(Dense(units=64))
model.add(Dense(units=1))
model.compile(optimizer="adam", loss="mean_squared_error", metrics=["mean_squared_error"])
return model
model = gru_model((X_train.shape[1],1))
model.fit(X_train, y_train, epochs=50, batch_size = 32)
y_pred = model.predict(X_test)
y_pred_real = scaler.inverse_transform(y_pred)
plt.title("Predicted/True Stock Price GRU")
plt.xlabel("Date")
plt.ylabel("$ stock price")
plt.plot()
plt.plot(df_test["Open"], color='blue', linewidth=4, label="True")
plt.plot(y_pred_real, color='red', linewidth=2, label="Predicted")
plt.legend()
###Output
_____no_output_____ |