markdown | code | output | license | path | repo_name |
---|---|---|---|---|---|
`**d` represents any number of keyword parameters | def dconcat(sep = ":", **dic):
for k in dic.keys():
print("{}{}{}".format(k, sep, dic[k]))
dconcat(hello = "world", python = "rocks", sep = "~") | hello~world
python~rocks
| MIT | Notebooks/Arguments-and-Unpacking.ipynb | gtavasoli/PyTips |
Unpacking: The new feature [PEP 448](https://www.python.org/dev/peps/pep-0448/) added in **Python 3.5** allows `*a` and `**d` to be used outside of function parameters: | print(*range(5))
lst = [0, 1, 2, 3]
print(*lst)
a = *range(3), # The comma here cannot be omitted
print(a)
d = {"hello": "world", "python": "rocks"}
print({**d}["python"])
print(*d)
print([*d][0]) | 0 1 2 3 4
0 1 2 3
(0, 1, 2)
rocks
hello python
hello
| MIT | Notebooks/Arguments-and-Unpacking.ipynb | gtavasoli/PyTips |
Unpacking can be thought of as stripping the `()` from a tuple or the `{}` from a dictionary. This syntax also provides a more Pythonic way to merge dictionaries: | user = {'name': "Trey", 'website': "http://treyhunner.com"}
defaults = {'name': "Anonymous User", 'page_name': "Profile Page"}
print({**defaults, **user}) | {'name': 'Trey', 'page_name': 'Profile Page', 'website': 'http://treyhunner.com'}
| MIT | Notebooks/Arguments-and-Unpacking.ipynb | gtavasoli/PyTips |
This style of unpacking arguments when calling a function is also available in **Python 2.7**: | print(concat(*"ILovePython")) | I/L/o/v/e/P/y/t/h/o/n
| MIT | Notebooks/Arguments-and-Unpacking.ipynb | gtavasoli/PyTips |
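The `concat` function called above is defined in an earlier cell of the original notebook that is not shown in this excerpt; judging from the output, it joins its positional arguments with `/`. A minimal sketch of such a function (the `sep` default is an assumption):

```python
def concat(*lst, sep="/"):
    # *lst collects any number of positional arguments into a tuple
    return sep.join(str(item) for item in lst)

print(concat(*"ILovePython"))  # I/L/o/v/e/P/y/t/h/o/n
```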
Earth's average orbital speed is about 29.8 km/s, so this looks about right. | vr0 = earth_diff['r'] / (24*3600) * km
vr0
vtheta0 = earth_diff['theta']
vtheta0
vphi0 = earth_diff['phi']
vphi0 | _____no_output_____ | MIT | code/notebooks/eph-earth-velocity.ipynb | GandalfSaxe/letomes |
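For reference, if `vr0`, `vtheta0`, and `vphi0` are the radial and angular rates in spherical coordinates, the quantity to compare against Earth's ~29.8 km/s mean orbital speed is

$$|v| = \sqrt{\dot{r}^2 + r^2\dot{\theta}^2 + r^2\sin^2\theta\,\dot{\varphi}^2},$$

i.e. the angular components must be multiplied by $r$ (and $r\sin\theta$) before being added in quadrature with the radial rate (assuming `vtheta0` and `vphi0` are angular rates).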
Libraries | from typing import List, Union, Tuple, Callable, Dict
from os import environ
from random import seed
from numpy.random import seed as np_seed
from numpy import ndarray, zeros
from pandas import DataFrame, read_csv
from sklearn.model_selection import train_test_split
import tensorflow as tf
from tensorflow import random
from tensorflow import keras
from tensorflow.keras import applications
from tensorflow.keras import layers  # needed for the RandomFlip/RandomRotation augmentation layers used below
from tensorflow import data
import plotly.graph_objects as go
import wandb
from wandb.keras import WandbCallback
from kaggle_secrets import UserSecretsClient | _____no_output_____ | MIT | pet-finder.ipynb | leopoldavezac/PetFinder |
WandB | user_secrets = UserSecretsClient()
api_key = user_secrets.get_secret("WANDB")
wandb.login(key=api_key)
run = wandb.init(
project="pet_finder",
entity="leopoldavezac",
config={
'learning_rate':0.001,
'epochs':20,
'batch_size':24,
'loss_func':'mse',
'img_width':224,
'img_length':224,
'efficient_net_symbol':'B0',
'efficient_net_trainable':False,
'dense_layers_post_efficient_net':[18, 9],
'dropout':0.2,
'data_augmentation_contrast':0.1,
}
) | _____no_output_____ | MIT | pet-finder.ipynb | leopoldavezac/PetFinder |
Constants | DATA_PATH = '../input/petfinder-pawpularity-score'
ID_VAR_NM = 'Id'
TARGET_VAR_NM = 'Pawpularity'
AUTOTUNE = tf.data.experimental.AUTOTUNE
CONFIG = wandb.config | _____no_output_____ | MIT | pet-finder.ipynb | leopoldavezac/PetFinder |
Load & Preprocess Data |
def get_datasets() -> List[tf.data.Dataset]:
df_train = load_df(set_nm='train')
df_test = load_df(set_nm='test')
df_train[TARGET_VAR_NM] /= 100
df_train = create_img_path_var(df_train, 'train')
df_test = create_img_path_var(df_test, 'test')
df_train, df_val = split(df_train)
ds_train = create_dataset_with_preprocessed_imgs(
df_train.img_path.values,
df_train[TARGET_VAR_NM].values.astype('float'),
augment=True
)
ds_val = create_dataset_with_preprocessed_imgs(
df_val.img_path.values,
df_val[TARGET_VAR_NM].values.astype('float')
)
ds_test = create_dataset_with_preprocessed_imgs(
df_test.img_path.values
)
return [ds_train, ds_val, ds_test]
def load_df(set_nm:str) -> DataFrame:
var_nms = [ID_VAR_NM]
var_nms += [TARGET_VAR_NM] if set_nm == 'train' else []
return read_csv('{}/{}.csv'.format(DATA_PATH, set_nm), usecols=var_nms)
def create_img_path_var(df: DataFrame, set_nm:str) -> DataFrame:
df['img_path'] = '{}/{}/'.format(DATA_PATH, set_nm) + df[ID_VAR_NM] + '.jpg'
df.drop(columns=ID_VAR_NM, inplace=True)
return df
def split(df: DataFrame) -> List[DataFrame]:
train, val = train_test_split(df.values, test_size=0.2)
df_train = DataFrame(train, columns=df.columns)
df_val = DataFrame(val, columns=df.columns)
return [df_train, df_val]
def create_dataset_with_preprocessed_imgs(X_paths: ndarray, y: Union[None, ndarray] = None, augment:bool=False) -> data.Dataset:
get_preprocessed_img = build_img_processor(y is not None)
if y is not None:
ds = data.Dataset.from_tensor_slices((X_paths, y))
else:
ds = data.Dataset.from_tensor_slices((X_paths,))
ds = ds.map(get_preprocessed_img, num_parallel_calls=AUTOTUNE)
if augment:
augmentation_model = get_augmentation_model()
ds = ds.map(lambda X, y: (augmentation_model(X, training=True), y))
ds = ds.batch(CONFIG.batch_size)
ds = ds.prefetch(buffer_size=AUTOTUNE)
return ds
def build_img_processor(with_target: bool) -> Callable:
def get_preprocessed_img(path: str) -> tf.Tensor:
img = load_img(path)
img = resize(img)
img = eff_net_preprocess(img)
return img
def get_preprocessed_img_with_target(path:str, y:float) -> Tuple[Union[tf.Tensor, float]]:
return (get_preprocessed_img(path), y)
return get_preprocessed_img_with_target if with_target else get_preprocessed_img
def load_img(path: str) -> tf.Tensor:
img = tf.io.read_file(path)
return tf.io.decode_jpeg(img)
def resize(img: tf.Tensor) -> tf.Tensor:
return tf.cast(
tf.image.resize_with_pad(img, CONFIG.img_length, CONFIG.img_width),
dtype=tf.int32
)
def eff_net_preprocess(img: tf.Tensor) -> tf.Tensor:
return keras.applications.efficientnet.preprocess_input(img)
def normalize(img: tf.Tensor) -> tf.Tensor:
return img / 255.0
def get_augmentation_model() -> tf.keras.Model:
return tf.keras.Sequential([
layers.RandomFlip("horizontal"),
layers.RandomRotation(CONFIG.data_augmentation_contrast),
])
ds_train, ds_val, ds_test = get_datasets() | _____no_output_____ | MIT | pet-finder.ipynb | leopoldavezac/PetFinder |
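A quick sanity check of the shapes produced by this pipeline (a minimal sketch; `ds_train` yields `(images, targets)` batches, while `ds_test` yields images only):

```python
# Peek at one training batch to confirm image size and batching
for X_batch, y_batch in ds_train.take(1):
    print(X_batch.shape)  # (batch_size, img_length, img_width, 3)
    print(y_batch.shape)  # (batch_size,)
```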
Img Dims Visualization | img_paths = ('{}/{}/'.format(DATA_PATH, 'train') + load_df('train')[ID_VAR_NM] + '.jpg').values
img_dims = zeros((len(img_paths), 2))
for i, img_path in enumerate(img_paths):
img_dims[i,:] = load_img(img_path).shape[:-1]
fig = go.Figure()
fig.add_trace(go.Histogram(x=img_dims[:,0], histnorm='probability', name='width'))
fig.add_trace(go.Histogram(x=img_dims[:,1], histnorm='probability', name='height'))
fig.update_layout(title_text='Distribution of Img Width and Height')
fig.show() | _____no_output_____ | MIT | pet-finder.ipynb | leopoldavezac/PetFinder |
Model | def get_model(efficient_net_model_nm:str, dense_layers_post_eff_net:List[int], dropout: float) -> tf.keras.Model:
efficient_net = tf.keras.models.load_model(f'../input/keras-applications-models/{efficient_net_model_nm}.h5')
if CONFIG.efficient_net_trainable:
unfreeze_layers(efficient_net)
layers = [
tf.keras.layers.Input(shape=(CONFIG.img_length, CONFIG.img_width, 3)),
efficient_net,
tf.keras.layers.BatchNormalization(),
tf.keras.layers.Dropout(dropout)
]
layers += [tf.keras.layers.Dense(nb_units) for nb_units in dense_layers_post_eff_net]
layers += [tf.keras.layers.Dense(1, activation='sigmoid')]
model = keras.models.Sequential(layers)
print(model.summary())
return model
def unfreeze_layers(model: tf.keras.Model) -> None:
for layer in model.layers:
if not isinstance(layer, tf.keras.layers.BatchNormalization):
layer.trainable = True
else:
layer.trainable = False
def compile_model(model: keras.Model, learning_rate: float, loss_func:str) -> None:
optimizer = keras.optimizers.Adam(learning_rate=learning_rate)
model.compile(loss=loss_func, optimizer=optimizer, metrics=[keras.metrics.RootMeanSquaredError()])
def fit(model: keras.Model, ds_train: data.Dataset, ds_val: data.Dataset, epochs: int) -> None:
early_stopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=3)
model.fit(ds_train, epochs=epochs, validation_data=ds_val, callbacks=[WandbCallback(), early_stopping])
model = get_model(CONFIG.efficient_net_symbol, CONFIG.dense_layers_post_efficient_net, CONFIG.dropout)
compile_model(model, CONFIG.learning_rate, CONFIG.loss_func)
fit(model, ds_train, ds_val, CONFIG.epochs)
run.finish() | _____no_output_____ | MIT | pet-finder.ipynb | leopoldavezac/PetFinder |
Submissions |
def save_test_pred(pred: ndarray) -> None:
df_test = load_df_test()
df_test[TARGET_VAR_NM] = pred
df_test[[ID_VAR_NM, TARGET_VAR_NM]].to_csv('submission.csv', index=False)
def load_df_test() -> DataFrame:
return read_csv('{}/test.csv'.format(DATA_PATH), usecols=[ID_VAR_NM])
test_pred = model.predict(ds_test)
test_pred *= 100
save_test_pred(test_pred) | _____no_output_____ | MIT | pet-finder.ipynb | leopoldavezac/PetFinder |
Big Data Analysis Engineer (빅데이터 분석기사) practical exam: example solutions
1. Task Type 1: data processing
* Transform the qsec column of the mtcars dataset (mtcars.csv) with min-max scaling (Min-Max Scale), then find the number of records whose scaled value is greater than 0.5. | import pandas as pd # pandas import
df = pd.read_csv('mtcars.csv') # df์ mtcars.csv๋ฅผ ์ฝ์ด ๋ฐ์ดํฐํ๋ ์์ผ๋ก ์ ์ฅ
df.head() # df์ ์์์ 5๊ฐ ๋ฐ์ดํฐ ์ถ๋ ฅ | _____no_output_____ | MIT | ๋น
๋ถ๊ธฐ ์ค๊ธฐ ์์ ํ์ด.ipynb | kamzzang/ADPStudy |
* Method 1: transform using sklearn's MinMaxScaler | from sklearn.preprocessing import MinMaxScaler # import sklearn's MinMaxScaler
scaler = MinMaxScaler() # MinMaxScaler ๊ฐ์ฒด ์์ฑ
# scaler์ ๋ฐ์ดํฐ๋ฅผ ๋ฃ์ด์ ๋ชจ๋ธ์ ๋ง๋ค๊ณ ๊ฐ์ ๋ฎ์ด์จ์
df['qsec'] = scaler.fit_transform(df[['qsec']])
# qsec๊ฐ 0.5๋ณด๋ค ํฐ ๋ฐ์ดํฐ๋ง ์์ธํด์ ๊ธธ์ด๋ฅผ ๊ตฌํจ
answer = len(df[df['qsec']>0.5])
print(answer) | 9
| MIT | ๋น
๋ถ๊ธฐ ์ค๊ธฐ ์์ ํ์ด.ipynb | kamzzang/ADPStudy |
* Method 2: apply the min-max formula directly | df['qsec'] = (df['qsec'] - df['qsec'].min()) / (df['qsec'].max() - df['qsec'].min())
answer = len(df[df['qsec']>0.5])
print(answer) | 9
| MIT | ๋น
๋ถ๊ธฐ ์ค๊ธฐ ์์ ํ์ด.ipynb | kamzzang/ADPStudy |
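Both methods above implement the standard min-max normalization:

$$x' = \frac{x - \min(x)}{\max(x) - \min(x)}$$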
2. Task Type 2: model building and evaluation
* Below is one year of purchase data for department-store customers. Goal: using the X_test data, predict the probability that each customer is male and produce a CSV containing only cust_id and that probability. Evaluation metric: ROC-AUC curve. Things to cover: * data preprocessing * feature engineering * a classification model * optimization * ensembling
1. EDA | import pandas as pd
import numpy as np
X_train = pd.read_csv('X_train.csv', encoding='euc-kr') # the file contains Korean text, so read it with euc-kr encoding
y_train = pd.read_csv('y_train.csv')
X_test = pd.read_csv('X_test.csv', encoding='euc-kr') # the file contains Korean text, so read it with euc-kr encoding
display(X_train.head())
display(y_train.head())
display(X_test.head())
train_data = X_train.merge(y_train, on='cust_id', how='outer') # merge X and y on cust_id
train_data.head() | _____no_output_____ | MIT | ๋น
๋ถ๊ธฐ ์ค๊ธฐ ์์ ํ์ด.ipynb | kamzzang/ADPStudy |
* Check each column's data type and shape * The refund-amount column has missing values * Only the main-purchase-product and main-purchase-store columns are categorical: one-hot encode them if needed | train_data.info() | <class 'pandas.core.frame.DataFrame'>
Int64Index: 3500 entries, 0 to 3499
Data columns (total 11 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 cust_id 3500 non-null int64
1 ์ด๊ตฌ๋งค์ก 3500 non-null int64
2 ์ต๋๊ตฌ๋งค์ก 3500 non-null int64
3 ํ๋ถ๊ธ์ก 1205 non-null float64
4 ์ฃผ๊ตฌ๋งค์ํ 3500 non-null object
5 ์ฃผ๊ตฌ๋งค์ง์ 3500 non-null object
6 ๋ด์ ์ผ์ 3500 non-null int64
7 ๋ด์ ๋น๊ตฌ๋งค๊ฑด์ 3500 non-null float64
8 ์ฃผ๋ง๋ฐฉ๋ฌธ๋น์จ 3500 non-null float64
9 ๊ตฌ๋งค์ฃผ๊ธฐ 3500 non-null int64
10 gender 3500 non-null int64
dtypes: float64(3), int64(6), object(2)
memory usage: 328.1+ KB
| MIT | ๋น
๋ถ๊ธฐ ์ค๊ธฐ ์์ ํ์ด.ipynb | kamzzang/ADPStudy |
* Check for missing values * Most of the refund-amount column is missing (2295 of 3500 rows) | train_data.isnull().sum() | _____no_output_____ | MIT | ๋น
๋ถ๊ธฐ ์ค๊ธฐ ์์ ํ์ด.ipynb | kamzzang/ADPStudy |
* The refund-amount column in the test data also has many missing values | X_test.isnull().sum() | _____no_output_____ | MIT | ๋น
๋ถ๊ธฐ ์ค๊ธฐ ์์ ํ์ด.ipynb | kamzzang/ADPStudy |
* Check the distribution of the numeric columns * The scales differ widely between columns, so scaling is needed | train_data.describe() | _____no_output_____ | MIT | ๋น
๋ถ๊ธฐ ์ค๊ธฐ ์์ ํ์ด.ipynb | kamzzang/ADPStudy |
* Check the distribution of the string (object) columns | train_data.describe(include=[object])
display(train_data['์ฃผ๊ตฌ๋งค์ํ'].unique(), len(train_data['์ฃผ๊ตฌ๋งค์ํ'].unique()))
display(X_test['์ฃผ๊ตฌ๋งค์ํ'].unique(), len(X_test['์ฃผ๊ตฌ๋งค์ํ'].unique()))
display(train_data['์ฃผ๊ตฌ๋งค์ง์ '].unique(), len(train_data['์ฃผ๊ตฌ๋งค์ง์ '].unique()))
display(X_test['์ฃผ๊ตฌ๋งค์ง์ '].unique(), len(X_test['์ฃผ๊ตฌ๋งค์ง์ '].unique())) | _____no_output_____ | MIT | ๋น
๋ถ๊ธฐ ์ค๊ธฐ ์์ ํ์ด.ipynb | kamzzang/ADPStudy |
* ์ฃผ๊ตฌ๋งค์ํ์์ ์ฐจ์ด๋๋ ์ํ ํ์ธ | for i in train_data['์ฃผ๊ตฌ๋งค์ํ'].unique():
if i not in X_test['์ฃผ๊ตฌ๋งค์ํ'].unique():
print(i) | ์ํ๊ฐ์
| MIT | ๋น
๋ถ๊ธฐ ์ค๊ธฐ ์์ ํ์ด.ipynb | kamzzang/ADPStudy |
* train_data์ ์ํ ๊ฐ์ ๋ฐ์ดํฐ ํ์ธ | train_data[train_data['์ฃผ๊ตฌ๋งค์ํ']=='์ํ๊ฐ์ ']
2/3500 # 0.05% ์ํ๊ฐ์ ๋ฐ์ดํฐ | _____no_output_____ | MIT | ๋น
๋ถ๊ธฐ ์ค๊ธฐ ์์ ํ์ด.ipynb | kamzzang/ADPStudy |
* ์ด๊ตฌ๋งค์ก๊ณผ ์ต๋๊ตฌ๋งค์ก์ ์์๊ฐ ํ์ธ๋์ด ํด๋น ๋ฐ์ดํฐ ์ถ๋ ฅ * ๋ชจ๋ ์ฌ์์ ๊ฒฝ์ฐ๋ก ํ์ธ๋จ * ํ๋ถ ๊ธ์ก์ด ์ต๋๊ตฌ๋งค์ก๋ณด๋ค ๋ง์ ๊ฒ์ ์๋ฏธ? * ์ต๋๊ตฌ๋งค์ก์ด ์์๋ ๋ญ๊ฐ? | train_data[(train_data['์ด๊ตฌ๋งค์ก']<=0) | (train_data['์ต๋๊ตฌ๋งค์ก']<=0)] | _____no_output_____ | MIT | ๋น
๋ถ๊ธฐ ์ค๊ธฐ ์์ ํ์ด.ipynb | kamzzang/ADPStudy |
* ํ
์คํธ ๋ฐ์ดํฐ๋ ํ์ธ * 0๊ณผ ์์์ ๊ฐ์ ์ด๋ป๊ฒ ์ฒ๋ฆฌํ ๊ฒ์ธ๊ฐ? | X_test[(X_test['์ด๊ตฌ๋งค์ก']<=0) | (X_test['์ต๋๊ตฌ๋งค์ก']<=0)] | _____no_output_____ | MIT | ๋น
๋ถ๊ธฐ ์ค๊ธฐ ์์ ํ์ด.ipynb | kamzzang/ADPStudy |
* Compare values in train_data by gender | display(train_data.groupby('gender').min())
display(train_data.groupby('gender').mean())
display(train_data.groupby('gender').max()) | _____no_output_____ | MIT | ๋น
๋ถ๊ธฐ ์ค๊ธฐ ์์ ํ์ด.ipynb | kamzzang/ADPStudy |
* Check the correlations among the train_data columns * No column is strongly correlated with gender | train_data.corr() | _____no_output_____ | MIT | ๋น
๋ถ๊ธฐ ์ค๊ธฐ ์์ ํ์ด.ipynb | kamzzang/ADPStudy |
* No obvious pattern by gender * Check the gender counts in the full dataset | print('Total rows :', len(train_data))
print('Female rows :', len(train_data[train_data['gender']==0]))
print('Male rows :', len(train_data[train_data['gender']==1]))
print('Share of male customers: {}%'.format(round((len(train_data[train_data['gender']==1]) / len(train_data))*100, 1))) | Total rows : 3500
Female rows : 2184
Male rows : 1316
Share of male customers: 37.6%
| MIT | ๋น
๋ถ๊ธฐ ์ค๊ธฐ ์์ ํ์ด.ipynb | kamzzang/ADPStudy |
2. Data preprocessing * Replace missing refund amounts with 0 | train_data['ํ๋ถ๊ธ์ก'].fillna(0, inplace=True)
X_test['ํ๋ถ๊ธ์ก'].fillna(0, inplace=True)
display(train_data.isnull().sum())
display(X_test.isnull().sum()) | _____no_output_____ | MIT | ๋น
๋ถ๊ธฐ ์ค๊ธฐ ์์ ํ์ด.ipynb | kamzzang/ADPStudy |
* ์ฃผ๊ตฌ๋งค์ํ์ด ์ํ๊ฐ์ ์ธ ๋ฐ์ดํฐ ์ญ์ | train_data.drop(train_data[train_data['์ฃผ๊ตฌ๋งค์ํ']=='์ํ๊ฐ์ '].index, inplace=True)
train_data[train_data['์ฃผ๊ตฌ๋งค์ํ']=='์ํ๊ฐ์ '] | _____no_output_____ | MIT | ๋น
๋ถ๊ธฐ ์ค๊ธฐ ์์ ํ์ด.ipynb | kamzzang/ADPStudy |
3. Feature Engineering * Continuous columns: transform to a standard normal scale * Purchases-per-visit and purchase-cycle may be dropped later depending on results (low correlation with the target) | train_data.head()
conti_cols = ['์ด๊ตฌ๋งค์ก', '์ต๋๊ตฌ๋งค์ก', 'ํ๋ถ๊ธ์ก','๋ด์ ์ผ์', '๋ด์ ๋น๊ตฌ๋งค๊ฑด์', '์ฃผ๋ง๋ฐฉ๋ฌธ๋น์จ', '๊ตฌ๋งค์ฃผ๊ธฐ']
# Fit a StandardScaler on the training data and apply the same fitted scaler to the test data
from sklearn.preprocessing import StandardScaler
for col in conti_cols:
scaler = StandardScaler()
scaler.fit(train_data[[col]])
train_data[col] = scaler.transform(train_data[[col]])
X_test[col] = scaler.transform(X_test[[col]])
display(train_data.describe())
display(X_test.describe()) | _____no_output_____ | MIT | ๋น
๋ถ๊ธฐ ์ค๊ธฐ ์์ ํ์ด.ipynb | kamzzang/ADPStudy |
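`StandardScaler` applies the usual z-score transformation, with the mean and standard deviation estimated on the training data only and then reused for the test data:

$$z = \frac{x - \mu_{\text{train}}}{\sigma_{\text{train}}}$$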
* ๋ฒ์ฃผํ ๋ฐ์ดํฐ ์ํซ์ธ์ฝ๋ฉ | categorical_cols = ['์ฃผ๊ตฌ๋งค์ํ', '์ฃผ๊ตฌ๋งค์ง์ ']
for col in categorical_cols:
temp = pd.get_dummies(train_data[col])
train_data = pd.concat([train_data, temp], axis=1)
del train_data[col]
temp = pd.get_dummies(X_test[col])
X_test = pd.concat([X_test, temp], axis=1)
del X_test[col]
display(train_data.head())
display(X_test.head()) | _____no_output_____ | MIT | ๋น
๋ถ๊ธฐ ์ค๊ธฐ ์์ ํ์ด.ipynb | kamzzang/ADPStudy |
4. Classification algorithms | from sklearn.ensemble import RandomForestClassifier # random forest
from sklearn.linear_model import LogisticRegression # logistic regression
from sklearn.metrics import roc_auc_score # ROC-AUC score, the evaluation metric specified above
x_cols = list(train_data.columns)
x_cols.remove('cust_id')
x_cols.remove('gender')
X = train_data[x_cols]
y = train_data['gender']
test_x = X_test[x_cols]
# ๋๋คํฌ๋ ์คํธ ๋ชจ๋ธ ์์ฑ/ํ์ต
model_rf = RandomForestClassifier(n_estimators=100, max_leaf_nodes=32)
model_rf.fit(X, y)
pred_rf = model_rf.predict_proba(X)
print('RF ROCAUC Score: ', roc_auc_score(y, pred_rf[:,1]))
# ๋ก์ง์คํฑ ํ๊ท ๋ชจ๋ธ ์์ฑ/ํ์ต
model_lr = LogisticRegression()
model_lr.fit(X, y)
pred_lr = model_lr.predict_proba(X)
print('LR ROCAUC Score: ', roc_auc_score(y, pred_lr[:,1])) | RF ROCAUC Score: 0.7546568802481672
LR ROCAUC Score: 0.6955802615788438
| MIT | ๋น
๋ถ๊ธฐ ์ค๊ธฐ ์์ ํ์ด.ipynb | kamzzang/ADPStudy |
5. Submit the results | # Use the random forest, which had the higher ROC-AUC score, to generate predictions on the test data
pred_result = pd.DataFrame(model_rf.predict_proba(test_x))
result = pd.concat([X_test['cust_id'], pred_result[[1]]], axis=1)
result.columns = ['cust_id', 'gender'] # the prediction column is otherwise named 1, so rename it to match the required output
result
result.to_csv('result.csv', index=False) # ๋ณ๋์ ์ธ๋ฑ์ค ์์ด csv๋ก ์ ์ฅ | _____no_output_____ | MIT | ๋น
๋ถ๊ธฐ ์ค๊ธฐ ์์ ํ์ด.ipynb | kamzzang/ADPStudy |
A K-means clustering project: The purpose of this exercise is to use pyspark's K-means clustering module to find how many hacker groups were involved in the data breach of a certain technology firm. The firm's forensic engineers were able to collect some metadata on each breach. The firm suspects that there were two or three hacker groups, but they are sure that each group attacked the same number of times, i.e. the attacks were split evenly between the groups. | import findspark
findspark.init('/home/yohannes/spark-2.4.7-bin-hadoop2.7')
%config Completer.use_jedi = False
from pyspark.sql import SparkSession
spark = SparkSession.builder.appName('Kmeans').getOrCreate()
data = spark.read.csv('hack_data.csv',header=True,inferSchema=True) | _____no_output_____ | MIT | K-means clustering_pyspark.ipynb | Molla80/K-means-clustering-using-pyspark |
Data exploration | data.printSchema()
data.head(1) | _____no_output_____ | MIT | K-means clustering_pyspark.ipynb | Molla80/K-means-clustering-using-pyspark |
Creating features vector | from pyspark.ml.feature import VectorAssembler
data.columns
assembler = VectorAssembler(inputCols=['Session_Connection_Time',
'Bytes Transferred',
'Kali_Trace_Used',
'Servers_Corrupted',
'Pages_Corrupted',
'WPM_Typing_Speed'], outputCol='features')
final_data = assembler.transform(data)
final_data.printSchema() | root
|-- Session_Connection_Time: double (nullable = true)
|-- Bytes Transferred: double (nullable = true)
|-- Kali_Trace_Used: integer (nullable = true)
|-- Servers_Corrupted: double (nullable = true)
|-- Pages_Corrupted: double (nullable = true)
|-- Location: string (nullable = true)
|-- WPM_Typing_Speed: double (nullable = true)
|-- features: vector (nullable = true)
| MIT | K-means clustering_pyspark.ipynb | Molla80/K-means-clustering-using-pyspark |
Scaling the data | from pyspark.ml.feature import StandardScaler
scaler = StandardScaler(inputCol='features',outputCol='scaledFeat')
final_data = scaler.fit(final_data).transform(final_data)
final_data.printSchema() | root
|-- Session_Connection_Time: double (nullable = true)
|-- Bytes Transferred: double (nullable = true)
|-- Kali_Trace_Used: integer (nullable = true)
|-- Servers_Corrupted: double (nullable = true)
|-- Pages_Corrupted: double (nullable = true)
|-- Location: string (nullable = true)
|-- WPM_Typing_Speed: double (nullable = true)
|-- features: vector (nullable = true)
|-- scaledFeat: vector (nullable = true)
| MIT | K-means clustering_pyspark.ipynb | Molla80/K-means-clustering-using-pyspark |
K-means clustering model | from pyspark.ml.clustering import KMeans
# let's try with the assumption that there were two hacker groups
kmeans = KMeans(featuresCol='scaledFeat',k=2)
model = kmeans.fit(final_data)
results = model.transform(final_data)
centers = model.clusterCenters()
print(centers)
results.printSchema()
results.describe().show()
results.groupBy('prediction').count().show()
# This shows that the attacks were shared equally between the two hacker groups | +----------+-----+
|prediction|count|
+----------+-----+
| 1| 167|
| 0| 167|
+----------+-----+
| MIT | K-means clustering_pyspark.ipynb | Molla80/K-means-clustering-using-pyspark |
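Since the firm suspects either two or three groups, a natural follow-up is to refit with `k=3` and check whether the attacks still split evenly; an uneven three-way split would support the two-group conclusion, given that each group is known to have attacked the same number of times. A sketch reusing the objects defined above:

```python
# Refit K-means with three clusters and compare the cluster sizes
kmeans3 = KMeans(featuresCol='scaledFeat', k=3)
model3 = kmeans3.fit(final_data)
model3.transform(final_data).groupBy('prediction').count().show()
```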
Graphical visualization of the clusters | results_pd = results.toPandas()
results_pd
import matplotlib.pyplot as plt
%matplotlib inline
plot = plt.scatter(data=results_pd, x=results_pd.index, y=results_pd['Bytes Transferred'],c=results_pd['prediction'])
plt.ylabel('Bytes Transferred') | _____no_output_____ | MIT | K-means clustering_pyspark.ipynb | Molla80/K-means-clustering-using-pyspark |
Credit Risk Classification
Credit risk poses a classification problem that's inherently imbalanced. This is because healthy loans easily outnumber risky loans. In this Challenge, you'll use various techniques to train and evaluate models with imbalanced classes. You'll use a dataset of historical lending activity from a peer-to-peer lending services company to build a model that can identify the creditworthiness of borrowers.
Instructions: This challenge consists of the following subsections:
* Split the Data into Training and Testing Sets
* Create a Logistic Regression Model with the Original Data
* Predict a Logistic Regression Model with Resampled Training Data
Split the Data into Training and Testing Sets
Open the starter code notebook and then use it to complete the following steps.
1. Read the `lending_data.csv` data from the `Resources` folder into a Pandas DataFrame.
2. Create the labels set (`y`) from the "loan_status" column, and then create the features (`X`) DataFrame from the remaining columns. > **Note** A value of `0` in the "loan_status" column means that the loan is healthy. A value of `1` means that the loan has a high risk of defaulting.
3. Check the balance of the labels variable (`y`) by using the `value_counts` function.
4. Split the data into training and testing datasets by using `train_test_split`.
Create a Logistic Regression Model with the Original Data
Employ your knowledge of logistic regression to complete the following steps:
1. Fit a logistic regression model by using the training data (`X_train` and `y_train`).
2. Save the predictions on the testing data labels by using the testing feature data (`X_test`) and the fitted model.
3. Evaluate the model's performance by doing the following: * Calculate the accuracy score of the model. * Generate a confusion matrix. * Print the classification report.
4. Answer the following question: How well does the logistic regression model predict both the `0` (healthy loan) and `1` (high-risk loan) labels?
Predict a Logistic Regression Model with Resampled Training Data
Did you notice the small number of high-risk loan labels? Perhaps a model that uses resampled data will perform better. You'll thus resample the training data and then reevaluate the model. Specifically, you'll use `RandomOverSampler`. To do so, complete the following steps:
1. Use the `RandomOverSampler` module from the imbalanced-learn library to resample the data. Be sure to confirm that the labels have an equal number of data points.
2. Use the `LogisticRegression` classifier and the resampled data to fit the model and make predictions.
3. Evaluate the model's performance by doing the following: * Calculate the accuracy score of the model. * Generate a confusion matrix. * Print the classification report.
4. Answer the following question: How well does the logistic regression model, fit with oversampled data, predict both the `0` (healthy loan) and `1` (high-risk loan) labels?
Write a Credit Risk Analysis Report
For this section, you'll write a brief report that includes a summary and an analysis of the performance of both machine learning models that you used in this challenge. You should write this report as the `README.md` file included in your GitHub repository. Structure your report by using the report template that `Starter_Code.zip` includes, and make sure that it contains the following:
1. An overview of the analysis: Explain the purpose of this analysis.
2. The results: Using bulleted lists, describe the balanced accuracy scores and the precision and recall scores of both machine learning models.
3. A summary: Summarize the results from the machine learning models. Compare the two versions of the dataset predictions. Include your recommendation for the model to use, if any, on the original vs. the resampled data. If you don't recommend either model, justify your reasoning. | # Import the modules
import numpy as np
import pandas as pd
from pathlib import Path
from sklearn.metrics import balanced_accuracy_score
from sklearn.metrics import confusion_matrix
from imblearn.metrics import classification_report_imbalanced
import warnings
warnings.filterwarnings('ignore') | _____no_output_____ | MIT | credit_risk_resampling.ipynb | douglasg-fintec/Credit_Risk_Resampling |
--- Split the Data into Training and Testing Sets Step 1: Read the `lending_data.csv` data from the `Resources` folder into a Pandas DataFrame. | # Read the CSV file from the Resources folder into a Pandas DataFrame
# Using the read_csv function and Path module, create a DataFrame
lending_data_df = pd.read_csv(
Path('./Resources/lending_data.csv'),
).dropna()
# Review the DataFrame
display(lending_data_df.head())
display(lending_data_df.tail()) | _____no_output_____ | MIT | credit_risk_resampling.ipynb | douglasg-fintec/Credit_Risk_Resampling |
Step 2: Create the labels set (`y`) from the "loan_status" column, and then create the features (`X`) DataFrame from the remaining columns. | # Separate the data into labels and features
y = lending_data_df['loan_status']
# Separate the X variable, the features
X = lending_data_df[['loan_size','interest_rate','borrower_income','debt_to_income','num_of_accounts','derogatory_marks','total_debt']]
# Review the y variable Series
display(y[:5])
# Review the X variable DataFrame
display(X.head()) | _____no_output_____ | MIT | credit_risk_resampling.ipynb | douglasg-fintec/Credit_Risk_Resampling |
Step 3: Check the balance of the labels variable (`y`) by using the `value_counts` function. | # Check the balance of our target values
y.value_counts() | _____no_output_____ | MIT | credit_risk_resampling.ipynb | douglasg-fintec/Credit_Risk_Resampling |
Step 4: Split the data into training and testing datasets by using `train_test_split`. | # Import the train_test_learn module
from sklearn.model_selection import train_test_split
# Split the data using train_test_split
# Assign a random_state of 1 to the function
X_train, X_test,y_train,y_test= train_test_split(X, y,random_state=1)
| _____no_output_____ | MIT | credit_risk_resampling.ipynb | douglasg-fintec/Credit_Risk_Resampling |
--- Create a Logistic Regression Model with the Original Data Step 1: Fit a logistic regression model by using the training data (`X_train` and `y_train`). | # Import the LogisticRegression module from SKLearn
from sklearn.linear_model import LogisticRegression
# Instantiate the Logistic Regression model
# Assign a random_state parameter of 1 to the model
logistic_regression_model = LogisticRegression(random_state=1)
# Fit the model using training data
logistic_regression_model.fit(X_train,y_train) | _____no_output_____ | MIT | credit_risk_resampling.ipynb | douglasg-fintec/Credit_Risk_Resampling |
Step 2: Save the predictions on the testing data labels by using the testing feature data (`X_test`) and the fitted model. | # Make a prediction using the testing data
y_predict = logistic_regression_model.predict(X_test) | _____no_output_____ | MIT | credit_risk_resampling.ipynb | douglasg-fintec/Credit_Risk_Resampling |
Step 3: Evaluate the model's performance by doing the following: * Calculate the accuracy score of the model. * Generate a confusion matrix. * Print the classification report. | # Print the balanced_accuracy score of the model
balanced_accuracy = balanced_accuracy_score(y_test,y_predict)
print(balanced_accuracy)
# Generate a confusion matrix for the model
logistic_regression_matrix =confusion_matrix(y_test, y_predict)
print(logistic_regression_matrix)
# Print the classification report for the model
logistic_regression_report = classification_report_imbalanced(y_test, y_predict)
print(logistic_regression_report) | pre rec spe f1 geo iba sup
0 1.00 0.99 0.91 1.00 0.95 0.91 18765
1 0.85 0.91 0.99 0.88 0.95 0.90 619
avg / total 0.99 0.99 0.91 0.99 0.95 0.91 19384
| MIT | credit_risk_resampling.ipynb | douglasg-fintec/Credit_Risk_Resampling |
Step 4: Answer the following question.
**Question:** How well does the logistic regression model predict both the `0` (healthy loan) and `1` (high-risk loan) labels?
**Answer:** The model performs better on the `0` class than on the `1` class. Precision and recall for the `0` class (healthy loans) are much higher than for the `1` class (high-risk loans). Precision for the `0` class is 1.00, meaning that essentially every test observation the model predicted to be `0` was actually `0`. This is largely because we are working with an imbalanced dataset in which `0` is the majority class, with 18,765 instances versus only 619 in the minority class. In contrast, of the observations the model predicted to be `1`, only 85% were correct.
The recall scores for the two classes are close (0.99 for `0` versus 0.91 for `1`), meaning the model identifies actual members of each class at a similar rate.
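For reference, the per-class scores discussed above are defined as

$$\text{precision} = \frac{TP}{TP + FP}, \qquad \text{recall} = \frac{TP}{TP + FN},$$

which is why precision for the small high-risk class can drop even when its recall stays high: a handful of false positives weighs heavily against only 619 true class-1 samples.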
--- Predict a Logistic Regression Model with Resampled Training Data Step 1: Use the `RandomOverSampler` module from the imbalanced-learn library to resample the data. Be sure to confirm that the labels have an equal number of data points. | # Import the RandomOverSampler module form imbalanced-learn
from imblearn.over_sampling import RandomOverSampler
# Instantiate the random oversampler model
# # Assign a random_state parameter of 1 to the model
random_oversampler_model = RandomOverSampler(random_state=1)
# Fit the original training data to the random_oversampler model
X_resampled, y_resampled = random_oversampler_model.fit_resample(X_train,y_train)
# Count the distinct values of the resampled labels data
y_resampled.value_counts()
| _____no_output_____ | MIT | credit_risk_resampling.ipynb | douglasg-fintec/Credit_Risk_Resampling |
Step 2: Use the `LogisticRegression` classifier and the resampled data to fit the model and make predictions. | # Instantiate the Logistic Regression model
# Assign a random_state parameter of 1 to the model
logistic_regression_resample_model = LogisticRegression(random_state=1)
# Fit the model using the resampled training data
logistic_regression_resample_model.fit(X_resampled, y_resampled)
# Make a prediction using the testing data
y_resampled_perdict = logistic_regression_resample_model.predict(X_test) | _____no_output_____ | MIT | credit_risk_resampling.ipynb | douglasg-fintec/Credit_Risk_Resampling |
Step 3: Evaluate the model's performance by doing the following: * Calculate the accuracy score of the model. * Generate a confusion matrix. * Print the classification report. | # Print the balanced_accuracy score of the model
balanced_accuracy = balanced_accuracy_score(y_test,y_resampled_perdict)
print(balanced_accuracy)
# Generate a confusion matrix for the model
logistic_regression_resample_matrtix = confusion_matrix(y_test, y_resampled_perdict)
print(logistic_regression_resample_matrtix)
# Print the classification report for the model
logistic_regression_resample_report = classification_report_imbalanced(y_test, y_resampled_perdict)
print(logistic_regression_resample_report) | pre rec spe f1 geo iba sup
0 1.00 0.99 0.99 1.00 0.99 0.99 18765
1 0.84 0.99 0.99 0.91 0.99 0.99 619
avg / total 0.99 0.99 0.99 0.99 0.99 0.99 19384
| MIT | credit_risk_resampling.ipynb | douglasg-fintec/Credit_Risk_Resampling |
Imports | import os
import time
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import datasets, transforms, models
from collections import OrderedDict
import PIL
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
dataset_location = '/home/marcin/Datasets/camvid/' | _____no_output_____ | MIT | PyTorchNN/1610_PT_FCU.ipynb | marcinbogdanski/ai-sketchpad |
CamVid Dataset | def download(url, dest, md5sum):
import os
import urllib
import hashlib
folder, file = os.path.split(dest)
if folder != '':
os.makedirs(folder, exist_ok=True)
if not os.path.isfile(dest):
print('Downloading', file, '...')
urllib.request.urlretrieve(url, dest)
else:
print('Already Exists:', file)
assert hashlib.md5(open(dest, 'rb').read()).hexdigest() == md5sum
download(url='https://github.com/alexgkendall/SegNet-Tutorial/archive/master.zip',
dest=os.path.join(dataset_location, 'master.zip'),
md5sum='9a61b9d172b649f6e5da7e8ebf75338f')
def extract(src, dest):
import os
import zipfile
path, file = os.path.split(src)
extract_path, _ = os.path.splitext(src)
already_extracted = os.path.isdir(dest)
if not already_extracted:
with zipfile.ZipFile(src, 'r') as zf:
print('Extracting', file, '...')
zf.extractall(dest)
else:
print('Already Extracted:', file)
assert os.path.isdir(extract_path)
extract(src=os.path.join(dataset_location, 'master.zip'),
dest=os.path.join(dataset_location, 'master'))
class camvidLoader(torch.utils.data.Dataset):
def __init__(
self,
root,
split="train",
is_transform=False,
img_size=None,
augmentations=None,
img_norm=True,
test_mode=False,
):
self.root = root
self.split = split
self.img_size = [360, 480]
self.is_transform = is_transform
self.augmentations = augmentations
self.img_norm = img_norm
self.test_mode = test_mode
self.mean = np.array([104.00699, 116.66877, 122.67892])
self.n_classes = 12
self.files = collections.defaultdict(list)
if not self.test_mode:
for split in ["train", "test", "val"]:
file_list = os.listdir(root + "/" + split)
self.files[split] = file_list
def __len__(self):
return len(self.files[self.split])
def __getitem__(self, index):
img_name = self.files[self.split][index]
img_path = self.root + "/" + self.split + "/" + img_name
lbl_path = self.root + "/" + self.split + "annot/" + img_name
img = m.imread(img_path)
img = np.array(img, dtype=np.uint8)
lbl = m.imread(lbl_path)
lbl = np.array(lbl, dtype=np.uint8)
if self.augmentations is not None:
img, lbl = self.augmentations(img, lbl)
if self.is_transform:
img, lbl = self.transform(img, lbl)
return img, lbl
def transform(self, img, lbl):
img = m.imresize(img, (self.img_size[0], self.img_size[1])) # uint8 with RGB mode
img = img[:, :, ::-1] # RGB -> BGR
img = img.astype(np.float64)
img -= self.mean
if self.img_norm:
# Resize scales images from 0 to 255, thus we need
# to divide by 255.0
img = img.astype(float) / 255.0
# NHWC -> NCHW
img = img.transpose(2, 0, 1)
img = torch.from_numpy(img).float()
lbl = torch.from_numpy(lbl).long()
return img, lbl
def decode_segmap(self, temp, plot=False):
Sky = [128, 128, 128]
Building = [128, 0, 0]
Pole = [192, 192, 128]
Road = [128, 64, 128]
Pavement = [60, 40, 222]
Tree = [128, 128, 0]
SignSymbol = [192, 128, 128]
Fence = [64, 64, 128]
Car = [64, 0, 128]
Pedestrian = [64, 64, 0]
Bicyclist = [0, 128, 192]
Unlabelled = [0, 0, 0]
label_colours = np.array(
[
Sky,
Building,
Pole,
Road,
Pavement,
Tree,
SignSymbol,
Fence,
Car,
Pedestrian,
Bicyclist,
Unlabelled,
]
)
r = temp.copy()
g = temp.copy()
b = temp.copy()
for l in range(0, self.n_classes):
r[temp == l] = label_colours[l, 0]
g[temp == l] = label_colours[l, 1]
b[temp == l] = label_colours[l, 2]
rgb = np.zeros((temp.shape[0], temp.shape[1], 3))
rgb[:, :, 0] = r / 255.0
rgb[:, :, 1] = g / 255.0
rgb[:, :, 2] = b / 255.0
return rgb
import scipy.misc as m
import collections
t_loader = camvidLoader(
root=os.path.join(dataset_location, 'master/SegNet-Tutorial-master/CamVid'),
split='train', is_transform=True, img_size=(360, 480))
img, lbl = t_loader[0]
lbl.max()
t_loader.files['train'][0]
import functools
class fcn32s(nn.Module):
def __init__(self, n_classes=21, learned_billinear=False):
super(fcn32s, self).__init__()
self.learned_billinear = learned_billinear
self.n_classes = n_classes
self.loss = functools.partial(cross_entropy2d, size_average=False)
self.conv_block1 = nn.Sequential(
nn.Conv2d(3, 64, 3, padding=100),
nn.ReLU(inplace=True),
nn.Conv2d(64, 64, 3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(2, stride=2, ceil_mode=True),
)
self.conv_block2 = nn.Sequential(
nn.Conv2d(64, 128, 3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(128, 128, 3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(2, stride=2, ceil_mode=True),
)
self.conv_block3 = nn.Sequential(
nn.Conv2d(128, 256, 3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(256, 256, 3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(256, 256, 3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(2, stride=2, ceil_mode=True),
)
self.conv_block4 = nn.Sequential(
nn.Conv2d(256, 512, 3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(512, 512, 3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(512, 512, 3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(2, stride=2, ceil_mode=True),
)
self.conv_block5 = nn.Sequential(
nn.Conv2d(512, 512, 3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(512, 512, 3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(512, 512, 3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(2, stride=2, ceil_mode=True),
)
self.classifier = nn.Sequential(
nn.Conv2d(512, 4096, 7),
nn.ReLU(inplace=True),
nn.Dropout2d(),
nn.Conv2d(4096, 4096, 1),
nn.ReLU(inplace=True),
nn.Dropout2d(),
nn.Conv2d(4096, self.n_classes, 1),
)
if self.learned_billinear:
raise NotImplementedError
def forward(self, x):
conv1 = self.conv_block1(x)
conv2 = self.conv_block2(conv1)
conv3 = self.conv_block3(conv2)
conv4 = self.conv_block4(conv3)
conv5 = self.conv_block5(conv4)
score = self.classifier(conv5)
out = F.upsample(score, x.size()[2:])
return out
def init_vgg16_params(self, vgg16, copy_fc8=True):
blocks = [
self.conv_block1,
self.conv_block2,
self.conv_block3,
self.conv_block4,
self.conv_block5,
]
ranges = [[0, 4], [5, 9], [10, 16], [17, 23], [24, 29]]
features = list(vgg16.features.children())
for idx, conv_block in enumerate(blocks):
for l1, l2 in zip(features[ranges[idx][0] : ranges[idx][1]], conv_block):
if isinstance(l1, nn.Conv2d) and isinstance(l2, nn.Conv2d):
assert l1.weight.size() == l2.weight.size()
assert l1.bias.size() == l2.bias.size()
l2.weight.data = l1.weight.data
l2.bias.data = l1.bias.data
for i1, i2 in zip([0, 3], [0, 3]):
l1 = vgg16.classifier[i1]
l2 = self.classifier[i2]
l2.weight.data = l1.weight.data.view(l2.weight.size())
l2.bias.data = l1.bias.data.view(l2.bias.size())
n_class = self.classifier[6].weight.size()[0]
if copy_fc8:
l1 = vgg16.classifier[6]
l2 = self.classifier[6]
l2.weight.data = l1.weight.data[:n_class, :].view(l2.weight.size())
l2.bias.data = l1.bias.data[:n_class]
def cross_entropy2d(input, target, weight=None, size_average=True):
n, c, h, w = input.size()
nt, ht, wt = target.size()
# Handle inconsistent size between input and target
if h != ht and w != wt: # upsample labels
input = F.interpolate(input, size=(ht, wt), mode="bilinear", align_corners=True)
input = input.transpose(1, 2).transpose(2, 3).contiguous().view(-1, c)
target = target.view(-1)
loss = F.cross_entropy(
input, target, weight=weight, size_average=size_average, ignore_index=250
)
return loss
model = fcn32s(n_classes=12)
vgg16 = models.vgg16(pretrained=True)
model.init_vgg16_params(vgg16)
res = model(img.expand(1, -1, -1, -1))
def plot_all(img, res, lbl):
fig, (ax1, ax2, ax3) = plt.subplots(nrows=1, ncols=3, figsize=[16,9])
kkk = np.array(img.numpy().transpose(1, 2, 0)*255 + t_loader.mean, dtype=int)
kkk = kkk[:,:,::-1]
ax1.imshow(kkk)
arr = np.argmax( res.detach()[0].numpy(), axis=0) # res to numpy
ax2.imshow(t_loader.decode_segmap(arr))
ax3.imshow(t_loader.decode_segmap(lbl.numpy()))
plot_all(img, res, lbl)
# Minimal training loop; the optimizer choice and hyperparameters below are placeholder assumptions
trainloader = torch.utils.data.DataLoader(t_loader, batch_size=1, shuffle=True)
optimizer = torch.optim.SGD(model.parameters(), lr=1e-4, momentum=0.99)
for e in range(300000):
    for imgs, lbls in trainloader:
        optimizer.zero_grad()
        out = model(imgs)
        loss = model.loss(input=out, target=lbls)  # cross_entropy2d with size_average=False
        loss.backward()
        optimizer.step() | _____no_output_____ | MIT | PyTorchNN/1610_PT_FCU.ipynb | marcinbogdanski/ai-sketchpad |
United States - Crime Rates - 1960 - 2014. Introduction: This time you will work with time-series data. Special thanks to: https://github.com/justmarkham for sharing the dataset and materials. Step 1. Import the necessary libraries | import numpy as np
import pandas as pd | _____no_output_____ | BSD-3-Clause | 04_Apply/US_Crime_Rates/Exercises_with_solutions.ipynb | chisus089/3_pandas_exercises |
Step 2. Import the dataset from this [address](https://raw.githubusercontent.com/guipsamora/pandas_exercises/master/04_Apply/US_Crime_Rates/US_Crime_Rates_1960_2014.csv). Step 3. Assign it to a variable called crime. | url = "https://raw.githubusercontent.com/guipsamora/pandas_exercises/master/04_Apply/US_Crime_Rates/US_Crime_Rates_1960_2014.csv"
crime = pd.read_csv(url)
crime.head() | _____no_output_____ | BSD-3-Clause | 04_Apply/US_Crime_Rates/Exercises_with_solutions.ipynb | chisus089/3_pandas_exercises |
Step 4. What is the type of the columns? | crime.info() | <class 'pandas.core.frame.DataFrame'>
RangeIndex: 55 entries, 0 to 54
Data columns (total 12 columns):
Year 55 non-null int64
Population 55 non-null int64
Total 55 non-null int64
Violent 55 non-null int64
Property 55 non-null int64
Murder 55 non-null int64
Forcible_Rape 55 non-null int64
Robbery 55 non-null int64
Aggravated_assault 55 non-null int64
Burglary 55 non-null int64
Larceny_Theft 55 non-null int64
Vehicle_Theft 55 non-null int64
dtypes: int64(12)
memory usage: 5.2 KB
| BSD-3-Clause | 04_Apply/US_Crime_Rates/Exercises_with_solutions.ipynb | chisus089/3_pandas_exercises |
Have you noticed that the type of Year is int64. But pandas has a different type to work with Time Series. Let's see it now. Step 5. Convert the type of the column Year to datetime64 | # pd.to_datetime(crime)
crime.Year = pd.to_datetime(crime.Year, format='%Y')
crime.info() | <class 'pandas.core.frame.DataFrame'>
RangeIndex: 55 entries, 0 to 54
Data columns (total 12 columns):
Year 55 non-null datetime64[ns]
Population 55 non-null int64
Total 55 non-null int64
Violent 55 non-null int64
Property 55 non-null int64
Murder 55 non-null int64
Forcible_Rape 55 non-null int64
Robbery 55 non-null int64
Aggravated_assault 55 non-null int64
Burglary 55 non-null int64
Larceny_Theft 55 non-null int64
Vehicle_Theft 55 non-null int64
dtypes: datetime64[ns](1), int64(11)
memory usage: 5.2 KB
| BSD-3-Clause | 04_Apply/US_Crime_Rates/Exercises_with_solutions.ipynb | chisus089/3_pandas_exercises |
Step 6. Set the Year column as the index of the dataframe | crime = crime.set_index('Year', drop = True)
crime.head() | _____no_output_____ | BSD-3-Clause | 04_Apply/US_Crime_Rates/Exercises_with_solutions.ipynb | chisus089/3_pandas_exercises |
Step 7. Delete the Total column | del crime['Total']
crime.head() | _____no_output_____ | BSD-3-Clause | 04_Apply/US_Crime_Rates/Exercises_with_solutions.ipynb | chisus089/3_pandas_exercises |
Step 8. Group the years by decade and sum the values. Note that simply summing the Population column would be a mistake; for population, take the decade maximum instead. | # To learn more about .resample (https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.resample.html)
# To learn more about Offset Aliases (http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases)
# Uses resample to sum each decade
crimes = crime.resample('10AS').sum()
# Uses resample to get the max value only for the "Population" column
population = crime['Population'].resample('10AS').max()
# Updating the "Population" column
crimes['Population'] = population
crimes | _____no_output_____ | BSD-3-Clause | 04_Apply/US_Crime_Rates/Exercises_with_solutions.ipynb | chisus089/3_pandas_exercises |
Step 9. What is the most dangerous decade to live in the US? | # apparently the 90s was a pretty dangerous time in the US
crimes.idxmax(0) | _____no_output_____ | BSD-3-Clause | 04_Apply/US_Crime_Rates/Exercises_with_solutions.ipynb | chisus089/3_pandas_exercises |
Exercise 1.1: Plot $f(x) = 1 - e^{2x}$ over $[-1, 1]$ with a step of $0.01$. | """
Exercise1.1
Plot f(x) = 1 - e ^ (2 * x) over [-1, 1] with intervals .01
"""
from numpy import arange, array, exp
from matplotlib.pyplot import plot, xlabel, ylabel, legend
x_range = arange(-1, 1, .01)
y_range = array([1 - exp(2 * x) for x in x_range])
plot(x_range, y_range, 'k-', label = "Exercise1.1")
ylabel("y")
xlabel("x")
legend(loc='upper right') | _____no_output_____ | MIT | Exercise01.ipynb | lnsongxf/Applied_Computational_Economics_and_Finance |
Exercise 1.2: Given $$A = \left[\begin{array}{ccc} 0 & -1 & 2\\ -2 & -1 & 4\\ 2 & 7 & -2\end{array}\right], \qquad B = \left[\begin{array}{ccc} -7 & 1 & 1\\ 7 & -3 & -2\\ 3 & 5 & 0\end{array}\right], \qquad y = [3, -1, 2],$$ compute $C$ as the matrix product $AB$ and solve the linear system $$Cx = y$$ (part (b) repeats this with the element-wise Hadamard product). | """
Exercise1.2
Solve matrix multiplication
"""
from numpy import array, linalg
A = array([[0, -1, 2], [-2, -1, 4], [2, 7, -2]])
B = array([[-7, 1, 1], [7, -3, -2], [3, 5, 0]])
y = array([3, -1, 2])
# part_a():
"""
Solve Cx = y using standard matrix multiplication for A and B
"""
C = A.dot(B)
x = linalg.solve(C, y)
print("The standard matrix product C: " ,C)
print("\nSolution from Matrix multiplication: ", x)
#part_b():
"""
Solve Cx = y using element-wise multiplication (Hadamard product)
"""
C = A * B
x = linalg.solve(C, y)
print("\nThe element-by-element matrix product C:")
print(C)
print("\nSolution from Element-wise multiplication:")
print(x) |
The element-by-element matrix product C:
[[ 0 -1 2]
[-14 3 -8]
[ 6 35 0]]
Solution from Element-wise multiplication:
[-0.79958678 0.19421488 1.59710744]
| MIT | Exercise01.ipynb | lnsongxf/Applied_Computational_Economics_and_Finance |
Exercise 1.3: Calculate the time series $$y_t = 5 + 0.05\,t + \epsilon_t$$ for years $1960, 1961, \ldots, 2016$, assuming $\epsilon_t$ is independently and identically distributed with mean $0$ and standard deviation $0.2$. | # Setting a random seed for reproducibility
import numpy as np
rnd = np.random.RandomState(seed=123)
# https://docs.scipy.org/doc/numpy/reference/generated/numpy.random.RandomState.html
"""
Exercise1.3
calculate the time series
"""
from numpy import random, array, polyfit, poly1d
mu = 0.0  # the exercise specifies a mean of 0
sigma = 0.2
"""
Create the time series yt, then fit a regression to yt and plot yt together with its trendline
"""
start_year = 1960
end_year = 2016
t_array = array(range(start_year, end_year + 1))
# Generating a random array
epsilon_t = array(rnd.normal(mu, sigma,len(t_array)))
#https://docs.scipy.org/doc/numpy/reference/generated/numpy.random.normal.html
yt = array([5 + .05 * t_i + epsilon_t[i] for i, t_i in enumerate(t_array)])
fit = polyfit(t_array, yt, 1)
#https://docs.scipy.org/doc/numpy/reference/generated/numpy.polyfit.html
"""
Least squares polynomial fit.
Fit a polynomial p(x) = p[0] * x**deg + ... + p[deg] of degree deg to points (x, y).
Returns a vector of coefficients p that minimises the squared error.
"""
fit_func = poly1d(fit)
"""
https://docs.scipy.org/doc/numpy/reference/generated/numpy.poly1d.html
A one-dimensional polynomial class.
A convenience class,
used to encapsulate โnaturalโ operations on polynomials
so that said operations may take on their customary form in code .
"""
# two plots together
plot(t_array, yt, "yo", t_array, fit_func(t_array), "--k")
| _____no_output_____ | MIT | Exercise01.ipynb | lnsongxf/Applied_Computational_Economics_and_Finance |
Exercise 1.4: Consider the original example with the farmer, where acreage planted is $$a = 0.5 + 0.5\,E[p]$$ ($E[p]$ is the expected price), quantity is $$q = a \cdot y$$ ($y$ is yield), and the market-clearing price is $$p = 3 - 2q.$$ Assume that yield follows a two-point distribution, ```y = array([0.7, 1.3])```, each outcome with probability 0.5. Our goal for part (a) is to compute the variance of the resulting price distribution, i.e. $\sigma^2$. |
from math import exp, fabs
from numpy import array, var
#part_a():
"""
Compute the variance in price
"""
a = 1
y, w = array([0.7, 1.3]), array([0.5, 0.5])
for _ in range(100):
a_previous = a
p = 3 - 2 * a * y
f = w.dot(p)
a = 0.5 + 0.5 * f
if fabs(a_previous - a) < exp(-8):
break
print("acreage", a, "variance:", var(p), "expectation", p.dot(w))
| _____no_output_____ | MIT | Exercise01.ipynb | lnsongxf/Applied_Computational_Economics_and_Finance |
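The loop converges to a fixed point that can be checked by hand. With $E[y] = 0.5(0.7) + 0.5(1.3) = 1$:

$$a = 0.5 + 0.5\,E[p], \qquad E[p] = 3 - 2a\,E[y] = 3 - 2a \;\Rightarrow\; a = 1,$$

$$p = 3 - 2y \in \{1.6,\; 0.4\}, \qquad E[p] = 1, \qquad \operatorname{Var}(p) = \tfrac{1}{2}(0.6)^2 + \tfrac{1}{2}(-0.6)^2 = 0.36,$$

so the printed variance should be 0.36 and the expected price 1.0.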
Model | xb, yb = dls_feat.one_batch(); xb.shape
from torch.nn import TransformerEncoder, TransformerEncoderLayer
class SeqHead(nn.Module):
def __init__(self):
super().__init__()
# d_model = 2048+6+1
d_model = 1024
n_head = 4
self.flat = nn.Sequential(AdaptiveConcatPool2d(), Flatten())
self.hook = ReshapeBodyHook(self.flat)
# self.linear = nn.Linear(d_model+7, d_model)
encoder_layers = TransformerEncoderLayer(d_model, n_head, d_model*2)
self.transformer = TransformerEncoder(encoder_layers, 4)
self.head = nn.Sequential(nn.Linear(d_model,6))
def forward(self, x):
x = self.flat(x)
# x = torch.cat(x, axis=-1)
# x = self.linear(x)
feat = self.transformer(x.transpose(0,1))
return self.head(feat.transpose(0,1))
m = SeqHead()
name = 'train3d_baseline_feat_transformer'
learn = get_learner(dls_feat, m, name=name)
learn.add_cb(DePadLoss())
xb.shape
# with torch.no_grad():
# learn.model(xb).shape
# learn.summary() | _____no_output_____ | Apache-2.0 | 03_train3d_experiments/03_train3d_02d_train_transformer_head.ipynb | bearpelican/rsna_retro |
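A note on the `transpose(0, 1)` calls in `SeqHead.forward`: by default `nn.TransformerEncoder` expects input shaped `(seq_len, batch, d_model)` rather than batch-first. A small standalone check of that convention (the batch and sequence sizes here are arbitrary examples):

```python
import torch
import torch.nn as nn

enc_layer = nn.TransformerEncoderLayer(d_model=1024, nhead=4, dim_feedforward=2048)
encoder = nn.TransformerEncoder(enc_layer, num_layers=1)

x = torch.randn(2, 60, 1024)                      # (batch, seq_len, features) as produced by the flattened backbone
out = encoder(x.transpose(0, 1)).transpose(0, 1)  # to (seq_len, batch, features) and back
print(out.shape)                                  # torch.Size([2, 60, 1024])
```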
Training | learn.lr_find()
do_fit(learn, 10, 1e-4)
learn.save(f'runs/{name}-1') | _____no_output_____ | Apache-2.0 | 03_train3d_experiments/03_train3d_02d_train_transformer_head.ipynb | bearpelican/rsna_retro |
Testing | sub_fn = f'subm/{name}'
learn.load(f'runs/{name}-1')
learn.validate()
learn.dls = get_3d_dls_feat(Meta.df_tst, path=path_feat_tst_384avg, bs=32, test=True)
preds,targs = learn.get_preds()
preds.shape, preds.min(), preds.max()
pred_csv = submission(Meta.df_tst, preds, fn=sub_fn)
api.competition_submit(f'{sub_fn}.csv', name, 'rsna-intracranial-hemorrhage-detection')
api.competitions_submissions_list('rsna-intracranial-hemorrhage-detection')[0] | _____no_output_____ | Apache-2.0 | 03_train3d_experiments/03_train3d_02d_train_transformer_head.ipynb | bearpelican/rsna_retro |
1. Create Train Script | %%file train
#!/usr/bin/env python
from sklearn.neighbors import KNeighborsClassifier
import pandas as pd
import numpy as np
import pickle
import os
np.random.seed(123)
# Define paths for Model Training inside Container.
INPUT_PATH = '/opt/ml/input/data'
OUTPUT_PATH = '/opt/ml/output'
MODEL_PATH = '/opt/ml/model'
PARAM_PATH = '/opt/ml/input/config/hyperparameters.json'
# Training data sitting in S3 will be copied to this location during training when used with File MODE.
TRAIN_DATA_PATH = f'{INPUT_PATH}/train'
TEST_DATA_PATH = f'{INPUT_PATH}/test'
def train():
print("------- [STARTING TRAINING] -------")
train_df = pd.read_csv(os.path.join(TRAIN_DATA_PATH, 'train.csv'), names=['class', 'mass', 'width', 'height', 'color_score'])
train_df.head()
X_train = train_df[['mass', 'width', 'height', 'color_score']]
y_train = train_df['class']
knn = KNeighborsClassifier()
knn.fit(X_train, y_train)
# Save the trained Model inside the Container
with open(os.path.join(MODEL_PATH, 'model.pkl'), 'wb') as out:
pickle.dump(knn, out)
print("------- [TRAINING COMPLETE!] -------")
print("------- [STARTING EVALUATION] -------")
test_df = pd.read_csv(os.path.join(TEST_DATA_PATH, 'test.csv'), names=['class', 'mass', 'width', 'height', 'color_score'])
X_test = test_df[['mass', 'width', 'height', 'color_score']]
y_test = test_df['class']
acc = knn.score(X_test, y_test)
print('Accuracy = {:.2f}%'.format(acc * 100))
print("------- [EVALUATION DONE!] -------")
if __name__ == '__main__':
train() | Overwriting train
| Apache-2.0 | SageMaker/Training-Inference/6. BYOC Sklearn/BYOC Sklearn.ipynb | arunprsh/AI-ML-Examples |
2. Create Serve Script | %%file serve
#!/usr/bin/env python
from flask import Flask, Response, request
from io import StringIO
import pandas as pd
import logging
import pickle
import os
app = Flask(__name__)
MODEL_PATH = '/opt/ml/model'
# Singleton Class for holding the Model
class Predictor:
model = None
@classmethod
def load_model(cls):
print('[LOADING MODEL]')
if cls.model is None:
with open(os.path.join(MODEL_PATH, 'model.pkl'), 'rb') as file_:
cls.model = pickle.load(file_)
print('MODEL LOADED!')
return cls.model
@classmethod
def predict(cls, X):
clf = cls.load_model()
return clf.predict(X)
@app.route('/ping', methods=['GET'])
def ping():
print('[HEALTH CHECK]')
model = Predictor.load_model()
status = 200
if model is None:
status = 404
return Response(response={"HEALTH CHECK": "OK"}, status=status, mimetype='application/json')
@app.route('/invocations', methods=['POST'])
def invoke():
data = None
# Transform Payload in CSV to Pandas DataFrame.
if request.content_type == 'text/csv':
data = request.data.decode('utf-8')
data = StringIO(data)
data = pd.read_csv(data, header=None)
else:
return Response(response='This Predictor only supports CSV data', status=415, mimetype='text/plain')
logging.info('Invoked with {} records'.format(data.shape[0]))
predictions = Predictor.predict(data)
# Convert from numpy back to CSV
out = StringIO()
pd.DataFrame({'results': predictions}).to_csv(out, header=False, index=False)
result = out.getvalue()
return Response(response=result, status=200, mimetype='text/csv')
if __name__ == '__main__':
app.run(host='0.0.0.0', port=8080) | Overwriting serve
| Apache-2.0 | SageMaker/Training-Inference/6. BYOC Sklearn/BYOC Sklearn.ipynb | arunprsh/AI-ML-Examples |
3. Build a Docker Image and Push to ECR | %%sh
# Assign a name for your Docker image.
image_name=byoc-sklearn
echo "Image Name: ${image_name}"
# Retrieve AWS Account.
account=$(aws sts get-caller-identity --query Account --output text)
# Get the region defined in the current configuration (default to us-east-1 if none defined).
region=$(aws configure get region)
region=${region:-us-east-1}
echo "Account: ${account}"
echo "Region: ${region}"
repository="${account}.dkr.ecr.${region}.amazonaws.com"
echo "Repository: ${repository}"
image="${account}.dkr.ecr.${region}.amazonaws.com/${image_name}:latest"
echo "Image URI: ${image}"
# If the repository does not exist in ECR, create it.
aws ecr describe-repositories --repository-names ${image_name} > /dev/null 2>&1
if [ $? -ne 0 ]
then
aws ecr create-repository --repository-name ${image_name} > /dev/null
fi
# Get the login command from ECR and execute it directly.
aws ecr get-login-password --region ${region} | docker login --username AWS --password-stdin ${repository}
# Build the docker image locally with the image name and tag it.
docker build -t ${image_name} .
docker tag ${image_name} ${image}
# Finally, push image to ECR with the full image name.
docker push ${image} | Image Name: byoc-sklearn
Account: 892313895307
Region: us-east-1
Repository: 892313895307.dkr.ecr.us-east-1.amazonaws.com
Image URI: 892313895307.dkr.ecr.us-east-1.amazonaws.com/byoc-sklearn:latest
Login Succeeded
Sending build context to Docker daemon 80.38kB
Step 1/8 : FROM python:3.7
---> 5b86e11778a2
Step 2/8 : COPY requirements.txt ./
---> Using cache
---> 8623cb69764a
Step 3/8 : RUN pip install --no-cache-dir -r requirements.txt
---> Using cache
---> 00be6a106a8c
Step 4/8 : COPY train /usr/local/bin
---> Using cache
---> f55d18c34b89
Step 5/8 : RUN chmod +x /usr/local/bin/train
---> Using cache
---> aae62ce0c43b
Step 6/8 : COPY serve /usr/local/bin
---> Using cache
---> d9408249ae77
Step 7/8 : RUN chmod +x /usr/local/bin/serve
---> Using cache
---> 04fc001c0b7c
Step 8/8 : EXPOSE 8080
---> Using cache
---> 6990c97b2383
Successfully built 6990c97b2383
Successfully tagged byoc-sklearn:latest
The push refers to repository [892313895307.dkr.ecr.us-east-1.amazonaws.com/byoc-sklearn]
032f1a03bf08: Preparing
053f064686a0: Preparing
59239f9a3c52: Preparing
34bf625dab71: Preparing
415a4c435e2d: Preparing
a9066f74cbd8: Preparing
1b17be258ee0: Preparing
6522a2852221: Preparing
56a69ef72608: Preparing
6f7043721c9b: Preparing
a933681cf349: Preparing
f49d20b92dc8: Preparing
fe342cfe5c83: Preparing
630e4f1da707: Preparing
9780f6d83e45: Preparing
56a69ef72608: Waiting
f49d20b92dc8: Waiting
6f7043721c9b: Waiting
fe342cfe5c83: Waiting
630e4f1da707: Waiting
9780f6d83e45: Waiting
a933681cf349: Waiting
1b17be258ee0: Waiting
a9066f74cbd8: Waiting
6522a2852221: Waiting
34bf625dab71: Layer already exists
032f1a03bf08: Layer already exists
053f064686a0: Layer already exists
415a4c435e2d: Layer already exists
59239f9a3c52: Layer already exists
a9066f74cbd8: Layer already exists
1b17be258ee0: Layer already exists
6f7043721c9b: Layer already exists
56a69ef72608: Layer already exists
6522a2852221: Layer already exists
f49d20b92dc8: Layer already exists
a933681cf349: Layer already exists
fe342cfe5c83: Layer already exists
9780f6d83e45: Layer already exists
630e4f1da707: Layer already exists
latest: digest: sha256:a2ebe6e788d472b87131c8ee6e7ef75d3e2cb27b01d9f1d41d6d4e88274d0e5e size: 3467
| Apache-2.0 | SageMaker/Training-Inference/6. BYOC Sklearn/BYOC Sklearn.ipynb | arunprsh/AI-ML-Examples |
Imports | from sagemaker.predictor import csv_serializer
import pandas as pd
import sagemaker | _____no_output_____ | Apache-2.0 | SageMaker/Training-Inference/6. BYOC Sklearn/BYOC Sklearn.ipynb | arunprsh/AI-ML-Examples |
Essentials | role = sagemaker.get_execution_role()
session = sagemaker.Session()
account = session.boto_session.client('sts').get_caller_identity()['Account']
region = session.boto_session.region_name
image_name = 'byoc-sklearn'
image_uri = f'{account}.dkr.ecr.{region}.amazonaws.com/{image_name}:latest' | _____no_output_____ | Apache-2.0 | SageMaker/Training-Inference/6. BYOC Sklearn/BYOC Sklearn.ipynb | arunprsh/AI-ML-Examples |
Train (Local Mode) | model = sagemaker.estimator.Estimator(
image_name=image_uri,
role=role,
train_instance_count=1,
train_instance_type='local',
sagemaker_session=None
)
model.fit({'train': 'file://.././DATA/train/train.csv', 'test': 'file://.././DATA/test/test.csv'}) | Creating tmp815252o0_algo-1-n7amc_1 ...
[1BAttaching to tmp815252o0_algo-1-n7amc_12mdone[0m
[36malgo-1-n7amc_1 |[0m ------- [STARTING TRAINING] -------
[36malgo-1-n7amc_1 |[0m ------- [TRAINING COMPLETE!] -------
[36malgo-1-n7amc_1 |[0m ------- [STARTING EVALUATION] -------
[36malgo-1-n7amc_1 |[0m Accuracy = 97.73%
[36malgo-1-n7amc_1 |[0m ------- [EVALUATION DONE!] -------
[36mtmp815252o0_algo-1-n7amc_1 exited with code 0
[0mAborting on container exit...
===== Job Complete =====
| Apache-2.0 | SageMaker/Training-Inference/6. BYOC Sklearn/BYOC Sklearn.ipynb | arunprsh/AI-ML-Examples |
Deploy (Locally) | predictor = model.deploy(1, 'local', endpoint_name='byoc-sklearn', serializer=csv_serializer) | Parameter image will be renamed to image_uri in SageMaker Python SDK v2.
| Apache-2.0 | SageMaker/Training-Inference/6. BYOC Sklearn/BYOC Sklearn.ipynb | arunprsh/AI-ML-Examples |
Evaluate Real Time Inference (Locally) | df = pd.read_csv('.././DATA/test/test.csv', header=None)
test_df = df.sample(1)
test_df
test_df.drop(test_df.columns[[0]], axis=1, inplace=True)
test_df
test_df.values
prediction = predictor.predict(test_df.values).decode('utf-8').strip()
prediction | _____no_output_____ | Apache-2.0 | SageMaker/Training-Inference/6. BYOC Sklearn/BYOC Sklearn.ipynb | arunprsh/AI-ML-Examples |
Train (using SageMaker) | WORK_DIRECTORY = '.././DATA'
train_data_s3_pointer = session.upload_data(f'{WORK_DIRECTORY}/train', key_prefix='byoc-sklearn/train')
test_data_s3_pointer = session.upload_data(f'{WORK_DIRECTORY}/test', key_prefix='byoc-sklearn/test')
train_data_s3_pointer
test_data_s3_pointer
model = sagemaker.estimator.Estimator(
image_name=image_uri,
role=role,
train_instance_count=1,
train_instance_type='ml.m5.xlarge',
    sagemaker_session=session  # use the SageMaker session here (it was None for local-mode training)
)
model.fit({'train': train_data_s3_pointer, 'test': test_data_s3_pointer}) | 's3_input' class will be renamed to 'TrainingInput' in SageMaker Python SDK v2.
's3_input' class will be renamed to 'TrainingInput' in SageMaker Python SDK v2.
| Apache-2.0 | SageMaker/Training-Inference/6. BYOC Sklearn/BYOC Sklearn.ipynb | arunprsh/AI-ML-Examples |
Deploy Trained Model as SageMaker Endpoint | predictor = model.deploy(1, 'ml.m5.xlarge', endpoint_name='byoc-sklearn', serializer=csv_serializer) | Parameter image will be renamed to image_uri in SageMaker Python SDK v2.
| Apache-2.0 | SageMaker/Training-Inference/6. BYOC Sklearn/BYOC Sklearn.ipynb | arunprsh/AI-ML-Examples |
Real Time Inference using Deployed Endpoint | df = pd.read_csv('.././DATA/test/test.csv', header=None)
test_df = df.sample(1)
test_df.drop(test_df.columns[[0]], axis=1, inplace=True)
test_df
test_df.values
prediction = predictor.predict(test_df.values).decode('utf-8').strip()
prediction | _____no_output_____ | Apache-2.0 | SageMaker/Training-Inference/6. BYOC Sklearn/BYOC Sklearn.ipynb | arunprsh/AI-ML-Examples |
Batch Transform (Batch Inference) using Trained SageMaker Model | bucket_name = session.default_bucket()
output_path = f's3://{bucket_name}/byoc-sklearn/batch_test_out'
transformer = model.transformer(instance_count=1,
instance_type='ml.m5.xlarge',
output_path=output_path,
assemble_with='Line',
accept='text/csv')
WORK_DIRECTORY = '.././DATA'
batch_input = session.upload_data(f'{WORK_DIRECTORY}/batch_test', key_prefix='byoc-sklearn/batch_test')
transformer.transform(batch_input, content_type='text/csv', split_type='Line', input_filter='$')
transformer.wait() | .Gracefully stopping... (press Ctrl+C again to force)
.........................
[34m * Serving Flask app "serve" (lazy loading)
* Environment: production
WARNING: This is a development server. Do not use it in a production deployment.
Use a production WSGI server instead.
* Debug mode: off
* Running on http://0.0.0.0:8080/ (Press CTRL+C to quit)[0m
[34m169.254.255.130 - - [04/Nov/2020 18:01:34] "#033[37mGET /ping HTTP/1.1#033[0m" 200 -[0m
[34m169.254.255.130 - - [04/Nov/2020 18:01:34] "#033[33mGET /execution-parameters HTTP/1.1#033[0m" 404 -[0m
[34m169.254.255.130 - - [04/Nov/2020 18:01:34] "#033[37mPOST /invocations HTTP/1.1#033[0m" 200 -[0m
[34mINFO:werkzeug:169.254.255.130 - - [04/Nov/2020 18:01:34] "#033[37mPOST /invocations HTTP/1.1#033[0m" 200 -[0m
[34m169.254.255.130 - - [04/Nov/2020 18:01:34] "#033[37mPOST /invocations HTTP/1.1#033[0m" 200 -[0m
[34mINFO:werkzeug:169.254.255.130 - - [04/Nov/2020 18:01:34] "#033[37mPOST /invocations HTTP/1.1#033[0m" 200 -[0m
[35m * Serving Flask app "serve" (lazy loading)
* Environment: production
WARNING: This is a development server. Do not use it in a production deployment.
Use a production WSGI server instead.
* Debug mode: off
* Running on http://0.0.0.0:8080/ (Press CTRL+C to quit)[0m
[35m169.254.255.130 - - [04/Nov/2020 18:01:34] "#033[37mGET /ping HTTP/1.1#033[0m" 200 -[0m
[35m169.254.255.130 - - [04/Nov/2020 18:01:34] "#033[33mGET /execution-parameters HTTP/1.1#033[0m" 404 -[0m
[35m169.254.255.130 - - [04/Nov/2020 18:01:34] "#033[37mPOST /invocations HTTP/1.1#033[0m" 200 -[0m
[35mINFO:werkzeug:169.254.255.130 - - [04/Nov/2020 18:01:34] "#033[37mPOST /invocations HTTP/1.1#033[0m" 200 -[0m
[35m169.254.255.130 - - [04/Nov/2020 18:01:34] "#033[37mPOST /invocations HTTP/1.1#033[0m" 200 -[0m
[35mINFO:werkzeug:169.254.255.130 - - [04/Nov/2020 18:01:34] "#033[37mPOST /invocations HTTP/1.1#033[0m" 200 -[0m
| Apache-2.0 | SageMaker/Training-Inference/6. BYOC Sklearn/BYOC Sklearn.ipynb | arunprsh/AI-ML-Examples |
Inspect Batch Transformed Output | s3_client = session.boto_session.client('s3')
s3_client.download_file(bucket_name,
'byoc-sklearn/batch_test_out/batch_test.csv.out',
'.././DATA/batch_test/batch_test.csv.out')
with open('.././DATA/batch_test/batch_test.csv.out', 'r') as f:
results = f.readlines()
print("Transform results: \n{}".format(''.join(results))) | Transform results:
1
3
0
1
1
3
1
3
0
0
0
3
0
0
2
| Apache-2.0 | SageMaker/Training-Inference/6. BYOC Sklearn/BYOC Sklearn.ipynb | arunprsh/AI-ML-Examples |
Chapter 11 Natural Language Processing, Part 1

**Acknowledgment**: This notebook explains the code used in Chapter 11 of François Chollet's [Deep Learning with Python, Second Edition](https://www.manning.com/books/deep-learning-with-python-second-edition?a_aid=keras&a_bid=76564dff) and was written with TensorFlow 2.6. Thanks to the author for making the source code publicly available.

**Checking the TensorFlow version and the GPU**
- Google Colab setup: select GPU in the 'Runtime -> Change runtime type' menu, then check the output of ``` !nvidia-smi ```
- Check the TensorFlow version in use: ```python import tensorflow as tf; tf.__version__ ```
- Check whether TensorFlow can see the GPU: ```python tf.config.list_physical_devices('GPU') ```

Main topics
- Introduction to natural language processing (NLP)
    - Bag-of-words models
    - Sequence models
- Working with sequence models
    - Applying bidirectional recurrent networks (bidirectional LSTM)
- Using Transformers
- Using sequence-to-sequence (seq2seq) models

11.1 Introduction to natural language processing

Everyday languages such as Korean and English are called __natural languages__ to distinguish them from computer programming languages such as Python, Java, C, C++, C#, and JavaScript. Implementing algorithms that analyze natural language precisely is in practice very hard. Before deep learning came into use there were countless attempts to understand natural language by hand-crafting suitable rules, but they were not very successful.

Starting in the 1990s, machine learning techniques began to be applied to the enormous amounts of text collected from the internet. The main goal, however, was not **language understanding** but analyzing input text to **predict statistically useful information**, as in the following tasks:
- Text classification: "What is the topic of this sentence?"
- Content filtering: "Does it contain profanity?"
- Sentiment analysis: "Is the content positive or negative?"
- Language modeling: "Which word should come next in this sentence?"
- Translation: "How do you say this in Korean?"
- Summarization: "How would you summarize this article in one line?"

Analysis of this kind is called **natural language processing** (NLP); it tries to recognize patterns that can be found in words, sentences, and paragraphs.

**Machine learning for NLP**

The use of machine learning for natural language processing, which started in the 1990s, evolved as follows:
- 1990 to early 2010s: decision trees and logistic regression models were mainly used.
- 2014-2015: sequence-processing algorithms such as LSTM came into use.
- 2015-2017: (bidirectional) recurrent neural networks became the default choice.
- 2017-2018: Transformer models achieved the best performance and solved many hard problems; they are the most widely used models today.
11.2 Text vectorization

Deep learning models cannot process raw text, so text must be converted into numeric tensors. This **text vectorization** process usually follows three steps:
1. **Text standardization**: lowercasing, removing punctuation, and so on.
1. **Tokenization**: splitting the text into basic units (**tokens**), for example characters, words, or groups of words.
1. **Vocabulary indexing**: converting each token into a numerical vector.

The figure below shows the basic text-vectorization pipeline. Figure source: [Deep Learning with Python (Manning MEAP)](https://www.manning.com/books/deep-learning-with-python-second-edition)

**Text standardization**

Let's standardize the following two sentences into the same sentence:
- "sunset came. i was staring at the Mexico sky. Isnt nature splendid??"
- "Sunset came; I stared at the México sky. Isn't nature splendid?"

For example, the following standardization techniques can be applied:
- lowercase everything
- remove special characters such as `.`, `;`, `?`, `'`
- convert special letters: "é" to "e", "æ" to "ae", and so on
- reduce verbs/nouns to a base form: "cats" to "[cat]", "was staring" and "stared" to "[stare]", and so on

Both sentences then become the same standardized sentence:
- "sunset came i [stare] at the mexico sky isnt nature splendid"

Standardization discards some information, but it reduces what the model has to learn and therefore helps train models that generalize better. Keep in mind that the right standardization steps depend on the task: in interview transcripts, for example, question marks (`?`) must not be removed.
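The following minimal sketch (my own illustration, not code from the book) shows lowercasing, accent folding, and punctuation removal in plain Python; it does not attempt the stemming step ("[stare]"):

```python
import string
import unicodedata

def simple_standardize(text):
    # Lowercase, fold accented letters ("é" -> "e"), and drop punctuation.
    text = text.lower()
    text = unicodedata.normalize("NFKD", text).encode("ascii", "ignore").decode("ascii")
    return text.translate(str.maketrans("", "", string.punctuation))

print(simple_standardize("Sunset came; I stared at the México sky. Isn't nature splendid?"))
# sunset came i stared at the mexico sky isnt nature splendid
```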
**Tokenization**

After standardization, the text has to be split into tokens, the basic units of analysis. One of the following three approaches is usually used:
- Word-level tokenization
    - split on whitespace into words
    - verb stems and endings are sometimes separated: "star+ing", "call+ed", and so on
- N-gram tokenization
    - an N-gram token is a group of up to N consecutive words
    - example: "the cat" and "he was" are 2-gram tokens
- Character-level tokenization
    - every character is a token
    - used for text generation, speech recognition, and similar tasks

Character-level tokenization is rarely used in practice. Here we only use word-level or N-gram tokenization:
- word-level tokenization: used with **sequence models**, which take word order into account
- N-gram tokenization: used with **bag-of-words (BOW)** models, which mostly ignore word order
    - N-grams preserve some local information about word order.
    - This is a kind of feature engineering, used by shallow language-processing models; 1D convnets, recurrent networks, and Transformers do not need it.

A **bag-of-words** is a set of N-gram tokens, also called a **bag-of-N-grams**. For example, the 2-gram and 3-gram bags of the sentence "the cat sat on the mat." are:
- 2-gram bag
```
{"the", "the cat", "cat", "cat sat", "sat",
 "sat on", "on", "on the", "the mat", "mat"}
```
- 3-gram bag
```
{"the", "the cat", "cat", "cat sat", "the cat sat",
 "sat", "sat on", "on", "cat sat on", "on the",
 "sat on the", "the mat", "mat", "on the mat"}
```
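As a small illustration (not the book's code), such a bag of N-grams can be generated with a few lines of plain Python:

```python
def bag_of_ngrams(sentence, max_n):
    # Collect every 1-gram up to max_n-gram of consecutive words.
    words = sentence.split()
    bag = set()
    for n in range(1, max_n + 1):
        for i in range(len(words) - n + 1):
            bag.add(" ".join(words[i:i + n]))
    return bag

print(bag_of_ngrams("the cat sat on the mat", 2))
# e.g. {'the', 'cat', 'sat', 'on', 'mat', 'the cat', 'cat sat', 'sat on', 'on the', 'the mat'}
# (set printing order may vary)
```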
**Vocabulary indexing**

Usually an index of all tokens contained in the training set is built first. Based on this index, each token is then converted into a numeric tensor using one-hot, multi-hot, or similar encodings.

As explained in [Chapter 4](https://codingalzi.github.io/dlp/notebooks/dlp04_getting_started_with_neural_networks.html) and [Chapter 5](https://codingalzi.github.io/dlp/notebooks/dlp05_fundamentals_of_ml.html), vocabulary indexing is normally restricted to the 20,000 or 30,000 most frequent words. Back then we used `num_words=10000` to build the training set from only the 10,000 most frequent words:

```python
from tensorflow.keras.datasets import imdb
(train_data, train_labels), (test_data, test_labels) = imdb.load_data(num_words=10000)
```

Keras' imdb dataset comes already preprocessed into sequences of integers, but here we will start from the raw IMDB dataset and do the preprocessing ourselves. Keep the following in mind:
- OOV index: every word not contained in the vocabulary index is mapped to 1; when translated back into plain text it appears as "[UNK]".
    - OOV = Out Of Vocabulary
    - UNK = Unknown
- Mask token: marks tokens that should be ignored and is mapped to 0.
    - For example, the padding used to equalize sentence lengths is filled with 0s:
    ```
    [[5,  7, 124, 4,  89]
     [8, 34,  21, 0,   0]]
    ```
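A toy sketch of this indexing scheme (hypothetical helper functions, not the notebook's code), with 0 reserved for padding/masking and 1 for out-of-vocabulary words:

```python
def build_vocabulary(tokenized_texts, max_tokens):
    vocab = {"": 0, "[UNK]": 1}  # 0 = padding/mask, 1 = OOV
    for tokens in tokenized_texts:
        for token in tokens:
            if token not in vocab and len(vocab) < max_tokens:
                vocab[token] = len(vocab)
    return vocab

def encode(tokens, vocab, length):
    ids = [vocab.get(t, 1) for t in tokens]          # unknown words -> 1
    return ids[:length] + [0] * (length - len(ids))  # pad with 0s

vocab = build_vocabulary([["the", "cat", "sat"]], max_tokens=1000)
print(encode(["the", "dog", "sat"], vocab, length=5))  # [2, 1, 4, 0, 0]
```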
**Using the Keras `TextVectorization` layer**

The text vectorization described above can be performed with the Keras `TextVectorization` layer; its basic usage is as follows. | from tensorflow.keras.layers import TextVectorization
text_vectorization = TextVectorization(
output_mode="int",
) | _____no_output_____ | MIT | notebooks/dlp11_part01_introduction.ipynb | codingalzi/dlp |
The main default settings used when configuring a `TextVectorization` layer are:
- standardization: lowercase and strip punctuation
    - `standardize='lower_and_strip_punctuation'`
- tokenization: split into words
    - `ngrams=None`
    - `split='whitespace'`
- output mode: the format of the output tensor
    - `output_mode="int"`

Standardization and tokenization can also be specified with custom functions. Note, however, that such functions must work on `tf.string` tensors rather than on Python's built-in `str` type. Using the two functions below is equivalent to the standardization and tokenization defaults:
- `custom_standardization_fn()`
- `custom_split_fn()` | import re
import string
import tensorflow as tf
# Standardization: lowercase and remove punctuation
def custom_standardization_fn(string_tensor):
lowercase_string = tf.strings.lower(string_tensor)
return tf.strings.regex_replace(
lowercase_string, f"[{re.escape(string.punctuation)}]", "")
# Split on whitespace
def custom_split_fn(string_tensor):
return tf.strings.split(string_tensor)
# Use the custom standardization and split functions
text_vectorization = TextVectorization(
output_mode="int",
standardize=custom_standardization_fn,
split=custom_split_fn,
) | _____no_output_____ | MIT | notebooks/dlp11_part01_introduction.ipynb | codingalzi/dlp |
**Example**

Let's apply text vectorization to the small dataset below. | dataset = [
"I write, erase, rewrite",
"Erase again, and then",
"A poppy blooms.",
]
text_vectorization.adapt(dataset) | _____no_output_____ | MIT | notebooks/dlp11_part01_introduction.ipynb | codingalzi/dlp |
The generated vocabulary index looks as follows. | vocabulary = text_vectorization.get_vocabulary()
vocabulary | _____no_output_____ | MIT | notebooks/dlp11_part01_introduction.ipynb | codingalzi/dlp |
Let's use the generated vocabulary index to vectorize a new sentence. | test_sentence = "I write, rewrite, and still rewrite again"
encoded_sentence = text_vectorization(test_sentence)
print(encoded_sentence) | tf.Tensor([ 7 3 5 9 1 5 10], shape=(7,), dtype=int64)
| MIT | notebooks/dlp11_part01_introduction.ipynb | codingalzi/dlp |
Decoding the vectorized tensor back into words recovers the standardized sentence. | inverse_vocab = dict(enumerate(vocabulary))
decoded_sentence = " ".join(inverse_vocab[int(i)] for i in encoded_sentence)
print(decoded_sentence) | i write rewrite and [UNK] rewrite again
| MIT | notebooks/dlp11_part01_introduction.ipynb | codingalzi/dlp |
**How to use the `TextVectorization` layer**

The `TextVectorization` layer is not supported on GPUs or TPUs, so using it directly inside a model can slow down training and is not recommended. Here we instead preprocess the datasets independently of the model. When a trained model is deployed to production, however, it is better to attach the `TextVectorization` layer to the trained model; this is explained in more detail in the appendix that follows.
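A hedged sketch of the two usage patterns just described (the names `raw_ds` and `trained_model` are placeholders, not variables from this notebook):

```python
import tensorflow as tf
from tensorflow import keras

# Pattern 1 (training): vectorize inside the tf.data pipeline,
# so the model itself only ever sees numeric tensors.
# vectorized_ds = raw_ds.map(lambda x, y: (text_vectorization(x), y),
#                            num_parallel_calls=tf.data.AUTOTUNE)

# Pattern 2 (deployment): wrap the vectorizer and the trained model together
# so that the exported model accepts raw strings directly.
def make_inference_model(text_vectorization, trained_model):
    inputs = keras.Input(shape=(1,), dtype="string")
    outputs = trained_model(text_vectorization(inputs))
    return keras.Model(inputs, outputs)
```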
11.3 Two ways to represent groups of words: sets and sequences

As mentioned above, NLP models differ in how they handle groups of words.
- Bag-of-words models
    - Ignore word order and treat a group of words as a set.
    - Mainly used until around 2015.
- Sequence models
    - Recurrent models
        - Treat word order like the time steps of a time series.
        - Mainly used in 2015-2016.
    - Transformer architecture
        - Ignores order in principle but is able to learn word positions.
        - The default choice since 2017.

Here we use the IMDB movie review dataset to introduce both approaches and their differences.

11.3.1 Preparing the IMDB movie review data

Unlike in earlier chapters, we download the raw IMDB dataset and preprocess it ourselves.

Preparation step 1: download the dataset and unpack it

Unpacking creates a directory with the following structure:
```
aclImdb/
...train/
......pos/
......neg/
...test/
......pos/
......neg/
```
The `pos` and `neg` subdirectories of `train` contain 12,500 positive and 12,500 negative reviews, respectively.

*Note*: on Windows, the code below is only supported on recent builds of Windows 10 or on Windows 11. | !curl -O https://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz
!tar -xf aclImdb_v1.tar.gz | % Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
100 80.2M 100 80.2M 0 0 26.1M 0 0:00:03 0:00:03 --:--:-- 26.1M
| MIT | notebooks/dlp11_part01_introduction.ipynb | codingalzi/dlp |
The `aclImdb/train/unsup` subdirectory is not needed, so we delete it. | if 'google.colab' in str(get_ipython()):
!rm -r aclImdb/train/unsup
else:
import shutil
unsup_path = './aclImdb/train/unsup'
shutil.rmtree(unsup_path) | _____no_output_____ | MIT | notebooks/dlp11_part01_introduction.ipynb | codingalzi/dlp |
Let's look at the content of one positive review. Inspecting the training data and building an intuition for it before constructing a model is always a necessary step. | if 'google.colab' in str(get_ipython()):
!cat aclImdb/train/pos/4077_10.txt
else:
with open('aclImdb/train/pos/4077_10.txt', 'r') as f:
text = f.read()
print(text) | I first saw this back in the early 90s on UK TV, i did like it then but i missed the chance to tape it, many years passed but the film always stuck with me and i lost hope of seeing it TV again, the main thing that stuck with me was the end, the hole castle part really touched me, its easy to watch, has a great story, great music, the list goes on and on, its OK me saying how good it is but everyone will take there own best bits away with them once they have seen it, yes the animation is top notch and beautiful to watch, it does show its age in a very few parts but that has now become part of it beauty, i am so glad it has came out on DVD as it is one of my top 10 films of all time. Buy it or rent it just see it, best viewing is at night alone with drink and food in reach so you don't have to stop the film.<br /><br />Enjoy | MIT | notebooks/dlp11_part01_introduction.ipynb | codingalzi/dlp |
Preparation step 2: create a validation set

We set aside 20% of the training set as a validation set. To do so, we create the `aclImdb/val` directory, shuffle the positive and negative training reviews randomly, and move 20% of them into the validation directory. | import os, pathlib, shutil, random
base_dir = pathlib.Path("aclImdb")
val_dir = base_dir / "val"
train_dir = base_dir / "train"
for category in ("neg", "pos"):
    os.makedirs(val_dir / category)    # create the val directory
files = os.listdir(train_dir / category)
    random.Random(1337).shuffle(files)         # shuffle the training files randomly
    num_val_samples = int(0.2 * len(files))    # move 20% of the files to the validation set
val_files = files[-num_val_samples:]
for fname in val_files:
shutil.move(train_dir / category / fname,
val_dir / category / fname) | _____no_output_____ | MIT | notebooks/dlp11_part01_introduction.ipynb | codingalzi/dlp |
Preparation step 3: create the tensor datasets

We use the `text_dataset_from_directory()` function to prepare the training, validation, and test sets. Each of them is a `Dataset`, and the batch size is 32. | from tensorflow import keras
batch_size = 32
train_ds = keras.utils.text_dataset_from_directory(
"aclImdb/train", batch_size=batch_size
)
val_ds = keras.utils.text_dataset_from_directory(
"aclImdb/val", batch_size=batch_size
)
test_ds = keras.utils.text_dataset_from_directory(
"aclImdb/test", batch_size=batch_size
) | Found 20000 files belonging to 2 classes.
Found 5000 files belonging to 2 classes.
Found 25000 files belonging to 2 classes.
| MIT | notebooks/dlp11_part01_introduction.ipynb | codingalzi/dlp |
Each dataset is organized in batches; the inputs are `tf.string` tensors and the targets are `int32` tensors, both of size 32, the specified batch size. For example, the inputs and targets of the first batch carry the following information. | for inputs, targets in train_ds:
print("inputs.shape:", inputs.shape)
print("inputs.dtype:", inputs.dtype)
print("targets.shape:", targets.shape)
print("targets.dtype:", targets.dtype)
    # Example: first review of the first batch
print("inputs[0]:", inputs[0])
print("targets[0]:", targets[0])
break | inputs.shape: (32,)
inputs.dtype: <dtype: 'string'>
targets.shape: (32,)
targets.dtype: <dtype: 'int32'>
inputs[0]: tf.Tensor(b'The film begins with a bunch of kids in reform school and focuses on a kid named \'Gabe\', who has apparently worked hard to earn his parole. Gabe and his sister move to a new neighborhood to make a fresh start and soon Gabe meets up with the Dead End Kids. The Kids in this film are little punks, but they are much less antisocial than they\'d been in other previous films and down deep, they are well-meaning punks. However, in this neighborhood there are also some criminals who are perpetrating insurance fraud through arson and see Gabe as a convenient scapegoat--after all, he\'d been to reform school and no one would believe he was innocent once he was framed. So, when Gabe is about ready to be sent back to "The Big House", it\'s up to the rest of the gang to save him and expose the real crooks.<br /><br />The "Dead End Kids" appeared in several Warner Brothers films in the late 1930s and the films were generally very good (particularly ANGELS WITH DIRTY FACES). However, after the boys\' contracts expired, they went on to Monogram Studios and the films, to put it charitably, were very weak and formulaic--with Huntz Hall and Leo Gorcey being pretty much the whole show and the group being renamed "The Bowery Boys". Because ANGELS WASH THEIR FACES had the excellent writing and production values AND Hall and Gorcey were not constantly mugging for the camera, it\'s a pretty good film--and almost earns a score of 7 (it\'s REAL close). In fact, while this isn\'t a great film aesthetically, it\'s sure a lot of fun to watch, so I will give it a 7! Sure, it was a tad hokey-particularly towards the end when the kids take the law into their own hands and Reagan ignores the Bill of Rights--but it was also quite entertaining. The Dead End Kids are doing their best performances and Ronald Reagan and Ann Sheridan provided excellent support. Sure, this part of the film was illogical and impossible but somehow it was still funny and rather charming--so if you can suspend disbelief, it works well.', shape=(), dtype=string)
targets[0]: tf.Tensor(1, shape=(), dtype=int32)
| MIT | notebooks/dlp11_part01_introduction.ipynb | codingalzi/dlp |
11.3.2 The bag-of-words approach

For a bag-of-words model we first have to decide which N-grams fill the bag:
- unigrams: every token is a single word
- N-grams: tokens consist of up to N consecutive words

**Approach 1: binary encoding of unigrams**

For example, processing the sentence "the cat sat on the mat" with unigrams produces the following bag of words. Because it is a set, word order is discarded entirely.
```
{"cat", "mat", "on", "sat", "the"}
```
Every sentence is then represented as a 1D binary tensor whose length equals the number of words in the vocabulary index, that is, it is multi-hot encoded, exactly like the sentence encoding used in [Chapter 4](https://codingalzi.github.io/dlp/notebooks/dlp04_getting_started_with_neural_networks.html) and [Chapter 5](https://codingalzi.github.io/dlp/notebooks/dlp05_fundamentals_of_ml.html).
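A tiny manual illustration of multi-hot encoding over a toy vocabulary (my own sketch, not the notebook's code):

```python
vocab = ["cat", "mat", "on", "sat", "the"]  # toy vocabulary

def multi_hot(sentence, vocab):
    words = set(sentence.split())
    return [1.0 if word in words else 0.0 for word in vocab]

print(multi_hot("the cat sat on the mat", vocab))  # [1.0, 1.0, 1.0, 1.0, 1.0]
print(multi_hot("the dog sat", vocab))             # [0.0, 0.0, 0.0, 1.0, 1.0]
```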
The `TextVectorization` class with the `output_mode="multi_hot"` option handles exactly what we just described. | from tensorflow.keras.layers import TextVectorization
text_vectorization = TextVectorization(
max_tokens=20000,
output_mode="multi_hot",
)
# Build the vocabulary index
text_only_train_ds = train_ds.map(lambda x, y: x)
text_vectorization.adapt(text_only_train_ds) | _____no_output_____ | MIT | notebooks/dlp11_part01_introduction.ipynb | codingalzi/dlp |
Using the generated vocabulary index, we vectorize the training, validation, and test sets. | binary_1gram_train_ds = train_ds.map(lambda x, y: (text_vectorization(x), y))
binary_1gram_val_ds = val_ds.map(lambda x, y: (text_vectorization(x), y))
binary_1gram_test_ds = test_ds.map(lambda x, y: (text_vectorization(x), y)) | _____no_output_____ | MIT | notebooks/dlp11_part01_introduction.ipynb | codingalzi/dlp |
The inputs and targets of the first transformed batch carry the following information. Because `max_tokens=20000` was specified, every sentence is converted into a vector of length 20,000. | for inputs, targets in binary_1gram_train_ds:
print("inputs.shape:", inputs.shape)
print("inputs.dtype:", inputs.dtype)
print("targets.shape:", targets.shape)
print("targets.dtype:", targets.dtype)
print("inputs[0]:", inputs[0])
print("targets[0]:", targets[0])
break | inputs.shape: (32, 20000)
inputs.dtype: <dtype: 'float32'>
targets.shape: (32,)
targets.dtype: <dtype: 'int32'>
inputs[0]: tf.Tensor([1. 1. 1. ... 0. 0. 0.], shape=(20000,), dtype=float32)
targets[0]: tf.Tensor(0, shape=(), dtype=int32)
| MIT | notebooks/dlp11_part01_introduction.ipynb | codingalzi/dlp |
**Defining a dense model**

As the bag-of-words model we use a densely connected network here. The `get_model()` function returns a compiled, simple dense model. The model's output is the probability that a review is positive, so the top layer uses the `sigmoid` activation. | from tensorflow import keras
from tensorflow.keras import layers
def get_model(max_tokens=20000, hidden_dim=16):
inputs = keras.Input(shape=(max_tokens,))
x = layers.Dense(hidden_dim, activation="relu")(inputs)
x = layers.Dropout(0.5)(x)
    outputs = layers.Dense(1, activation="sigmoid")(x)  # probability that the review is positive
model = keras.Model(inputs, outputs)
model.compile(optimizer="rmsprop",
loss="binary_crossentropy",
metrics=["accuracy"])
return model
model = get_model()
model.summary() | Model: "model"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_1 (InputLayer) [(None, 20000)] 0
dense (Dense) (None, 16) 320016
dropout (Dropout) (None, 16) 0
dense_1 (Dense) (None, 1) 17
=================================================================
Total params: 320,033
Trainable params: 320,033
Non-trainable params: 0
_________________________________________________________________
| MIT | notebooks/dlp11_part01_introduction.ipynb | codingalzi/dlp |
**Training the model**

There is nothing special about training the dense model. After training, the accuracy on the test set comes out slightly below 89%. That is lower than the roughly 95% test accuracy of the best-performing models, but far better than a model that guesses at random. | callbacks = [
keras.callbacks.ModelCheckpoint("binary_1gram.keras",
save_best_only=True)
]
model.fit(binary_1gram_train_ds.cache(),
validation_data=binary_1gram_val_ds.cache(),
epochs=10,
callbacks=callbacks)
model = keras.models.load_model("binary_1gram.keras")
print(f"Test acc: {model.evaluate(binary_1gram_test_ds)[1]:.3f}") | Epoch 1/10
625/625 [==============================] - 10s 16ms/step - loss: 0.4074 - accuracy: 0.8277 - val_loss: 0.2792 - val_accuracy: 0.8908
Epoch 2/10
625/625 [==============================] - 3s 5ms/step - loss: 0.2746 - accuracy: 0.8981 - val_loss: 0.2774 - val_accuracy: 0.8964
Epoch 3/10
625/625 [==============================] - 4s 6ms/step - loss: 0.2471 - accuracy: 0.9115 - val_loss: 0.2872 - val_accuracy: 0.8976
Epoch 4/10
625/625 [==============================] - 3s 5ms/step - loss: 0.2246 - accuracy: 0.9244 - val_loss: 0.3187 - val_accuracy: 0.8936
Epoch 5/10
625/625 [==============================] - 3s 6ms/step - loss: 0.2156 - accuracy: 0.9313 - val_loss: 0.3164 - val_accuracy: 0.8960
Epoch 6/10
625/625 [==============================] - 3s 6ms/step - loss: 0.2108 - accuracy: 0.9341 - val_loss: 0.3355 - val_accuracy: 0.8934
Epoch 7/10
625/625 [==============================] - 4s 6ms/step - loss: 0.2052 - accuracy: 0.9366 - val_loss: 0.3354 - val_accuracy: 0.8944
Epoch 8/10
625/625 [==============================] - 4s 6ms/step - loss: 0.2017 - accuracy: 0.9365 - val_loss: 0.3582 - val_accuracy: 0.8940
Epoch 9/10
625/625 [==============================] - 4s 6ms/step - loss: 0.2013 - accuracy: 0.9394 - val_loss: 0.3497 - val_accuracy: 0.8938
Epoch 10/10
625/625 [==============================] - 4s 6ms/step - loss: 0.2043 - accuracy: 0.9394 - val_loss: 0.3631 - val_accuracy: 0.8940
782/782 [==============================] - 8s 10ms/step - loss: 0.2861 - accuracy: 0.8885
Test acc: 0.888
| MIT | notebooks/dlp11_part01_introduction.ipynb | codingalzi/dlp |
**Approach 2: binary encoding of bigrams**

Let's use bigrams (2-grams) instead of unigrams. Processing the sentence "the cat sat on the mat" with bigrams, for example, produces the following bag of words:
```
{"the", "the cat", "cat", "cat sat", "sat",
 "sat on", "on", "on the", "the mat", "mat"}
```
With the `ngrams=N` option of the `TextVectorization` class we can build a vocabulary index made of N-grams. | text_vectorization = TextVectorization(
ngrams=2,
max_tokens=20000,
output_mode="multi_hot",
) | _____no_output_____ | MIT | notebooks/dlp11_part01_introduction.ipynb | codingalzi/dlp |
Building the vocabulary index and vectorizing the training, validation, and test sets works just as before. | text_vectorization.adapt(text_only_train_ds)
binary_2gram_train_ds = train_ds.map(lambda x, y: (text_vectorization(x), y))
binary_2gram_val_ds = val_ds.map(lambda x, y: (text_vectorization(x), y))
binary_2gram_test_ds = test_ds.map(lambda x, y: (text_vectorization(x), y)) | _____no_output_____ | MIT | notebooks/dlp11_part01_introduction.ipynb | codingalzi/dlp |