`**d` represents any number of keyword parameters
def dconcat(sep=":", **dic):
    for k in dic.keys():
        print("{}{}{}".format(k, sep, dic[k]))

dconcat(hello="world", python="rocks", sep="~")
hello~world
python~rocks
MIT
Notebooks/Arguments-and-Unpacking.ipynb
gtavasoli/PyTips
Unpacking

The new feature [PEP 448](https://www.python.org/dev/peps/pep-0448/) added in **Python 3.5** allows `*a` and `**d` to be used outside of function parameters:
print(*range(5))

lst = [0, 1, 2, 3]
print(*lst)

a = *range(3),  # The comma here cannot be omitted
print(a)

d = {"hello": "world", "python": "rocks"}
print({**d}["python"])
print(*d)
print([*d][0])
0 1 2 3 4
0 1 2 3
(0, 1, 2)
rocks
hello python
hello
MIT
Notebooks/Arguments-and-Unpacking.ipynb
gtavasoli/PyTips
So-called unpacking can be thought of as stripping the `()` from a tuple or the `{}` from a dictionary. This syntax also provides a more Pythonic way to merge dictionaries:
user = {'name': "Trey", 'website': "http://treyhunner.com"}
defaults = {'name': "Anonymous User", 'page_name': "Profile Page"}
print({**defaults, **user})
{'name': 'Trey', 'page_name': 'Profile Page', 'website': 'http://treyhunner.com'}
MIT
Notebooks/Arguments-and-Unpacking.ipynb
gtavasoli/PyTips
Using unpacking in a function call like this is also available in **Python 2.7**:
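`concat` is defined earlier in the notebook; a minimal sketch consistent with the output below (the separator default is an assumption inferred from the result):

```python
def concat(*lst, sep="/"):
    # Join all positional arguments with the separator.
    return sep.join(map(str, lst))
```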
print(concat(*"ILovePython"))
I/L/o/v/e/P/y/t/h/o/n
MIT
Notebooks/Arguments-and-Unpacking.ipynb
gtavasoli/PyTips
The computed average speed of Earth comes out to 29.93 km/s, close to the known mean orbital speed (~29.8 km/s), so this looks right.
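As a rough cross-check (my arithmetic, not part of the notebook): a circular-orbit estimate $2\pi r/T$ with $r \approx 1.496 \times 10^8$ km gives about 29.8 km/s, so a value near 29.9 km/s is plausible.

```python
# Back-of-the-envelope check of Earth's mean orbital speed (assumes a circular orbit).
from math import pi

r_km = 1.496e8            # mean Earth-Sun distance in km
T_s = 365.25 * 86400      # orbital period in seconds
print(2 * pi * r_km / T_s)  # ~29.8 km/s
```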
vr0 = earth_diff['r'] / (24*3600) * km
vr0

vtheta0 = earth_diff['theta']
vtheta0

vphi0 = earth_diff['phi']
vphi0
_____no_output_____
MIT
code/notebooks/eph-earth-velocity.ipynb
GandalfSaxe/letomes
Libraries
from typing import List, Union, Tuple, Callable, Dict
from os import environ
from random import seed

from numpy.random import seed as np_seed
from numpy import ndarray, zeros
from pandas import DataFrame, read_csv
from sklearn.model_selection import train_test_split

import tensorflow as tf
from tensorflow import random
from tensorflow import keras
from tensorflow.keras import applications
from tensorflow import data

import plotly.graph_objects as go

import wandb
from wandb.keras import WandbCallback
from kaggle_secrets import UserSecretsClient
_____no_output_____
MIT
pet-finder.ipynb
leopoldavezac/PetFinder
WandB
user_secrets = UserSecretsClient()
api_key = user_secrets.get_secret("WANDB")
wandb.login(key=api_key)

run = wandb.init(
    project="pet_finder",
    entity="leopoldavezac",
    config={
        'learning_rate': 0.001,
        'epochs': 20,
        'batch_size': 24,
        'loss_func': 'mse',
        'img_width': 224,
        'img_length': 224,
        'efficient_net_symbol': 'B0',
        'efficient_net_trainable': False,
        'dense_layers_post_efficient_net': [18, 9],
        'dropout': 0.2,
        'data_augmentation_contrast': 0.1,
    }
)
_____no_output_____
MIT
pet-finder.ipynb
leopoldavezac/PetFinder
Constants
DATA_PATH = '../input/petfinder-pawpularity-score'
ID_VAR_NM = 'Id'
TARGET_VAR_NM = 'Pawpularity'

AUTOTUNE = tf.data.experimental.AUTOTUNE
CONFIG = wandb.config
_____no_output_____
MIT
pet-finder.ipynb
leopoldavezac/PetFinder
Load & Preprocess Data
def get_datasets() -> List[tf.data.Dataset]:

    df_train = load_df(set_nm='train')
    df_test = load_df(set_nm='test')

    df_train[TARGET_VAR_NM] /= 100

    df_train = create_img_path_var(df_train, 'train')
    df_test = create_img_path_var(df_test, 'test')

    df_train, df_val = split(df_train)

    ds_train = create_dataset_with_preprocessed_imgs(
        df_train.img_path.values,
        df_train[TARGET_VAR_NM].values.astype('float'),
        augment=True
    )
    ds_val = create_dataset_with_preprocessed_imgs(
        df_val.img_path.values,
        df_val[TARGET_VAR_NM].values.astype('float')
    )
    ds_test = create_dataset_with_preprocessed_imgs(df_test.img_path.values)

    return [ds_train, ds_val, ds_test]

def load_df(set_nm: str) -> DataFrame:
    var_nms = [ID_VAR_NM]
    var_nms += [TARGET_VAR_NM] if set_nm == 'train' else []
    return read_csv('{}/{}.csv'.format(DATA_PATH, set_nm), usecols=var_nms)

def create_img_path_var(df: DataFrame, set_nm: str) -> DataFrame:
    df['img_path'] = '{}/{}/'.format(DATA_PATH, set_nm) + df[ID_VAR_NM] + '.jpg'
    df.drop(columns=ID_VAR_NM, inplace=True)
    return df

def split(df: DataFrame) -> List[DataFrame]:
    train, val = train_test_split(df.values, test_size=0.2)
    df_train = DataFrame(train, columns=df.columns)
    df_val = DataFrame(val, columns=df.columns)
    return [df_train, df_val]

def create_dataset_with_preprocessed_imgs(X_paths: ndarray, y: Union[None, ndarray] = None,
                                          augment: bool = False) -> data.Dataset:
    get_preprocessed_img = build_img_processor(y is not None)

    if y is not None:
        ds = data.Dataset.from_tensor_slices((X_paths, y))
    else:
        ds = data.Dataset.from_tensor_slices((X_paths,))

    ds = ds.map(get_preprocessed_img, num_parallel_calls=AUTOTUNE)

    if augment:
        augmentation_model = get_augmentation_model()
        ds = ds.map(lambda X, y: (augmentation_model(X, training=True), y))

    ds = ds.batch(CONFIG.batch_size)
    ds = ds.prefetch(buffer_size=AUTOTUNE)

    return ds

def build_img_processor(with_target: bool) -> Callable:

    def get_preprocessed_img(path: str) -> tf.Tensor:
        img = load_img(path)
        img = resize(img)
        img = eff_net_preprocess(img)
        return img

    def get_preprocessed_img_with_target(path: str, y: float) -> Tuple[Union[tf.Tensor, float]]:
        return (get_preprocessed_img(path), y)

    return get_preprocessed_img_with_target if with_target else get_preprocessed_img

def load_img(path: str) -> tf.Tensor:
    img = tf.io.read_file(path)
    return tf.io.decode_jpeg(img)

def resize(img: tf.Tensor) -> tf.Tensor:
    return tf.cast(
        tf.image.resize_with_pad(img, CONFIG.img_length, CONFIG.img_width),
        dtype=tf.int32
    )

def eff_net_preprocess(img: tf.Tensor) -> tf.Tensor:
    return keras.applications.efficientnet.preprocess_input(img)

def normalize(img: tf.Tensor) -> tf.Tensor:
    return img / 255.0

def get_augmentation_model() -> tf.keras.Model:
    return tf.keras.Sequential([
        # `layers` was never imported in this notebook, so the calls are fully qualified here.
        tf.keras.layers.RandomFlip("horizontal"),
        tf.keras.layers.RandomRotation(CONFIG.data_augmentation_contrast),
    ])

ds_train, ds_val, ds_test = get_datasets()
_____no_output_____
MIT
pet-finder.ipynb
leopoldavezac/PetFinder
Img Dims Visualization
img_paths = ('{}/{}/'.format(DATA_PATH, 'train') + load_df('train')[ID_VAR_NM] + '.jpg').values

img_dims = zeros((len(img_paths), 2))
for i, img_path in enumerate(img_paths):
    img_dims[i, :] = load_img(img_path).shape[:-1]  # decode_jpeg returns (height, width, channels)

fig = go.Figure()
# Column 0 holds heights and column 1 widths; the original trace names were swapped.
fig.add_trace(go.Histogram(x=img_dims[:, 0], histnorm='probability', name='height'))
fig.add_trace(go.Histogram(x=img_dims[:, 1], histnorm='probability', name='width'))
fig.update_layout(title_text='Distribution of Img Width and Height')
fig.show()
_____no_output_____
MIT
pet-finder.ipynb
leopoldavezac/PetFinder
Model
def get_model(efficient_net_model_nm: str, dense_layers_post_eff_net: List[int],
              dropout: float) -> tf.keras.Model:

    # The f-string prefix was missing in the original, so the literal
    # '{efficient_net_model_nm}' would have been used as the file name.
    efficient_net = tf.keras.models.load_model(
        f'../input/keras-applications-models/{efficient_net_model_nm}.h5')

    if CONFIG.efficient_net_trainable:
        unfreeze_layers(efficient_net)

    layers = [
        tf.keras.layers.Input(shape=(CONFIG.img_length, CONFIG.img_width, 3)),
        efficient_net,
        tf.keras.layers.BatchNormalization(),
        tf.keras.layers.Dropout(dropout)
    ]
    layers += [tf.keras.layers.Dense(nb_units) for nb_units in dense_layers_post_eff_net]
    layers += [tf.keras.layers.Dense(1, activation='sigmoid')]

    model = keras.models.Sequential(layers)
    print(model.summary())

    return model

def unfreeze_layers(model: tf.keras.Model) -> None:  # was misspelled 'unfreez_layers' at the definition
    for layer in model.layers:
        if not isinstance(layer, tf.keras.layers.BatchNormalization):
            layer.trainable = True
        else:
            layer.trainable = False

def compile_model(model: keras.Model, learning_rate: float, loss_func: str) -> None:
    optimizer = keras.optimizers.Adam(learning_rate=learning_rate)
    model.compile(loss=loss_func, optimizer=optimizer,
                  metrics=[keras.metrics.RootMeanSquaredError()])

def fit(model: keras.Model, ds_train: data.Dataset, ds_val: data.Dataset, epochs: int) -> None:
    early_stopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=3)
    model.fit(ds_train, epochs=epochs, validation_data=ds_val,
              callbacks=[WandbCallback(), early_stopping])

model = get_model(CONFIG.efficient_net_symbol, CONFIG.dense_layers_post_efficient_net, CONFIG.dropout)
compile_model(model, CONFIG.learning_rate, CONFIG.loss_func)
fit(model, ds_train, ds_val, CONFIG.epochs)
run.finish()
_____no_output_____
MIT
pet-finder.ipynb
leopoldavezac/PetFinder
Submissions
def save_test_pred(pred: ndarray) -> None:
    df_test = load_df_test()
    df_test[TARGET_VAR_NM] = pred
    df_test[[ID_VAR_NM, TARGET_VAR_NM]].to_csv('submission.csv', index=False)

def load_df_test() -> DataFrame:
    return read_csv('{}/test.csv'.format(DATA_PATH), usecols=[ID_VAR_NM])

test_pred = model.predict(ds_test)
test_pred *= 100
save_test_pred(test_pred)
_____no_output_____
MIT
pet-finder.ipynb
leopoldavezac/PetFinder
Big Data Analysis Engineer Practical Exam — Worked Examples

1. Task Type 1: Data Processing

* Convert the qsec column of the mtcars dataset (mtcars.csv) with min-max scaling, then find the number of records whose value is greater than 0.5.
import pandas as pd  # import pandas

df = pd.read_csv('mtcars.csv')  # read mtcars.csv into the DataFrame df
df.head()  # show the first 5 rows of df
_____no_output_____
MIT
빅분기 실기 예제풀이.ipynb
kamzzang/ADPStudy
* Method 1: transform with sklearn's MinMaxScaler
from sklearn.preprocessing import MinMaxScaler  # import sklearn's min-max scaler

scaler = MinMaxScaler()  # create a MinMaxScaler object

# Fit the scaler on the data and overwrite the column with the transformed values
df['qsec'] = scaler.fit_transform(df[['qsec']])

# Index only the rows where qsec is greater than 0.5 and count them
answer = len(df[df['qsec'] > 0.5])
print(answer)
9
MIT
빅분기 실기 예제풀이.ipynb
kamzzang/ADPStudy
* Method 2: transform by writing out the min-max formula by hand, $x' = (x - x_{min}) / (x_{max} - x_{min})$
df['qsec'] = (df['qsec'] - df['qsec'].min()) / (df['qsec'].max() - df['qsec'].min())

answer = len(df[df['qsec'] > 0.5])
print(answer)
9
MIT
빅분기 실기 예제풀이.ipynb
kamzzang/ADPStudy
2. Task Type 2: Model Building and Evaluation

* Below is one year of purchase data for department-store customers.

![image.png](attachment:291e1ffc-e949-4bc0-98fe-69e2a04d8546.png)

Deliverable: use the X_test data to compute the probability that each customer is male, and produce a csv containing only cust_id and that probability.

Evaluation metric: ROC-AUC

Points to check:
* Data preprocessing
* Feature Engineering
* Classification model
* Optimization
* Ensembling

1. EDA
import pandas as pd
import numpy as np

X_train = pd.read_csv('X_train.csv', encoding='euc-kr')  # file contains Korean, so specify the encoding
y_train = pd.read_csv('y_train.csv')
X_test = pd.read_csv('X_test.csv', encoding='euc-kr')  # file contains Korean, so specify the encoding

display(X_train.head())
display(y_train.head())
display(X_test.head())

train_data = X_train.merge(y_train, on='cust_id', how='outer')  # join X and y
train_data.head()
_____no_output_____
MIT
빅분기 실기 예제풀이.ipynb
kamzzang/ADPStudy
* Check each column's data type and shape
  * 환불금액 (refund amount) has missing values
  * Only 주구매상품 (main purchase product) and 주구매지점 (main purchase branch) are categorical; one-hot encode them if needed
train_data.info()
<class 'pandas.core.frame.DataFrame'>
Int64Index: 3500 entries, 0 to 3499
Data columns (total 11 columns):
 #   Column    Non-Null Count  Dtype  
---  ------    --------------  -----  
 0   cust_id   3500 non-null   int64  
 1   총구매액      3500 non-null   int64  
 2   최대구매액     3500 non-null   int64  
 3   환불금액      1205 non-null   float64
 4   주구매상품     3500 non-null   object 
 5   주구매지점     3500 non-null   object 
 6   내점일수      3500 non-null   int64  
 7   내점당구매건수   3500 non-null   float64
 8   주말방문비율    3500 non-null   float64
 9   구매주기      3500 non-null   int64  
 10  gender    3500 non-null   int64  
dtypes: float64(3), int64(6), object(2)
memory usage: 328.1+ KB
MIT
빅분기 실기 예제풀이.ipynb
kamzzang/ADPStudy
* Check for missing values
  * Most of 환불금액 is missing (2295 of 3500 rows)
train_data.isnull().sum()
_____no_output_____
MIT
빅분기 실기 예제풀이.ipynb
kamzzang/ADPStudy
* The test data's 환불금액 also has many missing values
X_test.isnull().sum()
_____no_output_____
MIT
빅분기 실기 예제풀이.ipynb
kamzzang/ADPStudy
* Check the distribution of the numeric columns
  * The scales differ a lot from column to column, so a transformation is needed
train_data.describe()
_____no_output_____
MIT
빅분기 실기 예제풀이.ipynb
kamzzang/ADPStudy
* Check the distribution of the string columns
train_data.describe(include=[object])

display(train_data['주구매상품'].unique(), len(train_data['주구매상품'].unique()))
display(X_test['주구매상품'].unique(), len(X_test['주구매상품'].unique()))

display(train_data['주구매지점'].unique(), len(train_data['주구매지점'].unique()))
display(X_test['주구매지점'].unique(), len(X_test['주구매지점'].unique()))
_____no_output_____
MIT
빅분기 실기 예제풀이.ipynb
kamzzang/ADPStudy
* Check which 주구매상품 values appear in train but not in test
for i in train_data['주구매상품'].unique():
    if i not in X_test['주구매상품'].unique():
        print(i)
소형가전
MIT
빅분기 실기 예제풀이.ipynb
kamzzang/ADPStudy
* Inspect the 소형가전 (small appliances) rows in train_data
train_data[train_data['주구매상품']=='소형가전']

2/3500  # 소형가전 makes up only ~0.06% of the data
_____no_output_____
MIT
빅분기 실기 예제풀이.ipynb
kamzzang/ADPStudy
* 총구매액 (total purchase amount) and 최대구매액 (maximum purchase amount) contain negative values, so print those rows
  * They all turn out to be women
  * What does it mean for the refund amount to exceed the maximum purchase amount?
  * What is a negative maximum purchase amount supposed to be?
train_data[(train_data['총구매액']<=0) | (train_data['최대구매액']<=0)]
_____no_output_____
MIT
빅분기 실기 예제풀이.ipynb
kamzzang/ADPStudy
* Check the test data as well
  * How should the zeros and negative values be handled?
X_test[(X_test['총구매액']<=0) | (X_test['최대구매액']<=0)]
_____no_output_____
MIT
빅분기 실기 예제풀이.ipynb
kamzzang/ADPStudy
* Compare values in train_data grouped by gender
display(train_data.groupby('gender').min())
display(train_data.groupby('gender').mean())
display(train_data.groupby('gender').max())
_____no_output_____
MIT
빅분기 실기 예제풀이.ipynb
kamzzang/ADPStudy
* Check the correlations within train_data
  * No column is strongly correlated with gender
train_data.corr()
_____no_output_____
MIT
빅분기 실기 예제풀이.ipynb
kamzzang/ADPStudy
* No gender-specific pattern stands out
* Check the number of samples per gender in the full data
print('Total records :', len(train_data))
print('Female samples :', len(train_data[train_data['gender']==0]))
print('Male samples :', len(train_data[train_data['gender']==1]))
print('Share of males in the data : {}%'.format(round((len(train_data[train_data['gender']==1]) / len(train_data))*100, 1)))
Total records : 3500
Female samples : 2184
Male samples : 1316
Share of males in the data : 37.6%
MIT
빅분기 실기 예제풀이.ipynb
kamzzang/ADPStudy
2. Data Preprocessing

* Replace the missing 환불금액 values with 0
train_data['환불금액'].fillna(0, inplace=True)
X_test['환불금액'].fillna(0, inplace=True)

display(train_data.isnull().sum())
display(X_test.isnull().sum())
_____no_output_____
MIT
빅분기 실기 예제풀이.ipynb
kamzzang/ADPStudy
* Drop the rows whose 주구매상품 is 소형가전
train_data.drop(train_data[train_data['주구매상품']=='소형가전'].index, inplace=True)
train_data[train_data['주구매상품']=='소형가전']
_____no_output_____
MIT
빅분기 실기 예제풀이.ipynb
kamzzang/ADPStudy
3. Feature Engineering

* Continuous columns: standardize with z-scores, $z = (x - \mu) / \sigma$
* 내점당구매건수 (purchases per visit) and 구매주기 (purchase cycle) have low correlation with the target, so consider dropping them depending on the results
train_data.head()

conti_cols = ['총구매액', '최대구매액', '환불금액', '내점일수', '내점당구매건수', '주말방문비율', '구매주기']

# Fit a standard (z-score) scaler on the train data and apply the same fitted scaler to the test data
from sklearn.preprocessing import StandardScaler

for col in conti_cols:
    scaler = StandardScaler()
    scaler.fit(train_data[[col]])
    train_data[col] = scaler.transform(train_data[[col]])
    X_test[col] = scaler.transform(X_test[[col]])

display(train_data.describe())
display(X_test.describe())
_____no_output_____
MIT
빅분기 실기 예제풀이.ipynb
kamzzang/ADPStudy
* One-hot encode the categorical columns
categorical_cols = ['주구매상품', '주구매지점']

# Note: dummies are built separately for train and test; the columns only line up
# because the mismatched 소형가전 category was dropped from the train data above.
for col in categorical_cols:
    temp = pd.get_dummies(train_data[col])
    train_data = pd.concat([train_data, temp], axis=1)
    del train_data[col]

    temp = pd.get_dummies(X_test[col])
    X_test = pd.concat([X_test, temp], axis=1)
    del X_test[col]

display(train_data.head())
display(X_test.head())
_____no_output_____
MIT
빅분기 실기 예제풀이.ipynb
kamzzang/ADPStudy
4. Classification Algorithms
from sklearn.ensemble import RandomForestClassifier  # random forest
from sklearn.linear_model import LogisticRegression  # logistic regression
from sklearn.metrics import roc_auc_score  # ROC-AUC, the evaluation metric for this problem

x_cols = list(train_data.columns)
x_cols.remove('cust_id')
x_cols.remove('gender')

X = train_data[x_cols]
y = train_data['gender']
test_x = X_test[x_cols]

# Create and fit a random forest model
model_rf = RandomForestClassifier(n_estimators=100, max_leaf_nodes=32)
model_rf.fit(X, y)
pred_rf = model_rf.predict_proba(X)
print('RF ROCAUC Score: ', roc_auc_score(y, pred_rf[:,1]))

# Create and fit a logistic regression model
model_lr = LogisticRegression()
model_lr.fit(X, y)
pred_lr = model_lr.predict_proba(X)
print('LR ROCAUC Score: ', roc_auc_score(y, pred_lr[:,1]))
RF ROCAUC Score:  0.7546568802481672
LR ROCAUC Score:  0.6955802615788438
MIT
빅분기 실기 예제풀이.ipynb
kamzzang/ADPStudy
5. Submitting the Results
# Predict on the test data with the random forest, which had the higher ROC-AUC score
pred_result = pd.DataFrame(model_rf.predict_proba(test_x))
result = pd.concat([X_test['cust_id'], pred_result[[1]]], axis=1)

# Rename the prediction column (1) as the problem requires;
# the original nested list [['cust_id', 'gender']] created a MultiIndex.
result.columns = ['cust_id', 'gender']
result

result.to_csv('result.csv', index=False)  # save to csv without the index
_____no_output_____
MIT
빅분기 실기 예제풀이.ipynb
kamzzang/ADPStudy
A K-means clustering project

The purpose of this exercise is to use the K-means clustering module of PySpark to find out how many hacker groups were involved in the data breach of a certain technology firm. The forensic engineers of the firm were able to collect some metadata on each breach. The firm suspects that there might be two or three hacker groups, but they are sure that each group attacked the same number of times, i.e. the attacks were split evenly between the groups.
import findspark
findspark.init('/home/yohannes/spark-2.4.7-bin-hadoop2.7')

%config Completer.use_jedi = False

from pyspark.sql import SparkSession
spark = SparkSession.builder.appName('Kmeans').getOrCreate()

data = spark.read.csv('hack_data.csv', header=True, inferSchema=True)
_____no_output_____
MIT
K-means clustering_pyspark.ipynb
Molla80/K-means-clustering-using-pyspark
Data exploration
data.printSchema()
data.head(1)
_____no_output_____
MIT
K-means clustering_pyspark.ipynb
Molla80/K-means-clustering-using-pyspark
Creating features vector
from pyspark.ml.feature import VectorAssembler

data.columns

assembler = VectorAssembler(inputCols=['Session_Connection_Time',
                                       'Bytes Transferred',
                                       'Kali_Trace_Used',
                                       'Servers_Corrupted',
                                       'Pages_Corrupted',
                                       'WPM_Typing_Speed'],
                            outputCol='features')

final_data = assembler.transform(data)
final_data.printSchema()
root
 |-- Session_Connection_Time: double (nullable = true)
 |-- Bytes Transferred: double (nullable = true)
 |-- Kali_Trace_Used: integer (nullable = true)
 |-- Servers_Corrupted: double (nullable = true)
 |-- Pages_Corrupted: double (nullable = true)
 |-- Location: string (nullable = true)
 |-- WPM_Typing_Speed: double (nullable = true)
 |-- features: vector (nullable = true)
MIT
K-means clustering_pyspark.ipynb
Molla80/K-means-clustering-using-pyspark
Scaling the data
from pyspark.ml.feature import StandardScaler

scaler = StandardScaler(inputCol='features', outputCol='scaledFeat')
final_data = scaler.fit(final_data).transform(final_data)
final_data.printSchema()
root
 |-- Session_Connection_Time: double (nullable = true)
 |-- Bytes Transferred: double (nullable = true)
 |-- Kali_Trace_Used: integer (nullable = true)
 |-- Servers_Corrupted: double (nullable = true)
 |-- Pages_Corrupted: double (nullable = true)
 |-- Location: string (nullable = true)
 |-- WPM_Typing_Speed: double (nullable = true)
 |-- features: vector (nullable = true)
 |-- scaledFeat: vector (nullable = true)
MIT
K-means clustering_pyspark.ipynb
Molla80/K-means-clustering-using-pyspark
K-means clustering model
from pyspark.ml.clustering import KMeans

# let's try with the assumption that there were two hacker groups
kmeans = KMeans(featuresCol='scaledFeat', k=2)
model = kmeans.fit(final_data)
results = model.transform(final_data)

centers = model.clusterCenters()
print(centers)

results.printSchema()
results.describe().show()

results.groupBy('prediction').count().show()
# This shows that the attack was equally shared by the two hacker groups
+----------+-----+
|prediction|count|
+----------+-----+
|         1|  167|
|         0|  167|
+----------+-----+
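The firm also suspected three groups. A quick follow-up (my sketch, using only the APIs already imported above) is to refit with `k=3` and compare the cluster sizes; an uneven split there, combined with the perfect 167/167 split above, points to exactly two groups:

```python
# Hypothetical check: refit with three clusters and inspect the counts.
kmeans3 = KMeans(featuresCol='scaledFeat', k=3)
model3 = kmeans3.fit(final_data)
model3.transform(final_data).groupBy('prediction').count().show()
# If three groups had attacked in equal shares, we would expect three equal counts.
```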
MIT
K-means clustering_pyspark.ipynb
Molla80/K-means-clustering-using-pyspark
Graphical visualization of the clusters
results_pd = results.toPandas()
results_pd

import matplotlib.pyplot as plt
%matplotlib inline

plot = plt.scatter(data=results_pd, x=results_pd.index,
                   y=results_pd['Bytes Transferred'], c=results_pd['prediction'])
plt.ylabel('Bytes Transferred')
_____no_output_____
MIT
K-means clustering_pyspark.ipynb
Molla80/K-means-clustering-using-pyspark
Credit Risk Classification

Credit risk poses a classification problem that's inherently imbalanced. This is because healthy loans easily outnumber risky loans. In this Challenge, you'll use various techniques to train and evaluate models with imbalanced classes. You'll use a dataset of historical lending activity from a peer-to-peer lending services company to build a model that can identify the creditworthiness of borrowers.

Instructions:

This challenge consists of the following subsections:

* Split the Data into Training and Testing Sets
* Create a Logistic Regression Model with the Original Data
* Predict a Logistic Regression Model with Resampled Training Data

Split the Data into Training and Testing Sets

Open the starter code notebook and then use it to complete the following steps.

1. Read the `lending_data.csv` data from the `Resources` folder into a Pandas DataFrame.
2. Create the labels set (`y`) from the "loan_status" column, and then create the features (`X`) DataFrame from the remaining columns.

   > **Note** A value of `0` in the "loan_status" column means that the loan is healthy. A value of `1` means that the loan has a high risk of defaulting.

3. Check the balance of the labels variable (`y`) by using the `value_counts` function.
4. Split the data into training and testing datasets by using `train_test_split`.

Create a Logistic Regression Model with the Original Data

Employ your knowledge of logistic regression to complete the following steps:

1. Fit a logistic regression model by using the training data (`X_train` and `y_train`).
2. Save the predictions on the testing data labels by using the testing feature data (`X_test`) and the fitted model.
3. Evaluate the model's performance by doing the following:
   * Calculate the accuracy score of the model.
   * Generate a confusion matrix.
   * Print the classification report.
4. Answer the following question: How well does the logistic regression model predict both the `0` (healthy loan) and `1` (high-risk loan) labels?

Predict a Logistic Regression Model with Resampled Training Data

Did you notice the small number of high-risk loan labels? Perhaps, a model that uses resampled data will perform better. You'll thus resample the training data and then reevaluate the model. Specifically, you'll use `RandomOverSampler`.

To do so, complete the following steps:

1. Use the `RandomOverSampler` module from the imbalanced-learn library to resample the data. Be sure to confirm that the labels have an equal number of data points.
2. Use the `LogisticRegression` classifier and the resampled data to fit the model and make predictions.
3. Evaluate the model's performance by doing the following:
   * Calculate the accuracy score of the model.
   * Generate a confusion matrix.
   * Print the classification report.
4. Answer the following question: How well does the logistic regression model, fit with oversampled data, predict both the `0` (healthy loan) and `1` (high-risk loan) labels?

Write a Credit Risk Analysis Report

For this section, you'll write a brief report that includes a summary and an analysis of the performance of both machine learning models that you used in this challenge. You should write this report as the `README.md` file included in your GitHub repository.

Structure your report by using the report template that `Starter_Code.zip` includes, and make sure that it contains the following:

1. An overview of the analysis: Explain the purpose of this analysis.
2. The results: Using bulleted lists, describe the balanced accuracy scores and the precision and recall scores of both machine learning models.
3. A summary: Summarize the results from the machine learning models. Compare the two versions of the dataset predictions. Include your recommendation for the model to use, if any, on the original vs. the resampled data. If you don't recommend either model, justify your reasoning.
# Import the modules
import numpy as np
import pandas as pd
from pathlib import Path
from sklearn.metrics import balanced_accuracy_score
from sklearn.metrics import confusion_matrix
from imblearn.metrics import classification_report_imbalanced

import warnings
warnings.filterwarnings('ignore')
_____no_output_____
MIT
credit_risk_resampling.ipynb
douglasg-fintec/Credit_Risk_Resampling
---

Split the Data into Training and Testing Sets

Step 1: Read the `lending_data.csv` data from the `Resources` folder into a Pandas DataFrame.
# Read the CSV file from the Resources folder into a Pandas DataFrame
# Using the read_csv function and Path module, create a DataFrame
lending_data_df = pd.read_csv(
    Path('./Resources/lending_data.csv'),
).dropna()

# Review the DataFrame
display(lending_data_df.head())
display(lending_data_df.tail())
_____no_output_____
MIT
credit_risk_resampling.ipynb
douglasg-fintec/Credit_Risk_Resampling
Step 2: Create the labels set (`y`) from the โ€œloan_statusโ€ column, and then create the features (`X`) DataFrame from the remaining columns.
# Separate the data into labels and features
y = lending_data_df['loan_status']

# Separate the X variable, the features
X = lending_data_df[['loan_size', 'interest_rate', 'borrower_income', 'debt_to_income',
                     'num_of_accounts', 'derogatory_marks', 'total_debt']]

# Review the y variable Series
display(y[:5])

# Review the X variable DataFrame
display(X.head())
_____no_output_____
MIT
credit_risk_resampling.ipynb
douglasg-fintec/Credit_Risk_Resampling
Step 3: Check the balance of the labels variable (`y`) by using the `value_counts` function.
# Check the balance of our target values
y.value_counts()
_____no_output_____
MIT
credit_risk_resampling.ipynb
douglasg-fintec/Credit_Risk_Resampling
Step 4: Split the data into training and testing datasets by using `train_test_split`.
# Import the train_test_split module
from sklearn.model_selection import train_test_split

# Split the data using train_test_split
# Assign a random_state of 1 to the function
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1)
_____no_output_____
MIT
credit_risk_resampling.ipynb
douglasg-fintec/Credit_Risk_Resampling
---

Create a Logistic Regression Model with the Original Data

Step 1: Fit a logistic regression model by using the training data (`X_train` and `y_train`).
# Import the LogisticRegression module from sklearn
from sklearn.linear_model import LogisticRegression

# Instantiate the Logistic Regression model
# Assign a random_state parameter of 1 to the model
logistic_regression_model = LogisticRegression(random_state=1)

# Fit the model using training data
logistic_regression_model.fit(X_train, y_train)
_____no_output_____
MIT
credit_risk_resampling.ipynb
douglasg-fintec/Credit_Risk_Resampling
Step 2: Save the predictions on the testing data labels by using the testing feature data (`X_test`) and the fitted model.
# Make a prediction using the testing data
y_predict = logistic_regression_model.predict(X_test)
_____no_output_____
MIT
credit_risk_resampling.ipynb
douglasg-fintec/Credit_Risk_Resampling
Step 3: Evaluate the model's performance by doing the following:

* Calculate the accuracy score of the model.
* Generate a confusion matrix.
* Print the classification report.
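For reference (a standard definition, not spelled out in the notebook), the balanced accuracy computed below is the unweighted mean of the per-class recalls, which keeps the majority class from dominating the score:

$$\text{balanced accuracy} = \frac{1}{2}\left(\frac{TP}{TP + FN} + \frac{TN}{TN + FP}\right)$$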
# Print the balanced_accuracy score of the model
balanced_accuracy = balanced_accuracy_score(y_test, y_predict)
print(balanced_accuracy)

# Generate a confusion matrix for the model
logistic_regression_matrix = confusion_matrix(y_test, y_predict)
print(logistic_regression_matrix)

# Print the classification report for the model
logistic_regression_report = classification_report_imbalanced(y_test, y_predict)
print(logistic_regression_report)
                   pre       rec       spe        f1       geo       iba       sup

          0       1.00      0.99      0.91      1.00      0.95      0.91     18765
          1       0.85      0.91      0.99      0.88      0.95      0.90       619

avg / total       0.99      0.99      0.91      0.99      0.95      0.91     19384
MIT
credit_risk_resampling.ipynb
douglasg-fintec/Credit_Risk_Resampling
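Before the discussion that follows, a quick back-of-the-envelope check (my own arithmetic, not from the notebook) of what the class-`1` precision and recall in the report above mean in raw counts:

```python
# Hypothetical sanity check derived from the classification report above.
support = 619                     # actual high-risk loans in the test set (sup column)
recall = 0.91                     # fraction of actual high-risk loans the model found
tp = round(support * recall)      # ~563 true positives
precision = 0.85                  # fraction of high-risk predictions that were correct
fp = round(tp / precision) - tp   # ~99 healthy loans flagged as high-risk
print(tp, fp)
```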
Step 4: Answer the following question.

**Question:** How well does the logistic regression model predict both the `0` (healthy loan) and `1` (high-risk loan) labels?

**Answer:** The model performed better for the `0` class than for the `1` class. The precision and the recall for the `0` class (healthy loan) are much better than those for the `1` class (high-risk loan). The precision for the `0` values is very high at 1.00, meaning that out of all the times the model predicted a testing observation to be `0`, 100% of those predictions were correct. This is largely because we are working with an imbalanced dataset in which `0` is the majority class, with 18765 instances versus only 619 in the minority class. In contrast, out of all the times the model predicted a value of `1` (the minority class), only 85% of those predictions were correct. The recall for the `0` and `1` classes almost matches, which means the model identified the two classes about equally well.

---

Predict a Logistic Regression Model with Resampled Training Data

Step 1: Use the `RandomOverSampler` module from the imbalanced-learn library to resample the data. Be sure to confirm that the labels have an equal number of data points.
# Import the RandomOverSampler module from imbalanced-learn
from imblearn.over_sampling import RandomOverSampler

# Instantiate the random oversampler model
# Assign a random_state parameter of 1 to the model
random_oversampler_model = RandomOverSampler(random_state=1)

# Fit the original training data to the random_oversampler model
X_resampled, y_resampled = random_oversampler_model.fit_resample(X_train, y_train)

# Count the distinct values of the resampled labels data
y_resampled.value_counts()
_____no_output_____
MIT
credit_risk_resampling.ipynb
douglasg-fintec/Credit_Risk_Resampling
Step 2: Use the `LogisticRegression` classifier and the resampled data to fit the model and make predictions.
# Instantiate the Logistic Regression model
# Assign a random_state parameter of 1 to the model
logistic_regression_resample_model = LogisticRegression(random_state=1)

# Fit the model using the resampled training data
logistic_regression_resample_model.fit(X_resampled, y_resampled)

# Make a prediction using the testing data
y_resampled_predict = logistic_regression_resample_model.predict(X_test)
_____no_output_____
MIT
credit_risk_resampling.ipynb
douglasg-fintec/Credit_Risk_Resampling
Step 3: Evaluate the model's performance by doing the following:

* Calculate the accuracy score of the model.
* Generate a confusion matrix.
* Print the classification report.
# Print the balanced_accuracy score of the model
balanced_accuracy = balanced_accuracy_score(y_test, y_resampled_predict)
print(balanced_accuracy)

# Generate a confusion matrix for the model
logistic_regression_resample_matrix = confusion_matrix(y_test, y_resampled_predict)
print(logistic_regression_resample_matrix)

# Print the classification report for the model
logistic_regression_resample_report = classification_report_imbalanced(y_test, y_resampled_predict)
print(logistic_regression_resample_report)
                   pre       rec       spe        f1       geo       iba       sup

          0       1.00      0.99      0.99      1.00      0.99      0.99     18765
          1       0.84      0.99      0.99      0.91      0.99      0.99       619

avg / total       0.99      0.99      0.99      0.99      0.99      0.99     19384
MIT
credit_risk_resampling.ipynb
douglasg-fintec/Credit_Risk_Resampling
Imports
import os
import time

import numpy as np
import matplotlib.pyplot as plt

import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import datasets, transforms, models
from collections import OrderedDict

import PIL

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
dataset_location = '/home/marcin/Datasets/camvid/'
_____no_output_____
MIT
PyTorchNN/1610_PT_FCU.ipynb
marcinbogdanski/ai-sketchpad
CamVid Dataset
import scipy.misc as m
import collections

def download(url, dest, md5sum):
    import os
    import urllib
    import hashlib
    folder, file = os.path.split(dest)
    if folder != '':
        os.makedirs(folder, exist_ok=True)
    if not os.path.isfile(dest):
        print('Downloading', file, '...')
        urllib.request.urlretrieve(url, dest)
    else:
        print('Already Exists:', file)
    assert hashlib.md5(open(dest, 'rb').read()).hexdigest() == md5sum

download(url='https://github.com/alexgkendall/SegNet-Tutorial/archive/master.zip',
         dest=os.path.join(dataset_location, 'master.zip'),
         md5sum='9a61b9d172b649f6e5da7e8ebf75338f')

def extract(src, dest):
    import os
    import zipfile
    path, file = os.path.split(src)
    extract_path, _ = os.path.splitext(src)
    already_extracted = os.path.isdir(dest)
    if not already_extracted:
        with zipfile.ZipFile(src, 'r') as zf:
            print('Extracting', file, '...')
            zf.extractall(dest)
    else:
        print('Already Extracted:', file)
    assert os.path.isdir(extract_path)

extract(src=os.path.join(dataset_location, 'master.zip'),
        dest=os.path.join(dataset_location, 'master'))

class camvidLoader(torch.utils.data.Dataset):
    def __init__(
        self,
        root,
        split="train",
        is_transform=False,
        img_size=None,
        augmentations=None,
        img_norm=True,
        test_mode=False,
    ):
        self.root = root
        self.split = split
        self.img_size = [360, 480]
        self.is_transform = is_transform
        self.augmentations = augmentations
        self.img_norm = img_norm
        self.test_mode = test_mode
        self.mean = np.array([104.00699, 116.66877, 122.67892])
        self.n_classes = 12
        self.files = collections.defaultdict(list)

        if not self.test_mode:
            for split in ["train", "test", "val"]:
                file_list = os.listdir(root + "/" + split)
                self.files[split] = file_list

    def __len__(self):
        return len(self.files[self.split])

    def __getitem__(self, index):
        img_name = self.files[self.split][index]
        img_path = self.root + "/" + self.split + "/" + img_name
        lbl_path = self.root + "/" + self.split + "annot/" + img_name

        img = m.imread(img_path)
        img = np.array(img, dtype=np.uint8)

        lbl = m.imread(lbl_path)
        lbl = np.array(lbl, dtype=np.uint8)

        if self.augmentations is not None:
            img, lbl = self.augmentations(img, lbl)

        if self.is_transform:
            img, lbl = self.transform(img, lbl)

        return img, lbl

    def transform(self, img, lbl):
        img = m.imresize(img, (self.img_size[0], self.img_size[1]))  # uint8 with RGB mode
        img = img[:, :, ::-1]  # RGB -> BGR
        img = img.astype(np.float64)
        img -= self.mean
        if self.img_norm:
            # Resize scales images from 0 to 255, thus we need
            # to divide by 255.0
            img = img.astype(float) / 255.0
        # NHWC -> NCHW
        img = img.transpose(2, 0, 1)

        img = torch.from_numpy(img).float()
        lbl = torch.from_numpy(lbl).long()
        return img, lbl

    def decode_segmap(self, temp, plot=False):
        Sky = [128, 128, 128]
        Building = [128, 0, 0]
        Pole = [192, 192, 128]
        Road = [128, 64, 128]
        Pavement = [60, 40, 222]
        Tree = [128, 128, 0]
        SignSymbol = [192, 128, 128]
        Fence = [64, 64, 128]
        Car = [64, 0, 128]
        Pedestrian = [64, 64, 0]
        Bicyclist = [0, 128, 192]
        Unlabelled = [0, 0, 0]

        label_colours = np.array(
            [Sky, Building, Pole, Road, Pavement, Tree,
             SignSymbol, Fence, Car, Pedestrian, Bicyclist, Unlabelled]
        )
        r = temp.copy()
        g = temp.copy()
        b = temp.copy()
        for l in range(0, self.n_classes):
            r[temp == l] = label_colours[l, 0]
            g[temp == l] = label_colours[l, 1]
            b[temp == l] = label_colours[l, 2]

        rgb = np.zeros((temp.shape[0], temp.shape[1], 3))
        rgb[:, :, 0] = r / 255.0
        rgb[:, :, 1] = g / 255.0
        rgb[:, :, 2] = b / 255.0
        return rgb

t_loader = camvidLoader(
    root=os.path.join(dataset_location, 'master/SegNet-Tutorial-master/CamVid'),
    split='train',
    is_transform=True,
    img_size=(360, 480))
img, lbl = t_loader[0]
lbl.max()
t_loader.files['train'][0]

import functools

class fcn32s(nn.Module):
    def __init__(self, n_classes=21, learned_billinear=False):
        super(fcn32s, self).__init__()
        self.learned_billinear = learned_billinear
        self.n_classes = n_classes
        self.loss = functools.partial(cross_entropy2d, size_average=False)

        self.conv_block1 = nn.Sequential(
            nn.Conv2d(3, 64, 3, padding=100),
            nn.ReLU(inplace=True),
            nn.Conv2d(64, 64, 3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(2, stride=2, ceil_mode=True),
        )

        self.conv_block2 = nn.Sequential(
            nn.Conv2d(64, 128, 3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(128, 128, 3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(2, stride=2, ceil_mode=True),
        )

        self.conv_block3 = nn.Sequential(
            nn.Conv2d(128, 256, 3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, 3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, 3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(2, stride=2, ceil_mode=True),
        )

        self.conv_block4 = nn.Sequential(
            nn.Conv2d(256, 512, 3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(512, 512, 3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(512, 512, 3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(2, stride=2, ceil_mode=True),
        )

        self.conv_block5 = nn.Sequential(
            nn.Conv2d(512, 512, 3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(512, 512, 3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(512, 512, 3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(2, stride=2, ceil_mode=True),
        )

        self.classifier = nn.Sequential(
            nn.Conv2d(512, 4096, 7),
            nn.ReLU(inplace=True),
            nn.Dropout2d(),
            nn.Conv2d(4096, 4096, 1),
            nn.ReLU(inplace=True),
            nn.Dropout2d(),
            nn.Conv2d(4096, self.n_classes, 1),
        )

        if self.learned_billinear:
            raise NotImplementedError

    def forward(self, x):
        conv1 = self.conv_block1(x)
        conv2 = self.conv_block2(conv1)
        conv3 = self.conv_block3(conv2)
        conv4 = self.conv_block4(conv3)
        conv5 = self.conv_block5(conv4)
        score = self.classifier(conv5)
        out = F.upsample(score, x.size()[2:])
        return out

    def init_vgg16_params(self, vgg16, copy_fc8=True):
        blocks = [
            self.conv_block1,
            self.conv_block2,
            self.conv_block3,
            self.conv_block4,
            self.conv_block5,
        ]

        ranges = [[0, 4], [5, 9], [10, 16], [17, 23], [24, 29]]
        features = list(vgg16.features.children())

        for idx, conv_block in enumerate(blocks):
            for l1, l2 in zip(features[ranges[idx][0] : ranges[idx][1]], conv_block):
                if isinstance(l1, nn.Conv2d) and isinstance(l2, nn.Conv2d):
                    assert l1.weight.size() == l2.weight.size()
                    assert l1.bias.size() == l2.bias.size()
                    l2.weight.data = l1.weight.data
                    l2.bias.data = l1.bias.data

        for i1, i2 in zip([0, 3], [0, 3]):
            l1 = vgg16.classifier[i1]
            l2 = self.classifier[i2]
            l2.weight.data = l1.weight.data.view(l2.weight.size())
            l2.bias.data = l1.bias.data.view(l2.bias.size())

        n_class = self.classifier[6].weight.size()[0]
        if copy_fc8:
            l1 = vgg16.classifier[6]
            l2 = self.classifier[6]
            l2.weight.data = l1.weight.data[:n_class, :].view(l2.weight.size())
            l2.bias.data = l1.bias.data[:n_class]

def cross_entropy2d(input, target, weight=None, size_average=True):
    n, c, h, w = input.size()
    nt, ht, wt = target.size()

    # Handle inconsistent size between input and target
    if h != ht and w != wt:  # upsample labels
        input = F.interpolate(input, size=(ht, wt), mode="bilinear", align_corners=True)

    input = input.transpose(1, 2).transpose(2, 3).contiguous().view(-1, c)
    target = target.view(-1)
    loss = F.cross_entropy(
        input, target, weight=weight, size_average=size_average, ignore_index=250
    )
    return loss

model = fcn32s(n_classes=12)
vgg16 = models.vgg16(pretrained=True)
model.init_vgg16_params(vgg16)

res = model(img.expand(1, -1, -1, -1))

def plot_all(img, res, lbl):
    fig, (ax1, ax2, ax3) = plt.subplots(nrows=1, ncols=3, figsize=[16, 9])

    kkk = np.array(img.numpy().transpose(1, 2, 0) * 255 + t_loader.mean, dtype=int)
    kkk = kkk[:, :, ::-1]
    ax1.imshow(kkk)

    arr = np.argmax(res.detach()[0].numpy(), axis=0)  # res to numpy
    ax2.imshow(t_loader.decode_segmap(arr))

    ax3.imshow(t_loader.decode_segmap(lbl.numpy()))

plot_all(img, res, lbl)

# The training loop below was left unfinished in the original notebook;
# its body is stubbed out here so the cell still parses.
for e in range(300000):
    pass
_____no_output_____
MIT
PyTorchNN/1610_PT_FCU.ipynb
marcinbogdanski/ai-sketchpad
United States - Crime Rates - 1960 - 2014

Introduction:

This time you will create the DataFrame yourself. Special thanks to: https://github.com/justmarkham for sharing the dataset and materials.

Step 1. Import the necessary libraries
import numpy as np
import pandas as pd
_____no_output_____
BSD-3-Clause
04_Apply/US_Crime_Rates/Exercises_with_solutions.ipynb
chisus089/3_pandas_exercises
Step 2. Import the dataset from this [address](https://raw.githubusercontent.com/guipsamora/pandas_exercises/master/04_Apply/US_Crime_Rates/US_Crime_Rates_1960_2014.csv).

Step 3. Assign it to a variable called crime.
url = "https://raw.githubusercontent.com/guipsamora/pandas_exercises/master/04_Apply/US_Crime_Rates/US_Crime_Rates_1960_2014.csv"
crime = pd.read_csv(url)
crime.head()
_____no_output_____
BSD-3-Clause
04_Apply/US_Crime_Rates/Exercises_with_solutions.ipynb
chisus089/3_pandas_exercises
Step 4. What is the type of the columns?
crime.info()
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 55 entries, 0 to 54
Data columns (total 12 columns):
Year                  55 non-null int64
Population            55 non-null int64
Total                 55 non-null int64
Violent               55 non-null int64
Property              55 non-null int64
Murder                55 non-null int64
Forcible_Rape         55 non-null int64
Robbery               55 non-null int64
Aggravated_assault    55 non-null int64
Burglary              55 non-null int64
Larceny_Theft         55 non-null int64
Vehicle_Theft         55 non-null int64
dtypes: int64(12)
memory usage: 5.2 KB
BSD-3-Clause
04_Apply/US_Crime_Rates/Exercises_with_solutions.ipynb
chisus089/3_pandas_exercises
Have you noticed that the type of Year is int64? Pandas has a dedicated type for working with time series. Let's see it now.

Step 5. Convert the type of the column Year to datetime64
# pd.to_datetime(crime)
crime.Year = pd.to_datetime(crime.Year, format='%Y')
crime.info()
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 55 entries, 0 to 54
Data columns (total 12 columns):
Year                  55 non-null datetime64[ns]
Population            55 non-null int64
Total                 55 non-null int64
Violent               55 non-null int64
Property              55 non-null int64
Murder                55 non-null int64
Forcible_Rape         55 non-null int64
Robbery               55 non-null int64
Aggravated_assault    55 non-null int64
Burglary              55 non-null int64
Larceny_Theft         55 non-null int64
Vehicle_Theft         55 non-null int64
dtypes: datetime64[ns](1), int64(11)
memory usage: 5.2 KB
BSD-3-Clause
04_Apply/US_Crime_Rates/Exercises_with_solutions.ipynb
chisus089/3_pandas_exercises
Step 6. Set the Year column as the index of the dataframe
crime = crime.set_index('Year', drop=True)
crime.head()
_____no_output_____
BSD-3-Clause
04_Apply/US_Crime_Rates/Exercises_with_solutions.ipynb
chisus089/3_pandas_exercises
Step 7. Delete the Total column
del crime['Total']
crime.head()
_____no_output_____
BSD-3-Clause
04_Apply/US_Crime_Rates/Exercises_with_solutions.ipynb
chisus089/3_pandas_exercises
Step 8. Group the years by decades and sum the values.

Pay attention to the Population column: summing it across a decade is a mistake, since population is a level rather than a flow, so take the decade's maximum instead.
# To learn more about .resample: https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.resample.html
# To learn more about Offset Aliases: http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases

# Uses resample to sum each decade
crimes = crime.resample('10AS').sum()

# Uses resample to get the max value only for the "Population" column
population = crime['Population'].resample('10AS').max()

# Updating the "Population" column
crimes['Population'] = population

crimes
_____no_output_____
BSD-3-Clause
04_Apply/US_Crime_Rates/Exercises_with_solutions.ipynb
chisus089/3_pandas_exercises
Step 9. What is the most dangerous decade to live in the US?
# apparently the 90s was a pretty dangerous time in the US
crime.idxmax(0)
_____no_output_____
BSD-3-Clause
04_Apply/US_Crime_Rates/Exercises_with_solutions.ipynb
chisus089/3_pandas_exercises
Exercise 1.1

Plot $f(x) = 1 - e^{2x}$ over $[-1, 1]$ with intervals of $0.01$.
""" Exercise1.1 Plot f(x) = 1 - e ^ (2 * x) over [-1, 1] with intervals .01 """ x_range = arange(-1, 1, .01) y_range = array([1 - exp(2 * x) for x in x_range]) plot(x_range, y_range, 'k-', label = "Exercise1.1") ylabel("y") xlabel("x") legend(loc='upper right')
_____no_output_____
MIT
Exercise01.ipynb
lnsongxf/Applied_Computational_Economics_and_Finance
Exercise 1.2

Solve the matrix multiplication
$$AB = \left[\begin{array}{ccc} 0 & -1 & 2\\ -2 & -1 & 4\\ 2 & 7 & -2\end{array}\right]\left[\begin{array}{ccc} -7 & 1 & 1\\ 7 & -3 & -2\\ 3 & 5 & 0\end{array}\right]$$
$$y = [3, -1, 2]$$
With $C = AB$, solve
$$Cx = y.$$
""" Exercise1.2 Solve matrix multiplication """ #from numpy import array, linalg A = array([[0, -1, 2], [-2, -1, 4], [2, 7, -2]]) B = array([[-7, 1, 1], [7, -3, -2], [3, 5, 0]]) y = array([3, -1, 2]) # part_a(): """ Solve Cx = y using standard matrix multiplication for A and B """ C = A.dot(B) x = linalg.solve(C, y) print("The standard matrix product C: " ,C) print("\nSolution from Matrix multiplication: ", x) #part_b(): """ Solve Cx = y using element-wise multiplication (Hadamard product) """ C = A * B x = linalg.solve(C, y) print("\nThe element-by-element matrix product C:") print(C) print("\nSolution from Element-wise multiplication:") print(x)
The element-by-element matrix product C:
[[  0  -1   2]
 [-14   3  -8]
 [  6  35   0]]

Solution from Element-wise multiplication:
[-0.79958678  0.19421488  1.59710744]
MIT
Exercise01.ipynb
lnsongxf/Applied_Computational_Economics_and_Finance
Exercise 1.3

Calculate the time series
$$y_t = 5 + 0.05\,t + \epsilon_t$$
for years $1960, 1961, \ldots, 2016$, assuming $\epsilon_t$ independently and identically distributed with mean $0$ and $\sigma = 0.2$.
# Setting a random seed for reproducibility
import numpy as np
from numpy import array, polyfit, poly1d
from matplotlib.pyplot import plot

rnd = np.random.RandomState(seed=123)
# https://docs.scipy.org/doc/numpy/reference/generated/numpy.random.RandomState.html

"""
Exercise 1.3
Calculate the time series
"""
mu = 0  # the exercise statement specifies mean 0 (the original code had mu = -0.2)
sigma = 0.2

"""
Create the time series yt, then run a regression on yt and
plot yt together with its trendline
"""
start_year = 1960
end_year = 2016

t_array = array(range(start_year, end_year + 1))

# Generating a random array
epsilon_t = array(rnd.normal(mu, sigma, len(t_array)))
# https://docs.scipy.org/doc/numpy/reference/generated/numpy.random.normal.html

yt = array([5 + .05 * t_i + epsilon_t[i] for i, t_i in enumerate(t_array)])

fit = polyfit(t_array, yt, 1)
# https://docs.scipy.org/doc/numpy/reference/generated/numpy.polyfit.html
"""
Least squares polynomial fit.
Fit a polynomial p(x) = p[0] * x**deg + ... + p[deg] of degree deg
to points (x, y). Returns a vector of coefficients p that minimises
the squared error.
"""

fit_func = poly1d(fit)
"""
https://docs.scipy.org/doc/numpy/reference/generated/numpy.poly1d.html
A one-dimensional polynomial class. A convenience class, used to
encapsulate "natural" operations on polynomials so that said
operations may take on their customary form in code.
"""

# two plots together
plot(t_array, yt, "yo", t_array, fit_func(t_array), "--k")
_____no_output_____
MIT
Exercise01.ipynb
lnsongxf/Applied_Computational_Economics_and_Finance
Exercise 1.4

Consider the original example with the farmer, where acreage planted is
$$a = 0.5 + 0.5\,E[p]$$
($E[p]$ is the expected price). Quantity $q$ is
$$q = a \cdot y$$
($y$ is the yield), and the market-clearing price $p$ is
$$p = 3 - 2q.$$
Assume in our case that the yield follows a two-point distribution such that
```y = array([0.7, 1.3])```
Our goal for part (a) is to compute the variance of this price distribution, otherwise known as $\sigma^2$.
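Before running the iteration, the fixed point can be checked analytically (my derivation, not part of the original): with equal weights the expected yield is $\bar y = 0.5(0.7) + 0.5(1.3) = 1$, so

$$a = 0.5 + 0.5\bar p, \qquad \bar p = 3 - 2a\bar y = 3 - 2a \;\Rightarrow\; a = 0.5 + 0.5(3 - 2a) = 2 - a \;\Rightarrow\; a = 1.$$

The price distribution is then $p = 3 - 2y \in \{1.6,\ 0.4\}$ with $\bar p = 1$ and

$$\sigma^2 = 0.5(1.6 - 1)^2 + 0.5(0.4 - 1)^2 = 0.36,$$

which the code below should reproduce.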
from math import exp, fabs
from numpy import array, var

# part (a): compute the variance in price
a = 1
y, w = array([0.7, 1.3]), array([0.5, 0.5])
for _ in range(100):
    a_previous = a
    p = 3 - 2 * a * y
    f = w.dot(p)
    a = 0.5 + 0.5 * f
    if fabs(a_previous - a) < exp(-8):
        break

# converted from the original Python 2 print statement
print("acreage", a, "variance:", var(p), "expectation", p.dot(w))
_____no_output_____
MIT
Exercise01.ipynb
lnsongxf/Applied_Computational_Economics_and_Finance
Model
xb, yb = dls_feat.one_batch(); xb.shape

from torch.nn import TransformerEncoder, TransformerEncoderLayer

class SeqHead(nn.Module):
    def __init__(self):
        super().__init__()
        # d_model = 2048+6+1
        d_model = 1024
        n_head = 4
        self.flat = nn.Sequential(AdaptiveConcatPool2d(), Flatten())
        self.hook = ReshapeBodyHook(self.flat)
        # self.linear = nn.Linear(d_model+7, d_model)
        encoder_layers = TransformerEncoderLayer(d_model, n_head, d_model*2)
        self.transformer = TransformerEncoder(encoder_layers, 4)
        self.head = nn.Sequential(nn.Linear(d_model, 6))

    def forward(self, x):
        x = self.flat(x)
        # x = torch.cat(x, axis=-1)
        # x = self.linear(x)
        feat = self.transformer(x.transpose(0, 1))
        return self.head(feat.transpose(0, 1))

m = SeqHead()

name = 'train3d_baseline_feat_transformer'
learn = get_learner(dls_feat, m, name=name)
learn.add_cb(DePadLoss())

xb.shape
# with torch.no_grad():
#     learn.model(xb).shape
# learn.summary()
_____no_output_____
Apache-2.0
03_train3d_experiments/03_train3d_02d_train_transformer_head.ipynb
bearpelican/rsna_retro
Training
learn.lr_find()

do_fit(learn, 10, 1e-4)
learn.save(f'runs/{name}-1')
_____no_output_____
Apache-2.0
03_train3d_experiments/03_train3d_02d_train_transformer_head.ipynb
bearpelican/rsna_retro
Testing
sub_fn = f'subm/{name}'

learn.load(f'runs/{name}-1')
learn.validate()

learn.dls = get_3d_dls_feat(Meta.df_tst, path=path_feat_tst_384avg, bs=32, test=True)
preds, targs = learn.get_preds()
preds.shape, preds.min(), preds.max()

pred_csv = submission(Meta.df_tst, preds, fn=sub_fn)
api.competition_submit(f'{sub_fn}.csv', name, 'rsna-intracranial-hemorrhage-detection')
api.competitions_submissions_list('rsna-intracranial-hemorrhage-detection')[0]
_____no_output_____
Apache-2.0
03_train3d_experiments/03_train3d_02d_train_transformer_head.ipynb
bearpelican/rsna_retro
1. Create Train Script
%%file train
#!/usr/bin/env python

from sklearn.neighbors import KNeighborsClassifier
import pandas as pd
import numpy as np
import pickle
import os

np.random.seed(123)

# Define paths for Model Training inside Container.
INPUT_PATH = '/opt/ml/input/data'
OUTPUT_PATH = '/opt/ml/output'
MODEL_PATH = '/opt/ml/model'
PARAM_PATH = '/opt/ml/input/config/hyperparameters.json'

# Training data sitting in S3 will be copied to this location during training when used with File MODE.
TRAIN_DATA_PATH = f'{INPUT_PATH}/train'
TEST_DATA_PATH = f'{INPUT_PATH}/test'

def train():
    print("------- [STARTING TRAINING] -------")
    train_df = pd.read_csv(os.path.join(TRAIN_DATA_PATH, 'train.csv'),
                           names=['class', 'mass', 'width', 'height', 'color_score'])
    train_df.head()
    X_train = train_df[['mass', 'width', 'height', 'color_score']]
    y_train = train_df['class']
    knn = KNeighborsClassifier()
    knn.fit(X_train, y_train)
    # Save the trained Model inside the Container
    with open(os.path.join(MODEL_PATH, 'model.pkl'), 'wb') as out:
        pickle.dump(knn, out)
    print("------- [TRAINING COMPLETE!] -------")

    print("------- [STARTING EVALUATION] -------")
    test_df = pd.read_csv(os.path.join(TEST_DATA_PATH, 'test.csv'),
                          names=['class', 'mass', 'width', 'height', 'color_score'])
    # The original script evaluated on train_df here; evaluate on the test set it just read instead.
    X_test = test_df[['mass', 'width', 'height', 'color_score']]
    y_test = test_df['class']
    acc = knn.score(X_test, y_test)
    print('Accuracy = {:.2f}%'.format(acc * 100))
    print("------- [EVALUATION DONE!] -------")

if __name__ == '__main__':
    train()
Overwriting train
Apache-2.0
SageMaker/Training-Inference/6. BYOC Sklearn/BYOC Sklearn.ipynb
arunprsh/AI-ML-Examples
2. Create Serve Script
%%file serve
#!/usr/bin/env python

from flask import Flask, Response, request
from io import StringIO
import pandas as pd
import logging
import pickle
import os

app = Flask(__name__)

MODEL_PATH = '/opt/ml/model'

# Singleton Class for holding the Model
class Predictor:
    model = None

    @classmethod
    def load_model(cls):
        print('[LOADING MODEL]')
        if cls.model is None:
            with open(os.path.join(MODEL_PATH, 'model.pkl'), 'rb') as file_:
                cls.model = pickle.load(file_)
        print('MODEL LOADED!')
        return cls.model

    @classmethod
    def predict(cls, X):
        clf = cls.load_model()
        return clf.predict(X)

@app.route('/ping', methods=['GET'])
def ping():
    print('[HEALTH CHECK]')
    model = Predictor.load_model()
    status = 200
    if model is None:
        status = 404
    return Response(response={"HEALTH CHECK": "OK"}, status=status, mimetype='application/json')

@app.route('/invocations', methods=['POST'])
def invoke():
    data = None

    # Transform Payload in CSV to Pandas DataFrame.
    if request.content_type == 'text/csv':
        data = request.data.decode('utf-8')
        data = StringIO(data)
        data = pd.read_csv(data, header=None)
    else:
        # The original referenced 'flask.Response', but only 'Response' is imported.
        return Response(response='This Predictor only supports CSV data',
                        status=415, mimetype='text/plain')

    logging.info('Invoked with {} records'.format(data.shape[0]))

    predictions = Predictor.predict(data)

    # Convert from numpy back to CSV
    out = StringIO()
    pd.DataFrame({'results': predictions}).to_csv(out, header=False, index=False)
    result = out.getvalue()

    return Response(response=result, status=200, mimetype='text/csv')

if __name__ == '__main__':
    app.run(host='0.0.0.0', port=8080)
Overwriting serve
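Once the container is running locally, the `/invocations` route can be smoke-tested directly. A minimal sketch (my addition; the feature values are made up and the port comes from the script above):

```python
# Hypothetical local test of the serve script's /invocations endpoint.
import requests

payload = "160,7.0,7.1,0.6"  # mass,width,height,color_score -- made-up sample values
resp = requests.post(
    "http://localhost:8080/invocations",
    data=payload,
    headers={"Content-Type": "text/csv"},
)
print(resp.text)  # predicted class label, returned as CSV
```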
Apache-2.0
SageMaker/Training-Inference/6. BYOC Sklearn/BYOC Sklearn.ipynb
arunprsh/AI-ML-Examples
3. Build a Docker Image and Push to ECR
%%sh

# Assign a name for your Docker image.
image_name=byoc-sklearn
echo "Image Name: ${image_name}"

# Retrieve AWS Account.
account=$(aws sts get-caller-identity --query Account --output text)

# Get the region defined in the current configuration (default to us-east-1 if none defined).
region=$(aws configure get region)
region=${region:-us-east-1}
echo "Account: ${account}"
echo "Region: ${region}"

repository="${account}.dkr.ecr.${region}.amazonaws.com"
echo "Repository: ${repository}"

image="${account}.dkr.ecr.${region}.amazonaws.com/${image_name}:latest"
echo "Image URI: ${image}"

# If the repository does not exist in ECR, create it.
aws ecr describe-repositories --repository-names ${image_name} > /dev/null 2>&1
if [ $? -ne 0 ]
then
    aws ecr create-repository --repository-name ${image_name} > /dev/null
fi

# Get the login command from ECR and execute it directly.
aws ecr get-login-password --region ${region} | docker login --username AWS --password-stdin ${repository}

# Build the docker image locally with the image name and tag it.
docker build -t ${image_name} .
docker tag ${image_name} ${image}

# Finally, push image to ECR with the full image name.
docker push ${image}
Image Name: byoc-sklearn
Account: 892313895307
Region: us-east-1
Repository: 892313895307.dkr.ecr.us-east-1.amazonaws.com
Image URI: 892313895307.dkr.ecr.us-east-1.amazonaws.com/byoc-sklearn:latest
Login Succeeded
Sending build context to Docker daemon  80.38kB
Step 1/8 : FROM python:3.7
 ---> 5b86e11778a2
Step 2/8 : COPY requirements.txt ./
 ---> Using cache
 ---> 8623cb69764a
Step 3/8 : RUN pip install --no-cache-dir -r requirements.txt
 ---> Using cache
 ---> 00be6a106a8c
Step 4/8 : COPY train /usr/local/bin
 ---> Using cache
 ---> f55d18c34b89
Step 5/8 : RUN chmod +x /usr/local/bin/train
 ---> Using cache
 ---> aae62ce0c43b
Step 6/8 : COPY serve /usr/local/bin
 ---> Using cache
 ---> d9408249ae77
Step 7/8 : RUN chmod +x /usr/local/bin/serve
 ---> Using cache
 ---> 04fc001c0b7c
Step 8/8 : EXPOSE 8080
 ---> Using cache
 ---> 6990c97b2383
Successfully built 6990c97b2383
Successfully tagged byoc-sklearn:latest
The push refers to repository [892313895307.dkr.ecr.us-east-1.amazonaws.com/byoc-sklearn]
032f1a03bf08: Layer already exists
053f064686a0: Layer already exists
59239f9a3c52: Layer already exists
34bf625dab71: Layer already exists
415a4c435e2d: Layer already exists
a9066f74cbd8: Layer already exists
1b17be258ee0: Layer already exists
6522a2852221: Layer already exists
56a69ef72608: Layer already exists
6f7043721c9b: Layer already exists
a933681cf349: Layer already exists
f49d20b92dc8: Layer already exists
fe342cfe5c83: Layer already exists
630e4f1da707: Layer already exists
9780f6d83e45: Layer already exists
latest: digest: sha256:a2ebe6e788d472b87131c8ee6e7ef75d3e2cb27b01d9f1d41d6d4e88274d0e5e size: 3467
Apache-2.0
SageMaker/Training-Inference/6. BYOC Sklearn/BYOC Sklearn.ipynb
arunprsh/AI-ML-Examples
Imports
from sagemaker.predictor import csv_serializer import pandas as pd import sagemaker
_____no_output_____
Apache-2.0
SageMaker/Training-Inference/6. BYOC Sklearn/BYOC Sklearn.ipynb
arunprsh/AI-ML-Examples
Essentials
role = sagemaker.get_execution_role() session = sagemaker.Session() account = session.boto_session.client('sts').get_caller_identity()['Account'] region = session.boto_session.region_name image_name = 'byoc-sklearn' image_uri = f'{account}.dkr.ecr.{region}.amazonaws.com/{image_name}:latest'
_____no_output_____
Apache-2.0
SageMaker/Training-Inference/6. BYOC Sklearn/BYOC Sklearn.ipynb
arunprsh/AI-ML-Examples
Train (Local Mode)
model = sagemaker.estimator.Estimator(
    image_name=image_uri,         # SDK v1 name; renamed to image_uri in SDK v2
    role=role,
    train_instance_count=1,       # renamed to instance_count in SDK v2
    train_instance_type='local',  # 'local' runs the container on this notebook instance
    sagemaker_session=None        # local mode needs no SageMaker session
)

model.fit({'train': 'file://.././DATA/train/train.csv',
           'test': 'file://.././DATA/test/test.csv'})
Creating tmp815252o0_algo-1-n7amc_1 ... Attaching to tmp815252o0_algo-1-n7amc_12mdone algo-1-n7amc_1 | ------- [STARTING TRAINING] ------- algo-1-n7amc_1 | ------- [TRAINING COMPLETE!] ------- algo-1-n7amc_1 | ------- [STARTING EVALUATION] ------- algo-1-n7amc_1 | Accuracy = 97.73% algo-1-n7amc_1 | ------- [EVALUATION DONE!] ------- tmp815252o0_algo-1-n7amc_1 exited with code 0 Aborting on container exit... ===== Job Complete =====
Apache-2.0
SageMaker/Training-Inference/6. BYOC Sklearn/BYOC Sklearn.ipynb
arunprsh/AI-ML-Examples
Deploy (Locally)
predictor = model.deploy(1, 'local', endpoint_name='byoc-sklearn', serializer=csv_serializer)
Parameter image will be renamed to image_uri in SageMaker Python SDK v2.
Apache-2.0
SageMaker/Training-Inference/6. BYOC Sklearn/BYOC Sklearn.ipynb
arunprsh/AI-ML-Examples
Evaluate Real Time Inference (Locally)
df = pd.read_csv('.././DATA/test/test.csv', header=None)
test_df = df.sample(1)  # pick one random test record
test_df
test_df.drop(test_df.columns[[0]], axis=1, inplace=True)  # drop the first column (assumed to be the label) so only features are sent
test_df
test_df.values
prediction = predictor.predict(test_df.values).decode('utf-8').strip()
prediction
_____no_output_____
Apache-2.0
SageMaker/Training-Inference/6. BYOC Sklearn/BYOC Sklearn.ipynb
arunprsh/AI-ML-Examples
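A single sample confirms the endpoint works; to gauge quality, the whole test set can be scored in one call. This sketch is not in the original notebook and assumes column 0 of `test.csv` holds the true label:

```python
# Illustrative bulk evaluation against the (local) endpoint.
# Assumption: column 0 of test.csv is the ground-truth label.
import pandas as pd

df = pd.read_csv('.././DATA/test/test.csv', header=None)
y_true = df[0].values
X = df.drop(columns=[0]).values

raw = predictor.predict(X).decode('utf-8').strip()
preds = [int(p) for p in raw.split('\n')]

accuracy = sum(int(p == t) for p, t in zip(preds, y_true)) / len(y_true)
print(f'Accuracy: {accuracy:.2%}')
```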
Train (using SageMaker)
WORK_DIRECTORY = '.././DATA'

train_data_s3_pointer = session.upload_data(f'{WORK_DIRECTORY}/train', key_prefix='byoc-sklearn/train')
test_data_s3_pointer = session.upload_data(f'{WORK_DIRECTORY}/test', key_prefix='byoc-sklearn/test')

train_data_s3_pointer
test_data_s3_pointer

model = sagemaker.estimator.Estimator(
    image_name=image_uri,
    role=role,
    train_instance_count=1,
    train_instance_type='ml.m5.xlarge',
    sagemaker_session=session  # use the SageMaker session for managed (non-local) training
)

model.fit({'train': train_data_s3_pointer, 'test': test_data_s3_pointer})
's3_input' class will be renamed to 'TrainingInput' in SageMaker Python SDK v2. 's3_input' class will be renamed to 'TrainingInput' in SageMaker Python SDK v2.
Apache-2.0
SageMaker/Training-Inference/6. BYOC Sklearn/BYOC Sklearn.ipynb
arunprsh/AI-ML-Examples
Deploy Trained Model as SageMaker Endpoint
predictor = model.deploy(1, 'ml.m5.xlarge', endpoint_name='byoc-sklearn', serializer=csv_serializer)
Parameter image will be renamed to image_uri in SageMaker Python SDK v2.
Apache-2.0
SageMaker/Training-Inference/6. BYOC Sklearn/BYOC Sklearn.ipynb
arunprsh/AI-ML-Examples
Real Time Inference using Deployed Endpoint
df = pd.read_csv('.././DATA/test/test.csv', header=None)
test_df = df.sample(1)  # pick one random test record
test_df.drop(test_df.columns[[0]], axis=1, inplace=True)  # drop the label column, keep features only
test_df
test_df.values
prediction = predictor.predict(test_df.values).decode('utf-8').strip()
prediction
_____no_output_____
Apache-2.0
SageMaker/Training-Inference/6. BYOC Sklearn/BYOC Sklearn.ipynb
arunprsh/AI-ML-Examples
Batch Transform (Batch Inference) using Trained SageMaker Model
bucket_name = session.default_bucket()
output_path = f's3://{bucket_name}/byoc-sklearn/batch_test_out'

transformer = model.transformer(instance_count=1,
                                instance_type='ml.m5.xlarge',
                                output_path=output_path,
                                assemble_with='Line',  # join per-record outputs with newlines
                                accept='text/csv')

WORK_DIRECTORY = '.././DATA'
batch_input = session.upload_data(f'{WORK_DIRECTORY}/batch_test', key_prefix='byoc-sklearn/batch_test')

transformer.transform(batch_input,
                      content_type='text/csv',
                      split_type='Line',  # treat each line as one record
                      input_filter='$')   # JSONPath '$' passes each record through unchanged
transformer.wait()
.Gracefully stopping... (press Ctrl+C again to force)
.........................
 * Serving Flask app "serve" (lazy loading)
 * Environment: production
   WARNING: This is a development server. Do not use it in a production deployment.
   Use a production WSGI server instead.
 * Debug mode: off
 * Running on http://0.0.0.0:8080/ (Press CTRL+C to quit)
169.254.255.130 - - [04/Nov/2020 18:01:34] "GET /ping HTTP/1.1" 200 -
169.254.255.130 - - [04/Nov/2020 18:01:34] "GET /execution-parameters HTTP/1.1" 404 -
169.254.255.130 - - [04/Nov/2020 18:01:34] "POST /invocations HTTP/1.1" 200 -
INFO:werkzeug:169.254.255.130 - - [04/Nov/2020 18:01:34] "POST /invocations HTTP/1.1" 200 -
169.254.255.130 - - [04/Nov/2020 18:01:34] "POST /invocations HTTP/1.1" 200 -
INFO:werkzeug:169.254.255.130 - - [04/Nov/2020 18:01:34] "POST /invocations HTTP/1.1" 200 -
Apache-2.0
SageMaker/Training-Inference/6. BYOC Sklearn/BYOC Sklearn.ipynb
arunprsh/AI-ML-Examples
Inspect Batch Transformed Output
s3_client = session.boto_session.client('s3') s3_client.download_file(bucket_name, 'byoc-sklearn/batch_test_out/batch_test.csv.out', '.././DATA/batch_test/batch_test.csv.out') with open('.././DATA/batch_test/batch_test.csv.out', 'r') as f: results = f.readlines() print("Transform results: \n{}".format(''.join(results)))
Transform results: 1 3 0 1 1 3 1 3 0 0 0 3 0 0 2
Apache-2.0
SageMaker/Training-Inference/6. BYOC Sklearn/BYOC Sklearn.ipynb
arunprsh/AI-ML-Examples
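As an extra check, not in the original notebook, the number of emitted predictions should equal the number of input rows. A quick illustrative comparison, assuming `results` from the cell above is still in scope and the input file sits next to its `.out` counterpart:

```python
# Illustrative sanity check: one prediction per input row is expected.
import pandas as pd

batch_in = pd.read_csv('.././DATA/batch_test/batch_test.csv', header=None)
print(len(results), 'predictions for', batch_in.shape[0], 'input rows')
```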
11์žฅ ์ž์—ฐ์–ด์ฒ˜๋ฆฌ 1๋ถ€ **๊ฐ์‚ฌ๋ง**: ํ”„๋ž‘์†Œ์™€ ์ˆ„๋ ˆ์˜ [Deep Learning with Python, Second Edition](https://www.manning.com/books/deep-learning-with-python-second-edition?a_aid=keras&a_bid=76564dff) 10์žฅ์— ์‚ฌ์šฉ๋œ ์ฝ”๋“œ์— ๋Œ€ํ•œ ์„ค๋ช…์„ ๋‹ด๊ณ  ์žˆ์œผ๋ฉฐ ํ…์„œํ”Œ๋กœ์šฐ 2.6 ๋ฒ„์ „์—์„œ ์ž‘์„ฑ๋˜์—ˆ์Šต๋‹ˆ๋‹ค. ์†Œ์Šค์ฝ”๋“œ๋ฅผ ๊ณต๊ฐœํ•œ ์ €์ž์—๊ฒŒ ๊ฐ์‚ฌ๋“œ๋ฆฝ๋‹ˆ๋‹ค.**tensorflow ๋ฒ„์ „๊ณผ GPU ํ™•์ธ**- ๊ตฌ๊ธ€ ์ฝ”๋žฉ ์„ค์ •: '๋Ÿฐํƒ€์ž„ -> ๋Ÿฐํƒ€์ž„ ์œ ํ˜• ๋ณ€๊ฒฝ' ๋ฉ”๋‰ด์—์„œ GPU ์ง€์ • ํ›„ ์•„๋ž˜ ๋ช…๋ น์–ด ์‹คํ–‰ ๊ฒฐ๊ณผ ํ™•์ธ ``` !nvidia-smi ```- ์‚ฌ์šฉ๋˜๋Š” tensorflow ๋ฒ„์ „ ํ™•์ธ ```python import tensorflow as tf tf.__version__ ```- tensorflow๊ฐ€ GPU๋ฅผ ์‚ฌ์šฉํ•˜๋Š”์ง€ ์—ฌ๋ถ€ ํ™•์ธ ```python tf.config.list_physical_devices('GPU') ``` ์ฃผ์š”๋‚ด์šฉ - ์ž์—ฐ์–ด์ฒ˜๋ฆฌ(Natural Language Processing) ์†Œ๊ฐœ - ๋‹จ์–ด์ฃผ๋จธ๋‹ˆ(bag-of-words) ๋ชจ๋ธ - ์ˆœ์ฐจ(sequence) ๋ชจ๋ธ- ์ˆœ์ฐจ ๋ชจ๋ธ ํ™œ์šฉ - ์–‘๋ฐฉํ–ฅ ์ˆœํ™˜์‹ ๊ฒฝ๋ง(bidirectional LSTM) ์ ์šฉ- ํŠธ๋žœ์Šคํฌ๋จธ(Transformer) ํ™œ์šฉ- ์‹œํ€€์Šค-ํˆฌ-์‹œํ€€์Šค(seq2seq) ๋ชจ๋ธ ํ™œ์šฉ 11.1 ์ž์—ฐ์–ด์ฒ˜๋ฆฌ ์†Œ๊ฐœ ํŒŒ์ด์ฌ, ์ž๋ฐ”, C, C++, C, ์ž๋ฐ”์Šคํฌ๋ฆฝํŠธ ๋“ฑ ์ปดํ“จํ„ฐ ํ”„๋กœ๊ทธ๋ž˜๋ฐ์–ธ์–ด์™€ ๊ตฌ๋ถ„ํ•˜๊ธฐ ์œ„ํ•ด ์ผ์ƒ์—์„œ ์‚ฌ์šฉ๋˜๋Š” ํ•œ๊ตญ์–ด, ์˜์–ด ๋“ฑ์„ __์ž์—ฐ์–ด__(natural language)๋ผ ๋ถ€๋ฅธ๋‹ค. ์ž์—ฐ์–ด์˜ ํŠน์„ฑ์ƒ ์ •ํ™•ํ•œ ๋ถ„์„์„ ์œ„ํ•œ ์•Œ๊ณ ๋ฆฌ์ฆ˜์„ ๊ตฌํ˜„ํ•˜๋Š” ์ผ์€ ์‚ฌ์‹ค์ƒ ๋งค์šฐ ์–ด๋ ต๋‹ค. ๋”ฅ๋Ÿฌ๋‹ ๊ธฐ๋ฒ•์ด ํ™œ์šฉ๋˜๊ธฐ ์ด์ „๊นข์ง€ ์ ์ ˆํ•œ ๊ทœ์น™์„ ๊ตฌ์„ฑํ•˜์—ฌ ์ž์—ฐ์–ด๋ฅผ ์ดํ•ดํ•˜๋ ค๋Š” ์ˆ˜ ๋งŽ์€ ์‹œ๋„๊ฐ€ ์žˆ์–ด์™”์ง€๋งŒ ๋ณ„๋กœ ์„ฑ๊ณต์ ์ด์ง€ ์•Š์•˜๋‹ค.1990๋…„๋Œ€๋ถ€ํ„ฐ ์ธํ„ฐ๋„ท์œผ๋กœ๋ถ€ํ„ฐ ๊ตฌํ•ด์ง„ ์—„์ฒญ๋‚œ ์–‘์˜ ํ…์ŠคํŠธ ๋ฐ์ดํ„ฐ์— ๋จธ์‹ ๋Ÿฌ๋‹ ๊ธฐ๋ฒ•์„์ ์šฉํ•˜๊ธฐ ์‹œ์ž‘ํ–ˆ๋‹ค. ๋‹จ, ์ฃผ์š” ๋ชฉ์ ์ด **์–ธ์–ด์˜ ์ดํ•ด**๊ฐ€ ์•„๋‹ˆ๋ผ ์•„๋ž˜ ์˜ˆ์ œ๋“ค์ฒ˜๋Ÿผ ์ž…๋ ฅ ํ…์ŠคํŠธ๋ฅผ ๋ถ„์„ํ•˜์—ฌ**ํ†ต๊ณ„์ ์œผ๋กœ ์œ ์šฉํ•œ ์ •๋ณด๋ฅผ ์˜ˆ์ธก**ํ•˜๋Š” ๋ฐฉํ–ฅ์œผ๋กœ ์ˆ˜์ •๋˜์—ˆ๋‹ค.- ํ…์ŠคํŠธ ๋ถ„๋ฅ˜: "์ด ๋ฌธ์žฅ์˜ ์ฃผ์ œ๋Š”?"- ๋‚ด์šฉ ํ•„ํ„ฐ๋ง: "์š•์„ค์ด ํฌํ•จ๋˜์—ˆ๋‚˜?"- ๊ฐ์„ฑ ๋ถ„์„: "๋‚ด์šฉ์ด ๊ธ์ •์ด์•ผ ๋ถ€์ •์ด์•ผ?"- ์–ธ์–ด ๋ชจ๋ธ๋ง: "์ด ๋ฌธ์žฅ์— ์ด์–ด ์–ด๋–ค ๋‹จ์–ด๊ฐ€ ์žˆ์–ด์•ผ ํ•˜์ง€?"- ๋ฒˆ์—ญ: "์ด๊ฑฐ๋ฅผ ํ•œ๊ตญ์–ด๋กœ ์–ด๋–ป๊ฒŒ ๋งํ•ด?"- ์š”์•ฝ: "์ด ๊ธฐ์‚ฌ๋ฅผ ํ•œ ์ค„๋กœ ์š”์•ฝํ•˜๋ฉด?"์ด์™€ ๊ฐ™์€ ๋ถ„์„์„ **์ž์—ฐ์–ด์ฒ˜๋ฆฌ**(NLP, Natural Language Processing)์ด๋ผ ํ•˜๋ฉฐ๋‹จ์–ด(words), ๋ฌธ์žฅ(sentences), ๋ฌธ๋‹จ(paragraphs) ๋“ฑ์—์„œ ์ฐพ์„ ์ˆ˜ ์žˆ๋Š”ํŒจํ„ด(pattern)์„ ์ธ์‹ํ•˜๋ ค ์‹œ๋„ํ•œ๋‹ค. **๋จธ์‹ ๋Ÿฌ๋‹ ํ™œ์šฉ** ์ž์—ฐ์–ด์ฒ˜๋ฆฌ๋ฅผ ์œ„ํ•ด 1990๋…„๋Œ€๋ถ€ํ„ฐ ์‹œ์ž‘๋œ ๋จธ์‹ ๋Ÿฌ๋‹ ํ™œ์šฉ์˜ ๋ณ€ํ™”๊ณผ์ •์€ ๋‹ค์Œ๊ณผ ๊ฐ™๋‹ค. - 1990 - 2010๋…„๋Œ€ ์ดˆ๋ฐ˜: ๊ฒฐ์ •ํŠธ๋ฆฌ(decision trees), ๋กœ์ง€์Šคํ‹ฑ ํšŒ๊ท€(logistic regression) ๋ชจ๋ธ์ด ์ฃผ๋กœ ํ™œ์šฉ๋จ.- 2014-2015: LSTM ๋“ฑ ์‹œํ€€์Šค ์ฒ˜๋ฆฌ ์•Œ๊ณ ๋ฆฌ์ฆ˜ ํ™œ์šฉ ์‹œ์ž‘- 2015-2017: (์–‘๋ฐฉํ–ฅ) ์ˆœํ™˜์‹ ๊ฒฝ๋ง์ด ๊ธฐ๋ณธ์ ์œผ๋กœ ํ™œ์šฉ๋จ.- 2017-2018: ํŠธ๋žœ์Šคํฌ๋จธ(Transformer) ๋ชจ๋ธ์ด ์ตœ๊ณ ์˜ ์„ฑ๋Šฅ ๋ฐœํœ˜ํ•˜๋ฉฐ, ๋งŽ์€ ๋‚œ์ œ๋“ค์„ ํ•ด๊ฒฐํ•จ. ํ˜„์žฌ ๊ฐ€์žฅ ๋งŽ์ด ํ™œ์šฉ๋˜๋Š” ๋ชจ๋ธ์ž„. 11.2 ํ…์ŠคํŠธ ๋ฒกํ„ฐํ™” ๋”ฅ๋Ÿฌ๋‹ ๋ชจ๋ธ์€ ํ…์ŠคํŠธ ์ž์ฒด๋ฅผ ์ฒ˜๋ฆฌํ•  ์ˆ˜ ์—†๋‹ค.๋”ฐ๋ผ์„œ ํƒ์ŠคํŠธ๋ฅผ ์ˆ˜์น˜ํ˜• ํ…์„œ(numeric tensors)๋กœ ๋ณ€ํ™˜ํ•˜๋Š” **ํ…์ŠคํŠธ ๋ฒกํ„ฐํ™”**(text vectorization) ๊ณผ์ •์ด ์š”๊ตฌ๋˜๋ฉฐ๋ณดํ†ต ๋‹ค์Œ ์„ธ ๋‹จ๊ณ„๋ฅผ ๋”ฐ๋ฅธ๋‹ค.1. **ํ…์ŠคํŠธ ํ‘œ์ค€ํ™”**(text standardization): ์†Œ๋ฌธ์žํ™”, ๋งˆ์นจํ‘œ ์ œ๊ฑฐ ๋“ฑ๋“ฑ1. **ํ† ํฐํ™”**(tokenization): ๊ธฐ๋ณธ ๋‹จ์œ„์˜ **์œ ๋‹›**(units)์œผ๋กœ ์ชผ๊ฐœ๊ธฐ - ํ† ํฐ ์˜ˆ์ œ: ๋ฌธ์ž, ๋‹จ์–ด, ๋‹จ์–ธ๋“ค์˜ ์ง‘ํ•ฉ ๋“ฑ๋“ฑ1. 
**์–ดํœ˜ ์ƒ‰์ธํ™”**(vocabulary indexing): ํ† ํฐ ๊ฐ๊ฐ์„ ํ•˜๋‚˜์˜ ์ˆ˜์น˜ํ˜• ๋ฒกํ„ฐ(numerical vector)๋กœ ๋ณ€ํ™˜.์•„๋ž˜ ๊ทธ๋ฆผ์€ ํ…์ŠคํŠธ ๋ฒกํ„ฐํ™”์˜ ๊ธฐ๋ณธ์ ์ธ ๊ณผ์ •์„ ์ž˜ ๋ณด์—ฌ์ค€๋‹ค. ๊ทธ๋ฆผ ์ถœ์ฒ˜: [Deep Learning with Python(Manning MEAP)](https://www.manning.com/books/deep-learning-with-python-second-edition) **ํ…์ŠคํŠธ ํ‘œ์ค€ํ™”** ๋‹ค์Œ ๋‘ ๋ฌธ์žฅ์„ ํ‘œ์ค€ํ™”๋ฅผ ํ†ตํ•ด ๋™์ผํ•œ ๋ฌธ์žฅ์œผ๋กœ ๋ณ€ํ™˜ํ•ด๋ณด์ž.- "sunset came. i was staring at the Mexico sky. Isnt nature splendid??"- "Sunset came; I stared at the M&eacute;xico sky. Isn't nature splendid?" ์˜ˆ๋ฅผ ๋“ค์–ด ๋‹ค์Œ ํ‘œ์ค€ํ™” ๊ธฐ๋ฒ•์„ ์‚ฌ์šฉํ•  ์ˆ˜ ์žˆ๋‹ค.- ๋ชจ๋‘ ์†Œ๋ฌธ์žํ™”- `.`, `;`, `?`, `'` ๋“ฑ ํŠน์ˆ˜ ๊ธฐํ˜ธ ์ œ๊ฑฐ- ํŠน์ˆ˜ ์•ŒํŒŒ๋ฒณ ๋ณ€ํ™˜: "&eacute;"๋ฅผ "e"๋กœ, "&aelig;"๋ฅผ "ae"๋กœ ๋“ฑ๋“ฑ- ๋™์‚ฌ/๋ช…์‚ฌ์˜ ๊ธฐ๋ณธํ˜• ํ™œ์šฉ: "cats"๋ฅผ "[cat]"๋กœ, "was staring"๊ณผ "stared"๋ฅผ "[stare]"๋กœ ๋“ฑ๋“ฑ. ๊ทธ๋Ÿฌ๋ฉด ์œ„ ๋‘ ๋ฌธ์žฅ ๋ชจ๋‘ ์•„๋ž˜ ๋ฌธ์žฅ์œผ๋กœ ๋ณ€ํ™˜๋œ๋‹ค. - "sunset came i [stare] at the mexico sky isnt nature splendid" ํ‘œ์ค€ํ™” ๊ณผ์ •์„ ํ†ตํ•ด ์–ด๋А ์ •๋„์˜ ์ •๋ณด๋ฅผ ์ƒ์‹คํ•˜๊ฒŒ ๋˜์ง€๋งŒํ•™์Šตํ•ด์•ผํ•  ๋‚ด์šฉ์„ ์ค„์—ฌ ์ผ๋ฐ˜ํ™” ์„ฑ๋Šฅ์ด ๋ณด๋‹ค ์ข‹์€ ๋ชจ๋ธ์„ ํ›ˆ๋ จ์‹œํ‚ค๋Š” ์žฅ์ ์ด ์žˆ๋‹ค.ํ•˜์ง€๋งŒ ๋ถ„์„ ๋ชฉ์ ์— ๋”ฐ๋ผ ํ‘œ์ค€ํ™” ๊ธฐ๋ฒ•์€ ๊ฒฝ์šฐ์— ๋”ฐ๋ผ ๋‹ฌ๋ผ์งˆ ์ˆ˜ ์žˆ์Œ์— ์ฃผ์˜ํ•ด์•ผ ํ•œ๋‹ค. ์˜ˆ๋ฅผ ๋“ค์–ด ์ธํ„ฐ๋ทฐ ๊ธฐ์‚ฌ์˜ ๊ฒฝ์šฐ ๋ฌผ์Œํ‘œ(`?`)๋Š” ์ œ๊ฑฐํ•˜๋ฉด ์•ˆ๋œ๋‹ค. **ํ† ํฐํ™”** ํ…์ŠคํŠธ ํ‘œ์ค€ํ™” ์ดํ›„ ๋ฐ์ดํ„ฐ ๋ถ„์„์˜ ๊ธฐ๋ณธ ๋‹จ์œ„์ธ ํ† ํฐ์œผ๋กœ ์ชผ๊ฐœ์•ผ ํ•œ๋‹ค.๋ณดํ†ต ์•„๋ž˜ ์„ธ ๊ฐ€์ง€ ๋ฐฉ์‹ ์ค‘์— ํ•˜๋‚˜๋ฅผ ์‚ฌ์šฉํ•œ๋‹ค.- ๋‹จ์–ด ๊ธฐ์ค€ ํ† ํฐํ™”(word-level tokenization) - ๊ณต๋ฐฑ์œผ๋กœ ๊ตฌ๋ถ„๋œ ๋‹จ์–ด๋“ค๋กœ ์ชผ๊ฐœ๊ธฐ. - ๊ฒฝ์šฐ์— ๋”ฐ๋ผ ๋™์‚ฌ ์–ด๊ทผ๊ณผ ์–ด๋ฏธ๋ฅผ ๊ตฌ๋ถ„ํ•˜๊ธฐ๋„ ํ•จ: "star+ing", "call+ed" ๋“ฑ๋“ฑ- N-๊ทธ๋žจ ํ† ํฐํ™”(N-gram tokenization) - N-๊ทธ๋žจ ํ† ํฐ: ์—ฐ์†์œผ๋กœ ์œ„์น˜ํ•œ N ๊ฐœ(์ดํ•˜)์˜ ๋‹จ์–ด ๋ฌถ์Œ - ์˜ˆ์ œ: "the cat", "he was" ๋“ฑ์€ 2-๊ทธ๋žจ ํ† ํฐ์ด๋‹ค.- ๋ฌธ์ž ๊ธฐ์ค€ ํ† ํฐํ™”(character-level tokenization) - ํ•˜๋‚˜์˜ ๋ฌธ์ž๊ฐ€ ํ•˜๋‚˜์˜ ํ† ํฐ์ž„. - ๋ฌธ์žฅ ์ƒ์„ฑ, ์Œ์„ฑ ์ธ์‹ ๋“ฑ์—์„œ ํ™œ์šฉ๋จ. ์ผ๋ฐ˜์ ์œผ๋กœ ๋ฌธ์ž ๊ธฐ์ค€ ํ† ํฐํ™”๋Š” ์ž˜ ์‚ฌ์šฉ๋˜์ง€ ์•Š๋Š”๋‹ค. ์—ฌ๊ธฐ์„œ๋„ ๋‹จ์–ด ๊ธฐ์ค€ ๋˜๋Š” N-๊ทธ๋žจ ํ† ํฐํ™”๋งŒ ์ด์šฉํ•œ๋‹ค.- ๋‹จ์–ด ๊ธฐ์ค€ ํ† ํฐํ™”: ๋‹จ์–ด๋“ค์˜ ์ˆœ์„œ๋ฅผ ์ค‘์š”์‹œํ•˜๋Š” **์ˆœ์ฐจ ๋ชจ๋ธ**(sequence models)์„ ์‚ฌ์šฉํ•  ๊ฒฝ์šฐ ํ™œ์šฉ- N-๊ทธ๋žจ ํ† ํฐํ™”: ๋‹จ์–ธ๋“ค์˜ ์ˆœ์„œ๋ฅผ ๋ณ„๋กœ ์ƒ๊ด€ํ•˜์ง€ ์•Š๋Š” **๋‹จ์–ด์ฃผ๋จธ๋‹ˆ(bag-of-words, BOW)** ๋ชจ๋ธ์„ ์‚ฌ์šฉํ•  ๊ฒฝ์šฐ ํ™œ์šฉ - N-๊ทธ๋žจ: ๋‹จ์–ด๋“ค ์‚ฌ์ด์˜ ์ˆœ์„œ์— ๋Œ€ํ•œ ์ง€์—ญ ์ •๋ณด๋ฅผ ์–ด๋А ์ •๋„ ์œ ์ง€ํ•จ. - ์ผ์ข…์˜ ํŠน์„ฑ ๊ณตํ•™(feature engineering) ๊ธฐ๋ฒ•์ด๋ฉฐ ๋”ฐ๋ผ์„œ ์–•์€ ํ•™์Šต ๊ธฐ๋ฐ˜์˜ ์–ธ์–ด์ฒ˜๋ฆฌ(shallow language-processing) ๋ชจ๋ธ์— ํ™œ์šฉ๋จ. - 1์ฐจ์› ํ•ฉ์„ฑ๊ณฑ ์‹ ๊ฒฝ๋ง, ์ˆœํ™˜ ์‹ ๊ฒฝ๋ง, ํŠธ๋žœ์Šคํฌ๋จธ ๋“ฑ์€ ์ด ๊ธฐ๋ฒ•์„ ์‚ฌ์šฉํ•˜์ง€ ์•Š์•„๋„ ๋จ. **๋‹จ์–ด์ฃผ๋จธ๋‹ˆ(bag-of-words)**๋Š” N-ํ† ํฐ์œผ๋กœ ๊ตฌ์„ฑ๋œ ์ง‘ํ•ฉ์„ ์˜๋ฏธํ•˜๋ฉฐ **N-๊ทธ๋žจ ์ฃผ๋จธ๋‹ˆ(bag-of-N-grams)**๋ผ๊ณ  ๋ถˆ๋ฆฌ๊ธฐ๋„ ํ•œ๋‹ค.์˜ˆ๋ฅผ ๋“ค์–ด "the cat sat on the mat." ๋ฌธ์žฅ์— ๋Œ€ํ•œ 2-๊ทธ๋žจ ์ง‘ํ•ฉ๊ณผ 3-๊ทธ๋žจ ์ง‘ํ•ฉ์€ ๊ฐ๊ฐ ๋‹ค์Œ๊ณผ ๊ฐ™๋‹ค. 
- 2-gram set

```
{"the", "the cat", "cat", "cat sat", "sat",
 "sat on", "on", "on the", "the mat", "mat"}
```

- 3-gram set

```
{"the", "the cat", "cat", "cat sat", "the cat sat",
 "sat", "sat on", "on", "cat sat on", "on the",
 "sat on the", "the mat", "mat", "on the mat"}
```

**Vocabulary indexing**

Normally, an index of all tokens occurring in the training set is built first. Each token is then converted into a numeric tensor based on that index, using one-hot, multi-hot, or similar encodings. As explained in [Chapter 4](https://codingalzi.github.io/dlp/notebooks/dlp04_getting_started_with_neural_networks.html) and [Chapter 5](https://codingalzi.github.io/dlp/notebooks/dlp05_fundamentals_of_ml.html), vocabulary indexing is usually restricted to the 20,000 or 30,000 most frequent words. There, `num_words=10000` was used so that the training set only contained the 10,000 most frequently used words.

```python
from tensorflow.keras.datasets import imdb
(train_data, train_labels), (test_data, test_labels) = imdb.load_data(num_words=10000)
```

Keras's imdb dataset comes already preprocessed into sequences of integers. Here, however, we start from the raw imdb dataset and carry out the preprocessing ourselves. Two conventions should be kept in mind.

- OOV index: every word not included in the vocabulary index is mapped to 1. When decoded back into an ordinary sentence, it shows up as "[UNK]".
    - OOV = Out Of Vocabulary
    - UNK = Unknown
- mask token: marks tokens to be ignored and is mapped to 0.
    - For example, padding used to equalize sentence lengths can be filled with 0.
    ```
    [[5, 7, 124, 4, 89]
     [8, 34, 21, 0, 0]]
    ```

**Using Keras's `TextVectorization` layer**

Keras's `TextVectorization` layer takes care of the text vectorization described so far; its basic usage is as follows.
from tensorflow.keras.layers import TextVectorization text_vectorization = TextVectorization( output_mode="int", )
_____no_output_____
MIT
notebooks/dlp11_part01_introduction.ipynb
codingalzi/dlp
The main default settings used when configuring a `TextVectorization` layer are:

- standardization: lowercase and strip punctuation
    - `standardize='lower_and_strip_punctuation'`
- tokenization: split into words
    - `ngrams=None`
    - `split='whitespace'`
- output mode: format of the output tensor
    - `output_mode="int"`

Custom standardization and tokenization functions can be supplied instead. Note, however, that they must operate on `tf.string` tensors rather than on Python's built-in `str` type.

The default standardization and tokenization behave the same as the two functions below.

- `custom_standardization_fn()`
- `custom_split_fn()`
import re
import string
import tensorflow as tf

# Standardization: lowercase and remove punctuation
def custom_standardization_fn(string_tensor):
    lowercase_string = tf.strings.lower(string_tensor)
    return tf.strings.regex_replace(
        lowercase_string, f"[{re.escape(string.punctuation)}]", "")

# Tokenization: split on whitespace
def custom_split_fn(string_tensor):
    return tf.strings.split(string_tensor)

# Use the custom standardization and splitting functions
text_vectorization = TextVectorization(
    output_mode="int",
    standardize=custom_standardization_fn,
    split=custom_split_fn,
)
_____no_output_____
MIT
notebooks/dlp11_part01_introduction.ipynb
codingalzi/dlp
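A quick illustrative check, not in the original notebook, that these functions behave as described on `tf.string` tensors:

```python
# Illustrative only: feed a tf.string tensor through the custom functions.
s = tf.constant("Sunset came; I stared at the sky.")
print(custom_standardization_fn(s).numpy())
# b'sunset came i stared at the sky'
print(custom_split_fn(custom_standardization_fn(s)).numpy())
# [b'sunset' b'came' b'i' b'stared' b'at' b'the' b'sky']
```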
**Example** Let's run text vectorization on the dataset below.
dataset = [ "I write, erase, rewrite", "Erase again, and then", "A poppy blooms.", ] text_vectorization.adapt(dataset)
_____no_output_____
MIT
notebooks/dlp11_part01_introduction.ipynb
codingalzi/dlp
์ƒ์„ฑ๋œ ์–ดํœ˜ ์ƒ‰์ธ์€ ๋‹ค์Œ๊ณผ ๊ฐ™๋‹ค.
vocabulary = text_vectorization.get_vocabulary() vocabulary
_____no_output_____
MIT
notebooks/dlp11_part01_introduction.ipynb
codingalzi/dlp
์ƒ์„ฑ๋œ ์–ดํœ˜ ์ƒ‰์ธ์„ ํ™œ์šฉํ•˜์—ฌ ์ƒˆ๋กœ์šด ๋ฌธ์žฅ์„ ๋ฒกํ„ฐํ™” ํ•ด๋ณด์ž.
test_sentence = "I write, rewrite, and still rewrite again" encoded_sentence = text_vectorization(test_sentence) print(encoded_sentence)
tf.Tensor([ 7 3 5 9 1 5 10], shape=(7,), dtype=int64)
MIT
notebooks/dlp11_part01_introduction.ipynb
codingalzi/dlp
๋ฒกํ„ฐํ™”๋œ ํ…์„œ๋กœ๋ถ€ํ„ฐ ๋ฌธ์žฅ์„ ๋ณต์›ํ•˜๋ฉด ํ‘œ์ค€ํ™”๋œ ๋ฌธ์žฅ์ด ์ƒ์„ฑ๋œ๋‹ค.
inverse_vocab = dict(enumerate(vocabulary)) decoded_sentence = " ".join(inverse_vocab[int(i)] for i in encoded_sentence) print(decoded_sentence)
i write rewrite and [UNK] rewrite again
MIT
notebooks/dlp11_part01_introduction.ipynb
codingalzi/dlp
**How to use the `TextVectorization` layer**

The `TextVectorization` layer is not supported on GPUs or TPUs. Using it directly inside a model can therefore slow down training and is not recommended; here we instead preprocess the datasets independently of the model. When deploying a trained model to production, however, it is better to attach the `TextVectorization` layer to the finished model. This is covered in more detail in the appendix later on.

11.3 Representing groups of words: sets and sequences

As mentioned earlier, NLP models differ in how they handle groups of words.

- bag-of-words models
    - ignore word order; treat a group of words as a set of words
    - mainly used until 2015
- sequence models
    - recurrent models
        - treat word order like the steps of a time series
        - mainly used in 2015-2016
    - Transformer architecture
        - ignores order in principle, but is able to learn word positions
        - the default choice since 2017

Here we use the IMDB movie review dataset to show how both kinds of models are used and how they differ.

11.3.1 Preparing the IMDB movie review data

Unlike before, this time we download and preprocess the IMDB dataset ourselves.

Preparation step 1: download the dataset and unpack it. Unpacking creates a directory with the following structure:

```
aclImdb/
...train/
......pos/
......neg/
...test/
......pos/
......neg/
```

The `pos` and `neg` subdirectories of `train` contain 12,500 positive and 12,500 negative reviews, respectively.

*Note*: on Windows, the code below is supported only from recent builds of Windows 10 or from Windows 11.
!curl -O https://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz !tar -xf aclImdb_v1.tar.gz
% Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 100 80.2M 100 80.2M 0 0 26.1M 0 0:00:03 0:00:03 --:--:-- 26.1M
MIT
notebooks/dlp11_part01_introduction.ipynb
codingalzi/dlp
The `aclImdb/train/unsup` subdirectory is not needed, so we delete it.
if 'google.colab' in str(get_ipython()): !rm -r aclImdb/train/unsup else: import shutil unsup_path = './aclImdb/train/unsup' shutil.rmtree(unsup_path)
_____no_output_____
MIT
notebooks/dlp11_part01_introduction.ipynb
codingalzi/dlp
๊ธ์ • ๋ฆฌ๋ทฐ ํ•˜๋‚˜์˜ ๋‚ด์šฉ์„ ์‚ดํŽด๋ณด์ž.๋ชจ๋ธ ๊ตฌ์„ฑ ์ด์ „์— ํ›ˆ๋ จ ๋ฐ์ดํ„ฐ์…‹์„ ์‚ดํŽด ๋ณด๊ณ ๋ชจ๋ธ์— ๋Œ€ํ•œ ์ง๊ด€์„ ๊ฐ–๋Š” ๊ณผ์ •์ด ํ•ญ์ƒ ํ•„์š”ํ•˜๋‹ค.
if 'google.colab' in str(get_ipython()): !cat aclImdb/train/pos/4077_10.txt else: with open('aclImdb/train/pos/4077_10.txt', 'r') as f: text = f.read() print(text)
I first saw this back in the early 90s on UK TV, i did like it then but i missed the chance to tape it, many years passed but the film always stuck with me and i lost hope of seeing it TV again, the main thing that stuck with me was the end, the hole castle part really touched me, its easy to watch, has a great story, great music, the list goes on and on, its OK me saying how good it is but everyone will take there own best bits away with them once they have seen it, yes the animation is top notch and beautiful to watch, it does show its age in a very few parts but that has now become part of it beauty, i am so glad it has came out on DVD as it is one of my top 10 films of all time. Buy it or rent it just see it, best viewing is at night alone with drink and food in reach so you don't have to stop the film.<br /><br />Enjoy
MIT
notebooks/dlp11_part01_introduction.ipynb
codingalzi/dlp
Preparation step 2: prepare the validation set. We set aside 20% of the training set as a validation set. To do so, create an `aclImdb/val` directory, shuffle both the positive and the negative training reviews randomly, and move 20% of each to the validation directory.
import os, pathlib, shutil, random

base_dir = pathlib.Path("aclImdb")
val_dir = base_dir / "val"
train_dir = base_dir / "train"

for category in ("neg", "pos"):
    os.makedirs(val_dir / category)          # create the val directory
    files = os.listdir(train_dir / category)
    random.Random(1337).shuffle(files)       # shuffle the training files randomly
    num_val_samples = int(0.2 * len(files))  # take 20% and move it to the validation set
    val_files = files[-num_val_samples:]
    for fname in val_files:
        shutil.move(train_dir / category / fname,
                    val_dir / category / fname)
_____no_output_____
MIT
notebooks/dlp11_part01_introduction.ipynb
codingalzi/dlp
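As a quick illustrative sanity check (not in the original notebook), counting the files should show 10,000 training and 2,500 validation reviews per class after the move:

```python
# Illustrative check: 12,500 reviews per class, of which 20% moved to val.
for split_dir in (train_dir, val_dir):
    for category in ("neg", "pos"):
        n = len(os.listdir(split_dir / category))
        print(split_dir / category, n)
```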
Preparation step 3: prepare tensor datasets. We use the `text_dataset_from_directory()` function to build the training, validation, and test sets. All three are of type `Dataset`, with a batch size of 32.
from tensorflow import keras batch_size = 32 train_ds = keras.utils.text_dataset_from_directory( "aclImdb/train", batch_size=batch_size ) val_ds = keras.utils.text_dataset_from_directory( "aclImdb/val", batch_size=batch_size ) test_ds = keras.utils.text_dataset_from_directory( "aclImdb/test", batch_size=batch_size )
Found 20000 files belonging to 2 classes. Found 5000 files belonging to 2 classes. Found 25000 files belonging to 2 classes.
MIT
notebooks/dlp11_part01_introduction.ipynb
codingalzi/dlp
๊ฐ ๋ฐ์ดํ„ฐ์…‹์€ ๋ฐฐ์น˜๋กœ ๊ตฌ๋ถ„๋˜๋ฉฐ์ž…๋ ฅ์€ `tf.string` ํ…์„œ์ด๊ณ , ํƒ€๊นƒ์€ `int32` ํ…์„œ์ด๋‹ค.ํฌ๊ธฐ๋Š” ๋ชจ๋‘ 32์ด๋ฉฐ ์ง€์ •๋œ ๋ฐฐ์น˜ ํฌ๊ธฐ์ด๋‹ค.์˜ˆ๋ฅผ ๋“ค์–ด, ์ฒซ์งธ ๋ฐฐ์น˜์˜ ์ž…๋ ฅ๊ณผ ํƒ€๊นƒ ๋ฐ์ดํ„ฐ์˜ ์ •๋ณด๋Š” ๋‹ค์Œ๊ณผ ๊ฐ™๋‹ค.
for inputs, targets in train_ds:
    print("inputs.shape:", inputs.shape)
    print("inputs.dtype:", inputs.dtype)
    print("targets.shape:", targets.shape)
    print("targets.dtype:", targets.dtype)
    # example: the first review of the first batch
    print("inputs[0]:", inputs[0])
    print("targets[0]:", targets[0])
    break
inputs.shape: (32,) inputs.dtype: <dtype: 'string'> targets.shape: (32,) targets.dtype: <dtype: 'int32'> inputs[0]: tf.Tensor(b'The film begins with a bunch of kids in reform school and focuses on a kid named \'Gabe\', who has apparently worked hard to earn his parole. Gabe and his sister move to a new neighborhood to make a fresh start and soon Gabe meets up with the Dead End Kids. The Kids in this film are little punks, but they are much less antisocial than they\'d been in other previous films and down deep, they are well-meaning punks. However, in this neighborhood there are also some criminals who are perpetrating insurance fraud through arson and see Gabe as a convenient scapegoat--after all, he\'d been to reform school and no one would believe he was innocent once he was framed. So, when Gabe is about ready to be sent back to "The Big House", it\'s up to the rest of the gang to save him and expose the real crooks.<br /><br />The "Dead End Kids" appeared in several Warner Brothers films in the late 1930s and the films were generally very good (particularly ANGELS WITH DIRTY FACES). However, after the boys\' contracts expired, they went on to Monogram Studios and the films, to put it charitably, were very weak and formulaic--with Huntz Hall and Leo Gorcey being pretty much the whole show and the group being renamed "The Bowery Boys". Because ANGELS WASH THEIR FACES had the excellent writing and production values AND Hall and Gorcey were not constantly mugging for the camera, it\'s a pretty good film--and almost earns a score of 7 (it\'s REAL close). In fact, while this isn\'t a great film aesthetically, it\'s sure a lot of fun to watch, so I will give it a 7! Sure, it was a tad hokey-particularly towards the end when the kids take the law into their own hands and Reagan ignores the Bill of Rights--but it was also quite entertaining. The Dead End Kids are doing their best performances and Ronald Reagan and Ann Sheridan provided excellent support. Sure, this part of the film was illogical and impossible but somehow it was still funny and rather charming--so if you can suspend disbelief, it works well.', shape=(), dtype=string) targets[0]: tf.Tensor(1, shape=(), dtype=int32)
MIT
notebooks/dlp11_part01_introduction.ipynb
codingalzi/dlp
11.3.2 The bag-of-words approach

We first have to decide which N-grams to use as the tokens that fill the bag of words.

- unigrams: each single word is a token
- N-grams: tokens consist of at most N consecutive words

**Approach 1: binary encoding of unigrams**

Processing the sentence "the cat sat on the mat" with unigrams, for example, produces the following bag of words. Since it is handled as a set, word order is ignored entirely.

```
{"cat", "mat", "on", "sat", "the"}
```

Every sentence is then turned into a 1D binary tensor whose length equals the size of the vocabulary index; in other words, it is converted with multi-hot encoding. This is the same way sentences were encoded in [Chapter 4](https://codingalzi.github.io/dlp/notebooks/dlp04_getting_started_with_neural_networks.html) and [Chapter 5](https://codingalzi.github.io/dlp/notebooks/dlp05_fundamentals_of_ml.html). A toy sketch of the encoding done by hand follows; the `output_mode="multi_hot"` option of the `TextVectorization` class, used in the next cell, handles exactly this at scale.
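A toy illustration, not from the book, of multi-hot encoding by hand over a hypothetical five-word vocabulary:

```python
# Toy sketch: multi-hot encoding over a made-up 5-word vocabulary.
import numpy as np

vocab = {"cat": 0, "mat": 1, "on": 2, "sat": 3, "the": 4}

def multi_hot(tokens, vocab):
    vec = np.zeros(len(vocab), dtype="float32")
    for token in tokens:
        if token in vocab:  # OOV tokens are simply skipped here
            vec[vocab[token]] = 1.0
    return vec

print(multi_hot("the cat sat on the mat".split(), vocab))
# -> [1. 1. 1. 1. 1.] : only presence is recorded; counts and order are lost
```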
from tensorflow.keras.layers import TextVectorization

text_vectorization = TextVectorization(
    max_tokens=20000,
    output_mode="multi_hot",
)

# build the vocabulary index from the raw training texts
text_only_train_ds = train_ds.map(lambda x, y: x)
text_vectorization.adapt(text_only_train_ds)
_____no_output_____
MIT
notebooks/dlp11_part01_introduction.ipynb
codingalzi/dlp
์ƒ์„ฑ๋œ ์–ดํœ˜์ƒ‰์ธ์„ ์ด์šฉํ•˜์—ฌ ํ›ˆ๋ จ์…‹, ๊ฒ€์ฆ์…‹, ํ…Œ์ŠคํŠธ์…‹ ๋ชจ๋‘ ๋ฒกํ„ฐํ™”ํ•œ๋‹ค.
binary_1gram_train_ds = train_ds.map(lambda x, y: (text_vectorization(x), y)) binary_1gram_val_ds = val_ds.map(lambda x, y: (text_vectorization(x), y)) binary_1gram_test_ds = test_ds.map(lambda x, y: (text_vectorization(x), y))
_____no_output_____
MIT
notebooks/dlp11_part01_introduction.ipynb
codingalzi/dlp
๋ณ€ํ™˜๋œ ์ฒซ์งธ ๋ฐฐ์น˜์˜ ์ž…๋ ฅ๊ณผ ํƒ€๊นƒ ๋ฐ์ดํ„ฐ์˜ ์ •๋ณด๋Š” ๋‹ค์Œ๊ณผ ๊ฐ™๋‹ค.`max_tokens=20000`์œผ๋กœ ์ง€์ •ํ•˜์˜€๊ธฐ์— ๋ชจ๋“  ๋ฌธ์žฅ์€ ๊ธธ์ด๊ฐ€ 2๋งŒ์ธ ๋ฒกํ„ฐ๋กœ ๋ณ€ํ™˜๋˜์—ˆ๋‹ค.
for inputs, targets in binary_1gram_train_ds: print("inputs.shape:", inputs.shape) print("inputs.dtype:", inputs.dtype) print("targets.shape:", targets.shape) print("targets.dtype:", targets.dtype) print("inputs[0]:", inputs[0]) print("targets[0]:", targets[0]) break
inputs.shape: (32, 20000) inputs.dtype: <dtype: 'float32'> targets.shape: (32,) targets.dtype: <dtype: 'int32'> inputs[0]: tf.Tensor([1. 1. 1. ... 0. 0. 0.], shape=(20000,), dtype=float32) targets[0]: tf.Tensor(0, shape=(), dtype=int32)
MIT
notebooks/dlp11_part01_introduction.ipynb
codingalzi/dlp
**๋ฐ€์ง‘ ๋ชจ๋ธ ์ง€์ •** ๋‹จ์–ด์ฃผ๋จธ๋‹ˆ ๋ชจ๋ธ๋กœ ์—ฌ๊ธฐ์„œ๋Š” ๋ฐ€์ง‘ ๋ชจ๋ธ์„ ์‚ฌ์šฉํ•œ๋‹ค. `get_model()` ํ•จ์ˆ˜๊ฐ€ ์ปดํŒŒ์ผ ๋œ ๋‹จ์ˆœํ•œ ๋ฐ€์ง‘ ๋ชจ๋ธ์„ ๋ฐ˜ํ™˜ํ•œ๋‹ค.๋ชจ๋ธ์˜ ์ถœ๋ ฅ๊ฐ’์€ ๊ธ์ •์ผ ํ™•๋ฅ ์ด๋ฉฐ, ์ตœ์ƒ์œ„ ์ธต์˜ ํ™œ์„ฑํ™” ํ•จ์ˆ˜๋กœ `sigmoid`๋ฅผ ์‚ฌ์šฉํ•œ๋‹ค.
from tensorflow import keras
from tensorflow.keras import layers

def get_model(max_tokens=20000, hidden_dim=16):
    inputs = keras.Input(shape=(max_tokens,))
    x = layers.Dense(hidden_dim, activation="relu")(inputs)
    x = layers.Dropout(0.5)(x)
    outputs = layers.Dense(1, activation="sigmoid")(x)  # probability of a positive review
    model = keras.Model(inputs, outputs)
    model.compile(optimizer="rmsprop",
                  loss="binary_crossentropy",
                  metrics=["accuracy"])
    return model

model = get_model()
model.summary()
Model: "model" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= input_1 (InputLayer) [(None, 20000)] 0 dense (Dense) (None, 16) 320016 dropout (Dropout) (None, 16) 0 dense_1 (Dense) (None, 1) 17 ================================================================= Total params: 320,033 Trainable params: 320,033 Non-trainable params: 0 _________________________________________________________________
MIT
notebooks/dlp11_part01_introduction.ipynb
codingalzi/dlp
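As a quick check on the summary above, the parameter counts work out as expected: the hidden `Dense` layer has 20,000 × 16 weights plus 16 biases, i.e. 320,016 parameters, and the output layer has 16 × 1 + 1 = 17, giving 320,033 in total.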
**๋ชจ๋ธ ํ›ˆ๋ จ** ๋ฐ€์ง‘ ๋ชจ๋ธ ํ›ˆ๋ จ๊ณผ์ •์€ ํŠน๋ณ„ํ•œ ๊ฒŒ ์—†๋‹ค.ํ›ˆ๋ จ ํ›„ ํ…Œ์ŠคํŠธ์…‹์— ๋Œ€ํ•œ ์ •ํ™•๋„๊ฐ€ 89% ๋ณด๋‹ค ์กฐ๊ธˆ ๋‚ฎ๊ฒŒ ๋‚˜์˜จ๋‹ค.์ตœ๊ณ  ์„ฑ๋Šฅ์˜ ๋ชจ๋ธ์ด ํ…Œ์ŠคํŠธ์…‹์— ๋Œ€ํ•ด 95% ์ •๋„ ์ •ํ™•๋„๋ฅผ ๋‚ด๋Š” ๊ฒƒ๋ณด๋‹ค๋Š” ๋‚ฎ์ง€๋งŒ๋ฌด์ž‘์œ„๋กœ ์ฐ๋Š” ๋ชจ๋ธ๋ณด๋‹ค๋Š” ํ›จ์”ฌ ์ข‹์€ ๋ชจ๋ธ์ด๋‹ค.
callbacks = [ keras.callbacks.ModelCheckpoint("binary_1gram.keras", save_best_only=True) ] model.fit(binary_1gram_train_ds.cache(), validation_data=binary_1gram_val_ds.cache(), epochs=10, callbacks=callbacks) model = keras.models.load_model("binary_1gram.keras") print(f"Test acc: {model.evaluate(binary_1gram_test_ds)[1]:.3f}")
Epoch 1/10 625/625 [==============================] - 10s 16ms/step - loss: 0.4074 - accuracy: 0.8277 - val_loss: 0.2792 - val_accuracy: 0.8908 Epoch 2/10 625/625 [==============================] - 3s 5ms/step - loss: 0.2746 - accuracy: 0.8981 - val_loss: 0.2774 - val_accuracy: 0.8964 Epoch 3/10 625/625 [==============================] - 4s 6ms/step - loss: 0.2471 - accuracy: 0.9115 - val_loss: 0.2872 - val_accuracy: 0.8976 Epoch 4/10 625/625 [==============================] - 3s 5ms/step - loss: 0.2246 - accuracy: 0.9244 - val_loss: 0.3187 - val_accuracy: 0.8936 Epoch 5/10 625/625 [==============================] - 3s 6ms/step - loss: 0.2156 - accuracy: 0.9313 - val_loss: 0.3164 - val_accuracy: 0.8960 Epoch 6/10 625/625 [==============================] - 3s 6ms/step - loss: 0.2108 - accuracy: 0.9341 - val_loss: 0.3355 - val_accuracy: 0.8934 Epoch 7/10 625/625 [==============================] - 4s 6ms/step - loss: 0.2052 - accuracy: 0.9366 - val_loss: 0.3354 - val_accuracy: 0.8944 Epoch 8/10 625/625 [==============================] - 4s 6ms/step - loss: 0.2017 - accuracy: 0.9365 - val_loss: 0.3582 - val_accuracy: 0.8940 Epoch 9/10 625/625 [==============================] - 4s 6ms/step - loss: 0.2013 - accuracy: 0.9394 - val_loss: 0.3497 - val_accuracy: 0.8938 Epoch 10/10 625/625 [==============================] - 4s 6ms/step - loss: 0.2043 - accuracy: 0.9394 - val_loss: 0.3631 - val_accuracy: 0.8940 782/782 [==============================] - 8s 10ms/step - loss: 0.2861 - accuracy: 0.8885 Test acc: 0.888
MIT
notebooks/dlp11_part01_introduction.ipynb
codingalzi/dlp
**๋ฐฉ์‹ 2: ๋ฐ”์ด๊ทธ๋žจ ๋ฐ”์ด๋„ˆ๋ฆฌ ์ธ์ฝ”๋”ฉ** ๋ฐ”์ด๊ทธ๋žจ(2-grams)์„ ์œ ๋‹ˆ๊ทธ๋žจ ๋Œ€์‹  ์ด์šฉํ•ด๋ณด์ž. ์˜ˆ๋ฅผ ๋“ค์–ด "the cat sat on the mat" ๋ฌธ์žฅ์„ ๋ฐ”์ด๊ทธ๋žจ์œผ๋กœ ์ฒ˜๋ฆฌํ•˜๋ฉด ๋‹ค์Œ ๋‹จ์–ด์ฃผ๋จธ๋‹ˆ๊ฐ€ ์ƒ์„ฑ๋œ๋‹ค.```{"the", "the cat", "cat", "cat sat", "sat", "sat on", "on", "on the", "the mat", "mat"}````TextVectorization` ํด๋ž˜์Šค์˜ `ngrams=N` ์˜ต์…˜์„ ์ด์šฉํ•˜๋ฉดN-๊ทธ๋žจ๋“ค๋กœ ์ด๋ฃจ์–ด์ง„ ์–ดํœ˜์ƒ‰์ธ์„ ์ƒ์„ฑํ•  ์ˆ˜ ์žˆ๋‹ค.
text_vectorization = TextVectorization( ngrams=2, max_tokens=20000, output_mode="multi_hot", )
_____no_output_____
MIT
notebooks/dlp11_part01_introduction.ipynb
codingalzi/dlp
์–ดํœ˜์ƒ‰์ธ ์ƒ์„ฑ๊ณผ ํ›ˆ๋ จ์…‹, ๊ฒ€์ฆ์…‹, ํ…Œ์ŠคํŠธ์…‹์˜ ๋ฒกํ„ฐํ™” ๊ณผ์ •์€ ๋™์ผํ•˜๋‹ค.
text_vectorization.adapt(text_only_train_ds) binary_2gram_train_ds = train_ds.map(lambda x, y: (text_vectorization(x), y)) binary_2gram_val_ds = val_ds.map(lambda x, y: (text_vectorization(x), y)) binary_2gram_test_ds = test_ds.map(lambda x, y: (text_vectorization(x), y))
_____no_output_____
MIT
notebooks/dlp11_part01_introduction.ipynb
codingalzi/dlp