kernel_id (int64, 24.2k to 23.3M) | prompt (string, lengths 8 to 1.85M) | completion (string, lengths 1 to 182k) | comp_name (string, lengths 5 to 57)
---|---|---|---
12,242,884 |
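# Binary text classifier: pretrained BERT encoder with dropout and a single linear head on the pooled [CLS] output.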
class BERTClass(torch.nn.Module):
def __init__(self, drop_rate, output_size):
super().__init__()
model_config = BertConfig.from_pretrained('../input/bert-base-uncased', output_hidden_states=True)
self.bert = BertModel.from_pretrained('../input/bert-base-uncased', config=model_config)
self.drop = torch.nn.Dropout(drop_rate)
self.fc = torch.nn.Linear(768, output_size)
def forward(self, ids, mask):
out = self.bert(ids, attention_mask=mask)[1]
out = self.fc(self.drop(out))
return out<find_best_params>
|
from tensorflow.keras.preprocessing.image import ImageDataGenerator
|
Digit Recognizer
|
12,242,884 |
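# Evaluation helper (loss/accuracy over a DataLoader) followed by the fine-tuning loop that logs both splits each epoch.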
def calculate_loss_and_accuracy(model, criterion, loader, device):
model.eval()
loss = 0.0
total = 0
correct = 0
with torch.no_grad() :
for data in loader:
ids = data['ids'].to(device)
mask = data['mask'].to(device)
labels = data['labels'].to(device)
outputs = model(ids, mask)
loss += criterion(outputs, labels ).item()
pred = torch.round(torch.sigmoid(outputs)).cpu().numpy()
labels = labels.cpu().numpy()
total += len(labels)
correct +=(pred == labels ).sum().item()
return loss / len(loader), correct / total
def train_model(dataset_train, dataset_valid, batch_size, model, criterion, optimizer, num_epochs, device=None):
model.to(device)
dataloader_train = DataLoader(dataset_train, batch_size=batch_size, shuffle=True)
dataloader_valid = DataLoader(dataset_valid, batch_size=len(dataset_valid), shuffle=False)
log_train = []
log_valid = []
for epoch in range(num_epochs):
s_time = time.time()
model.train()
for data in dataloader_train:
ids = data['ids'].to(device)
mask = data['mask'].to(device)
labels = data['labels'].to(device)
optimizer.zero_grad()
outputs = model(ids,mask)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
loss_train, acc_train = calculate_loss_and_accuracy(model, criterion, dataloader_train, device)
loss_valid, acc_valid = calculate_loss_and_accuracy(model, criterion, dataloader_valid, device)
log_train.append([loss_train, acc_train])
log_valid.append([loss_valid, acc_valid])
e_time = time.time()
print(f'epoch: {epoch + 1}, loss_train: {loss_train:.4f}, accuracy_train: {acc_train:.4f}, loss_valid: {loss_valid:.4f}, accuracy_valid: {acc_valid:.4f}, {(e_time - s_time):.4f}sec')
return {'train': log_train, 'valid': log_valid}<choose_model_class>
|
image_gen = ImageDataGenerator(rotation_range=10,
width_shift_range=0.1,
height_shift_range=0.1,
shear_range=0.1,
zoom_range=0.1,
horizontal_flip=False,
vertical_flip=False,
)
|
Digit Recognizer
|
12,242,884 |
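# Hyperparameters and training setup; BCEWithLogitsLoss matches the single-logit output of BERTClass.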
DROP_RATE = 0.4
OUTPUT_SIZE = 1
BATCH_SIZE = 32
NUM_EPOCHS = 2
LEARNING_RATE = 2e-5
model = BERTClass(DROP_RATE, OUTPUT_SIZE)
criterion = torch.nn.BCEWithLogitsLoss()
optimizer = torch.optim.AdamW(params=model.parameters() , lr=LEARNING_RATE)
device = 'cuda' if cuda.is_available() else 'cpu'
log = train_model(dataset_train, dataset_valid, BATCH_SIZE, model, criterion, optimizer, NUM_EPOCHS, device=device )<categorify>
|
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPool2D, Flatten, Dense, Dropout, BatchNormalization
|
Digit Recognizer
|
12,242,884 |
test_csv["cleaned_text"] = test_csv["text"].map(clean)
test_csv.head()<categorify>
|
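# Keras CNN for 28x28x1 digit images: Conv2D/BatchNorm blocks with max pooling, a dense layer, and a 10-way softmax.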
model = Sequential()
model.add(Conv2D(filters=32, kernel_size=(5,5), input_shape=(28, 28, 1), padding="same", activation="relu"))
model.add(BatchNormalization())
model.add(Conv2D(filters=32, kernel_size=(5,5), input_shape=(28, 28, 1), padding="same", activation="relu"))
model.add(BatchNormalization())
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Conv2D(filters=64, kernel_size=(5,5), input_shape=(28, 28, 1), padding="same", activation="relu"))
model.add(BatchNormalization())
model.add(Conv2D(filters=64, kernel_size=(5,5), input_shape=(28, 28, 1), padding="same", activation="relu"))
model.add(BatchNormalization())
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Conv2D(filters=128, kernel_size=(5,5), input_shape=(28, 28, 1), padding="same", activation="relu"))
model.add(BatchNormalization())
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(512, activation="relu"))
model.add(BatchNormalization())
model.add(Dense(10, activation="softmax"))
model.compile(loss='categorical_crossentropy', optimizer="adam", metrics=["accuracy"] )
|
Digit Recognizer
|
12,242,884 |
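# Dataset wrapper that tokenizes each text with the BERT tokenizer and returns padded input ids and attention masks.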
class TestDataset(Dataset):
def __init__(self, X, tokenizer, max_len):
self.X = X
self.tokenizer = tokenizer
self.max_len = max_len
def __len__(self):
return len(self.X)
def __getitem__(self, index):
text = self.X[index]
inputs = self.tokenizer.encode_plus(
text,
add_special_tokens=True,
max_length=self.max_len,
truncation=True,
pad_to_max_length=True
)
ids = inputs['input_ids']
mask = inputs['attention_mask']
return {
'ids': torch.LongTensor(ids),
'mask': torch.LongTensor(mask)
}
<create_dataframe>
|
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
|
Digit Recognizer
|
12,242,884 |
max_len = 45
dataset_test = TestDataset(test_csv["cleaned_text"].values, tokenizer, max_len )<categorify>
|
check_point = ModelCheckpoint("best_model.h5", monitor="val_accuracy", verbose=1, save_best_only=True )
|
Digit Recognizer
|
12,242,884 |
loader = DataLoader(dataset_test, batch_size=len(dataset_test), shuffle=False)
model.eval()
with torch.no_grad() :
for data in loader:
ids = data['ids'].to(device)
mask = data['mask'].to(device)
outputs = model.forward(ids, mask)
pred = torch.round(torch.sigmoid(outputs)).cpu().numpy()<load_from_csv>
|
reduce_lr = ReduceLROnPlateau(monitor="val_accuracy", patience=3, verbose=1, factor=0.5, min_lr=0.0001 )
|
Digit Recognizer
|
12,242,884 |
submit_csv = pd.read_csv("../input/nlp-getting-started/sample_submission.csv")
submit_csv.head()<data_type_conversions>
|
Digit Recognizer
|
|
12,242,884 |
submit_csv['target'] = pred.astype('int64')
submit_csv.head(10 )<save_to_csv>
|
history = model.fit_generator(image_gen.flow(X_train,y_train, batch_size=64),
epochs = 50, validation_data =(X_val,y_val),
verbose = 1,
callbacks=[check_point, reduce_lr] )
|
Digit Recognizer
|
12,242,884 |
submit_csv.to_csv("submission2.csv",index = False )<install_modules>
|
losses = pd.DataFrame(model.history.history )
|
Digit Recognizer
|
12,242,884 |
!pip install transformers==3.5.1
!pip install pyspellchecker
!pip install -U joblib textblob
!python -m textblob.download_corpora|<import_modules>
|
print("Accuracy on validation data: {:.4f}".format(losses["val_accuracy"].max()))
|
Digit Recognizer
|
12,242,884 |
import pandas as pd
import torchtext
from transformers import BertTokenizer, BertForMaskedLM, BertConfig
import transformers
import torch
from torch.utils.data import Dataset, DataLoader
from torch import optim
from torch import cuda
from sklearn.model_selection import train_test_split
import re
import string
from joblib import Parallel, delayed
from textblob import TextBlob
from textblob.translate import NotTranslated
from time import sleep<load_from_csv>
|
from keras.models import load_model
|
Digit Recognizer
|
12,242,884 |
train_val_df = pd.read_csv("/kaggle/input/nlp-getting-started/train.csv")
test_df = pd.read_csv("/kaggle/input/nlp-getting-started/test.csv" )<feature_engineering>
|
saved_model = load_model('best_model.h5' )
|
Digit Recognizer
|
12,242,884 |
train_val_df = train_val_df.loc[:,["text","target"]]
test_df = test_df.loc[:,["text"]]
test_df["target"] = [0]*len(test_df["text"] )<prepare_output>
|
predictions = saved_model.predict_classes(X_test )
|
Digit Recognizer
|
12,242,884 |
print(train_val_df)
print(test_df.head())
original_df = train_val_df.copy()<define_variables>
|
submission = pd.concat([pd.Series(range(1,28001), name ="ImageId"), submission], axis = 1 )
|
Digit Recognizer
|
12,242,884 |
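# Text cleaning for the tweets: expand contractions and common misspellings, strip HTML/URLs and extra whitespace, and remove emoji.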
mispell_dict = {"aren't" : "are not",
"can't" : "cannot",
"couldn't" : "could not",
"couldnt" : "could not",
"didn't" : "did not",
"doesn't" : "does not",
"doesnt" : "does not",
"don't" : "do not",
"hadn't" : "had not",
"hasn't" : "has not",
"haven't" : "have not",
"havent" : "have not",
"he'd" : "he would",
"he'll" : "he will",
"he's" : "he is",
"i'd" : "I would",
"i'd" : "I had",
"i'll" : "I will",
"i'm" : "I am",
"isn't" : "is not",
"it's" : "it is",
"it'll":"it will",
"i've" : "I have",
"let's" : "let us",
"mightn't" : "might not",
"mustn't" : "must not",
"shan't" : "shall not",
"she'd" : "she would",
"she'll" : "she will",
"she's" : "she is",
"shouldn't" : "should not",
"shouldnt" : "should not",
"that's" : "that is",
"thats" : "that is",
"there's" : "there is",
"theres" : "there is",
"they'd" : "they would",
"they'll" : "they will",
"they're" : "they are",
"theyre": "they are",
"they've" : "they have",
"we'd" : "we would",
"we're" : "we are",
"weren't" : "were not",
"we've" : "we have",
"what'll" : "what will",
"what're" : "what are",
"what's" : "what is",
"what've" : "what have",
"where's" : "where is",
"who'd" : "who would",
"who'll" : "who will",
"who're" : "who are",
"who's" : "who is",
"who've" : "who have",
"won't" : "will not",
"wouldn't" : "would not",
"you'd" : "you would",
"you'll" : "you will",
"you're" : "you are",
"you've" : "you have",
"'re": " are",
"wasn't": "was not",
"we'll":" will",
"didn't": "did not"}
def replace_typical_misspell(text):
text = text.lower()
mispellings_re = re.compile('(%s)' % '|'.join(mispell_dict.keys()))
def replace(match):
return mispell_dict[match.group(0)]
return mispellings_re.sub(replace, text)
train_val_df['text'] = train_val_df['text'].apply(lambda x : replace_typical_misspell(x))
test_df['text'] = test_df['text'].apply(lambda x : replace_typical_misspell(x))
from bs4 import BeautifulSoup  # assumed import; needed for the HTML stripping below
def remove_space(text):
text = BeautifulSoup(text).text.strip().lower()
text = re.sub(r'((http)\S+)', 'http', text)
text = re.sub(r'\s+', ' ', text)
return text
train_val_df['text'] = train_val_df['text'].apply(lambda x : remove_space(x))
test_df['text'] = test_df['text'].apply(lambda x : remove_space(x))
def remove_emoji(text):
emoji_pattern = re.compile("["
u"\U0001F600-\U0001F64F"
u"\U0001F300-\U0001F5FF"
u"\U0001F680-\U0001F6FF"
u"\U0001F1E0-\U0001F1FF"
u"\U00002702-\U000027B0"
u"\U000024C2-\U0001F251"
"]+", flags=re.UNICODE)
return emoji_pattern.sub(r'', text)
train_val_df['text'] = train_val_df['text'].apply(lambda x: remove_emoji(x))
test_df['text'] = test_df['text'].apply(lambda x: remove_emoji(x))<filter>
|
submission.to_csv("submission.csv", index=False )
|
Digit Recognizer
|
12,242,884 |
print(train_val_df.loc[31])
print(original_df.loc[31] )<save_to_csv>
|
submission.to_csv("submission.csv", index=False )
|
Digit Recognizer
|
12,233,077 |
test_df.to_csv("test.tsv", sep='\t', index=False, header=None)
print(test_df.shape)
train_val_df.to_csv("train_eval.tsv", sep='\t', index=False, header=None)
print(train_val_df.shape )<categorify>
|
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
|
Digit Recognizer
|
12,233,077 |
max_length = 50
def tokenizer_50(input_text):
return tokenizer.encode(input_text, max_length=50, return_tensors='pt')[0]
TEXT = torchtext.data.Field(sequential=True, tokenize=tokenizer_50, use_vocab=False, lower=False,
include_lengths=True, batch_first=True, fix_length=max_length, pad_token=0)
LABEL = torchtext.data.Field(sequential=False, use_vocab=False )<load_pretrained>
|
test_data = pd.read_csv('../input/digit-recognizer/test.csv')
train_data = pd.read_csv('../input/digit-recognizer/train.csv')
|
Digit Recognizer
|
12,233,077 |
tokenizer = BertTokenizer.from_pretrained('bert-base-cased' )<load_from_csv>
|
training=np.array(train_data,dtype='float32')
testing=np.array(test_data,dtype='float32' )
|
Digit Recognizer
|
12,233,077 |
dataset_train_eval, dataset_test = torchtext.data.TabularDataset.splits(path='.', train='./train_eval.tsv', test='./test.tsv', format='tsv', fields=[('Text', TEXT),('Label', LABEL)] )<split>
|
x_train=training[:,1:]/255
y_train=training[:,0]
|
Digit Recognizer
|
12,233,077 |
dataset_train, dataset_eval = dataset_train_eval.split(
split_ratio=1.0 - 1800/7613, random_state=random.seed(1234))
print(dataset_train.__len__())
print(dataset_eval.__len__())
print(dataset_test.__len__() )<data_type_conversions>
|
x_test=testing[:,0:]/255
|
Digit Recognizer
|
12,233,077 |
print(tokenizer.convert_ids_to_tokens(item.Text.tolist()))
print(int(item.Label))<define_variables>
|
y_train_cat=to_categorical(y_train )
|
Digit Recognizer
|
12,233,077 |
batch_size = 32
dl_train = torchtext.data.Iterator(
dataset_train, batch_size=batch_size, train=True)
dl_eval = torchtext.data.Iterator(
dataset_eval, batch_size=batch_size, train=False, sort=False)
dl_test = torchtext.data.Iterator(
dataset_test, batch_size=batch_size, train=False, sort=False)
dataloaders_dict = {"train": dl_train, "val": dl_eval}<load_pretrained>
|
model =Sequential()
model.add(Conv2D(64,kernel_size=(3, 3),activation='relu',input_shape=(28, 28, 1)))
model.add(Conv2D(64,kernel_size=(3, 3),activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(BatchNormalization())
model.add(Dropout(0.25))
model.add(Conv2D(filters = 128, kernel_size =(3,3),activation ='relu'))
model.add(Conv2D(filters = 128, kernel_size =(3,3),activation ='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(BatchNormalization())
model.add(Conv2D(filters = 256, kernel_size =(3,3),activation ='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(BatchNormalization())
model.add(Dense(512, activation = "relu"))
model.add(Dropout(0.5))
model.add(Dense(10, activation = "softmax"))
|
Digit Recognizer
|
12,233,077 |
model = BertModel.from_pretrained('bert-base-cased' )<set_options>
|
model.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.RMSprop() ,
metrics=['accuracy'])
learning_rate_reduction = ReduceLROnPlateau(monitor='accuracy',
patience=3,
verbose=1,
factor=0.3,
min_lr=0.0001 )
|
Digit Recognizer
|
12,233,077 |
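# Classification head on BERT: take the first ([CLS]) token vector of the last hidden state and feed it to a linear layer.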
class BertForTwitter(nn.Module):
def __init__(self):
super(BertForTwitter, self ).__init__()
self.bert = model
self.cls = nn.Linear(in_features=768, out_features=2)
nn.init.normal_(self.cls.weight, std=0.02)
nn.init.normal_(self.cls.bias, 0)
def forward(self, input_ids):
result = self.bert(input_ids)
vec_0 = result[0]
vec_0 = vec_0[:, 0, :]
vec_0 = vec_0.view(-1, 768)
output = self.cls(vec_0)
return output<train_on_grid>
|
epochs_range = 60
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(X_train,y_train_cat,batch_size=256,epochs=epochs_range )
|
Digit Recognizer
|
12,233,077 |
net = BertForTwitter()
net.train()
print('Network setup complete')
|
y_pred=model.predict_classes(X_test )
|
Digit Recognizer
|
12,233,077 |
for param in net.parameters() :
param.requires_grad = False
for param in net.bert.encoder.layer[-1].parameters() :
param.requires_grad = True
for param in net.cls.parameters() :
param.requires_grad = True<choose_model_class>
|
sample = pd.read_csv('../input/digit-recognizer/sample_submission.csv')
|
Digit Recognizer
|
12,233,077 |
optimizer = optim.Adam([
{'params': net.bert.encoder.layer[-1].parameters() , 'lr': 5e-5},
{'params': net.cls.parameters() , 'lr': 1e-4}
])
criterion = nn.CrossEntropyLoss()
<train_model>
|
temp=pd.DataFrame({'ImageId':sample['ImageId'],'Label':y_pred} )
|
Digit Recognizer
|
12,233,077 |
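# Fine-tuning loop: per-phase loss/accuracy logging plus a simple early-stopping counter on validation accuracy.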
def train_model(net, dataloaders_dict, criterion, optimizer, num_epochs):
max_acc = 0
Stop_flag = False
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print("使用デバイス:", device)
print('-----start-------')
net.to(device)
torch.backends.cudnn.benchmark = True
batch_size = dataloaders_dict["train"].batch_size
for epoch in range(num_epochs):
for phase in ['train', 'val']:
if phase == 'train':
net.train()
else:
net.eval()
epoch_loss = 0.0
epoch_corrects = 0
iteration = 1
for batch in(dataloaders_dict[phase]):
inputs = batch.Text[0].to(device)
labels = batch.Label.to(device)
optimizer.zero_grad()
with torch.set_grad_enabled(phase == 'train'):
outputs = net(inputs)
loss = criterion(outputs, labels)
_, preds = torch.max(outputs, 1)
if phase == 'train':
loss.backward()
optimizer.step()
if(iteration % 50 == 0):
acc =(torch.sum(preds == labels.data)
).double() /batch_size
print('Iteration {} || Loss: {:.4f} || accuracy for this iteration: {}'.format(
iteration, loss.item(), acc))
iteration += 1
epoch_loss += loss.item() * batch_size
epoch_corrects += torch.sum(preds == labels.data)
epoch_loss = epoch_loss / len(dataloaders_dict[phase].dataset)
epoch_acc = epoch_corrects.double(
)/ len(dataloaders_dict[phase].dataset)
print('Epoch {}/{} | {:^5} | Loss: {:.4f} Acc: {:.4f}'.format(epoch+1, num_epochs,
phase, epoch_loss, epoch_acc))
if phase == "val":
if epoch_acc < max_acc:
count += 1
if count >= 3:
Stop_flag = True
else:
count = 0
max_acc = epoch_acc
print(count)
if Stop_flag:
break
return net<train_model>
|
csv_data=temp.to_csv('pred.csv',index=False )
|
Digit Recognizer
|
12,216,311 |
num_epochs = 50
net_trained = train_model(net, dataloaders_dict,
criterion, optimizer, num_epochs=num_epochs )<load_from_csv>
|
%matplotlib inline
|
Digit Recognizer
|
12,216,311 |
sample_submission = pd.read_csv(".. /input/nlp-getting-started/sample_submission.csv")
sample_submission["target"] = ans_list
sample_submission<save_to_csv>
|
train = pd.read_csv(".. /input/digit-recognizer/train.csv")
test = pd.read_csv(".. /input/digit-recognizer/test.csv" )
|
Digit Recognizer
|
12,216,311 |
sample_submission.to_csv("submission_plus.csv", index=False )<import_modules>
|
Y_train = train['label']
X_train = train.drop(labels=['label'], axis=1)
del train
|
Digit Recognizer
|
12,216,311 |
import pandas as pd
import numpy as np
import spacy
import re
import string<load_from_csv>
|
X_train = X_train / 255
test = test / 255
|
Digit Recognizer
|
12,216,311 |
train = pd.read_csv(".. /input/nlp-getting-started/train.csv")
test = pd.read_csv(".. /input/nlp-getting-started/test.csv")
submission = pd.read_csv(".. /input/nlp-getting-started/sample_submission.csv" )<categorify>
|
X_train = X_train.values.reshape(-1, 28, 28, 1)
test = test.values.reshape(-1, 28, 28, 1)
Y_train = to_categorical(Y_train, num_classes = 10 )
|
Digit Recognizer
|
12,216,311 |
def clean_text(text):
text = str(text ).lower()
text = re.sub('[%s]' % re.escape(string.punctuation), '', text)
text = re.sub('\n', '', text)
return text<feature_engineering>
|
model = Sequential()
model.add(Conv2D(input_shape=(28,28,1), kernel_size=(5,5),
filters=20, activation = "relu"))
model.add(MaxPooling2D(pool_size=(2,2), strides=2, padding='same'))
model.add(Conv2D(kernel_size=(5,5), filters=50, activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2), strides=2, padding='same'))
model.add(Flatten())
model.add(Dense(500, activation='relu'))
model.add(Dense(10, activation='softmax'))
|
Digit Recognizer
|
12,216,311 |
train["text"] = train["text"].apply(lambda x:clean_text(x))
test["text"] = test["text"].apply(lambda x:clean_text(x))<count_unique_values>
|
model.compile(optimizer='adam', loss='categorical_crossentropy',
metrics=['accuracy'] )
|
Digit Recognizer
|
12,216,311 |
len(train["keyword"].unique() )<count_unique_values>
|
history = model.fit(X_train, Y_train, validation_split=0.1,
epochs=100, batch_size=64 )
|
Digit Recognizer
|
12,216,311 |
len(train["location"].unique() )<load_pretrained>
|
results = model.predict(test)
results = np.argmax(results,axis = 1)
results = pd.Series(results,name="Label" )
|
Digit Recognizer
|
12,216,311 |
<define_variables><EOS>
|
submission = pd.concat([pd.Series(range(1,28001),name = "ImageId"),
results],axis = 1)
submission.to_csv("cnn_mnist_datagen20210209.csv",index=False )
|
Digit Recognizer
|
12,099,647 |
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<concatenate>
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from keras.utils import to_categorical
from keras.models import Sequential
from keras.layers import Dense, Conv2D, MaxPool2D, Flatten, Dropout, BatchNormalization
from keras.callbacks import EarlyStopping, ReduceLROnPlateau
from keras.preprocessing.image import ImageDataGenerator
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
import itertools
|
Digit Recognizer
|
12,099,647 |
def concat_keyword_text(row):
return(str(row["text"])+ " " + str(row["keyword"]))<feature_engineering>
|
train = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
test = pd.read_csv('/kaggle/input/digit-recognizer/test.csv' )
|
Digit Recognizer
|
12,099,647 |
train_samples = train.apply(concat_keyword_text, axis = 1)
train_samples.head()<init_hyperparams>
|
X_train = train.iloc[:,1:]
y_train = train.iloc[:,0]
|
Digit Recognizer
|
12,099,647 |
vectorizer = TextVectorization()
text_ds = tf.data.Dataset.from_tensor_slices(train_samples ).batch(128)
vectorizer.adapt(text_ds )<feature_engineering>
|
X_train = X_train.values.reshape(-1, 28, 28, 1)/255.
test = test.values.reshape(-1, 28, 28, 1)/255.
y_train = to_categorical(y_train, 10 )
|
Digit Recognizer
|
12,099,647 |
voc = vectorizer.get_vocabulary()<define_variables>
|
datagen = ImageDataGenerator(
rotation_range=15,
width_shift_range=0.1,
height_shift_range=0.1,
zoom_range=0.1,
shear_range=0.2
)
|
Digit Recognizer
|
12,099,647 |
num_tokens = len(voc)
embedding_dim = len(nlp('The' ).vector)
embedding_matrix = np.zeros(( num_tokens, embedding_dim))<feature_engineering>
|
def create_model() :
model = Sequential()
model.add(Conv2D(32,(3,3), padding='same', input_shape=X_train.shape[1:], activation='relu'))
model.add(Conv2D(32,(3,3), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(MaxPool2D(2,2))
model.add(Dropout(0.2))
model.add(Conv2D(64,(3,3), padding='same', activation='relu'))
model.add(Conv2D(64,(3,3), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(MaxPool2D(2,2))
model.add(Dropout(0.2))
model.add(Conv2D(128,(3,3), padding='same', activation='relu'))
model.add(Conv2D(128,(3,3), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(MaxPool2D(2,2))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
return model
|
Digit Recognizer
|
12,099,647 |
for i, word in enumerate(voc):
embedding_matrix[i] = nlp(word ).vector<feature_engineering>
|
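# Train an ensemble of identical CNNs on different train/validation splits and accumulate their test predictions.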
EPOCHS = 30
BATCH_SIZE = 50
ENSEMBLES = 5
result_list = []
histories = []
results = np.zeros(( test.shape[0],10))
callback_list = [
ReduceLROnPlateau(monitor='val_loss', factor=0.25, patience=2),
EarlyStopping(monitor='val_loss', min_delta=0.0005, patience=4)
]
for i in range(ENSEMBLES):
X_train_tmp, X_val, y_train_tmp, y_val = train_test_split(X_train, y_train, test_size=0.1, random_state=i)
model = create_model()
print('training No.', i)
history = model.fit(datagen.flow(X_train_tmp, y_train_tmp, batch_size=BATCH_SIZE),
verbose=0,
epochs=EPOCHS,
callbacks=callback_list,
validation_data=(X_val, y_val),
steps_per_epoch=X_train_tmp.shape[0] // BATCH_SIZE)
histories.append(history)
result = model.predict(test)
results += result
result_list.append(result )
|
Digit Recognizer
|
12,099,647 |
<categorify><EOS>
|
results = np.argmax(results, axis=1)
results = pd.Series(results, name='Label')
submission = pd.concat([pd.Series(range(1,28001), name='ImageID'), results], axis=1)
submission.to_csv('submission.csv', index=False )
|
Digit Recognizer
|
11,964,083 |
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<randomize_order>
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from keras.utils import to_categorical
from keras.models import Sequential
from keras.layers import Dense, Conv2D, MaxPool2D, Flatten, BatchNormalization, Dropout
from keras.callbacks import EarlyStopping, ReduceLROnPlateau
from keras.preprocessing.image import ImageDataGenerator
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
import itertools
|
Digit Recognizer
|
11,964,083 |
df_train = train.sample(frac=0.7, random_state=0)
df_valid = train.drop(df_train.index )<prepare_x_and_y>
|
train = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
test = pd.read_csv('/kaggle/input/digit-recognizer/test.csv' )
|
Digit Recognizer
|
11,964,083 |
X_train = df_train.drop(['target'], axis=1)
X_valid = df_valid.drop(['target'], axis=1)
y_train = df_train['target']
y_valid = df_valid['target']<data_type_conversions>
|
X_train = train.iloc[:,1:]
y_train = train.iloc[:,0]
|
Digit Recognizer
|
11,964,083 |
x_train = vectorizer(np.array([[s] for s in X_train["text"]])).numpy()
x_valid = vectorizer(np.array([[s] for s in X_valid["text"]])).numpy()<categorify>
|
X_train = X_train.values.reshape(-1, 28, 28, 1)/255.
test = test.values.reshape(-1, 28, 28, 1)/255.
y_train = to_categorical(y_train, 10 )
|
Digit Recognizer
|
11,964,083 |
y_train_ = np_utils.to_categorical(y_train.values)
y_valid_ = np_utils.to_categorical(y_valid.values )<choose_model_class>
|
random_seed = 0
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.2, random_state=random_seed )
|
Digit Recognizer
|
11,964,083 |
int_sequences_input = keras.Input(shape=(None,), dtype="int64")
embedded_sequences = embedding_layer(int_sequences_input)
x = layers.Conv1D(64, 5, activation="relu",padding='same' )(embedded_sequences)
x = layers.MaxPooling1D(3 )(x)
x = layers.Conv1D(32, 5, activation="relu",padding='same' )(x)
x = layers.MaxPooling1D(3 )(x)
x = layers.Conv1D(16, 5, activation="relu",padding='same' )(x)
x = layers.GlobalMaxPooling1D()(x)
x = layers.Dense(16, activation="relu" )(x)
x = layers.Dropout(0.5 )(x)
preds = layers.Dense(2, activation="softmax" )(x)
model = keras.Model(int_sequences_input, preds)
model.summary()<train_model>
|
datagen = ImageDataGenerator(
rotation_range=10,
width_shift_range=0.1,
height_shift_range=0.1,
zoom_range=0.1
)
|
Digit Recognizer
|
11,964,083 |
early_stopping = callbacks.EarlyStopping(
min_delta=0.001,
patience=20,
restore_best_weights=True,
)
model.compile(
optimizer='adam',
loss='categorical_crossentropy',
metrics ='accuracy'
)
history = model.fit(
x_train, y_train_,
validation_data=(x_valid, y_valid_),
batch_size=128,
epochs=500,
callbacks=[early_stopping],
verbose=1,
)<data_type_conversions>
|
model = Sequential()
model.add(Conv2D(32,(5,5), padding='same', input_shape=X_train.shape[1:], activation='relu'))
model.add(Conv2D(32,(5,5), padding='same', activation='relu'))
model.add(MaxPool2D(2,2))
model.add(Conv2D(64,(3,3), padding='same', activation='relu'))
model.add(Conv2D(64,(3,3), padding='same', activation='relu'))
model.add(MaxPool2D(2,2))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))
model.summary()
|
Digit Recognizer
|
11,964,083 |
x_test = vectorizer(np.array([[s] for s in test["text"]])).numpy()<predict_on_test>
|
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'] )
|
Digit Recognizer
|
11,964,083 |
predictions = model.predict(x_test )<load_from_csv>
|
EPOCHS = 30
BATCH_SIZE = 20
callback_list = [
ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=1),
EarlyStopping(monitor='val_loss', min_delta=0.0005, patience=4)
]
history = model.fit(datagen.flow(X_train, y_train, batch_size=BATCH_SIZE),
epochs=EPOCHS,
callbacks=callback_list,
validation_data=(X_val, y_val),
steps_per_epoch=X_train.shape[0] // BATCH_SIZE )
|
Digit Recognizer
|
11,964,083 |
sub = pd.read_csv('../input/nlp-getting-started/sample_submission.csv')
sub.head()<save_to_csv>
|
results = model.predict(test)
results = np.argmax(results, axis=1)
results = pd.Series(results, name='Label')
submission = pd.concat([pd.Series(range(1,28001), name='ImageID'), results], axis=1)
submission.to_csv('submission.csv', index=False )
|
Digit Recognizer
|
8,042,252 |
submission = pd.DataFrame({"id": test.iloc[:,0].values,"target": np.argmax(predictions,axis=1)})
submission.to_csv("submission.csv", index=False)
submission.head()<install_modules>
|
%%script false --no-raise-error
print(device_lib.list_local_devices())
|
Digit Recognizer
|
8,042,252 |
!pip install transformers==3.5.1
!pip install pyspellchecker
!pip install -U joblib textblob
!python -m textblob.download_corpora<import_modules>
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import time
from sklearn.model_selection import train_test_split, StratifiedKFold
from keras import backend as K
from keras.preprocessing.image import ImageDataGenerator
from keras.utils import to_categorical
from keras.models import Sequential, load_model
from keras.layers import Conv2D, BatchNormalization, Dropout, Flatten, Dense
from keras.callbacks import LearningRateScheduler
import tensorflow as tf
|
Digit Recognizer
|
8,042,252 |
import pandas as pd
import torchtext
from transformers import BertTokenizer, BertForMaskedLM, BertConfig
import transformers
import torch
from torch.utils.data import Dataset, DataLoader
from torch import optim
from torch import cuda
from sklearn.model_selection import train_test_split
import re
import string
from joblib import Parallel, delayed
from textblob import TextBlob
from textblob.translate import NotTranslated
from time import sleep<load_from_csv>
|
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
try:
tf.config.experimental.set_visible_devices(gpus[0], 'GPU')
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
logical_gpus = tf.config.experimental.list_logical_devices('GPU')
print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
except RuntimeError as e:
print(e )
|
Digit Recognizer
|
8,042,252 |
train_val_df = pd.read_csv("/kaggle/input/nlp-getting-started/train.csv")
test_df = pd.read_csv("/kaggle/input/nlp-getting-started/test.csv" )<feature_engineering>
|
train = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
test = pd.read_csv('/kaggle/input/digit-recognizer/test.csv')
|
Digit Recognizer
|
8,042,252 |
train_val_df = train_val_df.loc[:,["text","target"]]
test_df = test_df.loc[:,["text"]]
test_df["target"] = [0]*len(test_df["text"] )<define_variables>
|
y = train.pop('label')
train_y = to_categorical(y)
train_X = train/255
test_X = test/255
train_X = train_X.to_numpy()
test_X = test_X.to_numpy()
|
Digit Recognizer
|
8,042,252 |
check_df = train_val_df<define_variables>
|
sub_test_X, sub_train_X, sub_test_y, sub_train_y = train_test_split(train_X, train_y,
train_size=0.2, stratify=train_y )
|
Digit Recognizer
|
8,042,252 |
languages = ["de"]
parallel = Parallel(n_jobs=-1, backend="threading", verbose=5 )<categorify>
|
img_gen = ImageDataGenerator(rotation_range = 12, width_shift_range=.12, height_shift_range=.12,
zoom_range=.12 )
|
Digit Recognizer
|
8,042,252 |
def translate_text(comment, language):
if hasattr(comment, "decode"):
comment = comment.decode("utf-8")
text = TextBlob(comment)
try:
text = text.translate(to=language)
sleep(2.0)
text = text.translate(to="en")
sleep(2.0)
except NotTranslated:
pass
return str(text )<save_to_csv>
|
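# Helpers to build the CNN, train it on an augmented subset of the data, and reload a previously saved model.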
def build_model(save = False):
model = Sequential()
model.add(Conv2D(32, kernel_size = 3, activation = 'relu', input_shape =(28,28,1)))
model.add(BatchNormalization())
model.add(Conv2D(32, kernel_size = 3, activation = 'relu'))
model.add(BatchNormalization())
model.add(Conv2D(32, kernel_size = 3, activation = 'relu', padding = 'same'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Conv2D(64, kernel_size = 3, activation = 'relu'))
model.add(BatchNormalization())
model.add(Conv2D(64, kernel_size = 3, activation = 'relu'))
model.add(BatchNormalization())
model.add(Conv2D(64, kernel_size = 3, activation = 'relu', padding = 'same'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Conv2D(128, kernel_size = 3, activation = 'relu'))
model.add(BatchNormalization())
model.add(Flatten())
model.add(Dropout(0.4))
model.add(Dense(10, activation ='softmax'))
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
return model
def train_model(model, train_X, train_y, save = False):
sub_test_X, sub_train_X, sub_test_y, sub_train_y = train_test_split(train_X, train_y,
train_size=0.2, stratify=train_y)
annealer = LearningRateScheduler(lambda x: 1e-3 * 0.95 ** x)
start = time.time()
model.fit_generator(img_gen.flow(sub_train_X, sub_train_y), epochs=10,
steps_per_epoch=sub_train_X.shape[0]//64,
validation_data =(sub_test_X,sub_test_y), callbacks=[annealer])
print(f'Overall it takes {time.time() - start} sec')
if save ==True:
model.save('/kaggle/input/trained-model/model.h5')
def load_my_model() :
re_model = load_model('/kaggle/input/trained-model/first_model.h5')
return re_model
|
Digit Recognizer
|
8,042,252 |
comments_list = check_df["text"]
for language in languages:
print('Translate comments using "{0}" language'.format(language))
translated_data = parallel(delayed(translate_text)(comment, language) for comment in comments_list)
check_df['text'] = translated_data
result_path = os.path.join("train_val_" + language + ".csv")
check_df.to_csv(result_path, index=False )<load_from_csv>
|
num_models = 3
models = [0]*num_models
for i in range(num_models):
models[i] = build_model()
train_model(models[i],train_X, train_y)
|
Digit Recognizer
|
8,042,252 |
train_val_de_df = pd.read_csv("./train_val_de.csv")
train_concat_df = train_val_de_df<concatenate>
|
prediction = np.zeros(( test_X.shape[0],10))
for i in range(len(models)) :
prediction += models[i].predict(test_X )
|
Digit Recognizer
|
8,042,252 |
print(train_concat_df )<concatenate>
|
predict = np.argmax(prediction, axis =1)
predict = np.vstack(( np.arange(predict.shape[0])+1, predict)).T
|
Digit Recognizer
|
8,042,252 |
train_val_df = pd.concat([train_val_df,train_concat_df] )<load_pretrained>
|
submission = pd.DataFrame(data=predict, columns=['imageid', 'label'] )
|
Digit Recognizer
|
8,042,252 |
tokenizer = BertTokenizer.from_pretrained('bert-base-cased' )<import_modules>
|
submission.to_csv('submit.csv',index=False )
|
Digit Recognizer
|
11,813,917 |
print(torch.__version__ )<feature_engineering>
|
%matplotlib inline
|
Digit Recognizer
|
11,813,917 |
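# Tweet-cleaning pipeline: remove URLs, HTML tags, emoji, and punctuation, then correct spellings with SpellChecker.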
def remove_URL(text):
url = re.compile(r'https?://\S+|www\.\S+')
return url.sub(r'', text)
train_val_df['text'] = train_val_df['text'].apply(lambda x : remove_URL(x))
test_df['text'] = test_df['text'].apply(lambda x : remove_URL(x))
def remove_html(text):
html = re.compile(r'<.*?>')
return html.sub(r'',text)
train_val_df['text'] = train_val_df['text'].apply(lambda x : remove_html(x))
test_df['text'] = test_df['text'].apply(lambda x : remove_html(x))
def remove_emoji(text):
emoji_pattern = re.compile("["
u"\U0001F600-\U0001F64F"
u"\U0001F300-\U0001F5FF"
u"\U0001F680-\U0001F6FF"
u"\U0001F1E0-\U0001F1FF"
u"\U00002702-\U000027B0"
u"\U000024C2-\U0001F251"
"]+", flags=re.UNICODE)
return emoji_pattern.sub(r'', text)
train_val_df['text'] = train_val_df['text'].apply(lambda x: remove_emoji(x))
test_df['text'] = test_df['text'].apply(lambda x: remove_emoji(x))
def remove_punct(text):
table = str.maketrans('','', string.punctuation)
return text.translate(table)
train_val_df['text'] = train_val_df['text'].apply(lambda x : remove_punct(x))
test_df['text'] = test_df['text'].apply(lambda x : remove_punct(x))
from spellchecker import SpellChecker  # assumed import; SpellChecker is provided by the pyspellchecker package
spell = SpellChecker()
def correct_spellings(text):
corrected_text = []
misspelled_words = spell.unknown(text.split())
for word in text.split() :
if word in misspelled_words:
corrected_text.append(spell.correction(word))
else:
corrected_text.append(word)
return " ".join(corrected_text)
train_val_df['text'] = train_val_df['text'].apply(lambda x : correct_spellings(x))
test_df['text'] = test_df['text'].apply(lambda x : correct_spellings(x))<save_to_csv>
|
print("Tensorflow version " + tf.__version__)
np.random.seed(42 )
|
Digit Recognizer
|
11,813,917 |
test_df.to_csv("test.tsv", sep='\t', index=False, header=None)
print(test_df.shape)
train_val_df.to_csv("train_eval.tsv", sep='\t', index=False, header=None)
print(train_val_df.shape)
<categorify>
|
try:
tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
print('Running on TPU ', tpu.master())
except ValueError:
tpu = None
if tpu:
tf.config.experimental_connect_to_cluster(tpu)
tf.tpu.experimental.initialize_tpu_system(tpu)
strategy = tf.distribute.experimental.TPUStrategy(tpu)
else:
strategy = tf.distribute.get_strategy()
print("REPLICAS: ", strategy.num_replicas_in_sync )
|
Digit Recognizer
|
11,813,917 |
max_length = 100
def tokenizer_100(input_text):
return tokenizer.encode(input_text, max_length=100, return_tensors='pt')[0]
TEXT = torchtext.data.Field(sequential=True, tokenize=tokenizer_100, use_vocab=False, lower=False,
include_lengths=True, batch_first=True, fix_length=max_length, pad_token=0)
LABEL = torchtext.data.Field(sequential=False, use_vocab=False )<train_model>
|
train_data = pd.read_csv("../input/digit-recognizer/train.csv")
test_data = pd.read_csv("../input/digit-recognizer/test.csv")
|
Digit Recognizer
|
11,813,917 |
dataset_train_eval, dataset_test = torchtext.data.TabularDataset.splits(
path='.', train='./train_eval.tsv', test='./test.tsv', format='tsv', fields=[('Text', TEXT),('Label', LABEL)] )<data_type_conversions>
|
Y_train=train_data['label']
X_train=train_data.drop('label', axis=1 )
|
Digit Recognizer
|
11,813,917 |
print(tokenizer.convert_ids_to_tokens(item.Text.tolist()))
print(int(item.Label))<define_variables>
|
from keras.datasets import mnist  # assumed import for the extra Keras MNIST data used below
(x_train0, y_train0), (x_test0, y_test0) = mnist.load_data()
x_train1 = np.concatenate([x_train0, x_test0], axis=0)
y_train1 = np.concatenate([y_train0, y_test0], axis=0)
X_train_keras = x_train1.reshape(-1, 28*28)
Y_train_keras = y_train1
|
Digit Recognizer
|
11,813,917 |
batch_size = 32
dl_train = torchtext.data.Iterator(
dataset_train, batch_size=batch_size, train=True)
dl_eval = torchtext.data.Iterator(
dataset_eval, batch_size=batch_size, train=False, sort=False)
dl_test = torchtext.data.Iterator(
dataset_test, batch_size=batch_size, train=False, sort=False)
dataloaders_dict = {"train": dl_train, "val": dl_eval}<import_modules>
|
X_train = np.concatenate(( X_train.values, X_train_keras))
Y_train = np.concatenate(( Y_train, Y_train_keras))
|
Digit Recognizer
|
11,813,917 |
print(transformers.__version__ )<load_pretrained>
|
unique, counts = np.unique(Y_train, return_counts=True)
dict(zip(unique, counts))
|
Digit Recognizer
|
11,813,917 |
model = BertModel.from_pretrained('bert-base-cased' )<set_options>
|
X_train = X_train.astype('float32')
Y_train = Y_train.astype('float32')
test_data=test_data.astype('float32')
X_train = X_train / 255.0
test_data = test_data / 255.0
|
Digit Recognizer
|
11,813,917 |
class BertForTwitter(nn.Module):
def __init__(self):
super(BertForTwitter, self ).__init__()
self.bert = model
self.cls = nn.Linear(in_features=768, out_features=9)
nn.init.normal_(self.cls.weight, std=0.02)
nn.init.normal_(self.cls.bias, 0)
def forward(self, input_ids):
result = self.bert(input_ids)
vec_0 = result[0]
vec_0 = vec_0[:, 0, :]
vec_0 = vec_0.view(-1, 768)
output = self.cls(vec_0)
return output<train_on_grid>
|
Y_train=to_categorical(Y_train, num_classes=10)
print(f"Label size {Y_train.shape}" )
|
Digit Recognizer
|
11,813,917 |
net = BertForTwitter()
net.train()
print('Network setup complete')
|
X_train, X_val, Y_train, Y_val=train_test_split(X_train, Y_train, test_size=0.3, random_state=42)
|
Digit Recognizer
|
11,813,917 |
for param in net.parameters() :
param.requires_grad = False
for param in net.bert.encoder.layer[-1].parameters() :
param.requires_grad = True
for param in net.cls.parameters() :
param.requires_grad = True<choose_model_class>
|
model= Sequential()
model.add(Conv2D(input_shape=(28,28,1), filters=32, kernel_size=(5,5), padding='Same', activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.10))
model.add(Conv2D(filters=32, kernel_size=(5,5), padding='Same', activation='relu'))
model.add(BatchNormalization())
model.add(MaxPool2D(pool_size=(2,2)))
model.add(Dropout(0.10))
model.add(Conv2D(filters=64, kernel_size=(3,3), padding='Same', activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(filters=128, kernel_size =(3,3), activation="relu"))
model.add(BatchNormalization())
model.add(Dropout(0.2))
model.add(Conv2D(filters=128, kernel_size=(3,3), padding='Same', activation='relu'))
model.add(BatchNormalization())
model.add(MaxPool2D(pool_size=(2,2), strides=(2,2)))
model.add(Dropout(0.2))
model.add(Conv2D(filters=256, kernel_size =(3,3), activation="relu"))
model.add(BatchNormalization())
model.add(Conv2D(filters=512, kernel_size =(3,3), activation="relu"))
model.add(BatchNormalization())
model.add(MaxPool2D(pool_size=(2,2)))
model.add(Dropout(0.3))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.25))
model.add(Dense(10, activation='softmax'))
model.summary()
|
Digit Recognizer
|
11,813,917 |
optimizer = optim.Adam([
{'params': net.bert.encoder.layer[-1].parameters() , 'lr': 5e-5},
{'params': net.cls.parameters() , 'lr': 1e-4}
])
criterion = nn.CrossEntropyLoss()
<train_model>
|
optimizer=Adam(lr=0.001,decay=0.0 )
|
Digit Recognizer
|
11,813,917 |
def train_model(net, dataloaders_dict, criterion, optimizer, num_epochs):
max_acc = 0
Stop_flag = False
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print("使用デバイス:", device)
print('-----start-------')
net.to(device)
torch.backends.cudnn.benchmark = True
batch_size = dataloaders_dict["train"].batch_size
for epoch in range(num_epochs):
for phase in ['train', 'val']:
if phase == 'train':
net.train()
else:
net.eval()
epoch_loss = 0.0
epoch_corrects = 0
iteration = 1
for batch in(dataloaders_dict[phase]):
inputs = batch.Text[0].to(device)
labels = batch.Label.to(device)
optimizer.zero_grad()
with torch.set_grad_enabled(phase == 'train'):
outputs = net(inputs)
loss = criterion(outputs, labels)
_, preds = torch.max(outputs, 1)
if phase == 'train':
loss.backward()
optimizer.step()
if(iteration % 50 == 0):
acc =(torch.sum(preds == labels.data)
).double() /batch_size
print('Iteration {} || Loss: {:.4f} || accuracy for this iteration: {}'.format(
iteration, loss.item(), acc))
iteration += 1
epoch_loss += loss.item() * batch_size
epoch_corrects += torch.sum(preds == labels.data)
epoch_loss = epoch_loss / len(dataloaders_dict[phase].dataset)
epoch_acc = epoch_corrects.double(
)/ len(dataloaders_dict[phase].dataset)
print('Epoch {}/{} | {:^5} | Loss: {:.4f} Acc: {:.4f}'.format(epoch+1, num_epochs,
phase, epoch_loss, epoch_acc))
if phase == "val":
if epoch_acc < max_acc:
count += 1
if count >= 3:
Stop_flag = True
else:
count = 0
max_acc = epoch_acc
print(count)
if Stop_flag:
break
return net<train_model>
|
model.compile(optimizer= optimizer, loss='categorical_crossentropy', metrics=['accuracy'] )
|
Digit Recognizer
|
11,813,917 |
num_epochs = 100
net_trained = train_model(net, dataloaders_dict,
criterion, optimizer, num_epochs=num_epochs )<load_from_csv>
|
learning_rate_reducing = ReduceLROnPlateau(monitor='val_accuracy',
patience=5,
verbose=1,
factor=0.5,
min_lr=0.0001 )
|
Digit Recognizer
|
11,813,917 |
sample_submission = pd.read_csv(".. /input/nlp-getting-started/sample_submission.csv")
sample_submission["target"] = ans_list
sample_submission<save_to_csv>
|
epochs = 30
batch_size = 32
|
Digit Recognizer
|
11,813,917 |
sample_submission.to_csv("submission_plus.csv", index=False )<install_modules>
|
imagegen=ImageDataGenerator(
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=10,
zoom_range=0.1,
width_shift_range=0.1,
height_shift_range=0.1,
horizontal_flip=False,
vertical_flip=False)
imagegen.fit(X_train )
|
Digit Recognizer
|
11,813,917 |
!pip install nlpaug<load_pretrained>
|
history=model.fit_generator(imagegen.flow(X_train, Y_train,batch_size=batch_size),
epochs=epochs,
validation_data=(X_val, Y_val),
verbose=1,
steps_per_epoch=X_train.shape[0] // batch_size,
callbacks=[learning_rate_reducing])
|
Digit Recognizer
|
11,813,917 |
!kaggle datasets download -d rtatman/glove-global-vectors-for-word-representation<install_modules>
|
model.save("MNIST_CNN_Model.h5")
model.save_weights("MNIST_CNN_Model_weights.h5" )
|
Digit Recognizer
|
11,813,917 |
!pip install nltk
!pip install gensim<load_pretrained>
|
results = model.predict(test_data)
results = np.argmax(results,axis = 1)
results = pd.Series(results,name="Label" )
|
Digit Recognizer
|
11,813,917 |
<load_from_url><EOS>
|
submission = pd.concat([pd.Series(range(1,28001),name = "ImageId"),results],axis = 1)
submission.to_csv("sample_submission.csv",index=False)
submission.head()
|
Digit Recognizer
|
12,714,797 |
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<set_options>
|
np.random.seed(2)
sns.set(style='white', context='notebook', palette='deep' )
|
Digit Recognizer
|
12,714,797 |
plt.style.use('ggplot')
stop=set(stopwords.words('english'))
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
plt.style.use('ggplot')
stop=set(stopwords.words('english'))
warnings.filterwarnings("ignore")
nltk.download('brown', quiet=True)
nltk.download('universal_tagset', quiet=True)
SEED = 1337<load_from_csv>
|
train = pd.read_csv(".. /input/digit-recognizer/train.csv")
test = pd.read_csv(".. /input/digit-recognizer/test.csv" )
|
Digit Recognizer
|
12,714,797 |
df_train = pd.read_csv('../input/nlp-getting-started/train.csv', dtype={'id': np.int16, 'target': np.int8})
df_test = pd.read_csv('../input/nlp-getting-started/test.csv', dtype={'id': np.int16})
print('Training Set Shape = {}'.format(df_train.shape))
print('Training Set Memory Usage = {:.2f} MB'.format(df_train.memory_usage().sum() / 1024**2))
print('Test Set Shape = {}'.format(df_test.shape))
print('Test Set Memory Usage = {:.2f} MB'.format(df_test.memory_usage().sum() / 1024**2))<count_unique_values>
|
Y_train = train["label"]
X_train = train.drop(labels = ["label"],axis = 1 )
|
Digit Recognizer
|
12,714,797 |
print(f'Number of unique values in keyword = {df_train["keyword"].nunique()} (Training) - {df_test["keyword"].nunique()} (Test)')
print(f'Number of unique values in location = {df_train["location"].nunique()} (Training) - {df_test["location"].nunique()} (Test)')<string_transform>
|
X_train = X_train / 255.0
test = test / 255.0
|
Digit Recognizer
|
12,714,797 |
def create_corpus(target):
corpus=[]
for x in df_train[df_train['target']==target]['text'].str.split() :
for i in x:
corpus.append(i)
return corpus<count_values>
|
Y_train = to_categorical(Y_train, num_classes = 10 )
|
Digit Recognizer
|
12,714,797 |
counter=Counter(corpus)
most=counter.most_common()
x=[]
y=[]
for word,count in most[:40]:
if(word not in stop):
x.append(word)
y.append(count )<feature_engineering>
|
random_seed = 2
|
Digit Recognizer
|
12,714,797 |
def get_top_tweet_bigrams(corpus, n=None):
vec = CountVectorizer(ngram_range=(2, 2)).fit(corpus)
bag_of_words = vec.transform(corpus)
sum_words = bag_of_words.sum(axis=0)
words_freq = [(word, sum_words[0, idx])for word, idx in vec.vocabulary_.items() ]
words_freq =sorted(words_freq, key = lambda x: x[1], reverse=True)
return words_freq[:n]<feature_engineering>
|
X_train, X_val, Y_train, Y_val = train_test_split(X_train, Y_train, test_size = 0.15, random_state=random_seed )
|
Digit Recognizer
|
12,714,797 |
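# Simple per-tweet meta-features: word, unique-word, stop-word, URL, character, punctuation, hashtag, and mention counts plus mean word length.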
df_train['word_count'] = df_train['text'].apply(lambda x: len(str(x ).split()))
df_test['word_count'] = df_test['text'].apply(lambda x: len(str(x ).split()))
df_train['unique_word_count'] = df_train['text'].apply(lambda x: len(set(str(x ).split())))
df_test['unique_word_count'] = df_test['text'].apply(lambda x: len(set(str(x ).split())))
df_train['stop_word_count'] = df_train['text'].apply(lambda x: len([w for w in str(x ).lower().split() if w in STOPWORDS]))
df_test['stop_word_count'] = df_test['text'].apply(lambda x: len([w for w in str(x ).lower().split() if w in STOPWORDS]))
df_train['url_count'] = df_train['text'].apply(lambda x: len([w for w in str(x ).lower().split() if 'http' in w or 'https' in w]))
df_test['url_count'] = df_test['text'].apply(lambda x: len([w for w in str(x ).lower().split() if 'http' in w or 'https' in w]))
df_train['mean_word_length'] = df_train['text'].apply(lambda x: np.mean([len(w)for w in str(x ).split() ]))
df_test['mean_word_length'] = df_test['text'].apply(lambda x: np.mean([len(w)for w in str(x ).split() ]))
df_train['char_count'] = df_train['text'].apply(lambda x: len(str(x)))
df_test['char_count'] = df_test['text'].apply(lambda x: len(str(x)))
df_train['punctuation_count'] = df_train['text'].apply(lambda x: len([c for c in str(x)if c in string.punctuation]))
df_test['punctuation_count'] = df_test['text'].apply(lambda x: len([c for c in str(x)if c in string.punctuation]))
df_train['hashtag_count'] = df_train['text'].apply(lambda x: len([c for c in str(x) if c == '#']))
df_test['hashtag_count'] = df_test['text'].apply(lambda x: len([c for c in str(x) if c == '#']))
df_train['mention_count'] = df_train['text'].apply(lambda x: len([c for c in str(x)if c == '@']))
df_test['mention_count'] = df_test['text'].apply(lambda x: len([c for c in str(x)if c == '@']))<categorify>
|
model = Sequential()
DefaultConv2D = partial(keras.layers.Conv2D, kernel_size =(3,3),padding = 'Same', activation ='relu')
model.add(Conv2D(filters = 32, kernel_size =(5,5),padding = 'Same', activation ='relu', input_shape =(28,28,1)))
model.add(Conv2D(filters = 32, kernel_size =(5,5),padding = 'Same', activation ='relu'))
model.add(MaxPool2D(pool_size=(2,2)))
model.add(Dropout(0.2))
model.add(DefaultConv2D(filters = 64))
model.add(DefaultConv2D(filters = 64))
model.add(MaxPool2D(pool_size=(2,2), strides=(2,2)))
model.add(Dropout(0.4))
model.add(Flatten())
model.add(Dense(128, activation = "relu"))
model.add(Dropout(0.5))
model.add(Dense(10, activation = "softmax"))
|
Digit Recognizer
|