kernel_id (int64, 24.2k–23.3M) | prompt (string, lengths 8–1.85M) | completion (string, lengths 1–182k) | comp_name (string, lengths 5–57)
---|---|---|---|
9,988,485 |
params = {
"C": [1e-3, 1e-2, 1e-1, 1],
"max_iter": [30000]
}
classifier = LinearSVC
classifier_df = trainClassifier(X_train, y_train, "LinearSVC", classifier, params, accuracy_score)
results = results.append(classifier_df )<train_model>
|
sns.set(style = 'white' , context = 'notebook' , palette = 'deep')
rcParams['figure.figsize'] = 10,6
|
Digit Recognizer
|
9,988,485 |
params = {
"kernel" : ["rbf"],
"C": [1e-4, 1e-3, 1e-2, 1e-1, 1, 10],
"gamma": [1e-3, 1e-2, 1e-1, 1, 10]
}
classifier = SVC
classifier_df = trainClassifier(X_train, y_train, "SVC", classifier, params, accuracy_score)
results = results.append(classifier_df )<choose_model_class>
|
data_path = ".. /input/digit-recognizer/"
train = pd.read_csv(join(data_path,"train.csv"))
test = pd.read_csv(join(data_path,"test.csv"))
|
Digit Recognizer
|
9,988,485 |
params = {
"C": [1e-3, 1e-2, 1e-1, 1, 10]
}
classifier = LogisticRegression
classifier_df = trainClassifier(X_train, y_train, "LogisticRegression", classifier, params, accuracy_score)
results = results.append(classifier_df )<choose_model_class>
|
def process_data(data):
data = data/255.0
data = data.values.reshape(-1,28,28,1)
return data
X_train = process_data(X_train)
Y_train = to_categorical(Y_train,num_classes = 10 )
|
Digit Recognizer
|
9,988,485 |
params = {"max_depth": [3, 5, 7, 10, None],
"n_estimators":[3, 5,10, 25, 50],
"max_features": [1, 2, "auto"]}
classifier = RandomForestClassifier
classifier_df = trainClassifier(X_train, y_train, "RandomForests", classifier, params, accuracy_score)
results = results.append(classifier_df )<prepare_output>
|
num = 5
model = [0]*num
for i in range(num):
model[i] = Sequential()
model[i].add(Conv2D(filters = 32, kernel_size =(5,5), padding = "same", activation = "relu", input_shape =(28,28,1)))
model[i].add(BatchNormalization())
model[i].add(Conv2D(filters = 32, kernel_size =(5,5), padding = "same", activation = "relu"))
model[i].add(BatchNormalization())
model[i].add(MaxPool2D(pool_size =(2,2)))
model[i].add(Dropout(0.25))
model[i].add(Conv2D(filters = 64, kernel_size =(3,3), padding = "same", activation = "relu"))
model[i].add(BatchNormalization())
model[i].add(Conv2D(filters = 64, kernel_size =(3,3), padding = "same", activation = "relu"))
model[i].add(BatchNormalization())
model[i].add(MaxPool2D(pool_size=(2,2), strides=(2,2)))
model[i].add(Dropout(0.25))
model[i].add(Conv2D(filters = 64, kernel_size =(3,3), padding = "same", activation = "relu"))
model[i].add(BatchNormalization())
model[i].add(Dropout(0.25))
model[i].add(Flatten())
model[i].add(Dense(1024, activation = "relu"))
model[i].add(BatchNormalization())
model[i].add(Dropout(0.25))
model[i].add(Dense(10, activation = "softmax"))
model[i].compile(optimizer = "adam", loss = "categorical_crossentropy", metrics = ["accuracy"])
model[0].summary()
|
Digit Recognizer
|
9,988,485 |
results = results.set_index("model_name")
results<save_to_csv>
|
datagen = ImageDataGenerator(featurewise_center = False,
samplewise_center = False,
featurewise_std_normalization = False,
samplewise_std_normalization = False,
zca_whitening = False,
rotation_range = 10,
zoom_range = 0.1,
width_shift_range = 0.1,
height_shift_range = 0.1,
horizontal_flip = False,
vertical_flip = False
)
|
Digit Recognizer
|
9,988,485 |
classifier = RandomForestClassifier
best_params = results.loc["RandomForests"]["parameters"]
submission_model = classifier(**best_params)
submission_model.fit(X_train, y_train)
X_test = pd.get_dummies(test_data[features])
X_test = imputer.transform(X_test)
X_test = scaler.transform(X_test)
y_hat = submission_model.predict(X_test)
output = pd.DataFrame({'PassengerId': test_data.PassengerId, 'Survived': y_hat})
output.to_csv('submission.csv', index=False)
print("Your submission was successfully saved!")
<load_from_csv>
|
epochs = 32
batch_size = 256
history = [0]*num
for i in range(num):
random_seed = i
X_train_, X_val_, Y_train_, Y_val_ = train_test_split(X_train, Y_train, test_size = 0.2, random_state=random_seed)
learning_rate_reduction = ReduceLROnPlateau(monitor='val_accuracy',
patience=3,
verbose=1,
factor=0.5,
min_lr=0.00001)
early_stopping = EarlyStopping(monitor='val_accuracy',
min_delta=0,
patience=10,
verbose =1,
mode='auto')
history[i] = model[i].fit_generator(datagen.flow(X_train_, Y_train_, batch_size = batch_size), epochs = epochs, validation_data =(X_val_,Y_val_), verbose = 1, steps_per_epoch = X_train.shape[0]//batch_size, callbacks=[learning_rate_reduction,early_stopping])
print("CNN {0:d}: Epochs={1:d}, Train accuracy={2:.5f}, Validation accuracy={3:.5f}".format(i+1,epochs,max(history[i].history['accuracy']),max(history[i].history['val_accuracy'])) )
|
Digit Recognizer
|
9,988,485 |
train_data = pd.read_csv("/kaggle/input/titanic/train.csv")
train_data.head()<load_from_csv>
|
def predict(X_data):
results = np.zeros(( X_data.shape[0],10))
for j in range(num):
results = results + model[j].predict(X_data)
return results
|
Digit Recognizer
|
9,988,485 |
test_data = pd.read_csv("/kaggle/input/titanic/test.csv")
test_data.head()<create_dataframe>
|
test = process_data(test)
results = predict(test)
results = np.argmax(results,axis = 1 )
|
Digit Recognizer
|
9,988,485 |
<define_variables><EOS>
|
results = pd.Series(results,name="Label")
submission = pd.concat([pd.Series(range(1,28001),name = "ImageId"),results],axis = 1)
submission.to_csv("keras_cnn_mnist_aug_ensemble2.csv",index=False )
|
Digit Recognizer
|
10,514,367 |
<SOS> metric: categorizationaccuracy Kaggle data source: digit-recognizer<choose_model_class>
|
mnist_test = pd.read_csv("../input/mnist-in-csv/mnist_test.csv")
mnist_train = pd.read_csv("../input/mnist-in-csv/mnist_train.csv" )
|
Digit Recognizer
|
10,514,367 |
num_pipeline = Pipeline([
("imputer", SimpleImputer(strategy = "median")) ,
("scaler", StandardScaler()),
] )<categorify>
|
sample_submission = pd.read_csv(".. /input/digit-recognizer/sample_submission.csv")
test = pd.read_csv(".. /input/digit-recognizer/test.csv")
train = pd.read_csv(".. /input/digit-recognizer/train.csv" )
|
Digit Recognizer
|
10,514,367 |
cat_pipeline = Pipeline([
("imputer", SimpleImputer(strategy = "most_frequent")) ,
("cat_encoder", OneHotEncoder(sparse = False)) ,
] )<feature_engineering>
|
test['dataset'] = 'test'
|
Digit Recognizer
|
10,514,367 |
preprocess_pipeline = ColumnTransformer([
("num", num_pipeline, num_attribs),
("cat", cat_pipeline, cat_attribs),
] )<normalization>
|
train['dataset'] = 'train'
|
Digit Recognizer
|
10,514,367 |
X_train = preprocess_pipeline.fit_transform(train_data[num_attribs + cat_attribs])
X_train<prepare_x_and_y>
|
dataset = pd.concat([train.drop('label', axis=1), test] ).reset_index()
|
Digit Recognizer
|
10,514,367 |
y_train = train_data["Survived"]<train_model>
|
mnist = pd.concat([mnist_train, mnist_test] ).reset_index(drop=True)
labels = mnist['label'].values
mnist.drop('label', axis=1, inplace=True)
mnist.columns = cols
|
Digit Recognizer
|
10,514,367 |
forest_clf = RandomForestClassifier(n_estimators = 100, random_state = 42)
forest_clf.fit(X_train, y_train )<predict_on_test>
|
idx_mnist = mnist.sort_values(by=list(mnist.columns)).index
dataset_from = dataset.sort_values(by=list(mnist.columns)) ['dataset'].values
original_idx = dataset.sort_values(by=list(mnist.columns)) ['index'].values
|
Digit Recognizer
|
10,514,367 |
X_test = preprocess_pipeline.fit_transform(test_data[num_attribs + cat_attribs])
y_pred = forest_clf.predict(X_test )<compute_train_metric>
|
for i in range(len(idx_mnist)) :
if dataset_from[i] == 'test':
sample_submission.loc[original_idx[i], 'Label'] = labels[idx_mnist[i]]
|
Digit Recognizer
|
10,514,367 |
forest_scores = cross_val_score(forest_clf, X_train, y_train, cv = 10)
forest_scores.mean()<compute_train_metric>
|
sample_submission.to_csv('submission.csv', index=False )
|
Digit Recognizer
|
9,959,337 |
svm_clf = SVC(gamma = "auto")
svm_scores = cross_val_score(svm_clf, X_train, y_train, cv = 10)
svm_scores.mean()<predict_on_test>
|
%matplotlib inline
|
Digit Recognizer
|
9,959,337 |
svm_clf.fit(X_train, y_train)
y_pred = svm_clf.predict(X_test )<save_to_csv>
|
train = pd.read_csv('../input/digit-recognizer/train.csv')
test = pd.read_csv('../input/digit-recognizer/test.csv' )
|
Digit Recognizer
|
9,959,337 |
output = pd.DataFrame({'PassengerId': test_data.index, 'Survived': y_pred})
output.to_csv('submission.csv', index = False)
print("Your submission was successfully saved!" )<install_modules>
|
X = train.drop('label', axis = 1)
y = train['label']
del train
|
Digit Recognizer
|
9,959,337 |
!pip install tensorflow_hub
!pip install bert-for-tf2
!pip install tensorflow
!pip install sentencepiece
!pip install transformers<feature_engineering>
|
X = X / 255.0
test = test / 255.0
|
Digit Recognizer
|
9,959,337 |
BertTokenizer = bert.bert_tokenization.FullTokenizer
bert_layer = hub.KerasLayer("https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/1", trainable=False)
vocabulary_file = bert_layer.resolved_object.vocab_file.asset_path.numpy()
to_lower_case = bert_layer.resolved_object.do_lower_case.numpy()
tokenizer = BertTokenizer(vocabulary_file, to_lower_case )<string_transform>
|
labels = pd.get_dummies(y)
labels = labels.values
|
Digit Recognizer
|
9,959,337 |
stopwrds = set(stopwords.words('english'))
TAG_RE = re.compile(r'<[^>]+>')
def remove_tags(text):
return TAG_RE.sub('', text)
def preprocess_text(sen):
sentence = emoji.demojize(sen)
sentence = re.sub(r"http:\S+",'',sentence)
sentence = ' '.join([x for x in nltk.word_tokenize(sentence)if x not in stopwrds])
sentence = remove_tags(sentence)
sentence = re.sub(r'['+string.punctuation+']','',sentence)
sentence = re.sub(r"\s+[a-zA-Z]\s+", ' ', sentence)
sentence = re.sub(r'\s+', ' ', sentence)
return sentence.strip()<categorify>
|
X_train, X_val, y_train, y_val = train_test_split(X, labels, test_size = 0.1,random_state=42 )
|
Digit Recognizer
|
9,959,337 |
def tokenize_bert(data):
tokenized = data.apply(( lambda x: tokenizer.convert_tokens_to_ids(['[CLS]'])+ tokenizer.convert_tokens_to_ids(tokenizer.tokenize(x))))
return tokenized
def pad_mask(data_tokenized,max_len):
padded = tf.keras.preprocessing.sequence.pad_sequences(data_tokenized, maxlen=max_len, dtype='int32', padding='post',value=0.0)
masked = np.where(padded!=0,1,0)
return padded, masked
def get_max_len(data):
max_len = 0
for val in data:
tmp = len(tokenizer.tokenize(val))
if tmp > max_len:
max_len = tmp
return max_len<categorify>
|
from keras.preprocessing.image import ImageDataGenerator
|
Digit Recognizer
|
9,959,337 |
def encode(df):
tweet = tf.ragged.constant([tokenizer.convert_tokens_to_ids(tokenizer.tokenize(s)) for s in df])
cls1 = [tokenizer.convert_tokens_to_ids(['[CLS]'])]*tweet.shape[0]
input_word_ids = tf.concat([cls1, tweet], axis=-1)
input_mask = tf.ones_like(input_word_ids ).to_tensor()
type_cls = tf.zeros_like(cls1)
type_tweets = tf.zeros_like(tweet)
input_type_ids = tf.concat([type_cls, type_tweets], axis=-1 ).to_tensor()
inputs = {
'input_ids': input_word_ids.to_tensor() ,
'input_mask': input_mask,
'input_type_ids': input_type_ids}
return inputs<load_from_csv>
|
datagen = ImageDataGenerator(shear_range= 0.2, zoom_range= 0.2)
datagen.fit(X_train )
|
Digit Recognizer
|
9,959,337 |
finetune_train = pd.read_csv('/kaggle/input/twitter-sentiment-analysis-hatred-speech/train.csv',encoding="utf-8")
finetune_test = pd.read_csv('/kaggle/input/twitter-sentiment-analysis-hatred-speech/test.csv',encoding="utf-8")
finetune_train["hashtags"]=finetune_train["tweet"].apply(lambda x:re.findall(r"
finetune_test["hashtags"]=finetune_test["tweet"].apply(lambda x:re.findall(r"
finetune_train["hashtags"]=finetune_train["hashtags"].apply(lambda x: ' '.join(x))
finetune_test["hashtags"]=finetune_test["hashtags"].apply(lambda x: ' '.join(x))
finetune_train = finetune_train.rename(columns={'label':'target'})
finetune_test = finetune_test.rename(columns={'label':'target'})
finetune_train.info()<load_from_csv>
|
from keras.layers import Dropout, Conv2D, MaxPool2D, Dense, Flatten
from keras.models import Sequential
|
Digit Recognizer
|
9,959,337 |
train = pd.read_csv("/kaggle/input/nlp-getting-started/train.csv",encoding="utf-8")
test = pd.read_csv("/kaggle/input/nlp-getting-started/test.csv",encoding="utf-8")
train["hashtags"]=train["text"].apply(lambda x:re.findall(r"
test["hashtags"]=test["text"].apply(lambda x:re.findall(r"
train["hashtags"]=train["hashtags"].apply(lambda x: ' '.join(x))
test["hashtags"]=test["hashtags"].apply(lambda x: ' '.join(x))
train.info()<feature_engineering>
|
model = Sequential()
model.add(Conv2D(filters = 64, kernel_size = 5,activation ='relu', input_shape =(28,28,1)))
model.add(Conv2D(filters = 64, kernel_size = 5,activation ='relu'))
model.add(MaxPool2D(pool_size=(2,2)))
model.add(Dropout(0.2))
model.add(Conv2D(filters = 32, kernel_size = 3,activation ='relu'))
model.add(Conv2D(filters = 32, kernel_size = 3,activation ='relu'))
model.add(MaxPool2D(pool_size=(2,2)))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(256, activation = "relu"))
model.add(Dropout(0.2))
model.add(Dense(10, activation = "softmax"))
|
Digit Recognizer
|
9,959,337 |
finetune_train["clean"] = finetune_train["tweet"].apply(lambda x: preprocess_text(x.lower()))
finetune_test["clean"] = finetune_test["tweet"].apply(lambda x: preprocess_text(x.lower()))
print("length of finetune train set:",len(finetune_train))
print("length of finetune test set:",len(finetune_test))<data_type_conversions>
|
model.compile(optimizer= 'adam', loss= 'categorical_crossentropy', metrics= ['accuracy'] )
|
Digit Recognizer
|
9,959,337 |
train["clean"] = train["text"].apply(lambda x: preprocess_text(x.lower()))
test["clean"] = test["text"].apply(lambda x: preprocess_text(x.lower()))
print("length of train set:",len(train))
print("length of test set:",len(test))<string_transform>
|
performance = model.fit_generator(datagen.flow(X_train,y_train, batch_size=64), epochs = 30,
validation_data =(X_val,y_val), verbose = 3 )
|
Digit Recognizer
|
9,959,337 |
def extract_features(df,test_df):
txt=' '.join(df[df["target"]==1]["clean"])
disaster_unigram=nltk.FreqDist(nltk.word_tokenize(txt))
txt=' '.join(df[df["target"]==0]["clean"])
nondisaster_unigram=nltk.FreqDist(nltk.word_tokenize(txt))
txt=' '.join(df[df["target"]==1]["clean"])
disaster_bigram=nltk.FreqDist(nltk.bigrams(nltk.word_tokenize(txt)))
txt=' '.join(df[df["target"]==0]["clean"])
nondisaster_bigram=nltk.FreqDist(nltk.bigrams(nltk.word_tokenize(txt)))
txt=' '.join(df[df["target"]==1]["hashtags"])
disaster_unigram_hash=nltk.FreqDist(nltk.word_tokenize(txt))
txt=' '.join(df[df["target"]==0]["hashtags"])
nondisaster_unigram_hash=nltk.FreqDist(nltk.word_tokenize(txt))
df["unigram_disas"]=df["clean"].apply(lambda x: sum([disaster_unigram.get(wrd)for wrd in nltk.word_tokenize(x)if disaster_unigram.get(wrd)!=None])/len(disaster_unigram))
test_df["unigram_disas"]=test_df["clean"].apply(lambda x: sum([disaster_unigram.get(wrd)for wrd in nltk.word_tokenize(x)if disaster_unigram.get(wrd)!=None])/len(disaster_unigram))
df["unigram_nondisas"]=df["clean"].apply(lambda x: sum([nondisaster_unigram.get(wrd)for wrd in nltk.word_tokenize(x)if nondisaster_unigram.get(wrd)!=None])/len(nondisaster_unigram))
test_df["unigram_nondisas"]=test_df["clean"].apply(lambda x: sum([nondisaster_unigram.get(wrd)for wrd in nltk.word_tokenize(x)if nondisaster_unigram.get(wrd)!=None])/len(nondisaster_unigram))
df["unigram_disas_hash"]=df["hashtags"].apply(lambda x: sum([disaster_unigram_hash.get(wrd)for wrd in nltk.word_tokenize(x)if disaster_unigram_hash.get(wrd)!=None])/len(disaster_unigram_hash))
test_df["unigram_disas_hash"]=test_df["hashtags"].apply(lambda x: sum([disaster_unigram_hash.get(wrd)for wrd in nltk.word_tokenize(x)if disaster_unigram_hash.get(wrd)!=None])/len(disaster_unigram_hash))
df["unigram_nondisas_hash"]=df["hashtags"].apply(lambda x: sum([nondisaster_unigram_hash.get(wrd)for wrd in nltk.word_tokenize(x)if nondisaster_unigram_hash.get(wrd)!=None])/len(nondisaster_unigram_hash))
test_df["unigram_nondisas_hash"]=test_df["hashtags"].apply(lambda x: sum([nondisaster_unigram_hash.get(wrd)for wrd in nltk.word_tokenize(x)if nondisaster_unigram_hash.get(wrd)!=None])/len(nondisaster_unigram_hash))
df["bigram_disas"]=df["clean"].apply(lambda x: sum([disaster_bigram.get(wrd)for wrd in nltk.bigrams(nltk.word_tokenize(x)) if disaster_bigram.get(wrd)!=None])/len(disaster_bigram)if x.strip() !='' else 0)
test_df["bigram_disas"]=test_df["clean"].apply(lambda x: sum([disaster_bigram.get(wrd)for wrd in nltk.bigrams(nltk.word_tokenize(x)) if disaster_bigram.get(wrd)!=None])/len(disaster_bigram)if x.strip() !='' else 0)
df["bigram_nondisas"]=df["clean"].apply(lambda x: sum([nondisaster_bigram.get(wrd)for wrd in nltk.bigrams(nltk.word_tokenize(x)) if nondisaster_bigram.get(wrd)!=None])/len(nondisaster_bigram)if x.strip() !='' else 0)
test_df["bigram_nondisas"]=test_df["clean"].apply(lambda x: sum([nondisaster_bigram.get(wrd)for wrd in nltk.bigrams(nltk.word_tokenize(x)) if nondisaster_bigram.get(wrd)!=None])/len(nondisaster_bigram)if x.strip() !='' else 0)
return df,test_df<feature_engineering>
|
from sklearn.metrics import confusion_matrix, classification_report
|
Digit Recognizer
|
9,959,337 |
finetune_train,finetune_test = extract_features(finetune_train,finetune_test)
finetune_train.head(2 )<feature_engineering>
|
pred = model.predict(X_val )
|
Digit Recognizer
|
9,959,337 |
train,test = extract_features(train,test)
train.head(2 )<categorify>
|
pred_classes = np.argmax(pred,axis = 1)
y_true = np.argmax(y_val,axis = 1 )
|
Digit Recognizer
|
9,959,337 |
def build_bert(max_len):
input_ids = keras.layers.Input(shape=(max_len,), name="input_ids", dtype=tf.int32)
input_typ = keras.layers.Input(shape=(max_len,), name="input_type_ids", dtype=tf.int32)
input_mask = keras.layers.Input(shape=(max_len,), name="input_mask", dtype=tf.int32)
input_features = keras.layers.Input(shape=(6,), name="input_features", dtype=tf.float32)
bert_inputs = {"input_ids": input_ids, "input_mask": input_mask,'input_type_ids':input_typ}
bert_model = hub.KerasLayer("https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/1", trainable=True,name='keraslayer')
pooled_output, _ = bert_model([input_ids, input_mask,input_typ])
merge_two = concatenate([pooled_output, input_features])
out = keras.layers.Dense(1, activation='sigmoid' )(merge_two)
model = keras.Model(inputs=[bert_inputs,input_features],outputs=out)
return model<categorify>
|
print(classification_report(y_true, pred_classes))
|
Digit Recognizer
|
9,959,337 |
all_df = pd.concat([finetune_train,finetune_test])
max_len = get_max_len(all_df["clean"])+ 1
encode_ds_all = encode(all_df["clean"] )<categorify>
|
test_preds = model.predict(test)
test_preds = np.argmax(test_preds,axis = 1)
test_preds = pd.Series(test_preds,name="Label" )
|
Digit Recognizer
|
9,959,337 |
encode_ds_tr = {'input_ids':encode_ds_all["input_ids"][0:31962,:],
'input_mask':encode_ds_all["input_mask"][0:31962,:],
'input_type_ids':encode_ds_all["input_type_ids"][0:31962,:]}<categorify>
|
submission = pd.concat([pd.Series(range(1,28001),name = "ImageId"),test_preds],axis = 1)
submission.to_csv("cnn_mnist.csv",index=False )
|
Digit Recognizer
|
8,773,240 |
features = ['unigram_disas','unigram_nondisas','unigram_disas_hash','unigram_nondisas_hash','bigram_disas','bigram_nondisas']
encode_features_tr = all_df[features].iloc[0:31962,:]<train_on_grid>
|
from keras.utils.np_utils import to_categorical
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, BatchNormalization
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import LearningRateScheduler
|
Digit Recognizer
|
8,773,240 |
y_enc = finetune_train["target"]
loss = tf.keras.losses.BinaryCrossentropy(from_logits=False)
optimizer = keras.optimizers.Adam(lr=1e-3,decay=1e-3/64)
model.compile(optimizer=optimizer, loss=[loss, loss],metrics=["accuracy"])
checkpoint = tf.keras.callbacks.ModelCheckpoint('model.h5', monitor='val_accuracy', save_best_only=True)
earlystopping = tf.keras.callbacks.EarlyStopping(monitor='val_accuracy')
fine_history = model.fit([encode_ds_tr,encode_features_tr], y_enc, validation_split=0.34,shuffle=True,epochs=2,batch_size=64,verbose=1 )<categorify>
|
train_file = ".. /input/digit-recognizer/train.csv"
test_file = ".. /input/digit-recognizer/test.csv"
sample_submission = pd.read_csv('.. /input/digit-recognizer/sample_submission.csv' )
|
Digit Recognizer
|
8,773,240 |
all_df = pd.concat([train,test])
max_len = get_max_len(train["clean"])+ 1
encode_ds_all = encode(all_df["clean"] )<categorify>
|
raw_data = np.loadtxt(train_file, skiprows=1, dtype='int', delimiter=',')
x_train, x_val, y_train, y_val = train_test_split(
raw_data[:,1:], raw_data[:,0], test_size=0.1 )
|
Digit Recognizer
|
8,773,240 |
encode_ds_tr = {'input_ids':encode_ds_all["input_ids"][0:7613,:],
'input_mask':encode_ds_all["input_mask"][0:7613,:],
'input_type_ids':encode_ds_all["input_type_ids"][0:7613,:]}
encode_ds_tr<categorify>
|
x_train = x_train.astype("float32")/255.
x_val = x_val.astype("float32")/255.
|
Digit Recognizer
|
8,773,240 |
encode_features_tr = all_df[features].iloc[0:7613,:]<train_on_grid>
|
y_train = to_categorical(y_train)
y_val = to_categorical(y_val)
print(y_train[0] )
|
Digit Recognizer
|
8,773,240 |
y_enc = train["target"]
loss = tf.keras.losses.BinaryCrossentropy(from_logits=False)
optimizer = keras.optimizers.Adam(lr=1e-5,decay=1e-5/64)
model.compile(optimizer=optimizer, loss=[loss, loss],metrics=["accuracy"])
checkpoint = tf.keras.callbacks.ModelCheckpoint('model.h5', monitor='val_accuracy', save_best_only=True)
earlystopping = tf.keras.callbacks.EarlyStopping(monitor='val_accuracy')
fine_history = model.fit([encode_ds_tr,encode_features_tr], y_enc, validation_split=0.34,shuffle=True,epochs=3,batch_size=64,verbose=1 )<predict_on_test>
|
model = Sequential()
model.add(Conv2D(filters = 16, kernel_size =(3, 3), activation='relu',
input_shape =(28, 28, 1)))
model.add(BatchNormalization())
model.add(Conv2D(filters = 16, kernel_size =(3, 3), activation='relu'))
model.add(BatchNormalization())
model.add(MaxPool2D(strides=(2,2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters = 32, kernel_size =(3, 3), activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(filters = 32, kernel_size =(3, 3), activation='relu'))
model.add(BatchNormalization())
model.add(MaxPool2D(strides=(2,2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.25))
model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))
|
Digit Recognizer
|
8,773,240 |
y_pred=model.predict([encode_ds_tr,encode_features_tr])
y_pred = y_pred.round()
print(classification_report(y_enc,y_pred))<categorify>
|
datagen = ImageDataGenerator(zoom_range = 0.1,
height_shift_range = 0.1,
width_shift_range = 0.1,
rotation_range = 10 )
|
Digit Recognizer
|
8,773,240 |
encode_ds_ts = {'input_ids':encode_ds_all["input_ids"][7613:,:],
'input_mask':encode_ds_all["input_mask"][7613:,:],
'input_type_ids':encode_ds_all["input_type_ids"][7613:,:]}
encode_ds_ts<define_variables>
|
model.compile(loss='categorical_crossentropy', optimizer = Adam(lr=1e-4), metrics=["accuracy"] )
|
Digit Recognizer
|
8,773,240 |
encode_features_ts = all_df[features].iloc[7613:,:]<load_from_csv>
|
annealer = LearningRateScheduler(lambda x: 1e-3 * 0.9 ** x )
|
Digit Recognizer
|
8,773,240 |
y_pred=model.predict([encode_ds_ts,encode_features_ts])
y_pred= y_pred.round()
submission=pd.read_csv('/kaggle/input/nlp-getting-started/sample_submission.csv')
submission['id']=test['id']
submission['target']=y_pred
submission['target']=submission['target'].astype(int)
submission.head(10)
<save_to_csv>
|
hist = model.fit_generator(datagen.flow(x_train, y_train, batch_size=16),
steps_per_epoch=500,
epochs=20,
verbose=2,
validation_data=(x_val[:400,:], y_val[:400,:]),
callbacks=[annealer] )
|
Digit Recognizer
|
8,773,240 |
submission.to_csv('sample_submission.csv',index=False )<set_options>
|
final_loss, final_acc = model.evaluate(x_val, y_val, verbose=0)
print("Final loss: {0:.4f}, final accuracy: {1:.4f}".format(final_loss, final_acc))
|
Digit Recognizer
|
8,773,240 |
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', 20)
pd.set_option('display.max_colwidth', -1 )<load_from_csv>
|
y_hat = model.predict(x_val)
y_pred = np.argmax(y_hat, axis=1)
y_true = np.argmax(y_val, axis=1)
cm = confusion_matrix(y_true, y_pred)
print(cm )
|
Digit Recognizer
|
8,773,240 |
df = pd.read_csv('../input/nlp-getting-started/train.csv')
print(len(df))
print(df.columns)
df<categorify>
|
mnist_testset = np.loadtxt(test_file, skiprows=1, dtype='int', delimiter=',')
x_test = mnist_testset.astype("float32")
x_test = x_test.reshape(-1, 28, 28, 1)/255.
|
Digit Recognizer
|
8,773,240 |
def clean_text(text):
text = re.sub(r'http\S+', '', text)
text = re.sub(r"(?:\@)\w+", '', text)
text = re.sub(r"[^a-zA-Z0-9'.,?$&\s]", '', text)
text = text.lower()
return text
for i in range(10):
index = np.random.randint(low=0, high=len(df))
print('Raw text:', df['text'][index])
print('Cleaned text:', clean_text(df['text'][index]))
print('Label: ', df['target'][index], '\n')<categorify>
|
y_hat = model.predict(x_test, batch_size=64 )
|
Digit Recognizer
|
8,773,240 |
def convert_to_features(data, tokenizer, max_len=None):
data = data.replace('\n', '')
if max_len is not None:
tokenized = tokenizer.encode_plus(
data,
padding ='max_length',
max_length=max_len,
truncation=True,
return_tensors='np',
return_attention_mask=True,
return_token_type_ids=True,
)
else:
tokenized = tokenizer.encode_plus(
data,
return_tensors='np',
return_attention_mask=True,
return_token_type_ids=True,
)
return tokenized
def create_inputs_with_targets(x, y, tokenizer, max_len=128):
dataset_dict = {
"input_ids": [],
"attention_mask": [],
'labels': []
}
for sentence, label in tqdm(zip(x,y)) :
cleaned_sentence = clean_text(sentence)
temp = convert_to_features(cleaned_sentence, tokenizer, max_len=max_len)
dataset_dict["input_ids"].append(temp["input_ids"][0])
dataset_dict["attention_mask"].append(temp["attention_mask"][0])
dataset_dict["labels"].append(label)
x = [
np.array(dataset_dict["input_ids"]),
np.array(dataset_dict["attention_mask"]),
]
y = np.array(dataset_dict['labels'])
return x, y
def create_inputs_without_targets(x, tokenizer, max_len=128):
dataset_dict = {
"input_ids": [],
"attention_mask": [],
}
for sentence in tqdm(x):
cleaned_sentence = clean_text(sentence)
temp = convert_to_features(cleaned_sentence, tokenizer, max_len=max_len)
dataset_dict["input_ids"].append(temp["input_ids"][0])
dataset_dict["attention_mask"].append(temp["attention_mask"][0])
x = [
np.array(dataset_dict["input_ids"]),
np.array(dataset_dict["attention_mask"]),
]
return x<load_pretrained>
|
y_pred = np.argmax(y_hat,axis=1 )
|
Digit Recognizer
|
8,773,240 |
base_model = 'bert-base-uncased'
bert_tokenizer = transformers.BertTokenizer.from_pretrained(base_model)
max_len = 80<prepare_x_and_y>
|
solution = pd.DataFrame({'ImageId': sample_submission['ImageId'], 'Label': y_pred})
solution[["ImageId","Label"]].to_csv("CNNPrediction.csv", index=False)
solution.head()
|
Digit Recognizer
|
12,327,863 |
validation_data_indices = df.sample(frac=0.2 ).index
validation_df = df.loc[validation_data_indices, :].reset_index(drop=True)
train_df = df.drop(validation_data_indices, axis=0 ).reset_index(drop=True)
test_df = pd.read_csv('../input/nlp-getting-started/test.csv')
x_train, y_train = create_inputs_with_targets(list(train_df['text']), list(train_df['target']), tokenizer=bert_tokenizer, max_len=max_len)
x_val, y_val = create_inputs_with_targets(list(validation_df['text']), list(validation_df['target']), tokenizer=bert_tokenizer, max_len=max_len)
x_test = create_inputs_without_targets(list(test_df['text']), tokenizer=bert_tokenizer, max_len=max_len)
print('Training dataframe size: ', len(train_df))
print('Validation dataframe size: ', len(validation_df))
print('Test dataframe size: ', len(test_df))<categorify>
|
%matplotlib inline
|
Digit Recognizer
|
12,327,863 |
def create_model(model_name, max_len=128):
seed = 500
my_init = tf.keras.initializers.glorot_uniform(seed)
max_len = max_len
encoder = transformers.TFAutoModel.from_pretrained(model_name)
encoder.trainable = True
input_ids = keras.layers.Input(shape=(max_len,), dtype=tf.int32)
attention_mask = keras.layers.Input(shape=(max_len,), dtype=tf.int32)
sequence_output = encoder(input_ids, attention_mask=attention_mask)['last_hidden_state']
bi_lstm = tf.keras.layers.Bidirectional(keras.layers.LSTM(64, return_sequences=True))(sequence_output)
avg_pool = tf.keras.layers.GlobalAveragePooling1D()(bi_lstm)
max_pool = tf.keras.layers.GlobalMaxPooling1D()(bi_lstm)
concat = tf.keras.layers.concatenate([avg_pool, max_pool])
dropout = tf.keras.layers.Dropout(0.3 )(concat)
output = tf.keras.layers.Dense(1, activation="sigmoid" )(dropout)
model = tf.keras.models.Model(
inputs=[input_ids, attention_mask], outputs=[output]
)
return model<choose_model_class>
|
train_df = pd.read_csv(".. /input/digit-recognizer/train.csv")
test_df = pd.read_csv(".. /input/digit-recognizer/test.csv" )
|
Digit Recognizer
|
12,327,863 |
epochs = 20
lr = 2e-4
use_tpu = True
if use_tpu:
tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
tf.config.experimental_connect_to_cluster(tpu)
tf.tpu.experimental.initialize_tpu_system(tpu)
strategy = tf.distribute.experimental.TPUStrategy(tpu)
with strategy.scope() :
model = create_model(base_model, max_len=max_len)
optimizer = keras.optimizers.Adam(learning_rate=lr)
model.compile(optimizer=optimizer,
loss = keras.losses.BinaryCrossentropy() ,
metrics= [keras.metrics.BinaryAccuracy() ])
else:
model = create_model()
model.summary()<train_model>
|
train_df['label'].value_counts(normalize=True)
|
Digit Recognizer
|
12,327,863 |
my_callbacks = [keras.callbacks.EarlyStopping(monitor='val_binary_accuracy', patience=2, mode='max', restore_best_weights=True)]
hist = model.fit(x_train,
y_train,
validation_data =(x_val, y_val),
epochs= epochs,
batch_size= 128,
callbacks = my_callbacks,
verbose= 1 )<save_to_csv>
|
x_train = train_df.drop(labels = ["label"],axis = 1)
y_train = train_df['label']
del train_df
|
Digit Recognizer
|
12,327,863 |
predictions = model.predict(x_test)
ids = list(test_df['id'])
target = [round(i[0])for i in predictions]
sub = pd.DataFrame({'id':ids, 'target':target}, index=None)
sub.to_csv('submission.csv', index=False)
sub<load_from_csv>
|
y_train = to_categorical(y_train, num_classes=10)
y_train[:2,:]
|
Digit Recognizer
|
12,327,863 |
<install_modules>
|
random_seed = 121
X_train, X_val, Y_train, Y_val = train_test_split(x_train, y_train, test_size = 0.15, random_state=random_seed)
|
Digit Recognizer
|
12,327,863 |
!pip install transformers==3.5.1
!pip install pyspellchecker<import_modules>
|
model = Sequential()
model.add(Conv2D(filters = 32, kernel_size =(3,3),padding = 'Same',
activation ='relu', input_shape =(28,28,1)))
model.add(Conv2D(filters = 32, kernel_size =(3,3),padding = 'Same',
activation ='relu'))
model.add(MaxPool2D(pool_size=(2,2)))
model.add(BatchNormalization())
model.add(Dropout(0.2))
model.add(Conv2D(filters = 64, kernel_size =(3,3),padding = 'Same',
activation ='relu', input_shape =(28,28,1)))
model.add(Conv2D(filters = 64, kernel_size =(3,3),padding = 'Same',
activation ='relu'))
model.add(MaxPool2D(pool_size=(2,2)))
model.add(BatchNormalization())
model.add(Dropout(0.2))
model.add(Conv2D(filters = 128, kernel_size =(3,3),padding = 'Same',
activation ='relu'))
model.add(Conv2D(filters = 128, kernel_size =(3,3),padding = 'Same',
activation ='relu'))
model.add(MaxPool2D(pool_size=(2,2), strides=(2,2)))
model.add(BatchNormalization())
model.add(Dropout(0.2))
model.add(GlobalAveragePooling2D())
model.add(Dense(256, activation = "relu"))
model.add(Dropout(0.2))
model.add(Dense(10, activation = "softmax"))
|
Digit Recognizer
|
12,327,863 |
import pandas as pd
import torchtext
from transformers import BertTokenizer, BertForMaskedLM, BertConfig
import transformers
import torch
from torch.utils.data import Dataset, DataLoader
from torch import optim
from torch import cuda
from sklearn.model_selection import train_test_split
import re
import string
<load_from_csv>
|
optimizer = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0 )
|
Digit Recognizer
|
12,327,863 |
train_val_df = pd.read_csv("/kaggle/input/nlp-getting-started/train.csv")
test_df = pd.read_csv("/kaggle/input/nlp-getting-started/test.csv" )<feature_engineering>
|
model.compile(optimizer = optimizer , loss = "categorical_crossentropy", metrics=["accuracy"] )
|
Digit Recognizer
|
12,327,863 |
train_val_df = train_val_df.loc[:,["text","target"]]
test_df = test_df.loc[:,["text"]]
test_df["target"] = [0]*len(test_df["text"] )<prepare_output>
|
learning_rate_reduction = ReduceLROnPlateau(monitor='val_acc',
patience=3,
verbose=1,
factor=0.5,
min_lr=0.00001)
epochs = 50
batch_size = 86
|
Digit Recognizer
|
12,327,863 |
print(train_val_df)
print(test_df.head())
original_df = train_val_df.copy()<define_variables>
|
datagen = ImageDataGenerator(
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=10,
zoom_range = 0.1,
width_shift_range=0.1,
height_shift_range=0.1,
horizontal_flip=False,
vertical_flip=False)
datagen.fit(X_train )
|
Digit Recognizer
|
12,327,863 |
mispell_dict = {"aren't" : "are not",
"can't" : "cannot",
"couldn't" : "could not",
"couldnt" : "could not",
"didn't" : "did not",
"doesn't" : "does not",
"doesnt" : "does not",
"don't" : "do not",
"hadn't" : "had not",
"hasn't" : "has not",
"haven't" : "have not",
"havent" : "have not",
"he'd" : "he would",
"he'll" : "he will",
"he's" : "he is",
"i'd" : "I would",
"i'd" : "I had",
"i'll" : "I will",
"i'm" : "I am",
"isn't" : "is not",
"it's" : "it is",
"it'll":"it will",
"i've" : "I have",
"let's" : "let us",
"mightn't" : "might not",
"mustn't" : "must not",
"shan't" : "shall not",
"she'd" : "she would",
"she'll" : "she will",
"she's" : "she is",
"shouldn't" : "should not",
"shouldnt" : "should not",
"that's" : "that is",
"thats" : "that is",
"there's" : "there is",
"theres" : "there is",
"they'd" : "they would",
"they'll" : "they will",
"they're" : "they are",
"theyre": "they are",
"they've" : "they have",
"we'd" : "we would",
"we're" : "we are",
"weren't" : "were not",
"we've" : "we have",
"what'll" : "what will",
"what're" : "what are",
"what's" : "what is",
"what've" : "what have",
"where's" : "where is",
"who'd" : "who would",
"who'll" : "who will",
"who're" : "who are",
"who's" : "who is",
"who've" : "who have",
"won't" : "will not",
"wouldn't" : "would not",
"you'd" : "you would",
"you'll" : "you will",
"you're" : "you are",
"you've" : "you have",
"'re": " are",
"wasn't": "was not",
"we'll":" will",
"didn't": "did not"}
def replace_typical_misspell(text):
text = text.lower()
mispellings_re = re.compile('(%s)' % '|'.join(mispell_dict.keys()))
def replace(match):
return mispell_dict[match.group(0)]
return mispellings_re.sub(replace, text)
train_val_df['text'] = train_val_df['text'].apply(lambda x : replace_typical_misspell(x))
test_df['text'] = test_df['text'].apply(lambda x : replace_typical_misspell(x))
def remove_space(string):
string = BeautifulSoup(string ).text.strip().lower()
string = re.sub(r'(( http)\S+)', 'http', string)
string = re.sub(r'@\w*', '', string)
string = re.sub(r'#\w*', '', string)
string = re.sub(r'\s+', ' ', string)
return string
train_val_df['text'] = train_val_df['text'].apply(lambda x : remove_space(x))
test_df['text'] = test_df['text'].apply(lambda x : remove_space(x))
def remove_emoji(text):
emoji_pattern = re.compile("["
u"\U0001F600-\U0001F64F"
u"\U0001F300-\U0001F5FF"
u"\U0001F680-\U0001F6FF"
u"\U0001F1E0-\U0001F1FF"
u"\U00002702-\U000027B0"
u"\U000024C2-\U0001F251"
"]+", flags=re.UNICODE)
return emoji_pattern.sub(r'', text)
train_val_df['text'] = train_val_df['text'].apply(lambda x: remove_emoji(x))
test_df['text'] = test_df['text'].apply(lambda x: remove_emoji(x))
<save_to_csv>
|
history = model.fit_generator(datagen.flow(X_train,Y_train, batch_size=batch_size),
epochs = epochs, validation_data =(X_val,Y_val),
verbose = 2, steps_per_epoch=X_train.shape[0] // batch_size
, callbacks=[learning_rate_reduction] )
|
Digit Recognizer
|
12,327,863 |
test_df.to_csv("test.tsv", sep='\t', index=False, header=None)
print(test_df.shape)
train_val_df.to_csv("train_eval.tsv", sep='\t', index=False, header=None)
print(train_val_df.shape )<categorify>
|
results = model.predict(test_df)
results = np.argmax(results,axis = 1)
results = pd.Series(results,name="Label" )
|
Digit Recognizer
|
12,327,863 |
<load_pretrained><EOS>
|
submission = pd.concat([pd.Series(range(1,28001),name = "ImageId"),results],axis = 1)
submission.to_csv("cnn_mnist_datagen.csv",index=False )
|
Digit Recognizer
|
12,305,349 |
<SOS> metric: categorizationaccuracy Kaggle data source: digit-recognizer<load_from_csv>
|
sns.set()
|
Digit Recognizer
|
12,305,349 |
dataset_train_eval, dataset_test = torchtext.data.TabularDataset.splits(path='.', train='./train_eval.tsv', test='./test.tsv', format='tsv', fields=[('Text', TEXT),('Label', LABEL)] )<split>
|
digits_PATH = '/kaggle/input/digit-recognizer/'
digits_train = pd.read_csv(digits_PATH+'train.csv' )
|
Digit Recognizer
|
12,305,349 |
dataset_train, dataset_eval = dataset_train_eval.split(
split_ratio=1.0 - 1800/7613, random_state=random.seed(1234))
print(dataset_train.__len__())
print(dataset_eval.__len__())
print(dataset_test.__len__() )<data_type_conversions>
|
X, y = digits_train.iloc[:,1:].values/255, digits_train.iloc[:,0].values
X = X.reshape(-1,28, 28, 1 )
|
Digit Recognizer
|
12,305,349 |
print(tokenizer.convert_ids_to_tokens(item.Text.tolist()))
print(int(item.Label))<define_variables>
|
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.9, random_state=0 )
|
Digit Recognizer
|
12,305,349 |
batch_size = 32
dl_train = torchtext.data.Iterator(
dataset_train, batch_size=batch_size, train=True)
dl_eval = torchtext.data.Iterator(
dataset_eval, batch_size=batch_size, train=False, sort=False)
dl_test = torchtext.data.Iterator(
dataset_test, batch_size=batch_size, train=False, sort=False)
dataloaders_dict = {"train": dl_train, "val": dl_eval}<load_pretrained>
|
class MultiCNN() :
def __init__(self, model_generator, num_models=1):
self.models = []
self.create_models(model_generator, num_models)
def create_models(self, model_generator, num_models=1):
for i in range(0,num_models):
m = keras.models.Sequential(model_generator())
m.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
self.models.append(m)
def fit(self, train_inputs, train_targets, early_stop=True, verbose=2, batch_size=200, epochs=100):
callback = []
if early_stop:
callback.append(keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=1e-6, patience=5))
for m in self.models:
X_train2, X_val2, y_train2, y_val2 = train_test_split(train_inputs, train_targets, test_size = 0.2)
m.fit(X_train2, y_train2,
batch_size= batch_size,
epochs = epochs,
callbacks=callback,
validation_data =(X_val2, y_val2),
verbose=verbose
)
def fit_generator(self, generator, train_inputs, train_targets, early_stop=True, verbose=2, batch_size=32, epochs=100):
callback = []
if early_stop:
callback.append(keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=1e-6, patience=5))
for m in self.models:
X_train2, X_val2, y_train2, y_val2 = train_test_split(train_inputs, train_targets, test_size = 0.2)
m.fit_generator(generator.flow(X_train2,y_train2, batch_size=batch_size),
epochs = epochs,
callbacks=callback,
validation_data =(X_val2, y_val2),
verbose=verbose
)
def predict(self, test_inputs):
prediction = [model.predict(test_inputs)for model in self.models]
return sum(prediction)/len(prediction)
def acuracia(self, test_input, test_target):
prediction = self.predict(test_input)
prediction = np.argmax(prediction, axis=1)
return accuracy_score(prediction, test_target)
def confusion_matrix(self, test_input, test_target):
y_pred = self.predict(test_input)
y_pred = np.argmax(y_pred, axis=1)
confusao = confusion_matrix(y_pred, test_target)
fig, ax = plt.subplots(1, figsize=(10,10))
sns.heatmap(pd.DataFrame(confusao), ax=ax, cbar=False, annot=True)
ax.set_title('Confusion matrix', size=20)
ax.set_yticklabels(ax.get_xticklabels() , rotation=0, size=15)
ax.set_xticklabels(ax.get_yticklabels() , rotation=0, size=15)
plt.show()
def acuracia_individual(self, test_input, test_target):
return [accuracy_score(np.argmax(m.predict(test_input), axis=1), test_target) for m in self.models]
|
Digit Recognizer
|
12,305,349 |
model = BertModel.from_pretrained('bert-base-cased' )<set_options>
|
def make_CNN() :
return [
keras.layers.Conv2D(filters=32, kernel_size=(3,3), activation="relu", input_shape=(28, 28, 1)) ,
keras.layers.MaxPooling2D(pool_size =(2,2), strides = 2),
keras.layers.Conv2D(filters=64, kernel_size=(3,3), activation="relu"),
keras.layers.MaxPooling2D(pool_size =(2,2), strides = 2),
keras.layers.Flatten() ,
keras.layers.Dense(units=64, activation="relu"),
keras.layers.Dropout(0.2),
keras.layers.Dense(units=10, activation="softmax")
]
multiCNN_default = MultiCNN(make_CNN, num_models=15)
multiCNN_default.fit(X_train, y_train, verbose = 0, early_stop=True)
multiCNN_default.acuracia(X_test, y_test )
|
Digit Recognizer
|
12,305,349 |
class BertForTwitter(nn.Module):
def __init__(self):
super(BertForTwitter, self ).__init__()
self.bert = model
self.cls = nn.Linear(in_features=768, out_features=2)
nn.init.normal_(self.cls.weight, std=0.02)
nn.init.normal_(self.cls.bias, 0)
def forward(self, input_ids):
result = self.bert(input_ids)
vec_0 = result[0]
vec_0 = vec_0[:, 0, :]
vec_0 = vec_0.view(-1, 768)
output = self.cls(vec_0)
return output<train_on_grid>
|
def make_CNN() :
return [
keras.layers.Conv2D(filters=32, kernel_size=(3,3), activation="relu", input_shape=(28, 28, 1)) ,
keras.layers.MaxPooling2D(pool_size =(2,2), strides = 2),
keras.layers.Conv2D(filters=64, kernel_size=(3,3), activation="relu"),
keras.layers.MaxPooling2D(pool_size =(2,2), strides = 2),
keras.layers.Flatten() ,
keras.layers.Dense(units=64, activation="relu"),
keras.layers.Dense(units=64, activation="relu"),
keras.layers.Dropout(0.2),
keras.layers.Dense(units=10, activation="softmax")
]
multiCNN_plus_layer = MultiCNN(make_CNN, num_models=15)
multiCNN_plus_layer.fit(X_train, y_train, verbose = 0, early_stop=True)
multiCNN_plus_layer.acuracia(X_test, y_test )
|
Digit Recognizer
|
12,305,349 |
net = BertForTwitter()
net.train()
print('Network setup complete')<categorify>
|
def make_CNN() :
return [
keras.layers.Conv2D(filters=32, kernel_size=(3,3), activation="relu", input_shape=(28, 28, 1)) ,
keras.layers.AveragePooling2D(pool_size =(2,2), strides = 2),
keras.layers.Conv2D(filters=64, kernel_size=(3,3), activation="relu"),
keras.layers.AveragePooling2D(pool_size =(2,2), strides = 2),
keras.layers.Flatten() ,
keras.layers.Dense(units=64, activation="relu"),
keras.layers.Dropout(0.2),
keras.layers.Dense(units=10, activation="softmax")
]
multiCNN_pooling = MultiCNN(make_CNN, num_models=15)
multiCNN_pooling.fit(X_train, y_train, verbose = 0, early_stop=True)
multiCNN_pooling.acuracia(X_test, y_test )
|
Digit Recognizer
|
12,305,349 |
for param in net.parameters() :
param.requires_grad = False
for param in net.bert.encoder.layer[-1].parameters() :
param.requires_grad = True
for param in net.cls.parameters() :
param.requires_grad = True<choose_model_class>
|
def make_CNN() :
return [
keras.layers.Conv2D(filters=32, kernel_size=(3,3), activation="relu", input_shape=(28, 28, 1)) ,
keras.layers.MaxPooling2D(pool_size =(2,2), strides = 2),
keras.layers.BatchNormalization() ,
keras.layers.Conv2D(filters=64, kernel_size=(3,3), activation="relu"),
keras.layers.MaxPooling2D(pool_size =(2,2), strides = 2),
keras.layers.BatchNormalization() ,
keras.layers.Flatten() ,
keras.layers.Dense(units=64, activation="relu"),
keras.layers.Dropout(0.2),
keras.layers.Dense(units=10, activation="softmax")
]
multiCNN_batch_normalization = MultiCNN(make_CNN, num_models=15)
multiCNN_batch_normalization.fit(X_train, y_train, verbose = 0, early_stop=True)
multiCNN_batch_normalization.acuracia(X_test, y_test )
|
Digit Recognizer
|
12,305,349 |
optimizer = optim.Adam([
{'params': net.bert.encoder.layer[-1].parameters() , 'lr': 5e-5},
{'params': net.cls.parameters() , 'lr': 1e-4}
])
criterion = nn.CrossEntropyLoss()
<train_model>
|
def make_CNN() :
return [
keras.layers.Conv2D(filters=32, kernel_size=(3,3), activation="relu", input_shape=(28, 28, 1)) ,
keras.layers.MaxPooling2D(pool_size =(2,2), strides = 2),
keras.layers.BatchNormalization() ,
keras.layers.Dropout(0.2),
keras.layers.Conv2D(filters=64, kernel_size=(3,3), activation="relu"),
keras.layers.MaxPooling2D(pool_size =(2,2), strides = 2),
keras.layers.BatchNormalization() ,
keras.layers.Dropout(0.2),
keras.layers.Flatten() ,
keras.layers.Dense(units=64, activation="relu"),
keras.layers.Dropout(0.2),
keras.layers.Dense(units=10, activation="softmax")
]
multiCNN_dropout = MultiCNN(make_CNN, num_models=15)
multiCNN_dropout.fit(X_train, y_train, verbose = 0, early_stop=True)
multiCNN_dropout.acuracia(X_test, y_test )
|
Digit Recognizer
|
12,305,349 |
def train_model(net, dataloaders_dict, criterion, optimizer, num_epochs):
max_acc = 0
Stop_flag = False
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print("使用デバイス:", device)
print('-----start-------')
net.to(device)
torch.backends.cudnn.benchmark = True
batch_size = dataloaders_dict["train"].batch_size
for epoch in range(num_epochs):
for phase in ['train', 'val']:
if phase == 'train':
net.train()
else:
net.eval()
epoch_loss = 0.0
epoch_corrects = 0
iteration = 1
for batch in(dataloaders_dict[phase]):
inputs = batch.Text[0].to(device)
labels = batch.Label.to(device)
optimizer.zero_grad()
with torch.set_grad_enabled(phase == 'train'):
outputs = net(inputs)
loss = criterion(outputs, labels)
_, preds = torch.max(outputs, 1)
if phase == 'train':
loss.backward()
optimizer.step()
if(iteration % 50 == 0):
acc =(torch.sum(preds == labels.data)
).double() /batch_size
print('Iteration {} || Loss: {:.4f} || accuracy for this iteration: {}'.format(
iteration, loss.item(), acc))
iteration += 1
epoch_loss += loss.item() * batch_size
epoch_corrects += torch.sum(preds == labels.data)
epoch_loss = epoch_loss / len(dataloaders_dict[phase].dataset)
epoch_acc = epoch_corrects.double(
)/ len(dataloaders_dict[phase].dataset)
print('Epoch {}/{} | {:^5} | Loss: {:.4f} Acc: {:.4f}'.format(epoch+1, num_epochs,
phase, epoch_loss, epoch_acc))
if phase == "val":
if epoch_acc < max_acc:
count += 1
if count >= 3:
Stop_flag = True
else:
count = 0
max_acc = epoch_acc
print(count)
if Stop_flag:
break
return net<train_model>
|
def make_CNN() :
return [
keras.layers.Conv2D(filters=32, kernel_size=(3,3), activation="relu", input_shape=(28, 28, 1)) ,
keras.layers.MaxPooling2D(pool_size =(2,2), strides = 2),
keras.layers.BatchNormalization() ,
keras.layers.Dropout(0.2),
keras.layers.Conv2D(filters=64, kernel_size=(3,3), activation="relu"),
keras.layers.MaxPooling2D(pool_size =(2,2), strides = 2),
keras.layers.BatchNormalization() ,
keras.layers.Dropout(0.2),
keras.layers.Flatten() ,
keras.layers.Dense(units=200, activation="relu"),
keras.layers.Dropout(0.2),
keras.layers.Dense(units=10, activation="softmax")
]
multiCNN_200 = MultiCNN(make_CNN, num_models=15)
multiCNN_200.fit(X_train, y_train, verbose = 0, early_stop=True)
multiCNN_200.acuracia(X_test, y_test )
|
Digit Recognizer
|
12,305,349 |
num_epochs = 50
net_trained = train_model(net, dataloaders_dict,
criterion, optimizer, num_epochs=num_epochs )<load_from_csv>
|
def make_CNN() :
return [
keras.layers.Conv2D(filters=32, kernel_size=(5,5), activation="relu", input_shape=(28, 28, 1)) ,
keras.layers.MaxPooling2D(pool_size =(2,2), strides = 2),
keras.layers.BatchNormalization() ,
keras.layers.Dropout(0.2),
keras.layers.Conv2D(filters=64, kernel_size=(3,3), activation="relu"),
keras.layers.MaxPooling2D(pool_size =(2,2), strides = 2),
keras.layers.BatchNormalization() ,
keras.layers.Dropout(0.2),
keras.layers.Flatten() ,
keras.layers.Dense(units=200, activation="relu"),
keras.layers.Dropout(0.2),
keras.layers.Dense(units=10, activation="softmax")
]
multiCNN_bigger_kernel = MultiCNN(make_CNN, num_models=15)
multiCNN_bigger_kernel.fit(X_train, y_train, verbose = 0, early_stop=True)
multiCNN_bigger_kernel.acuracia(X_test, y_test )
|
Digit Recognizer
|
12,305,349 |
sample_submission = pd.read_csv(".. /input/nlp-getting-started/sample_submission.csv")
sample_submission["target"] = ans_list
sample_submission<save_to_csv>
|
def make_CNN() :
return [
keras.layers.Conv2D(filters=32, kernel_size=(5,5), activation="relu", input_shape=(28, 28, 1)) ,
keras.layers.MaxPooling2D(pool_size =(2,2), strides = 2),
keras.layers.BatchNormalization() ,
keras.layers.Dropout(0.2),
keras.layers.Conv2D(filters=64, kernel_size=(3,3), activation="relu"),
keras.layers.MaxPooling2D(pool_size =(2,2), strides = 2),
keras.layers.BatchNormalization() ,
keras.layers.Dropout(0.2),
keras.layers.Flatten() ,
keras.layers.Dense(units=200, activation="relu"),
keras.layers.Dropout(0.2),
keras.layers.Dense(units=10, activation="softmax")
]
multiCNN_min_epochs = MultiCNN(make_CNN, num_models=15)
multiCNN_min_epochs.fit(X_train, y_train, verbose = 0, early_stop=True, epochs=10)
multiCNN_min_epochs.acuracia(X_test, y_test )
|
Digit Recognizer
|
12,305,349 |
sample_submission.to_csv("submission_plus.csv", index=False )<import_modules>
|
datagen = keras.preprocessing.image.ImageDataGenerator(
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
zca_epsilon=1e-06,
rotation_range=10,
width_shift_range=0.1,
height_shift_range=0.1,
brightness_range=None,
zoom_range=0.1,
fill_mode="nearest",
cval=0.0,
horizontal_flip=False,
vertical_flip=False,
rescale=None,
preprocessing_function=None,
data_format=None,
validation_split=0.0,
dtype=None,
)
datagen.fit(X_train )
|
Digit Recognizer
|
12,305,349 |
import torch
import pandas as pd
import numpy as np
from transformers import AutoModelForSequenceClassification, AutoTokenizer<string_transform>
|
def make_CNN() :
return [
keras.layers.Conv2D(filters=32, kernel_size=(5,5), activation="relu", input_shape=(28, 28, 1)) ,
keras.layers.MaxPooling2D(pool_size =(2,2), strides = 2),
keras.layers.BatchNormalization() ,
keras.layers.Dropout(0.2),
keras.layers.Conv2D(filters=64, kernel_size=(3,3), activation="relu"),
keras.layers.MaxPooling2D(pool_size =(2,2), strides = 2),
keras.layers.BatchNormalization() ,
keras.layers.Dropout(0.2),
keras.layers.Flatten() ,
keras.layers.Dense(units=200, activation="relu"),
keras.layers.Dropout(0.2),
keras.layers.Dense(units=10, activation="softmax")
]
multiCNN_data_augmentation = MultiCNN(make_CNN, num_models=15)
multiCNN_data_augmentation.fit_generator(datagen, X_train, y_train, verbose = 0, early_stop=True)
multiCNN_data_augmentation.acuracia(X_test, y_test )
|
Digit Recognizer
|
12,305,349 |
class Dataset:
def __init__(self, text, tokenizer, max_len):
self.text = text
self.tokenizer = tokenizer
self.max_len = max_len
def __len__(self):
return len(self.text)
def __getitem__(self, item):
text = str(self.text[item])
inputs = self.tokenizer(
text,
max_length=self.max_len,
padding="max_length",
truncation=True
)
ids = inputs["input_ids"]
mask = inputs["attention_mask"]
return {
"input_ids": torch.tensor(ids, dtype=torch.long),
"attention_mask": torch.tensor(mask, dtype=torch.long),
}<load_from_csv>
|
def make_CNN() :
return [
keras.layers.Conv2D(filters=32, kernel_size=(5,5), activation="relu", input_shape=(28, 28, 1)) ,
keras.layers.MaxPooling2D(pool_size =(2,2), strides = 2),
keras.layers.BatchNormalization() ,
keras.layers.Dropout(0.2),
keras.layers.Conv2D(filters=64, kernel_size=(3,3), activation="relu"),
keras.layers.MaxPooling2D(pool_size =(2,2), strides = 2),
keras.layers.BatchNormalization() ,
keras.layers.Dropout(0.2),
keras.layers.Flatten() ,
keras.layers.Dense(units=200, activation="relu"),
keras.layers.Dropout(0.2),
keras.layers.Dense(units=10, activation="softmax")
]
multiCNN_final = MultiCNN(make_CNN, num_models=15)
multiCNN_final.fit_generator(datagen, X_train, y_train, verbose = 0, early_stop=True )
|
Digit Recognizer
|
12,305,349 |
def generate_predictions(model_path, max_len):
model = AutoModelForSequenceClassification.from_pretrained(model_path)
tokenizer = AutoTokenizer.from_pretrained(model_path)
model.to("cuda")
model.eval()
df = pd.read_csv(".. /input/nlp-getting-started/test.csv")
dataset = Dataset(text=df.text.values, tokenizer=tokenizer, max_len=max_len)
data_loader = torch.utils.data.DataLoader(
dataset, batch_size=32, num_workers=4, pin_memory=True, shuffle=False
)
final_output = []
for b_idx, data in enumerate(data_loader):
with torch.no_grad() :
for key, value in data.items() :
data[key] = value.to("cuda")
output = model(**data)
output = torch.nn.functional.softmax(output.logits, dim=1)
output = output.detach().cpu().numpy() [:, 1]
output =(output >= 0.6 ).astype(int ).tolist()
final_output.extend(output)
torch.cuda.empty_cache()
return np.array(final_output )<predict_on_test>
|
multiCNN_final.acuracia(X_test, y_test )
|
Digit Recognizer
|
12,305,349 |
preds = generate_predictions("abhishek/autonlp-fred2-2682064", max_len=64 )<save_to_csv>
|
submission = pd.read_csv(digits_PATH+'sample_submission.csv', index_col=0)
test = pd.read_csv(digits_PATH+'test.csv')/255
test = test.values.reshape(-1,28, 28, 1 )
|
Digit Recognizer
|
12,305,349 |
sample = pd.read_csv(".. /input/nlp-getting-started/sample_submission.csv")
sample.target = preds
sample.to_csv("submission.csv", index=False )<count_values>
|
submission['Label'] = np.argmax(multiCNN_final.predict(test), axis=1 )
|
Digit Recognizer
|
12,305,349 |
sample.target.value_counts()<import_modules>
|
submission.Label.value_counts()
|
Digit Recognizer
|
12,305,349 |
<load_from_csv><EOS>
|
submission.to_csv('submission.csv' )
|
Digit Recognizer
|
12,242,884 |
<SOS> metric: categorizationaccuracy Kaggle data source: digit-recognizer<load_from_csv>
|
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
|
Digit Recognizer
|
12,242,884 |
test_csv = pd.read_csv(".. /input/nlp-getting-started/test.csv")
test_csv.head()<count_values>
|
train = pd.read_csv(".. /input/digit-recognizer/train.csv")
test = pd.read_csv(".. /input/digit-recognizer/test.csv" )
|
Digit Recognizer
|
12,242,884 |
training_csv['target'].value_counts()<count_values>
|
train.isnull().sum().any()
|
Digit Recognizer
|
12,242,884 |
training_csv['keyword'].value_counts()<count_missing_values>
|
labels = list(range(10))
|
Digit Recognizer
|
12,242,884 |
training_csv['keyword'].isnull().sum()<count_values>
|
X = train.drop("label", axis=1 ).values
y = train["label"].values
|
Digit Recognizer
|
12,242,884 |
training_csv['location'].value_counts()<count_missing_values>
|
from sklearn.model_selection import train_test_split
|
Digit Recognizer
|
12,242,884 |
training_csv['location'].isnull().sum()<categorify>
|
from sklearn.model_selection import train_test_split
|
Digit Recognizer
|
12,242,884 |
def clean(title):
title = re.sub(r"\-"," ",title)
title = re.sub(r"\+"," ",title)
title = re.sub(r"&","and",title)
title = re.sub(r"\|"," ",title)
title = re.sub(r"\"," ",title)
title = re.sub(r"\W"," ",title)
title = title.lower()
for p in string.punctuation :
title = re.sub(r"f{p}"," ",title)
title = re.sub(r"\s+"," ",title)
return title<categorify>
|
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.1, random_state=10 )
|
Digit Recognizer
|
12,242,884 |
training_csv["cleaned_text"] = training_csv["text"].map(clean)
training_csv.head()<load_pretrained>
|
X_test = test.values
|
Digit Recognizer
|
12,242,884 |
tokenizer = BertTokenizer.from_pretrained('../input/bert-base-uncased')
<split>
|
from keras.utils import to_categorical
|
Digit Recognizer
|
12,242,884 |
X_train,X_test,y_train,y_test = train_test_split(training_csv["cleaned_text"].values,training_csv["target"].values, random_state=0,test_size=0.1,shuffle=True )<categorify>
|
y_train = to_categorical(y_train)
y_val_true = y_val.copy()
y_val = to_categorical(y_val )
|
Digit Recognizer
|
12,242,884 |
class CreateDataset(Dataset):
def __init__(self, X, y, tokenizer, max_len):
self.X = X
self.y = y
self.tokenizer = tokenizer
self.max_len = max_len
def __len__(self):
return len(self.y)
def __getitem__(self, index):
text = self.X[index]
inputs = self.tokenizer.encode_plus(
text,
add_special_tokens=True,
max_length=self.max_len,
truncation=True,
pad_to_max_length=True
)
ids = inputs['input_ids']
mask = inputs['attention_mask']
return {
'ids': torch.LongTensor(ids),
'mask': torch.LongTensor(mask),
'labels': torch.Tensor(self.y[index])
}
<create_dataframe>
|
X_train = X_train/255
X_val = X_val/255
X_test = X_test/255
|
Digit Recognizer
|
12,242,884 |
max_len = 45
dataset_train = CreateDataset(X_train, y_train, tokenizer, max_len)
dataset_valid = CreateDataset(X_test, y_test, tokenizer, max_len)
<load_pretrained>
|
X_train = X_train.reshape(-1, 28, 28, 1)
X_val = X_val.reshape(-1, 28, 28, 1)
X_test = X_test.reshape(-1, 28, 28, 1 )
|
Digit Recognizer
|