kernel_id (int64, values 24.2k–23.3M) | prompt (string, lengths 8–1.85M) | completion (string, lengths 1–182k) | comp_name (string, lengths 5–57)
---|---|---|---|
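Each row below pairs the notebook context seen so far (prompt, which ends in a cell-type tag such as <count_missing_values>) with the code cell that follows it (completion), keyed by kernel_id and the competition name in comp_name. As a minimal sketch of how such a table could be loaded and inspected, assuming it has been exported to a hypothetical kernels.parquet file (the real artifact name and format may differ):

import pandas as pd

# Hypothetical export of the table shown below; adjust the path/format to the real artifact.
df = pd.read_parquet("kernels.parquet")

# Expected schema: kernel_id (int64), prompt (str), completion (str), comp_name (str).
print(df.dtypes)
print(df.loc[0, "prompt"][-200:])       # tail of the context, including the trailing cell-type tag
print(df.loc[0, "completion"][:200])    # head of the target code cell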
11,104,798 |
all_data = pd.concat(( train, test)).reset_index(drop = True)
all_data.drop(['SalePrice'], axis = 1, inplace = True )<count_missing_values>
|
submission.to_csv("submission1.csv", index = False)
|
Contradictory, My Dear Watson
|
11,104,798 |
missing_percentage(all_data )<define_variables>
|
train = pd.read_csv("/kaggle/input/contradictory-my-dear-watson/train.csv")
train.head()
|
Contradictory, My Dear Watson
|
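missing_percentage(all_data) is called in several prompt cells of this kernel but its definition is not part of this excerpt; a minimal sketch of what such a helper usually looks like (an assumed implementation, not the kernel's actual code):

import pandas as pd

def missing_percentage(df):
    # Assumed helper: count and share of missing values per column, largest first.
    total = df.isnull().sum().sort_values(ascending=False)
    percent = (df.isnull().sum() / len(df) * 100).sort_values(ascending=False)
    return pd.concat([total, percent], axis=1, keys=["Total", "Percent"])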
11,104,798 |
missing_val_col = ["Alley",
"PoolQC",
"MiscFeature",
"Fence",
"FireplaceQu",
"GarageType",
"GarageFinish",
"GarageQual",
"GarageCond",
'BsmtQual',
'BsmtCond',
'BsmtExposure',
'BsmtFinType1',
'BsmtFinType2',
'MasVnrType']
for i in missing_val_col:
all_data[i] = all_data[i].fillna('None' )<define_variables>
|
tokenizer = AutoTokenizer.from_pretrained('jplu/tf-xlm-roberta-large' )
|
Contradictory, My Dear Watson
|
11,104,798 |
missing_val_col2 = ['BsmtFinSF1',
'BsmtFinSF2',
'BsmtUnfSF',
'TotalBsmtSF',
'BsmtFullBath',
'BsmtHalfBath',
'GarageYrBlt',
'GarageArea',
'GarageCars',
'MasVnrArea']
for i in missing_val_col2:
all_data[i] = all_data[i].fillna(0)
all_data['LotFrontage'] = all_data.groupby('Neighborhood')['LotFrontage'].transform(lambda x: x.fillna(x.mean()))<data_type_conversions>
|
def encode_sentence(s):
tokens = list(tokenizer.tokenize(s))
tokens.append('[SEP]')
return tokenizer.convert_tokens_to_ids(tokens)
|
Contradictory, My Dear Watson
|
11,104,798 |
all_data['MSSubClass'] = all_data['MSSubClass'].astype(str)
all_data['MSZoning'] = all_data.groupby('MSSubClass')['MSZoning'].transform(lambda x: x.fillna(x.mode() [0]))
all_data['YrSold'] = all_data['YrSold'].astype(str)
all_data['MoSold'] = all_data['MoSold'].astype(str )<data_type_conversions>
|
s = "I love machine learning"
encode_sentence(s )
|
Contradictory, My Dear Watson
|
11,104,798 |
all_data['Functional'] = all_data['Functional'].fillna('Typ')
all_data['Utilities'] = all_data['Utilities'].fillna('AllPub')
all_data['Exterior1st'] = all_data['Exterior1st'].fillna(all_data['Exterior1st'].mode() [0])
all_data['Exterior2nd'] = all_data['Exterior2nd'].fillna(all_data['Exterior2nd'].mode() [0])
all_data['KitchenQual'] = all_data['KitchenQual'].fillna("TA")
all_data['SaleType'] = all_data['SaleType'].fillna(all_data['SaleType'].mode() [0])
all_data['Electrical'] = all_data['Electrical'].fillna("SBrkr")
<count_missing_values>
|
def data_translate(source_data,dest_language):
translator = Translator()
if dest_language == 'zh':
dest_language = 'zh-cn'
dest_data = translator.translate(source_data, dest = dest_language ).text
return dest_data
|
Contradictory, My Dear Watson
|
11,104,798 |
missing_percentage(all_data )<sort_values>
|
def translation_augment(source_data, languages, fraction):
new_df = pd.DataFrame()
for lang in languages:
print(lang)
sampled_rows = source_data.sample(frac=fraction, replace = False)
prem_bag = bag.from_sequence(sampled_rows['premise'].tolist() ).map(data_translate, lang)
hypothesis_bag = bag.from_sequence(sampled_rows['hypothesis'].tolist() ).map(data_translate, lang)
with diagnostics.ProgressBar() :
prems = prem_bag.compute()
hyps = hypothesis_bag.compute()
aug_df = pd.DataFrame({'id': pd.Series([None]*len(sampled_rows)) ,
'premise': pd.Series(prems),
'hypothesis': pd.Series(hyps),
'lang_abv': pd.Series([lang]*len(sampled_rows)) ,
'language': pd.Series([None]*len(sampled_rows)) ,
'label': pd.Series(sampled_rows['label'].values)
})
new_df = new_df.append(aug_df)
new_df = shuffle(new_df)
return new_df
|
Contradictory, My Dear Watson
|
11,104,798 |
numeric_feats = all_data.dtypes[all_data.dtypes != "object"].index
skewed_feats = all_data[numeric_feats].apply(lambda x: skew(x)).sort_values(ascending=False)
skewed_feats<sort_values>
|
def data_augment(train_df, fraction):
english_df = train.loc[train.lang_abv == 'en']
languages = list(set(train.lang_abv.values))
languages.remove('en')
print(languages)
translated_df = translation_augment(english_df,languages, fraction)
train_df = train_df.append(translated_df)
train_df = shuffle(train_df)
return train_df
|
Contradictory, My Dear Watson
|
11,104,798 |
def fixing_skewness(df):
numeric_feats = df.dtypes[df.dtypes != "object"].index
skewed_feats = df[numeric_feats].apply(lambda x: skew(x)).sort_values(ascending=False)
high_skew = skewed_feats[abs(skewed_feats)> 0.5]
skewed_features = high_skew.index
for feat in skewed_features:
df[feat] = boxcox1p(df[feat], boxcox_normmax(df[feat] + 1))
fixing_skewness(all_data )<feature_engineering>
|
train = pd.read_csv('/kaggle/input/augment-data20/augmented_data_20percent.csv')
train.head()
len(train )
|
Contradictory, My Dear Watson
|
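The fixing_skewness cell above calls skew, boxcox1p and boxcox_normmax without showing their imports, which presumably live in an earlier cell outside this excerpt; the standard SciPy locations of these functions are:

# Assumed imports for the skewness-correction prompt cell above.
from scipy.stats import skew, boxcox_normmax   # sample skewness and Box-Cox lambda estimation
from scipy.special import boxcox1p             # Box-Cox transform of (1 + x)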
11,104,798 |
all_data['TotalSF'] =(all_data['TotalBsmtSF']
+ all_data['1stFlrSF']
+ all_data['2ndFlrSF'])
all_data['YrBltAndRemod'] = all_data['YearBuilt'] + all_data['YearRemodAdd']
all_data['Total_sqr_footage'] =(all_data['BsmtFinSF1']
+ all_data['BsmtFinSF2']
+ all_data['1stFlrSF']
+ all_data['2ndFlrSF']
)
all_data['Total_Bathrooms'] =(all_data['FullBath']
+(0.5 * all_data['HalfBath'])
+ all_data['BsmtFullBath']
+(0.5 * all_data['BsmtHalfBath'])
)
all_data['Total_porch_sf'] =(all_data['OpenPorchSF']
+ all_data['3SsnPorch']
+ all_data['EnclosedPorch']
+ all_data['ScreenPorch']
+ all_data['WoodDeckSF']
)
<feature_engineering>
|
def bert_encode(premises, hypotheses, tokenizer):
num_examples = len(premises)
sen1 = tf.ragged.constant([encode_sentence(s)for s in np.array(premises)])
sen2 = tf.ragged.constant([encode_sentence(s)for s in np.array(hypotheses)])
cls = [tokenizer.convert_tokens_to_ids(['CLS'])]*sen1.shape[0]
input_word_ids = tf.concat([cls, sen1, sen2], axis = -1)
input_mask = tf.ones_like(input_word_ids ).to_tensor()
type_cls = tf.zeros_like(cls)
type_sen1 = tf.zeros_like(sen1)
type_sen2 = tf.ones_like(sen2)
input_type_ids = tf.concat([type_cls, type_sen1, type_sen2], axis = -1 ).to_tensor()
inputs = {
'input_word_ids' : input_word_ids.to_tensor() ,
'input_mask': input_mask,
'input_type_ids': input_type_ids
}
return inputs
|
Contradictory, My Dear Watson
|
11,104,798 |
all_data['haspool'] = all_data['PoolArea'].apply(lambda x: 1 if x > 0 else 0)
all_data['has2ndfloor'] = all_data['2ndFlrSF'].apply(lambda x: 1 if x > 0 else 0)
all_data['hasgarage'] = all_data['GarageArea'].apply(lambda x: 1 if x > 0 else 0)
all_data['hasbsmt'] = all_data['TotalBsmtSF'].apply(lambda x: 1 if x > 0 else 0)
all_data['hasfireplace'] = all_data['Fireplaces'].apply(lambda x: 1 if x > 0 else 0 )<drop_column>
|
train_input = bert_encode(train.premise.values, train.hypothesis.values, tokenizer )
|
Contradictory, My Dear Watson
|
11,104,798 |
all_data = all_data.drop(['Utilities', 'Street', 'PoolQC',], axis=1 )<categorify>
|
max_len = 80
def build_model() :
bert_encoder = TFRobertaModel.from_pretrained('jplu/tf-xlm-roberta-large')
input_word_ids = tf.keras.Input(shape =(max_len,), dtype =tf.int32, name = "input_word_ids")
input_mask = tf.keras.Input(shape =(max_len,), dtype= tf.int32, name = "input_mask")
input_type_ids = tf.keras.Input(shape=(max_len,), dtype= tf.int32, name="input_type_ids")
embedding = bert_encoder([input_word_ids, input_mask, input_type_ids])[0]
output = tf.keras.layers.Dense(3, activation = 'softmax' )(embedding[:,0,:])
model = tf.keras.Model(inputs= [input_word_ids, input_mask, input_type_ids], outputs=output)
model.compile(tf.keras.optimizers.Adam(lr=1e-5), loss='sparse_categorical_crossentropy', metrics = 'accuracy')
return model
|
Contradictory, My Dear Watson
|
11,104,798 |
final_features = pd.get_dummies(all_data ).reset_index(drop=True)
final_features.shape<prepare_x_and_y>
|
model.fit(train_input, train.label.values, epochs=3, verbose=1, batch_size=16, validation_split=0.2 )
|
Contradictory, My Dear Watson
|
11,104,798 |
X = final_features.iloc[:len(y), :]
X_sub = final_features.iloc[len(y):, :]<drop_column>
|
model.save_weights('RoBertamodel_augmented_data_20_percent_adam_sparse_categorical_entropy.h5' )
|
Contradictory, My Dear Watson
|
11,104,798 |
outliers = [30, 88, 462, 631, 1322]
X = X.drop(X.index[outliers])
y = y.drop(y.index[outliers] )<count_values>
|
test = pd.read_csv('/kaggle/input/contradictory-my-dear-watson/test.csv')
test.head()
|
Contradictory, My Dear Watson
|
11,104,798 |
counts = X.BsmtUnfSF.value_counts()<count_values>
|
test_input = bert_encode(test.premise.values, test.hypothesis.values, tokenizer )
|
Contradictory, My Dear Watson
|
11,104,798 |
counts.iloc[0]<count_values>
|
predictions = [np.argmax(i)for i in model.predict(test_input)]
|
Contradictory, My Dear Watson
|
11,104,798 |
for i in X.columns:
counts = X[i].value_counts()
print(counts )<train_model>
|
submission = test.id.copy().to_frame()
submission.head()
|
Contradictory, My Dear Watson
|
11,104,798 |
def overfit_reducer(df):
overfit = []
for i in df.columns:
counts = df[i].value_counts()
zeros = counts.iloc[0]
if zeros / len(df)* 100 > 99.94:
overfit.append(i)
overfit = list(overfit)
return overfit
overfitted_features = overfit_reducer(X)
X = X.drop(overfitted_features, axis=1)
X_sub = X_sub.drop(overfitted_features, axis=1 )<split>
|
submission['prediction'] = predictions
|
Contradictory, My Dear Watson
|
11,104,798 |
<feature_engineering><EOS>
|
submission.to_csv("submission.csv", index = False)
|
Contradictory, My Dear Watson
|
11,062,246 |
<SOS> metric: categorization accuracy Kaggle data source: contradictory,-my-dear-watson<feature_engineering>
|
!pip install -q transformers==3.0.2
|
Contradictory, My Dear Watson
|
11,062,246 |
sample_train['Linear_Yhat'] = beta_0 + beta_1*sample_train['GrLivArea']<compute_test_metric>
|
train = pd.read_csv('/kaggle/input/contradictory-my-dear-watson/train.csv')
train
|
Contradictory, My Dear Watson
|
11,062,246 |
print("Mean Squared Error(MSE)for regression line is : {}".format(np.square(sample_train['SalePrice'] - sample_train['Linear_Yhat'] ).mean()))<compute_test_metric>
|
test = pd.read_csv('/kaggle/input/contradictory-my-dear-watson/test.csv')
test
|
Contradictory, My Dear Watson
|
11,062,246 |
mean_squared_error(sample_train['SalePrice'], sample_train.Linear_Yhat )<train_model>
|
sub = pd.read_csv('/kaggle/input/contradictory-my-dear-watson/sample_submission.csv')
sub
|
Contradictory, My Dear Watson
|
11,062,246 |
lin_reg = LinearRegression(normalize=True, n_jobs=-1)
lin_reg.fit(X_train, y_train)
y_pred = lin_reg.predict(X_test )<compute_test_metric>
|
model_name = 'jplu/tf-xlm-roberta-large'
epochs = 4
maxlen = 80
AUTO = tf.data.experimental.AUTOTUNE
|
Contradictory, My Dear Watson
|
11,062,246 |
print('%.2f'%mean_squared_error(y_test, y_pred))<import_modules>
|
tokenizer = AutoTokenizer.from_pretrained(model_name )
|
Contradictory, My Dear Watson
|
11,062,246 |
lin_reg = LinearRegression()
cv = KFold(shuffle=True, random_state=2, n_splits=10)
scores = cross_val_score(lin_reg, X,y,cv = cv, scoring = 'neg_mean_absolute_error' )<train_model>
|
def get_training_dataset(idx, df = train, is_train = True):
text = df[['premise', 'hypothesis']].values[idx].tolist()
text_enc = tokenizer.batch_encode_plus(
text,
pad_to_max_length = True,
max_length = maxlen
)
dataset = tf.data.Dataset.from_tensor_slices(( text_enc['input_ids'], df['label'][idx].values))
dataset = dataset.repeat()
dataset = dataset.shuffle(2020)
dataset = dataset.batch(batch_size)
dataset = dataset.prefetch(AUTO)
return dataset
def get_valid_dataset(idx, df = train, is_train = False):
text = df[['premise', 'hypothesis']].values[idx].tolist()
text_enc = tokenizer.batch_encode_plus(
text,
pad_to_max_length = True,
max_length = maxlen
)
dataset = tf.data.Dataset.from_tensor_slices(( text_enc['input_ids'], df['label'][idx].values))
dataset = dataset.batch(batch_size)
dataset = dataset.prefetch(AUTO)
return dataset
def get_test_dataset(df = test, is_train = False):
text = df[['premise', 'hypothesis']].values.tolist()
text_enc = tokenizer.batch_encode_plus(
text,
pad_to_max_length = True,
max_length = maxlen
)
dataset = tf.data.Dataset.from_tensor_slices(text_enc['input_ids'])
dataset = dataset.batch(batch_size)
return dataset
|
Contradictory, My Dear Watson
|
11,062,246 |
alpha_ridge = [-3,-2,-1,1e-15, 1e-10, 1e-8,1e-5,1e-4, 1e-3,1e-2,0.5,1,1.5, 2,3,4, 5, 10, 20, 30, 40]
temp_rss = {}
temp_mse = {}
for i in alpha_ridge:
ridge = Ridge(alpha= i, normalize=True)
ridge.fit(X_train, y_train)
y_pred = ridge.predict(X_test)
mse = mean_squared_error(y_test, y_pred)
rss = sum(( y_pred-y_test)**2)
temp_mse[i] = mse
temp_rss[i] = rss<sort_values>
|
def build_model(maxlen, model_name):
with strategy.scope() :
base_model = TFAutoModel.from_pretrained(model_name)
input_word_ids = tf.keras.Input(shape =(maxlen,), dtype = tf.int32, name = "input_word_ids")
embedding = base_model(input_word_ids)[0]
out_tokens = embedding[:, 0, :]
output = tf.keras.layers.Dense(3, activation = 'softmax' )(out_tokens)
model = tf.keras.Model(inputs = input_word_ids, outputs = output)
model.compile(tf.keras.optimizers.Adam(lr = 1e-5),
loss = 'sparse_categorical_crossentropy',
metrics = ['accuracy'])
return model
|
Contradictory, My Dear Watson
|
11,062,246 |
for key, value in sorted(temp_mse.items() , key=lambda item: item[1]):
print("%s: %s" %(key, value))<sort_values>
|
folds = 3
kf = KFold(n_splits = folds, shuffle = True, random_state = 777)
models = []
histories = []
predictions = np.zeros(( test.shape[0], 3))
for fold,(trn_idx, val_idx)in enumerate(kf.split(np.arange(train['label'].shape[0]))):
print('\n')
print('-'*50)
print(f'Training fold {fold + 1}')
train_dataset = get_training_dataset(trn_idx, df = train, is_train = True)
valid_dataset = get_valid_dataset(val_idx, df = train, is_train = False)
K.clear_session()
model = build_model(maxlen, model_name)
checkpoint = tf.keras.callbacks.ModelCheckpoint(
'XLM-R_fold-%i.h5'%fold, monitor = 'val_loss', verbose = 1, save_best_only = True,
save_weights_only = True, mode = 'min', save_freq = 'epoch'
)
print('Model Training.....')
STEPS_PER_EPOCH = len(trn_idx)// batch_size
history = model.fit(
train_dataset, epochs = epochs, verbose = 1,
steps_per_epoch = STEPS_PER_EPOCH,
batch_size = batch_size,
validation_data = valid_dataset
)
display_training_curves(
history.history['loss'],
history.history['val_loss'],
'loss', 311
)
display_training_curves(
history.history['accuracy'],
history.history['val_accuracy'],
'accuracy', 312
)
histories.append(history)
models.append(model)
print('Predicting on test data.. ')
test_dataset = get_test_dataset(test, is_train = False)
pred = model.predict(test_dataset, verbose = 1)
predictions += pred / folds
del history, train_dataset, valid_dataset, model
gc.collect()
print('\n')
print('-'*50 )
|
Contradictory, My Dear Watson
|
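display_training_curves is called inside the fold loop above but never defined in this excerpt; a minimal matplotlib sketch with the same call signature (an assumed implementation, not the kernel's own):

import matplotlib.pyplot as plt

def display_training_curves(training, validation, title, subplot):
    # Assumed helper: plot train/validation curves for one metric into a 3-digit subplot slot.
    ax = plt.subplot(subplot)
    ax.plot(training, label="train " + title)
    ax.plot(validation, label="valid " + title)
    ax.set_title("Model " + title)
    ax.set_xlabel("epoch")
    ax.legend()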
11,062,246 |
for key, value in sorted(temp_rss.items() , key=lambda item: item[1]):
print("%s: %s" %(key, value))<predict_on_test>
|
sub['prediction'] = np.argmax(predictions, axis = 1 )
|
Contradictory, My Dear Watson
|
11,062,246 |
<sort_values><EOS>
|
sub.to_csv('./submission.csv', index = False)
sub
|
Contradictory, My Dear Watson
|
11,053,603 |
<SOS> metric: categorization accuracy Kaggle data source: contradictory,-my-dear-watson<sort_values>
|
os.environ["WANDB_API_KEY"] = "0"
|
Contradictory, My Dear Watson
|
11,053,603 |
for key, value in sorted(temp_rss.items() , key=lambda item: item[1]):
print("%s: %s" %(key, value))<find_best_model_class>
|
SEED = 42
EPOCHS = 10
MAX_LEN = 96
NUM_SPLITS = 3
LR = 3e-5
BATCH_SIZE = 16
|
Contradictory, My Dear Watson
|
11,053,603 |
temp_rss = {}
temp_mse = {}
for i in alpha_ridge:
lasso_reg = ElasticNet(alpha= i, normalize=True)
lasso_reg.fit(X_train, y_train)
y_pred = lasso_reg.predict(X_test)
mse = mean_squared_error(y_test, y_pred)
rss = sum(( y_pred-y_test)**2)
temp_mse[i] = mse
temp_rss[i] = rss<sort_values>
|
os.environ['PYTHONHASHSEED']=str(SEED)
random.seed(SEED)
np.random.seed(SEED)
tf.random.set_seed(SEED )
|
Contradictory, My Dear Watson
|
11,053,603 |
for key, value in sorted(temp_mse.items() , key=lambda item: item[1]):
print("%s: %s" %(key, value))<sort_values>
|
train = pd.read_csv(".. /input/contradictory-my-dear-watson-translated-dataset/train_augmented.csv", index_col=["id"])
test = pd.read_csv(".. /input/contradictory-my-dear-watson/test.csv", index_col=["id"])
test["label"] = -1
|
Contradictory, My Dear Watson
|
11,053,603 |
for key, value in sorted(temp_rss.items() , key=lambda item: item[1]):
print("%s: %s" %(key, value))<compute_train_metric>
|
df = pd.concat([train, test])
df.loc[df["label"]!=-1, "type"] = "train"
df.loc[df["label"]==-1, "type"] = "test"
|
Contradictory, My Dear Watson
|
11,053,603 |
kfolds = KFold(n_splits=10, shuffle=True, random_state=42)
def rmsle(y, y_pred):
return np.sqrt(mean_squared_error(y, y_pred))
def cv_rmse(model, X=X):
rmse = np.sqrt(-cross_val_score(model, X, y, scoring="neg_mean_squared_error", cv=kfolds))
return(rmse )<define_search_space>
|
del df
gc.collect()
|
Contradictory, My Dear Watson
|
11,053,603 |
alphas_alt = [14.5, 14.6, 14.7, 14.8, 14.9, 15, 15.1, 15.2, 15.3, 15.4, 15.5]
alphas2 = [5e-05, 0.0001, 0.0002, 0.0003, 0.0004, 0.0005, 0.0006, 0.0007, 0.0008]
e_alphas = [0.0001, 0.0002, 0.0003, 0.0004, 0.0005, 0.0006, 0.0007]
e_l1ratio = [0.8, 0.85, 0.9, 0.95, 0.99, 1]<define_search_model>
|
strategy = tf.distribute.experimental.TPUStrategy(tpu )
|
Contradictory, My Dear Watson
|
11,053,603 |
ridge = make_pipeline(RobustScaler() , RidgeCV(alphas=alphas_alt, cv=kfolds))
lasso = make_pipeline(RobustScaler() , LassoCV(max_iter=1e7,
alphas=alphas2,
random_state=42,
cv=kfolds))
elasticnet = make_pipeline(RobustScaler() , ElasticNetCV(max_iter=1e7, alphas=e_alphas, cv=kfolds, l1_ratio=e_l1ratio))
svr = make_pipeline(RobustScaler() , SVR(C= 20, epsilon= 0.008, gamma=0.0003,))<choose_model_class>
|
BATCH_SIZE = BATCH_SIZE*strategy.num_replicas_in_sync
|
Contradictory, My Dear Watson
|
11,053,603 |
gbr = GradientBoostingRegressor(n_estimators=3000, learning_rate=0.05, max_depth=4, max_features='sqrt', min_samples_leaf=15, min_samples_split=10, loss='huber', random_state =42 )<choose_model_class>
|
MODEL = 'jplu/tf-xlm-roberta-large'
TOKENIZER = AutoTokenizer.from_pretrained(MODEL )
|
Contradictory, My Dear Watson
|
11,053,603 |
lightgbm = LGBMRegressor(objective='regression',
num_leaves=4,
learning_rate=0.01,
n_estimators=5000,
max_bin=200,
bagging_fraction=0.75,
bagging_freq=5,
bagging_seed=7,
feature_fraction=0.2,
feature_fraction_seed=7,
verbose=-1,
)<choose_model_class>
|
def fast_encode(df):
text = df[['premise', 'hypothesis']].values.tolist()
encoded = TOKENIZER.batch_encode_plus(
text,
pad_to_max_length=True,
max_length=MAX_LEN
)
return np.array(encoded["input_ids"] )
|
Contradictory, My Dear Watson
|
11,053,603 |
xgboost = XGBRegressor(learning_rate=0.01,n_estimators=3460,
max_depth=3, min_child_weight=0,
gamma=0, subsample=0.7,
colsample_bytree=0.7,
objective='reg:linear', nthread=-1,
scale_pos_weight=1, seed=27,
reg_alpha=0.00006 )<choose_model_class>
|
test_encoded = fast_encode(test)
test_dataset =(
tf.data.Dataset
.from_tensor_slices(test_encoded)
.batch(BATCH_SIZE)
)
|
Contradictory, My Dear Watson
|
11,053,603 |
stack_gen = StackingCVRegressor(regressors=(ridge, lasso, elasticnet, xgboost, lightgbm),
meta_regressor=xgboost,
use_features_in_secondary=True )<compute_test_metric>
|
def mish(x):
return x*tanh(softplus(x))
get_custom_objects() ["mish"] = Activation(mish )
|
Contradictory, My Dear Watson
|
11,053,603 |
score = cv_rmse(ridge)
print("Ridge: {:.4f}({:.4f})\n".format(score.mean() , score.std()), datetime.now() ,)
score = cv_rmse(lasso)
print("LASSO: {:.4f}({:.4f})\n".format(score.mean() , score.std()), datetime.now() ,)
score = cv_rmse(elasticnet)
print("elastic net: {:.4f}({:.4f})\n".format(score.mean() , score.std()), datetime.now() ,)
score = cv_rmse(svr)
print("SVR: {:.4f}({:.4f})\n".format(score.mean() , score.std()), datetime.now() ,)
score = cv_rmse(lightgbm)
print("lightgbm: {:.4f}({:.4f})\n".format(score.mean() , score.std()), datetime.now() ,)
score = cv_rmse(xgboost)
print("xgboost: {:.4f}({:.4f})\n".format(score.mean() , score.std()), datetime.now() , )<train_model>
|
def create_model(transformer):
input_ids = Input(shape=(MAX_LEN,), dtype=tf.int32)
sequence_output = transformer(input_ids)[0]
cls_token = sequence_output[:, 0, :]
cls_token = Dropout(0.3 )(cls_token)
cls_token = Dense(32, activation='mish' )(cls_token)
cls_token = Dense(16, activation='mish' )(cls_token)
out = Dense(3, activation='softmax' )(cls_token)
optimizer = RectifiedAdam(lr=LR)
model = Model(inputs=input_ids, outputs=out)
model.compile(
optimizer,
loss='sparse_categorical_crossentropy',
metrics=['accuracy']
)
return model
|
Contradictory, My Dear Watson
|
11,053,603 |
print('START Fit')
print('stack_gen')
stack_gen_model = stack_gen.fit(np.array(X), np.array(y))
print('elasticnet')
elastic_model_full_data = elasticnet.fit(X, y)
print('Lasso')
lasso_model_full_data = lasso.fit(X, y)
print('Ridge')
ridge_model_full_data = ridge.fit(X, y)
print('Svr')
svr_model_full_data = svr.fit(X, y)
print('xgboost')
xgb_model_full_data = xgboost.fit(X, y)
print('lightgbm')
lgb_model_full_data = lightgbm.fit(X, y )<predict_on_test>
|
kfold = StratifiedKFold(n_splits=NUM_SPLITS, shuffle=True, random_state=SEED )
|
Contradictory, My Dear Watson
|
11,053,603 |
1.0 * elastic_model_full_data.predict(X )<predict_on_test>
|
eas = EarlyStopping(monitor='val_loss', min_delta=0.001, patience=3,
verbose=1, mode='min', baseline=None, restore_best_weights=True )
|
Contradictory, My Dear Watson
|
11,053,603 |
def blend_models_predict(X):
return(( 0.1 * elastic_model_full_data.predict(X)) + \
(0.05 * lasso_model_full_data.predict(X)) + \
(0.2 * ridge_model_full_data.predict(X)) + \
(0.1 * svr_model_full_data.predict(X)) + \
(0.15 * xgb_model_full_data.predict(X)) + \
(0.1 * lgb_model_full_data.predict(X)) + \
(0.3 * stack_gen_model.predict(np.array(X))))<compute_test_metric>
|
oof_preds = np.zeros(( len(train)))
test_preds = np.zeros(( len(test), 3))
|
Contradictory, My Dear Watson
|
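The weights in blend_models_predict above (0.1, 0.05, 0.2, 0.1, 0.15, 0.1, 0.3) sum to 1.0, so the blend is a convex combination of the individual model predictions; a one-line sanity check under that assumption:

# Blend weights from blend_models_predict; they should form a convex combination.
weights = [0.1, 0.05, 0.2, 0.1, 0.15, 0.1, 0.3]
assert abs(sum(weights) - 1.0) < 1e-9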
11,053,603 |
print('RMSLE score on train data:')
print(rmsle(y, blend_models_predict(X)) )<load_from_csv>
|
for fold,(train_index, valid_index)in enumerate(kfold.split(train, train['label'])) :
tf.tpu.experimental.initialize_tpu_system(tpu)
print("*"*60)
print("*"+" "*26+f"FOLD {fold+1}"+" "*26+"*")
print("*"*60, end="
")
X_train = train.iloc[train_index, :].reset_index(drop=True)
X_valid = train.iloc[valid_index, :].reset_index(drop=True)
y_train = X_train['label'].values
y_valid = X_valid['label'].values
train_encoded = fast_encode(X_train)
valid_encoded = fast_encode(X_valid)
train_dataset = tf.data.Dataset.from_tensor_slices(( train_encoded, y_train))
train_dataset = train_dataset.repeat()
train_dataset = train_dataset.batch(BATCH_SIZE, drop_remainder=True)
train_dataset = train_dataset.prefetch(tf.data.experimental.AUTOTUNE)
valid_dataset = tf.data.Dataset.from_tensor_slices(( valid_encoded, y_valid))
valid_dataset = valid_dataset.batch(BATCH_SIZE)
valid_dataset = valid_dataset.prefetch(tf.data.experimental.AUTOTUNE)
num_steps = len(X_train)//BATCH_SIZE
with strategy.scope() :
transformer_layer = TFXLMRobertaModel.from_pretrained(MODEL)
model = create_model(transformer_layer)
history = model.fit(
train_dataset,
steps_per_epoch=num_steps,
validation_data=valid_dataset,
epochs=EPOCHS,
callbacks=[eas]
)
valid_preds = model.predict(valid_dataset)
oof_preds[valid_index] = valid_preds.argmax(axis=1)
preds = model.predict(test_dataset)
test_preds += preds/NUM_SPLITS
|
Contradictory, My Dear Watson
|
11,053,603 |
print('Predict submission')
submission = pd.read_csv(".. /input/house-prices-advanced-regression-techniques/sample_submission.csv")
submission.iloc[:,1] = np.floor(np.expm1(blend_models_predict(X_sub)) )<load_from_csv>
|
print(f"Accuracy: {accuracy_score(train['label'], oof_preds)}" )
|
Contradictory, My Dear Watson
|
11,053,603 |
<save_to_csv><EOS>
|
submission = pd.read_csv(".. /input/contradictory-my-dear-watson/sample_submission.csv")
submission['prediction'] = np.argmax(test_preds, axis=1)
submission.to_csv("submission.csv", index=False )
|
Contradictory, My Dear Watson
|
10,971,620 |
<SOS> metric: categorization accuracy Kaggle data source: contradictory,-my-dear-watson<import_modules>
|
plt.style.use('fivethirtyeight')
warnings.filterwarnings('ignore')
|
Contradictory, My Dear Watson
|
10,971,620 |
sns.__version__<load_from_csv>
|
df_train=pd.read_csv(os.path.join(path,"train.csv"))
df_test=pd.read_csv(os.path.join(path,"test.csv"))
|
Contradictory, My Dear Watson
|
10,971,620 |
df_train = pd.read_csv(".. /input/train.csv")
df_test = pd.read_csv(".. /input/test.csv")
<groupby>
|
try:
tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
tf.config.experimental_connect_to_cluster(tpu)
tf.tpu.experimental.initialize_tpu_system(tpu)
strategy = tf.distribute.experimental.TPUStrategy(tpu)
print('Running on TPU ', tpu.master())
except ValueError:
strategy = tf.distribute.get_strategy()
print("REPLICAS: ", strategy.num_replicas_in_sync )
|
Contradictory, My Dear Watson
|
10,971,620 |
df_train.groupby(['Survived','Sex'])['Survived'].count()<count_values>
|
MODEL = 'jplu/tf-xlm-roberta-large'
EPOCHS = 10
MAX_LEN = 96
BATCH_SIZE= 16 * strategy.num_replicas_in_sync
AUTO = tf.data.experimental.AUTOTUNE
tokenizer = AutoTokenizer.from_pretrained(MODEL )
|
Contradictory, My Dear Watson
|
10,971,620 |
print("% of women survived: " , df_train[df_train.Sex == 'female'].Survived.sum() /df_train[df_train.Sex == 'female'].Survived.count())
print("% of men survived: " , df_train[df_train.Sex == 'male'].Survived.sum() /df_train[df_train.Sex == 'male'].Survived.count() )<save_to_csv>
|
def quick_encode(df,maxlen=100):
values = df[['premise','hypothesis']].values.tolist()
tokens=tokenizer.batch_encode_plus(values,max_length=maxlen,pad_to_max_length=True)
return np.array(tokens['input_ids'])
x_train = quick_encode(df_train)
x_test = quick_encode(df_test)
y_train = df_train.label.values
|
Contradictory, My Dear Watson
|
10,971,620 |
<count_values>
|
def create_dist_dataset(X, y,val,batch_size= BATCH_SIZE):
dataset = tf.data.Dataset.from_tensor_slices(( X,y)).shuffle(len(X))
if not val:
dataset = dataset.repeat().batch(batch_size ).prefetch(AUTO)
else:
dataset = dataset.batch(batch_size ).prefetch(AUTO)
return dataset
test_dataset =(
tf.data.Dataset
.from_tensor_slices(( x_test))
.batch(BATCH_SIZE)
)
|
Contradictory, My Dear Watson
|
10,971,620 |
print("% of survivals in")
print("Pclass=1 : ", df_train.Survived[df_train.Pclass == 1].sum() /df_train[df_train.Pclass == 1].Survived.count())
print("Pclass=2 : ", df_train.Survived[df_train.Pclass == 2].sum() /df_train[df_train.Pclass == 2].Survived.count())
print("Pclass=3 : ", df_train.Survived[df_train.Pclass == 3].sum() /df_train[df_train.Pclass == 3].Survived.count() )<save_to_csv>
|
def build_model(transformer,max_len):
input_ids = Input(shape=(max_len,), dtype=tf.int32, name="input_ids")
sequence_output = transformer(input_ids)[0]
cls_token = sequence_output[:, 0, :]
cls_token = Dropout(0.2 )(cls_token)
cls_token = Dense(32,activation='relu' )(cls_token)
out = Dense(3, activation='softmax' )(cls_token)
model = Model(inputs=input_ids, outputs=out)
model.compile(
Adam(lr=1e-5),
loss='sparse_categorical_crossentropy',
metrics=['accuracy']
)
return model
|
Contradictory, My Dear Watson
|
10,971,620 |
df_test['Survived'] = 0
df_test.loc[(df_test.Sex == 'female'), 'Survived'] = 1
df_test.loc[(df_test.Sex == 'female')&(df_test.Pclass == 3)&(df_test.Embarked == 'S'), 'Survived'] = 0
<feature_engineering>
|
def build_lrfn(lr_start=0.00001, lr_max=0.00003,
lr_min=0.000001, lr_rampup_epochs=3,
lr_sustain_epochs=0, lr_exp_decay=.6):
lr_max = lr_max * strategy.num_replicas_in_sync
def lrfn(epoch):
if epoch < lr_rampup_epochs:
lr =(lr_max - lr_start)/ lr_rampup_epochs * epoch + lr_start
elif epoch < lr_rampup_epochs + lr_sustain_epochs:
lr = lr_max
else:
lr =(lr_max - lr_min)* lr_exp_decay**(epoch - lr_rampup_epochs - lr_sustain_epochs)+ lr_min
return lr
return lrfn
|
Contradictory, My Dear Watson
|
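For intuition about the ramp-up/decay schedule defined in build_lrfn above, a small self-contained sketch that reproduces its defaults and prints the per-epoch learning rate, assuming a single replica so lr_max stays at 3e-5:

# Re-implementation of build_lrfn's default schedule for inspection only (assumes 1 replica).
def lrfn(epoch, lr_start=1e-5, lr_max=3e-5, lr_min=1e-6,
         rampup_epochs=3, sustain_epochs=0, exp_decay=0.6):
    if epoch < rampup_epochs:
        return (lr_max - lr_start) / rampup_epochs * epoch + lr_start
    if epoch < rampup_epochs + sustain_epochs:
        return lr_max
    return (lr_max - lr_min) * exp_decay ** (epoch - rampup_epochs - sustain_epochs) + lr_min

for epoch in range(10):
    print(epoch, round(lrfn(epoch), 8))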
10,971,620 |
for df in [df_train, df_test]:
df['Age_bin']=np.nan
for i in range(8,0,-1):
df.loc[ df['Age'] <= i*10, 'Age_bin'] = i<feature_engineering>
|
lrfn = build_lrfn()
lr_schedule = tf.keras.callbacks.LearningRateScheduler(lrfn, verbose=1 )
|
Contradictory, My Dear Watson
|
10,971,620 |
df_test.loc[(df_test.Sex == 'male')&(df_test.Pclass == 1)&(df_test.Age_bin == 1), 'Survived'] = 1
df_test.loc[(df_test.Sex == 'male')&(df_test.Pclass == 2)&(df_test.Age_bin == 1), 'Survived'] = 1<feature_engineering>
|
skf = StratifiedKFold(n_splits=5,shuffle=True,random_state=777)
val_score=[]
history=[]
for fold,(train_ind,valid_ind)in enumerate(skf.split(x_train,y_train)) :
if fold < 4:
print("fold",fold+1)
tf.tpu.experimental.initialize_tpu_system(tpu)
train_data = create_dist_dataset(x_train[train_ind],y_train[train_ind],val=False)
valid_data = create_dist_dataset(x_train[valid_ind],y_train[valid_ind],val=True)
Checkpoint=tf.keras.callbacks.ModelCheckpoint(f"roberta_base.h5", monitor='val_loss', verbose=0, save_best_only=True,
save_weights_only=True, mode='min')
with strategy.scope() :
transformer_layer = TFAutoModel.from_pretrained(MODEL)
model = build_model(transformer_layer, max_len=MAX_LEN)
n_steps = len(train_ind)//BATCH_SIZE
print("training model {} ".format(fold+1))
train_history = model.fit(
train_data,
steps_per_epoch=n_steps,
validation_data=valid_data,
epochs=EPOCHS,callbacks=[Checkpoint],verbose=1)
print("Loading model...")
model.load_weights(f"roberta_base.h5")
print("fold {} validation accuracy {}".format(fold+1,np.mean(train_history.history['val_accuracy'])))
print("fold {} validation loss {}".format(fold+1,np.mean(train_history.history['val_loss'])))
val_score.append(train_history.history['val_accuracy'])
history.append(train_history)
val_score.append(np.mean(train_history.history['val_accuracy']))
print('predict on test.... ')
preds=model.predict(test_dataset,verbose=1)
pred_test+=preds/4
print("Mean Validation accuracy : ",np.mean(val_score))
|
Contradictory, My Dear Watson
|
10,971,620 |
df_test.loc[(df_test.Sex == 'female')&(df_test.SibSp > 7), 'Survived'] = 0<feature_engineering>
|
submission = pd.read_csv(os.path.join(path,'sample_submission.csv'))
submission['prediction'] = np.argmax(pred_test,axis=1)
submission.head()
|
Contradictory, My Dear Watson
|
10,971,620 |
<feature_engineering><EOS>
|
submission.to_csv('submission.csv',index=False )
|
Contradictory, My Dear Watson
|
10,963,423 |
<SOS> metric: categorization accuracy Kaggle data source: contradictory,-my-dear-watson<feature_engineering>
|
!pip install git+https://github.com/ssut/py-googletrans.git
|
Contradictory, My Dear Watson
|
10,963,423 |
df_test.loc[(df_test.Sex == 'male')&(df_test.Fare_bin == 11), 'Survived'] = 1<save_to_csv>
|
import numpy as np
import pandas as pd
from googletrans import Translator
from dask import bag, diagnostics
|
Contradictory, My Dear Watson
|
10,963,423 |
<drop_column>
|
train = pd.read_csv('../input/contradictory-my-dear-watson/train.csv', index_col=['id'])
display(train, train.lang_abv.value_counts() )
|
Contradictory, My Dear Watson
|
10,963,423 |
df_test.drop(['Survived'],axis=1,inplace=True )<create_dataframe>
|
def translate(words, dest):
dest_choices = ['zh-cn',
'ar',
'fr',
'sw',
'ur',
'vi',
'ru',
'hi',
'el',
'th',
'es',
'de',
'tr',
'bg'
]
if not dest:
dest = np.random.choice(dest_choices)
translator = Translator()
decoded = translator.translate(words, dest=dest ).text
return decoded
def trans_parallel(df, dest):
premise_bag = bag.from_sequence(df.premise.tolist() ).map(translate, dest)
hypo_bag = bag.from_sequence(df.hypothesis.tolist() ).map(translate, dest)
with diagnostics.ProgressBar() :
premises = premise_bag.compute()
hypos = hypo_bag.compute()
df[['premise', 'hypothesis']] = list(zip(premises, hypos))
return df
eng = train.loc[train.lang_abv == "en"].copy() \
.pipe(trans_parallel, dest=None)
non_eng = train.loc[train.lang_abv != "en"].copy() \
.pipe(trans_parallel, dest='en')
train = train.append([eng, non_eng])
train.shape
|
Contradictory, My Dear Watson
|
10,963,423 |
df_train_ml = df_train.copy()
df_test_ml = df_test.copy()<categorify>
|
test = pd.read_csv('/kaggle/input/contradictory-my-dear-watson/test.csv', index_col=['id'])
submission = pd.read_csv('/kaggle/input/contradictory-my-dear-watson/sample_submission.csv',
index_col=['id'] )
|
Contradictory, My Dear Watson
|
10,963,423 |
df_train_ml = pd.get_dummies(df_train_ml, columns=['Sex', 'Embarked', 'Pclass'], drop_first=True)
df_train_ml.drop(['PassengerId','Name','Ticket', 'Cabin', 'Age_bin', 'Fare_bin'],axis=1,inplace=True)
df_train_ml.dropna(inplace=True )<categorify>
|
from sklearn.model_selection import train_test_split
import tensorflow as tf
from tensorflow.keras.layers import Dense, Input
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.models import Model
import transformers
from transformers import TFAutoModel, AutoTokenizer
|
Contradictory, My Dear Watson
|
10,963,423 |
passenger_id = df_test_ml['PassengerId']
df_test_ml = pd.get_dummies(df_test_ml, columns=['Sex', 'Embarked', 'Pclass'], drop_first=True)
df_test_ml.drop(['PassengerId','Name','Ticket', 'Cabin', 'Age_bin', 'Fare_bin'],axis=1,inplace=True )<normalization>
|
try:
tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
tf.config.experimental_connect_to_cluster(tpu)
tf.tpu.experimental.initialize_tpu_system(tpu)
strategy = tf.distribute.experimental.TPUStrategy(tpu)
print('Running on TPU ', tpu.master())
except ValueError:
strategy = tf.distribute.get_strategy()
print("REPLICAS: ", strategy.num_replicas_in_sync )
|
Contradictory, My Dear Watson
|
10,963,423 |
scaler = StandardScaler()
scaler.fit(df_train_ml.drop('Survived',axis=1))
scaled_features = scaler.transform(df_train_ml.drop('Survived',axis=1))
df_train_ml_sc = pd.DataFrame(scaled_features, columns=df_train_ml.columns[:-1])
df_test_ml.fillna(df_test_ml.mean() , inplace=True)
scaled_features = scaler.transform(df_test_ml)
df_test_ml_sc = pd.DataFrame(scaled_features, columns=df_test_ml.columns )<split>
|
model_name = 'jplu/tf-xlm-roberta-large'
n_epochs = 8
max_len = 80
batch_size = 16 * strategy.num_replicas_in_sync
|
Contradictory, My Dear Watson
|
10,963,423 |
X_train, X_test, y_train, y_test = train_test_split(df_train_ml.drop('Survived',axis=1), df_train_ml['Survived'], test_size=0.30, random_state=101)
X_train_sc, X_test_sc, y_train_sc, y_test_sc = train_test_split(df_train_ml_sc, df_train_ml['Survived'], test_size=0.30, random_state=101 )<prepare_x_and_y>
|
tokenizer = AutoTokenizer.from_pretrained(model_name)
train_text = train[['premise', 'hypothesis']].values.tolist()
test_text = test[['premise', 'hypothesis']].values.tolist()
train_encoded = tokenizer.batch_encode_plus(
train_text,
pad_to_max_length=True,
max_length=max_len
)
test_encoded = tokenizer.batch_encode_plus(
test_text,
pad_to_max_length=True,
max_length=max_len
)
x_train, x_valid, y_train, y_valid = train_test_split(
train_encoded['input_ids'], train.label.values,
test_size=0.2, random_state=2020
)
x_test = test_encoded['input_ids']
|
Contradictory, My Dear Watson
|
10,963,423 |
X_train_all = df_train_ml.drop('Survived',axis=1)
y_train_all = df_train_ml['Survived']
X_test_all = df_test_ml
X_train_all_sc = df_train_ml_sc
y_train_all_sc = df_train_ml['Survived']
X_test_all_sc = df_test_ml_sc<correct_missing_values>
|
auto = tf.data.experimental.AUTOTUNE
train_dataset =(
tf.data.Dataset
.from_tensor_slices(( x_train, y_train))
.repeat()
.shuffle(2048)
.batch(batch_size)
.prefetch(auto)
)
valid_dataset =(
tf.data.Dataset
.from_tensor_slices(( x_valid, y_valid))
.batch(batch_size)
.cache()
.prefetch(auto)
)
test_dataset =(
tf.data.Dataset
.from_tensor_slices(x_test)
.batch(batch_size)
)
|
Contradictory, My Dear Watson
|
10,963,423 |
X_test_all.fillna(X_test_all.mean() , inplace=True)
print("*" )<import_modules>
|
with strategy.scope() :
transformer_encoder = TFAutoModel.from_pretrained(model_name)
input_ids = Input(shape=(max_len,), dtype=tf.int32, name="input_ids")
sequence_output = transformer_encoder(input_ids)[0]
cls_token = sequence_output[:, 0, :]
out = Dense(3, activation='softmax' )(cls_token)
model = Model(inputs=input_ids, outputs=out)
model.compile(
Adam(lr=1e-5),
loss='sparse_categorical_crossentropy',
metrics=['accuracy']
)
|
Contradictory, My Dear Watson
|
10,963,423 |
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix<compute_train_metric>
|
n_steps = len(x_train)// batch_size
train_history = model.fit(
train_dataset,
steps_per_epoch=n_steps,
validation_data=valid_dataset,
epochs=n_epochs
)
|
Contradictory, My Dear Watson
|
10,963,423 |
logreg = LogisticRegression()
logreg.fit(X_train,y_train)
pred_logreg = logreg.predict(X_test)
print(confusion_matrix(y_test, pred_logreg))
print(classification_report(y_test, pred_logreg))
print(accuracy_score(y_test, pred_logreg))<predict_on_test>
|
train_history = model.fit(
train_dataset,
steps_per_epoch=n_steps,
validation_data=valid_dataset,
epochs=1
)
|
Contradictory, My Dear Watson
|
10,963,423 |
logreg.fit(X_train_all, y_train_all)
pred_all_logreg = logreg.predict(X_test_all )<create_dataframe>
|
test_preds = model.predict(test_dataset, verbose=1)
submission['prediction'] = test_preds.argmax(axis=1)
submission.to_csv("submission.csv" )
|
Contradictory, My Dear Watson
|
14,631,065 |
sub_logreg = pd.DataFrame()
sub_logreg['PassengerId'] = df_test['PassengerId']
sub_logreg['Survived'] = pred_all_logreg
<compute_train_metric>
|
os.environ["WANDB_API_KEY"] = "0"
np.random.seed(0 )
|
Contradictory, My Dear Watson
|
14,631,065 |
gnb=GaussianNB()
gnb.fit(X_train,y_train)
pred_gnb = gnb.predict(X_test)
print(confusion_matrix(y_test, pred_gnb))
print(classification_report(y_test, pred_gnb))
print(accuracy_score(y_test, pred_gnb))<train_model>
|
train = pd.read_csv(".. /input/contradictory-my-dear-watson/train.csv")
test = pd.read_csv(".. /input/contradictory-my-dear-watson/test.csv")
train.head()
|
Contradictory, My Dear Watson
|
14,631,065 |
knn = KNeighborsClassifier(n_neighbors=20)
knn.fit(X_train_sc,y_train_sc )<predict_on_test>
|
missing_values_count = train.isnull().sum()
print("Number of missing data points per column:
")
print(missing_values_count )
|
Contradictory, My Dear Watson
|
14,631,065 |
pred_knn = knn.predict(X_test)
print(confusion_matrix(y_test, pred_knn))
print(classification_report(y_test, pred_knn))
print(accuracy_score(y_test, pred_knn))<predict_on_test>
|
print("Number of training data rows: {}
".format(train.shape[0]))
print("Number of training data columns: {}
".format(train.shape[1]))
|
Contradictory, My Dear Watson
|
14,631,065 |
knn.fit(X_train_all, y_train_all)
pred_all_knn = knn.predict(X_test_all )<create_dataframe>
|
train.language.unique()
train.language.value_counts()
|
Contradictory, My Dear Watson
|
14,631,065 |
sub_knn = pd.DataFrame()
sub_knn['PassengerId'] = df_test['PassengerId']
sub_knn['Survived'] = pred_all_knn
<train_model>
|
train, validation = train_test_split(train, stratify=train.label.values,
random_state=42,
test_size=0.2, shuffle=True )
|
Contradictory, My Dear Watson
|
14,631,065 |
dtree = DecisionTreeClassifier()
dtree.fit(X_train,y_train )<predict_on_test>
|
print("Train data: {}
".format(train.shape))
print("Validation data: {}
".format(validation.shape))
|
Contradictory, My Dear Watson
|
14,631,065 |
pred_dtree = dtree.predict(X_test)
print(classification_report(y_test,pred_dtree))
print(accuracy_score(y_test, pred_dtree))<predict_on_test>
|
model_name = 'bert-base-multilingual-cased'
tokenizer = BertTokenizer.from_pretrained(model_name)
save_path = '.'
if not os.path.exists(save_path):
os.makedirs(save_path)
tokenizer.save_pretrained(save_path)
tokenizer = BertWordPieceTokenizer("vocab.txt", lowercase=False, strip_accents=False)
tokenizer
|
Contradictory, My Dear Watson
|
14,631,065 |
dtree_2 = DecisionTreeClassifier(max_features=7 , max_depth=6, min_samples_split=8)
dtree_2.fit(X_train,y_train)
pred_dtree_2 = dtree_2.predict(X_test)
print(classification_report(y_test, pred_dtree_2))
print(accuracy_score(y_test, pred_dtree_2))<predict_on_test>
|
tokenized_premise = tokenizer.encode_batch(train.premise.values.tolist())
train['premise_seq_length'] = [len(encoding.tokens)for encoding in tokenized_premise]
tokenized_hypothesis = tokenizer.encode_batch(train.hypothesis.values.tolist())
train['hypothesis_seq_length'] = [len(encoding.tokens)for encoding in tokenized_hypothesis]
info_per_lang = train.groupby('language' ).agg({'premise_seq_length': ['mean', 'max', 'count'], 'hypothesis_seq_length': ['mean', 'max', 'count']})
print(info_per_lang )
|
Contradictory, My Dear Watson
|
14,631,065 |
dtree_2.fit(X_train_all, y_train_all)
pred_all_dtree2 = dtree_2.predict(X_test_all )<train_model>
|
EPOCHS = 3
BATCH_SIZE = 64
MAX_LEN = 100
PATIENCE = 1
LEARNING_RATE = 1e-5
|
Contradictory, My Dear Watson
|
14,631,065 |
rfc = RandomForestClassifier(max_depth=6, max_features=7)
rfc.fit(X_train, y_train )<predict_on_test>
|
def encode(df, tokenizer, max_len=50):
pairs = df[['premise','hypothesis']].values.tolist()
tokenizer.enable_truncation(max_len)
tokenizer.enable_padding()
print("Encoding...")
enc_list = tokenizer.encode_batch(pairs)
print("Complete")
input_word_ids = tf.ragged.constant([enc.ids for enc in enc_list], dtype=tf.int32)
input_mask = tf.ragged.constant([enc.attention_mask for enc in enc_list], dtype=tf.int32)
input_type_ids = tf.ragged.constant([enc.type_ids for enc in enc_list], dtype=tf.int32)
inputs = {
'input_word_ids': input_word_ids.to_tensor() ,
'input_mask': input_mask.to_tensor() ,
'input_type_ids': input_type_ids.to_tensor() }
return inputs
|
Contradictory, My Dear Watson
|
14,631,065 |
pred_rfc = rfc.predict(X_test)
print(confusion_matrix(y_test, pred_rfc))
print(classification_report(y_test, pred_rfc))
print(accuracy_score(y_test, pred_rfc))<predict_on_test>
|
train_input = encode(train, tokenizer=tokenizer, max_len=MAX_LEN )
|
Contradictory, My Dear Watson
|
14,631,065 |
rfc.fit(X_train_all, y_train_all)
pred_all_rfc = rfc.predict(X_test_all )<save_to_csv>
|
validation_input = encode(validation, tokenizer=tokenizer, max_len=MAX_LEN )
|
Contradictory, My Dear Watson
|
14,631,065 |
sub_rfc = pd.DataFrame()
sub_rfc['PassengerId'] = df_test['PassengerId']
sub_rfc['Survived'] = pred_all_rfc
<train_model>
|
def build_model(model_name, max_len=50):
tf.random.set_seed(1234)
bert_encoder = TFBertModel.from_pretrained(model_name)
input_word_ids = tf.keras.Input(shape=(max_len,), dtype=tf.int32, name="input_word_ids")
input_mask = tf.keras.Input(shape=(max_len,), dtype=tf.int32, name="input_mask")
input_type_ids = tf.keras.Input(shape=(max_len,), dtype=tf.int32, name="input_type_ids")
embedding = bert_encoder([input_word_ids, input_mask, input_type_ids])[0]
cls_vector = embedding[:,0,:]
output = tf.keras.layers.Dense(3, activation='softmax' )(cls_vector)
model = tf.keras.Model(inputs=[input_word_ids, input_mask, input_type_ids], outputs=output)
model.compile(tf.keras.optimizers.Adam(lr=LEARNING_RATE), loss='sparse_categorical_crossentropy', metrics=['accuracy'])
return model
|
Contradictory, My Dear Watson
|
14,631,065 |
svc = SVC(gamma = 0.01, C = 100)
svc.fit(X_train_sc, y_train_sc )<predict_on_test>
|
checkpoint_filepath='bert_best_checkpoint.hdf5'
callbacks = [EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=PATIENCE), ModelCheckpoint(filepath=checkpoint_filepath, save_best_only=True, save_weights_only=True, monitor='val_accuracy', mode='max', verbose=1)]
train_history = model.fit(x=train_input, y=train.label.values, validation_data=(validation_input, validation.label.values), epochs=EPOCHS, verbose=1, batch_size=BATCH_SIZE, callbacks=callbacks )
|
Contradictory, My Dear Watson
|
14,631,065 |
pred_svc = svc.predict(X_test_sc)
print(confusion_matrix(y_test_sc, pred_svc))
print(classification_report(y_test_sc, pred_svc))
print(accuracy_score(y_test_sc, pred_svc))<save_to_csv>
|
del model
|
Contradictory, My Dear Watson
|
14,631,065 |
svc.fit(X_train_all_sc, y_train_all_sc)
pred_all_svc = svc.predict(X_test_all_sc)
sub_svc = pd.DataFrame()
sub_svc['PassengerId'] = df_test['PassengerId']
sub_svc['Survived'] = pred_all_svc
sub_svc.to_csv('svc.csv',index=False )<import_modules>
|
K.clear_session()
|
Contradictory, My Dear Watson
|
14,631,065 |
from sklearn.model_selection import cross_val_score<compute_train_metric>
|
PRETRAINED_MODEL_TYPES = {
'xlmroberta':(XLMRobertaConfig, TFXLMRobertaModel, XLMRobertaTokenizer, 'jplu/tf-xlm-roberta-large')
}
config_class, model_class, tokenizer_class, model_name = PRETRAINED_MODEL_TYPES['xlmroberta']
tokenizer = AutoTokenizer.from_pretrained(model_name)
tokenizer
|
Contradictory, My Dear Watson
|
14,631,065 |
scores_svc = cross_val_score(svc, X_train_all_sc, y_train_all_sc, cv=10, scoring='accuracy')
print(scores_svc)
print(scores_svc.mean() )<compute_train_metric>
|
def encode(df, tokenizer, max_len=50):
pairs = df[['premise','hypothesis']].values.tolist()
print("Encoding...")
encoded_dict = tokenizer.batch_encode_plus(pairs, max_length=max_len, padding=True, truncation=True,
add_special_tokens=True, return_attention_mask=True)
print("Complete")
input_word_ids = tf.convert_to_tensor(encoded_dict['input_ids'], dtype=tf.int32)
input_mask = tf.convert_to_tensor(encoded_dict['attention_mask'], dtype=tf.int32)
inputs = {
'input_word_ids': input_word_ids,
'input_mask': input_mask}
return inputs
|
Contradictory, My Dear Watson
|
14,631,065 |
scores_rfc = cross_val_score(rfc, X_train_all_sc, y_train_all_sc, cv=10, scoring='accuracy')
print(scores_rfc)
print(scores_rfc.mean() )<compute_train_metric>
|
train_input = encode(train, tokenizer=tokenizer, max_len=MAX_LEN )
|
Contradictory, My Dear Watson
|
14,631,065 |
scores_dtree_2 = cross_val_score(dtree_2, X_train_all_sc, y_train_all_sc, cv=10, scoring='accuracy')
print(scores_dtree_2)
print(scores_dtree_2.mean() )<compute_test_metric>
|
validation_input = encode(validation, tokenizer=tokenizer, max_len=MAX_LEN )
|
Contradictory, My Dear Watson
|
14,631,065 |
print("dtree_2 : " , scores_dtree_2.mean())
print("rfc : " , scores_rfc.mean())
print("svc : " , scores_svc.mean() )<import_modules>
|
def build_model(max_len=50):
tf.random.set_seed(1234)
encoder = model_class.from_pretrained(model_name)
input_word_ids = tf.keras.Input(shape=(max_len,), dtype=tf.int32, name="input_word_ids")
input_mask = tf.keras.Input(shape=(max_len,), dtype=tf.int32, name="input_mask")
embedding = encoder([input_word_ids, input_mask])[0]
sequence_output = embedding[:,0,:]
output = tf.keras.layers.Dense(3, activation="softmax" )(sequence_output)
model = tf.keras.Model(inputs=[input_word_ids, input_mask], outputs=output)
model.compile(tf.keras.optimizers.Adam(lr=LEARNING_RATE), loss='sparse_categorical_crossentropy', metrics=['accuracy'])
return model
|
Contradictory, My Dear Watson
|
14,631,065 |
import numpy as np
import pandas as pd
import time
from datetime import datetime
from sklearn.model_selection import train_test_split
from google.cloud import storage
from google.cloud import automl_v1beta1 as automl
from google.api_core.gapic_v1.client_info import ClientInfo
from automlwrapper import AutoMLWrapper<choose_model_class>
|
checkpoint_filepath='xlmroberta_best_checkpoint.hdf5'
callbacks = [EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=PATIENCE), ModelCheckpoint(filepath=checkpoint_filepath, save_best_only=True, save_weights_only=True, monitor='val_accuracy', mode='max', verbose=1)]
train_history = model.fit(x=train_input, y=train.label.values, validation_data=(validation_input, validation.label.values), epochs=EPOCHS, verbose=1, batch_size=BATCH_SIZE, callbacks=callbacks )
|
Contradictory, My Dear Watson
|
14,631,065 |
PROJECT_ID = 'cloudml-demo'
bucket_name = 'cloudml-demo-lcm'
region = 'us-central1'
dataset_display_name = 'kaggle_tweets'
model_display_name = 'kaggle_starter_model1'
storage_client = storage.Client(project=PROJECT_ID)
client = automl.AutoMlClient(client_info=ClientInfo())
print(f'Starting AutoML notebook at {datetime.fromtimestamp(time.time() ).strftime("%Y-%m-%d, %H:%M:%S UTC")}' )<load_from_csv>
|
def accuracy(x):
return round(float(x[2]/x[1]), 2)*100
validation['predictions'] = validation_predictions
lang_counts = validation.language.value_counts().sort_index()
tp_per_lang = validation[validation['label'] == validation['predictions']].groupby('language' ).agg({'language': ['count']} ).sort_index()
lang_names = lang_counts.index.tolist()
lang_tuples = list(zip(lang_names, lang_counts.values.tolist() , tp_per_lang.iloc[:, 0].values.tolist()))
acc = map(accuracy, lang_tuples)
for i, score in enumerate(acc):
print("Accuracy of {} is {} ".format(lang_tuples[i][0], score))
|
Contradictory, My Dear Watson
|