kernel_id (int64, 24.2k – 23.3M) | prompt (string, lengths 8 – 1.85M) | completion (string, lengths 1 – 182k) | comp_name (string, lengths 5 – 57)
---|---|---|---|
12,266,838 |
HOWMANYITERS = 10<categorify>
|
concatenate, Dropout, Lambda, MaxPooling2D, BatchNormalization
|
Conway's Reverse Game of Life 2020
|
12,266,838 |
def GP_deap(evolved_train):
global HOWMANYITERS
outputs = evolved_train['Survived'].values.tolist()
evolved_train = evolved_train.drop(["Survived","PassengerId"],axis=1)
inputs = evolved_train.values.tolist()
def protectedDiv(left, right):
try:
return left / right
except ZeroDivisionError:
return 1
def randomString(stringLength=10):
letters = string.ascii_lowercase
return ''.join(random.choice(letters)for i in range(stringLength))
pset = gp.PrimitiveSet("MAIN", len(evolved_train.columns))
pset.addPrimitive(operator.add, 2)
pset.addPrimitive(operator.sub, 2)
pset.addPrimitive(operator.mul, 2)
pset.addPrimitive(protectedDiv, 2)
pset.addPrimitive(math.cos, 1)
pset.addPrimitive(math.sin, 1)
pset.addPrimitive(math.tanh,1)
pset.addPrimitive(max, 2)
pset.addPrimitive(min, 2)
    pset.addEphemeralConstant(randomString(), lambda: random.uniform(-10, 10))
    # rename ARG0..ARG49 to x1..x50, one per feature column of evolved_train
    for i in range(50):
        pset.renameArguments(**{'ARG{}'.format(i): 'x{}'.format(i + 1)})
creator.create("FitnessMin", base.Fitness, weights=(1.0,))
creator.create("Individual", gp.PrimitiveTree, fitness=creator.FitnessMin)
toolbox = base.Toolbox()
toolbox.register("expr", gp.genHalfAndHalf, pset=pset, min_=1, max_=3)
toolbox.register("individual", tools.initIterate, creator.Individual, toolbox.expr)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
toolbox.register("compile", gp.compile, pset=pset)
def evalSymbReg(individual):
func = toolbox.compile(expr=individual)
return math.fsum(np.round(1.-(1./(1.+np.exp(-func(*in_)))))== out for in_, out in zip(inputs, outputs)) / len(evolved_train),
toolbox.register("evaluate", evalSymbReg)
toolbox.register("select", tools.selTournament, tournsize=3)
toolbox.register("mate", gp.cxOnePoint)
toolbox.register("expr_mut", gp.genFull, min_=0, max_=3)
toolbox.register("mutate", gp.mutUniform, expr=toolbox.expr_mut, pset=pset)
toolbox.decorate("mate", gp.staticLimit(key=operator.attrgetter("height"), max_value=17))
toolbox.decorate("mutate", gp.staticLimit(key=operator.attrgetter("height"), max_value=17))
pop = toolbox.population(n=300)
hof = tools.HallOfFame(1)
stats_fit = tools.Statistics(lambda ind: ind.fitness.values)
stats_size = tools.Statistics(len)
stats = tools.MultiStatistics(fitness=stats_fit, size=stats_size)
stats.register("avg", np.mean)
stats.register("std", np.std)
stats.register("min", np.min)
stats.register("max", np.max)
pop, log = algorithms.eaSimple(pop, toolbox, cxpb=0.7, mutpb=0.3, ngen=HOWMANYITERS, stats=stats,
halloffame=hof, verbose=True)
func2 = toolbox.compile(expr=hof[0])
return func2
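# GP_deap() above relies on imports that this extract never shows.
# A minimal sketch of what it needs, inferred from usage (an assumption,
# not copied from the source kernel):
import math
import operator
import random
import string
import numpy as np
from deap import algorithms, base, creator, gp, tools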
def manualtree(df):
Model = pd.DataFrame(data = {'manual_tree':[]})
male_title = ['Master']
for index, row in df.iterrows() :
Model.loc[index, 'manual_tree'] = 0
if(df.loc[index, 'Sex'] == 'female'):
Model.loc[index, 'manual_tree'] = 1
if(( df.loc[index, 'Sex'] == 'female')&
(df.loc[index, 'Pclass'] == 3)&
(df.loc[index, 'Embarked'] == 'S')&
(df.loc[index, 'Fare'] > 8)
):
Model.loc[index, 'manual_tree'] = 0
if(( df.loc[index, 'Sex'] == 'male')&
(df.loc[index, 'Title'] == 3)
):
Model.loc[index, 'manual_tree'] = 1
return Model
def MungeData(data):
title_list = [
'Dr', 'Mr', 'Master',
'Miss', 'Major', 'Rev',
'Mrs', 'Ms', 'Mlle','Col',
'Capt', 'Mme', 'Countess',
'Don', 'Jonkheer'
]
def replace_names_titles(x):
for title in title_list:
if title in x:
return title
data['Title'] = data.Name.apply(replace_names_titles)
    data['Title'] = data['Title'].replace(['Miss', 'Mrs', 'Ms', 'Mlle', 'Lady', 'Mme', 'the Countess', 'Dona'], 'Miss/Mrs/Ms')
    data['Title'] = data['Title'].replace(['Dr', 'Col', 'Major', 'Jonkheer', 'Capt', 'Sir', 'Don', 'Rev'], 'Dr/Military/Noble/Clergy')
    # note: the map below only knows the original titles, so the two merged
    # labels above become NaN and fall through to fillna(0) further down
    data['Title'] = data.Title.map({'Dr': 1, 'Mr': 2, 'Master': 3, 'Miss': 4, 'Major': 5, 'Rev': 6, 'Mrs': 7, 'Ms': 8, 'Mlle': 9,
                                    'Col': 10, 'Capt': 11, 'Mme': 12, 'Countess': 13, 'Don': 14, 'Jonkheer': 15})
data = data.drop(['Name'],axis = 1)
data.Title.fillna(0, inplace=True)
data['Is_Married'] = 0
    data.loc[data['Title'] == 7, 'Is_Married'] = 1
data["manual_tree"] = manualtree(data)
data['Age'] = data.groupby(['Sex', 'Pclass'])['Age'].apply(lambda x: x.fillna(x.median()))
data['Relatives'] = data.SibSp + data.Parch
data['Fare_per_person'] = data.Fare / np.mean(data.SibSp + data.Parch + 1)
    med_fare = data.groupby(['Pclass', 'Parch', 'SibSp']).Fare.median()[3][0][0]
data = data.drop(['SibSp', 'Parch'], axis=1)
data['Fare'] = data['Fare'].fillna(med_fare)
data.Sex.fillna('0', inplace=True)
data.loc[data.Sex != 'male', 'Sex'] = 0
data.loc[data.Sex == 'male', 'Sex'] = 1
data['Ticket_Frequency'] = data.groupby('Ticket')['Ticket'].transform('count')
data = data.drop(['Ticket'], axis=1)
data.Cabin.fillna('0', inplace=True)
data.loc[data.Cabin.str[0] == 'A', 'Cabin'] = 1
data.loc[data.Cabin.str[0] == 'B', 'Cabin'] = 1
data.loc[data.Cabin.str[0] == 'C', 'Cabin'] = 1
data.loc[data.Cabin.str[0] == 'D', 'Cabin'] = 2
data.loc[data.Cabin.str[0] == 'E', 'Cabin'] = 2
data.loc[data.Cabin.str[0] == 'F', 'Cabin'] = 3
data.loc[data.Cabin.str[0] == 'G', 'Cabin'] = 3
data.loc[data.Cabin.str[0] == 'T', 'Cabin'] = 3
data.loc[data.Embarked == 'C', 'Embarked'] = 1
data.loc[data.Embarked == 'Q', 'Embarked'] = 2
data.loc[data.Embarked == 'S', 'Embarked'] = 3
data.Embarked.fillna(3, inplace=True)
data["Cabin"] = data["Cabin"].astype(int)
numeric_features = ['Relatives','Fare_per_person', 'Fare', 'Age','Ticket_Frequency']
for feature in numeric_features:
x = data[feature].values
min_max_scaler = MinMaxScaler()
x_scaled = min_max_scaler.fit_transform(x.reshape(-1, 1))
data[feature] = pd.DataFrame(x_scaled)
cat_features = ['Pclass','Embarked', 'Sex', 'Cabin', 'Title','manual_tree','Is_Married']
encoded_features = []
for feature in cat_features:
encoded_feat = OneHotEncoder().fit_transform(data[feature].values.reshape(-1, 1)).toarray()
n = data[feature].nunique()
cols = ['{}_{}'.format(feature, n)for n in range(1, n + 1)]
encoded_df = pd.DataFrame(encoded_feat, columns=cols)
encoded_df.index = data.index
encoded_features.append(encoded_df)
data = pd.concat([data, *encoded_features], axis=1)
return data.astype(float)
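# MungeData() above also uses MinMaxScaler and OneHotEncoder; these are assumed
# to come from sklearn.preprocessing (inferred from usage, not shown in this extract):
from sklearn.preprocessing import MinMaxScaler, OneHotEncoder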
<load_from_csv>
|
K.image_data_format()
|
Conway's Reverse Game of Life 2020
|
12,266,838 |
raw_train = pd.read_csv('../input/titanic/train.csv')
raw_test = pd.read_csv('../input/titanic/test.csv')
pass_id_train = raw_train["PassengerId"]
survived_train = raw_train["Survived"]
pass_id_test = raw_test["PassengerId"]
evolved_train = MungeData(raw_train)
evolved_test = MungeData(raw_test)<load_pretrained>
|
Conway's Reverse Game of Life 2020
|
|
12,266,838 |
GeneticFunctionObject = GP_deap(evolved_train)
with open("geneticfunction.pickle","wb")as file:
pickle.dump(GeneticFunction,file )<save_to_csv>
|
trainY = single_step_df.loc[:, single_step_df.columns.str.startswith('start')].values.reshape(-1, 25, 25)
trainX = single_step_df.loc[:, single_step_df.columns.str.startswith('stop')].values.reshape(-1, 25, 25 )
|
Conway's Reverse Game of Life 2020
|
12,266,838 |
evolved_train = evolved_train.drop(["PassengerId","Survived"],axis=1)
train_nparray = evolved_train.values.tolist()
trainPredictions = Outputs(np.array([GeneticFunctionObject(*x)for x in train_nparray]))
print("Your score based on Train set(Remember, Kaggle/Test set score will be different):")
print(accuracy_score(survived_train.astype(int),trainPredictions.astype(int)))
pd_train = pd.DataFrame({'PassengerId': pass_id_train.astype(int),
'Predicted': trainPredictions.astype(int),
'Survived': survived_train.astype(int)})
pd_train.to_csv('gptrain_yourgenalgo.csv', index=False)
evolved_test = evolved_test.drop(["PassengerId"], axis=1)
test_nparray = evolved_test.values.tolist()
testPredictions = Outputs(np.array([GeneticFunctionObject(*x)for x in test_nparray]))
pd_test = pd.DataFrame({'PassengerId': pass_id_test.astype(int),
'Survived': testPredictions.astype(int)})
pd_test.to_csv('submission_yourgenalgo.csv', index=False)
<import_modules>
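# Outputs() used above is never defined in this extract. A minimal sketch that
# mirrors the sigmoid-and-round scoring inside evalSymbReg() (an assumption):
import numpy as np

def Outputs(data):
    return np.round(1. - (1. / (1. + np.exp(-data))))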
|
trainX = np.array([preprocess(xi) for xi in trainX]).astype(np.float32)
trainX = np.expand_dims(trainX, axis=-1).astype(np.float32)
|
Conway's Reverse Game of Life 2020
|
12,266,838 |
print(f'Using PyTorch v{torch.__version__}' )<load_from_csv>
|
trainY = np.array([preprocess(xi) for xi in trainY]).astype(np.float32)
trainY = np.expand_dims(trainY, axis=-1).astype(np.float32)
|
Conway's Reverse Game of Life 2020
|
12,266,838 |
train = pd.read_csv(".. /input/digit-recognizer/train.csv" )<define_variables>
|
def life_step_1_tensor(X):
    # count live neighbors by summing the 8 toroidally rolled copies of X
    nbrs_count = tf.stack([tf.roll(tf.roll(X, i, 0), j, 1)
                           for i in (-1, 0, 1) for j in (-1, 0, 1)
                           if (i != 0 or j != 0)])
    nbrs_count = tf.squeeze(K.sum(nbrs_count, axis=0, keepdims=True), axis=0)
    nbrs_count = (nbrs_count == 3) | ((X == 1) & (nbrs_count == 2))
    return tf.cast(nbrs_count, dtype=tf.uint32)
|
Conway's Reverse Game of Life 2020
|
12,266,838 |
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5,), (0.5,))
])
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def get_digits(df):
labels = []
start_inx = 0
if 'label' in df.columns:
labels = [v for v in df.label.values]
start_inx = 1
digits = []
for i in range(df.pixel0.size):
digit = df.iloc[i].astype(float ).values[start_inx:]
digit = np.reshape(digit,(28,28))
digit = transform(digit ).type('torch.FloatTensor')
if len(labels)> 0:
digits.append([digit, labels[i]])
else:
digits.append(digit)
return digits<init_hyperparams>
|
def postprocess_tensor(arr):
return arr[4:-4, 4:-4,:]
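# preprocess() is applied to the 25x25 boards but never defined in this extract.
# Since the models take (33, 33, 1) inputs and postprocess_tensor() crops 4 cells
# per side, a plausible sketch is toroidal padding by 4 (an assumption):
import numpy as np

def preprocess(board):
    # wrap-pad the 25x25 board to 33x33 so edge cells see their toroidal neighbors
    return np.pad(board, 4, mode='wrap')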
|
Conway's Reverse Game of Life 2020
|
12,266,838 |
train_X = get_digits(train)
num_workers = 0
batch_size = 64
valid_size = 0.2
num_train = len(train_X)
indices = list(range(num_train))
np.random.shuffle(indices)
split = int(np.floor(valid_size * num_train))
train_idx, valid_idx = indices[split:], indices[:split]
train_sampler = SubsetRandomSampler(train_idx)
valid_sampler = SubsetRandomSampler(valid_idx)
train_loader = torch.utils.data.DataLoader(train_X, batch_size=batch_size,
sampler=train_sampler, num_workers=num_workers)
valid_loader = torch.utils.data.DataLoader(train_X, batch_size=batch_size,
sampler=valid_sampler, num_workers=num_workers)
dataiter = iter(train_loader)
images, labels = next(dataiter)  # dataiter.next() was removed in recent PyTorch
print(type(images))
print(images.shape)
print(labels.shape)<init_hyperparams>
|
def wrap_pad(t, extra_dims):
s = tf.shape(t)
m = tf.constant([extra_dims[0], extra_dims[1]])
d = tf.constant([1, 3, 3, s.numpy() [-1]])
t = tf.tile(t, d)[:, s[1]-m[0]:m[0]-s[1], s[2]-m[1]:m[1]-s[2], :]
paddings = tf.constant([[0,0],[1, 1],[1, 1], [0,0]])
t = tf.pad(t, paddings, 'CONSTANT')
return t
|
Conway's Reverse Game of Life 2020
|
12,266,838 |
def calc_out(in_layers, stride, padding, kernel_size, pool_stride):
    # conv output size ((in - k + 2p)/s + 1), then divided by the pooling stride
    return int((1 + (in_layers - kernel_size + (2 * padding)) / stride) / pool_stride)
class Net(nn.Module):
def __init__(self):
super(Net, self ).__init__()
inputs = [1,32,64,64]
kernel_size = [5,5,3]
stride = [1,1,1]
pool_stride = [2,2,2]
layers = []
self.out = 28
self.depth = inputs[-1]
for i in range(len(kernel_size)) :
padding = int(kernel_size[i]/2)
self.out = calc_out(self.out, stride[i], padding,
kernel_size[i], pool_stride[i])
layers.append(nn.Conv2d(inputs[i], inputs[i+1], kernel_size[i],
stride=stride[i], padding=padding))
layers.append(nn.ReLU())
layers.append(nn.Conv2d(inputs[i+1], inputs[i+1], kernel_size[i],
stride=stride[i], padding=padding))
layers.append(nn.ReLU())
layers.append(nn.MaxPool2d(pool_stride[i],pool_stride[i]))
layers.append(nn.Dropout(p=0.2))
self.cnn_layers = nn.Sequential(*layers)
print(self.depth*self.out*self.out)
layers2 = []
layers2.append(nn.Dropout(p=0.2))
layers2.append(nn.Linear(self.depth*self.out*self.out, 512))
layers2.append(nn.Dropout(p=0.2))
layers2.append(nn.Linear(512, 256))
layers2.append(nn.Dropout(p=0.2))
layers2.append(nn.Linear(256, 256))
layers2.append(nn.Dropout(p=0.2))
layers2.append(nn.Linear(256, 10))
self.fc_layers = nn.Sequential(*layers2)
def forward(self, x):
x = self.cnn_layers(x)
x = x.view(-1, self.depth*self.out*self.out)
x = self.fc_layers(x)
return x
model = Net()
model<choose_model_class>
|
t = trainY[0, :, :, :]
|
Conway's Reverse Game of Life 2020
|
12,266,838 |
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters() , lr=0.0005 )<train_model>
|
def custom_loss(y_actual,y_pred):
pred_fwd = y_pred
actual_fwd = y_actual
y_actual_f = tf.cast(K.flatten(actual_fwd), tf.float32)
y_pred_f = tf.cast(K.flatten(pred_fwd), tf.float32)
bce = BinaryCrossentropy(from_logits=False)
return bce(y_actual_f, y_pred_f )
|
Conway's Reverse Game of Life 2020
|
12,266,838 |
n_epochs = 25
valid_loss_min = np.Inf
print(device)
model.to(device)
tLoss, vLoss = [], []
for epoch in range(n_epochs):
train_loss = 0.0
valid_loss = 0.0
model.train()
for data, target in train_loader:
data = data.to(device)
target = target.to(device)
optimizer.zero_grad()
output = model(data)
loss = criterion(output, target)
loss.backward()
optimizer.step()
train_loss += loss.item() *data.size(0)
model.eval()
for data, target in valid_loader:
data = data.to(device)
target = target.to(device)
output = model(data)
loss = criterion(output, target)
valid_loss += loss.item() *data.size(0)
train_loss = train_loss/len(train_loader.dataset)
valid_loss = valid_loss/len(valid_loader.dataset)
tLoss.append(train_loss)
vLoss.append(valid_loss)
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch, train_loss, valid_loss))
if valid_loss <= valid_loss_min:
        print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model...'.format(
            valid_loss_min,
            valid_loss))
torch.save(model.state_dict() , 'model_cifar.pt')
valid_loss_min = valid_loss<load_from_disk>
|
def crglnet_v1():
    inputShape = (33, 33, 1)
    inputs = Input(inputShape)
    c1 = Conv2D(64, (3, 3), activation='elu', padding='same')(inputs)
    c2 = Conv2D(64, (3, 3), activation='elu', padding='same')(c1)
    c3 = Conv2D(128, (5, 5), activation='elu', padding='same')(c2)
    c4 = Conv2D(64, (3, 3), activation='elu', padding='same')(c3)
    c5 = Conv2D(64, (3, 3), activation='elu', padding='same')(c4)
    c6 = Conv2D(1, (1, 1), activation='sigmoid')(c5)
    model = Model(inputs=[inputs], outputs=[c6])
    return model
|
Conway's Reverse Game of Life 2020
|
12,266,838 |
model.load_state_dict(torch.load('model_cifar.pt'));<find_best_params>
|
def crglnet_v2():
    inputShape = (33, 33, 1)
    inputs = Input(inputShape)
    c1 = Conv2D(32, (3, 3), activation='elu', padding='same')(inputs)
    c2 = Conv2D(64, (3, 3), activation='elu', padding='same')(c1)
    c3 = Conv2D(128, (5, 5), activation='elu', padding='same')(c2)
    c4 = concatenate([Conv2D(64, (3, 3), activation='elu', padding='same')(c3), c2])
    c5 = concatenate([Conv2D(32, (3, 3), activation='elu', padding='same')(c4), c1])
    c6 = Conv2D(64, (3, 3), activation='elu', padding='same')(c5)
    c7 = Conv2D(32, (3, 3), activation='elu', padding='same')(c6)
    c8 = Conv2D(1, (1, 1), activation='sigmoid')(c7)
    model = Model(inputs=[inputs], outputs=[c8])
    return model
|
Conway's Reverse Game of Life 2020
|
12,266,838 |
test_loss = 0.0
class_correct = [0]*10
class_total = [0]*10
model.eval()
conf_matrix = np.zeros((10, 10))
for data, target in valid_loader:
data = data.to(device)
target = target.to(device)
output = model(data)
loss = criterion(output, target)
test_loss += loss.item() *data.size(0)
_, pred = torch.max(output, 1)
correct_tensor = pred.eq(target.data.view_as(pred))
    correct = np.squeeze(correct_tensor.numpy()) if device == "cpu" else np.squeeze(correct_tensor.cpu().numpy())
    for i in range(target.size(0)):
label = target.data[i]
class_correct[label] += correct[i].item()
class_total[label] += 1
conf_matrix[label][pred.data[i]] += 1
test_loss = test_loss/len(valid_loader.dataset)
print('Test Loss: {:.6f}\n'.format(test_loss))
for i in range(10):
    if class_total[i] > 0:
        print('Test Accuracy of %3s: %2d%% (%2d/%2d)' % (
            i, 100 * class_correct[i] / class_total[i],
            np.sum(class_correct[i]), np.sum(class_total[i])))
    else:
        print('Test Accuracy of %3s: N/A (no training examples)' % (i))
print('\nTest Accuracy (Overall): %2d%% (%2d/%2d)' % (
    100. * np.sum(class_correct) / np.sum(class_total),
    np.sum(class_correct), np.sum(class_total)))<load_from_csv>
|
model = crglnet_v2()
|
Conway's Reverse Game of Life 2020
|
12,266,838 |
test = pd.read_csv(".. /input/digit-recognizer/test.csv")
test_X = get_digits(test)
test_loader = torch.utils.data.DataLoader(test_X, batch_size=batch_size,
num_workers=num_workers )<prepare_output>
|
opt = Adam(lr=1e-3)
model.compile(optimizer=opt, loss=custom_loss, metrics=['accuracy', tf.keras.metrics.MeanIoU(num_classes=2)] )
|
Conway's Reverse Game of Life 2020
|
12,266,838 |
ImageId, Label = [],[]
for data in test_loader:
data = data.to(device)
output = model(data)
_, pred = torch.max(output, 1)
for i in range(len(pred)) :
ImageId.append(len(ImageId)+1)
Label.append(pred[i].cpu().numpy())
sub = pd.DataFrame(data={'ImageId':ImageId, 'Label':Label})
sub.describe()<save_to_csv>
|
from sklearn.model_selection import train_test_split
|
Conway's Reverse Game of Life 2020
|
12,266,838 |
sub.to_csv("submission.csv", index=False )<install_modules>
|
from sklearn.model_selection import train_test_split
|
Conway's Reverse Game of Life 2020
|
12,266,838 |
!pip install autofeat<set_options>
|
train_x, test_x, train_y, test_y = train_test_split(trainX, trainY, test_size=0.2, random_state=42 )
|
Conway's Reverse Game of Life 2020
|
12,266,838 |
warnings.filterwarnings("ignore")
pd.set_option('display.max_columns', 100)<load_from_csv>
|
def step(train_x, true_y):
loss = []
with tf.GradientTape() as tape:
pred_y = model(train_x)
pred_y = tf.cast(pred_y,tf.float32)
model_loss = custom_loss(true_y, pred_y)
loss.append(model_loss.numpy())
model_gradients = tape.gradient(model_loss, model.trainable_variables)
opt.apply_gradients(zip(model_gradients, model.trainable_variables))
return np.mean(loss )
|
Conway's Reverse Game of Life 2020
|
12,266,838 |
traindf = pd.read_csv('../input/titanic/train.csv').set_index('PassengerId')
testdf = pd.read_csv('../input/titanic/test.csv').set_index('PassengerId')
df = pd.concat([traindf, testdf], axis=0, sort=False )<feature_engineering>
|
epochs = 20
batch_size = 32
bat_per_epoch = math.floor(len(train_x)/ batch_size)
epoch_loss = []
for epoch in tqdm(range(epochs)) :
step_loss = []
for i in tqdm(range(bat_per_epoch)) :
n = i*batch_size
step_loss.append(step(train_x[n:n+batch_size], train_y[n:n+batch_size]))
epoch_loss.append(np.mean(step_loss))
print(f'Epoch: {epoch}, Loss: {epoch_loss[epoch]}')
|
Conway's Reverse Game of Life 2020
|
12,266,838 |
df = pd.concat([traindf, testdf], axis=0, sort=False)
df['Title'] = df.Name.str.split(',').str[1].str.split('.').str[0].str.strip()
df['IsWomanOrBoy'] =(( df.Title == 'Master')|(df.Sex == 'female'))
df['LastName'] = df.Name.str.split(',' ).str[0]
family = df.groupby(df.LastName ).Survived
df['WomanOrBoyCount'] = family.transform(lambda s: s[df.IsWomanOrBoy].fillna(0 ).count())
df['WomanOrBoyCount'] = df.mask(df.IsWomanOrBoy, df.WomanOrBoyCount - 1, axis=0)
df['FamilySurvivedCount'] = family.transform(lambda s: s[df.IsWomanOrBoy].fillna(0 ).sum())
df['FamilySurvivedCount'] = df.mask(df.IsWomanOrBoy, df.FamilySurvivedCount - \
df.Survived.fillna(0), axis=0)
df['WomanOrBoySurvived'] = df.FamilySurvivedCount / df.WomanOrBoyCount.replace(0, np.nan)
df.WomanOrBoyCount = df.WomanOrBoyCount.replace(np.nan, 0)
df['Alone'] =(df.WomanOrBoyCount == 0)
df['Title'] = df['Title'].replace('Ms','Miss')
df['Title'] = df['Title'].replace('Mlle','Miss')
df['Title'] = df['Title'].replace('Mme','Mrs')
df['Embarked'] = df['Embarked'].fillna('S')
df['Deck'] = df['Cabin'].apply(lambda s: s[0] if pd.notnull(s)else 'M')
df.loc[(df['Deck'] == 'T'), 'Deck'] = 'A'
med_fare = df.groupby(['Pclass', 'Parch', 'SibSp']).Fare.median()[3][0][0]
df['Fare'] = df['Fare'].fillna(med_fare)
df['Age'] = df.groupby(['Sex', 'Pclass', 'Title'])['Age'].apply(lambda x: x.fillna(x.median()))
df['Family_Size'] = df['SibSp'] + df['Parch'] + 1
cols_to_drop = ['Name','Ticket','Cabin']
df = df.drop(cols_to_drop, axis=1)
df.WomanOrBoySurvived = df.WomanOrBoySurvived.fillna(0)
df.WomanOrBoyCount = df.WomanOrBoyCount.fillna(0)
df.FamilySurvivedCount = df.FamilySurvivedCount.fillna(0)
df.Alone = df.Alone.fillna(0)
df.Alone = df.Alone*1<prepare_x_and_y>
|
model.compile(optimizer=opt, loss=custom_loss, metrics=['accuracy', tf.keras.metrics.MeanIoU(num_classes=2)])
print(model.evaluate(test_x, test_y, verbose=0))
|
Conway's Reverse Game of Life 2020
|
12,266,838 |
target = df.Survived.loc[traindf.index]
df = df.drop(['Survived'], axis=1)
train, test = df.loc[traindf.index], df.loc[testdf.index]<define_variables>
|
preds = model.predict(test_x)
preds.shape
|
Conway's Reverse Game of Life 2020
|
12,266,838 |
numerics = ['int8', 'int16', 'int32', 'int64', 'float16', 'float32', 'float64']
categorical_columns = []
features = train.columns.values.tolist()
for col in features:
if train[col].dtype in numerics: continue
categorical_columns.append(col)
categorical_columns<data_type_conversions>
|
preds_thresh = tf.where(preds>0.5,1,0)
pred_fwd = tf.map_fn(life_step_1_tensor, preds_thresh, fn_output_signature=tf.uint32)
actual_fwd = tf.map_fn(life_step_1_tensor, test_y, fn_output_signature=tf.uint32 )
|
Conway's Reverse Game of Life 2020
|
12,266,838 |
for col in categorical_columns:
if col in train.columns:
le = LabelEncoder()
        le.fit(list(train[col].astype(str).values) + list(test[col].astype(str).values))
        train[col] = le.transform(list(train[col].astype(str).values))
        test[col] = le.transform(list(test[col].astype(str).values))<choose_model_class>
|
start_features = [f for f in train_df.columns if "start" in f]
stop_features = [f for f in train_df.columns if "stop" in f]
|
Conway's Reverse Game of Life 2020
|
12,266,838 |
model = AutoFeatRegressor()
model<categorify>
|
sub = pd.DataFrame()
m = tf.keras.metrics.Accuracy()
accuracies = []
t1 = tqdm(range(1, 6), desc='Delta: ')
for delta in t1:
m.reset_states()
t1.set_description(f'Delta: {delta}')
t1.refresh()
test_data_iter = train_df.loc[train_df['delta'] == delta]
tmp_sub = test_data_iter[["id"]].copy()
testY = test_data_iter.loc[:, test_data_iter.columns.str.startswith('start')].values.reshape(-1, 25, 25)
testX = test_data_iter.loc[:, test_data_iter.columns.str.startswith('stop')].values.reshape(-1, 25, 25)
    testX = np.array([preprocess(xi) for xi in testX]).astype(np.float32)
    testX = np.expand_dims(testX, axis=-1).astype(np.float32)
    testY = np.array([preprocess(xi) for xi in testY]).astype(np.float32)
    testY = np.expand_dims(testY, axis=-1).astype(np.float32)
t2 = tqdm(range(delta))
for i in t2:
if i == 0:
preds = model.predict(testX)
else:
preds = tf.where(preds>0.5,1,0)
preds = model.predict(preds)
preds = tf.cast(tf.where(preds>0.5,1,0), tf.uint32)
m.update_state(preds, testY)
acc = m.result().numpy()
print(f'Accuracy: {acc}')
accuracies.append(acc)
preds = preds[:, 4:-4, 4:-4,:].numpy()
tmp = pd.DataFrame(preds.reshape(-1, 625 ).astype(np.uint8), columns=start_features, index=tmp_sub['id'])
tmp_sub = tmp_sub.join(tmp)
sub = sub.append(tmp_sub)
sub.sort_index(inplace = True )
|
Conway's Reverse Game of Life 2020
|
12,266,838 |
X_train_feature_creation = model.fit_transform(train, target)
X_test_feature_creation = model.transform(test)
X_train_feature_creation.head()<count_values>
|
print(f'Mean accuracy: {np.array(accuracies).mean()}')
print(f'LB score estimate from training: {1 - np.array(accuracies).mean()}')
|
Conway's Reverse Game of Life 2020
|
12,266,838 |
print('Number of new features -',X_train_feature_creation.shape[1] - train.shape[1] )<concatenate>
|
sub = pd.DataFrame()
m = tf.keras.metrics.Accuracy()
accuracies = []
t1 = tqdm(range(1, 6), desc='Delta: ')
for delta in t1:
m.reset_states()
t1.set_description(f'Delta: {delta}')
t1.refresh()
test_data_iter = test_df.loc[test_df['delta'] == delta]
tmp_sub = test_data_iter[["id"]].copy()
tmp_sub.set_index(tmp_sub['id'].values)
testX = test_data_iter.loc[:, test_data_iter.columns.str.startswith('stop')].values.reshape(-1, 25, 25)
    testX = np.array([preprocess(xi) for xi in testX]).astype(np.float32)
    testX = np.expand_dims(testX, axis=-1).astype(np.float32)
t2 = tqdm(range(delta))
for i in t2:
if i == 0:
preds = model.predict(testX)
else:
preds = tf.where(preds>0.5,1,0)
preds = model.predict(preds)
preds = tf.cast(tf.where(preds>0.5,1,0), tf.uint32)
preds = preds[:, 4:-4, 4:-4,:].numpy()
tmp = pd.DataFrame(preds.reshape(-1, 625 ).astype(np.uint8), columns=start_features, index=tmp_sub['id'].values)
tmp.insert(loc = 0, column='id', value=tmp_sub['id'].values)
sub = sub.append(tmp)
sub.sort_index(inplace = True)
sub.reset_index(drop = True, inplace = True )
|
Conway's Reverse Game of Life 2020
|
12,266,838 |
df2 = pd.concat([df.WomanOrBoySurvived.fillna(0), df.Alone, df.Sex.replace({'male': 0, 'female': 1})], axis=1)
train2, test2 = df2.loc[traindf.index], df2.loc[testdf.index]<categorify>
|
sub.to_csv('submission.csv', index=False )
|
Conway's Reverse Game of Life 2020
|
12,266,838 |
<count_values><EOS>
|
sub.to_csv('submission.csv', index=False )
|
Conway's Reverse Game of Life 2020
|
12,141,564 |
<SOS> metric: PostProcessorKernel Kaggle data source: conways-reverse-game-of-life-2020<save_to_csv>
|
notebook_start = time.perf_counter()
display(HTML())
%load_ext autoreload
%autoreload 2
|
Conway's Reverse Game of Life 2020
|
12,141,564 |
test_x = df2.loc[testdf.index]
test_x['Survived'] =(((test_x.WomanOrBoySurvived <= 0.238)&(test_x.Sex > 0.5)&(test_x.Alone > 0.5)) | \
(( test_x.WomanOrBoySurvived > 0.238)& \
~(( test_x.WomanOrBoySurvived > 0.55)&(test_x.WomanOrBoySurvived <= 0.633))))
pd.DataFrame({'Survived': test_x['Survived'].astype(int)}, \
index=testdf.index ).reset_index().to_csv('survived.csv', index=False )<define_variables>
|
! python3 -m pip install -q z3-solver
! apt-get install -qq tree moreutils
|
Conway's Reverse Game of Life 2020
|
12,141,564 |
LB_simple_rule = 0.83253<train_model>
|
!rm -rf /ai-games/
!git clone https://github.com/JamesMcGuigan/ai-games/ /ai-games/
!cp -rf /ai-games/puzzles/game_of_life/* ./
!rm -rf /kaggle/working/neural_networks/
!cd /ai-games/; git log -n1
|
Conway's Reverse Game of Life 2020
|
12,141,564 |
model_LR = LinearRegression().fit(train,target.to_numpy().flatten())
model_Autofeat = LinearRegression().fit(X_train_feature_creation, target.to_numpy().flatten() )<predict_on_test>
|
from utils.util import *
from utils.plot import *
from utils.game import *
from utils.datasets import *
from utils.tuplize import *
from hashmaps.crop import *
from hashmaps.hash_functions import *
from hashmaps.translation_solver import *
from hashmaps.repeating_patterns import *
from constraint_satisfaction.fix_submission import *
|
Conway's Reverse Game of Life 2020
|
12,141,564 |
test['Survived_LR'] = np.clip(model_LR.predict(test),0,1)
test['Survived_AF'] = np.clip(model_Autofeat.predict(X_test_feature_creation),0,1 )<find_best_params>
|
@njit()
def get_concentric_prime_mask(shape: Tuple[int,int]=(25,25)) -> np.ndarray:
pattern = 'diamond'
assert shape[0] == shape[1]
assert pattern in [ 'diamond', 'oval' ]
    x = shape[0] // 2
    y = shape[1] // 2
    max_r = max(shape) + 1 if max(shape) % 2 == 0 else max(shape)
mask = np.zeros(shape, dtype=np.int64)
for r in range(max_r):
primes = hashable_primes[:r+1]
for dr in range(r+1):
if pattern == 'diamond': prime = primes[r]
elif pattern == 'oval': prime = primes[r] + primes[dr]
            coords = {
                (x + (r - dr), y + dr),
                (x - (r - dr), y + dr),
                (x + (r - dr), y - dr),
                (x - (r - dr), y - dr),
            }
            for coord in coords:
                if min(coord) >= 0 and max(coord) < min(shape):
                    mask[coord] = prime
return mask
@njit()
def hash_geometric_concentric(board: np.ndarray)-> int:
assert board.shape[0] == board.shape[1]
mask = get_concentric_prime_mask(shape=board.shape)
hashed = 0
for x in range(board.shape[0]):
for y in range(board.shape[1]):
for dx in range(mask.shape[0]):
for dy in range(mask.shape[1]):
coords =(( x+dx)%board.shape[0],(y+dy)%board.shape[1])
hashed += board[coords] * mask[dx,dy]
return hashed
hash_geometric = hash_geometric_concentric
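# hashable_primes comes from the cloned ai-games repo and is not shown here.
# A minimal stand-in sketch (an assumption; the repo's actual values may differ):
# enough small primes to cover max_r for a 25x25 board.
hashable_primes = np.array([2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41,
                            43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97],
                           dtype=np.int64)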
|
Conway's Reverse Game of Life 2020
|
12,141,564 |
def hyperopt_gb_score(params):
    clf = GradientBoostingClassifier(**params)
    current_score = cross_val_score(clf, X_train_feature_creation, target, cv=10).mean()
    print(current_score, params)
    # hyperopt's fmin minimizes its objective, so negate the CV accuracy
    return -current_score
space_gb = {
'n_estimators': hp.choice('n_estimators', range(100, 1000)) ,
'max_depth': hp.choice('max_depth', np.arange(2, 10, dtype=int))
}
best = fmin(fn=hyperopt_gb_score, space=space_gb, algo=tpe.suggest, max_evals=10)
print('best:')
print(best )<find_best_params>
|
def label_board(board):
tessellation = tessellate_board(board)
    tessellation = scipy.ndimage.convolve(tessellation, [[0, 1, 0], [1, 1, 1], [0, 1, 0]]).astype(bool).astype(np.int8)
labeled = skimage.measure.label(tessellation, background=0, connectivity=2)
labeled = detessellate_board(labeled)
return labeled
def extract_clusters(board: np.ndarray)-> List[np.ndarray]:
labeled = label_board(board)
return extract_clusters_from_labels(board, labeled)
def extract_clusters_from_labels(board: np.ndarray, labeled: np.ndarray)-> List[np.ndarray]:
labels = np.unique(labeled)
clusters = []
for label in labels:
cluster = board *(labeled == label)
clusters.append(cluster)
return clusters
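# tessellate_board()/detessellate_board() also live in the cloned repo. A plausible
# sketch, guessed from the names and the toroidal board (an assumption): tile the
# board 3x3 so component labeling sees wraparound, then cut the central copy out.
import numpy as np

def tessellate_board(board):
    return np.tile(board, (3, 3))

def detessellate_board(tessellation):
    h, w = tessellation.shape[0] // 3, tessellation.shape[1] // 3
    return tessellation[h:2 * h, w:2 * w]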
|
Conway's Reverse Game of Life 2020
|
12,141,564 |
params = space_eval(space_gb, best)
params<train_model>
|
dataset_size = 100 if os.environ.get('KAGGLE_KERNEL_RUN_TYPE') == 'Interactive' else 400_000
print(f'dataset_size = {dataset_size}')
|
Conway's Reverse Game of Life 2020
|
12,141,564 |
gradient_boosting = GradientBoostingClassifier(**params)
gradient_boosting.fit(X_train_feature_creation, target)
Y_pred = gradient_boosting.predict(X_test_feature_creation ).astype(int)
gradient_boosting.score(X_train_feature_creation, target)
acc_gradient_boosting = round(gradient_boosting.score(X_train_feature_creation, target)* 100, 2)
acc_gradient_boosting<save_to_csv>
|
def read_gzip_pickle_file(filename: str) -> Any:
    try:
        if not os.path.exists(filename): raise FileNotFoundError
        with open(filename, 'rb') as file:
            data = file.read()
            try:    data = gzip.decompress(data)
            except: pass
            data = pickle.loads(data)
    except Exception:
        data = None
    return data
def save_gzip_pickle_file(data: Any, filename: str, verbose=True) -> int:
    try:
        with open(filename, 'wb') as file:
            data = pickle.dumps(data)
            data = gzip.compress(data)
            file.write(data)
        filesize = os.path.getsize(filename)
        if verbose: print(f'wrote: {filename} = {humanize.naturalsize(filesize)}')
        return filesize
    except:
        return 0
|
Conway's Reverse Game of Life 2020
|
12,141,564 |
pd.DataFrame({'Survived': test['Survived_LR'].astype(int)}, \
index=testdf.index ).reset_index().to_csv('survived_LR16.csv', index=False)
pd.DataFrame({'Survived': test['Survived_AF'].astype(int)}, \
index=testdf.index ).reset_index().to_csv('survived_Autofeat16.csv', index=False)
pd.DataFrame({'Survived': Y_pred}, \
index=testdf.index ).reset_index().to_csv('survived_GBC16.csv', index=False )<define_variables>
|
def generate_cluster_history_lookup(dataset_size=3_000_000//25, forward_play=25, verbose=True):
time_start = time.perf_counter()
csv_size = len(train_df.index)
dataset = np.concatenate([
csv_to_numpy_list(train_df, key='start'),
generate_random_boards(max(1, dataset_size - csv_size))
])[:dataset_size]
cluster_history_lookup = get_cluster_history_lookup(dataset, forward_play=forward_play)
time_taken = time.perf_counter() - time_start
if verbose: print(f'{len(cluster_history_lookup)} unique clusters in {time_taken:.1f}s = {1000*time_taken/len(dataset):.0f}ms/board')
return cluster_history_lookup
cluster_history_lookup_cachefile = f'{output_directory}/cluster_history_lookup.pickle'
cluster_history_lookup = read_gzip_pickle_file(cluster_history_lookup_cachefile)
if __name__ == '__main__':
cluster_history_lookup = generate_cluster_history_lookup(dataset_size=dataset_size)
save_gzip_pickle_file(cluster_history_lookup, cluster_history_lookup_cachefile)
|
Conway's Reverse Game of Life 2020
|
12,141,564 |
LB_LR16 = 0.69377
LB_Autofeat16 = 0.67942
LB_GBC16 = 0.82296<train_model>
|
def image_segmentation_dataframe_solver(df, history, submission_df=None, exact=False, blank_missing=True, verbose=True):
time_start = time.perf_counter()
stats = { "partial": 0, "exact": 0, "total": 0 }
submission_df = submission_df if submission_df is not None else sample_submission_df.copy()
idxs = df.index
deltas = csv_to_delta_list(df)
boards = csv_to_numpy_list(df, key='stop')
    labeleds = Parallel(-1)(delayed(label_board)(board) for board in boards)
    clustereds = Parallel(-1)(delayed(extract_clusters_from_labels)(board, labels) for board, labels in zip(boards, labeleds))
for idx, delta, stop_board, labels, clusters in zip(idxs, deltas, boards, labeleds, clustereds):
start_board = image_segmentation_solver(
stop_board, delta, history=history, blank_missing=blank_missing,
labels=labels, clusters=clusters
)
is_valid = is_valid_solution(start_board, stop_board, delta)
if is_valid: stats['exact'] += 1
elif np.count_nonzero(start_board): stats['partial'] += 1
stats['total'] += 1
if is_valid or not exact:
submission_df.loc[idx] = numpy_to_series(start_board, key='start')
time_taken = time.perf_counter() - time_start
stats['time_seconds'] = int(time_taken)
stats['time_hours'] = round(time_taken/60/60, 2)
if verbose: print('image_segmentation_solver() ', stats)
return submission_df
def image_segmentation_solver(stop_board, delta, history=None, blank_missing=True, labels=None, clusters=None):
history = history if history is not None else cluster_history_lookup
labels = labels if labels is not None else label_board(stop_board)
clusters = clusters if clusters is not None else extract_clusters_from_labels(stop_board, labels)
labels = np.unique(labels)
    now_hashes = Parallel(-1)(delayed(hash_geometric)(cluster) for cluster in clusters)
new_clusters = {}
for label, now_cluster, now_hash in zip(labels, clusters, now_hashes):
if label == 0: continue
if np.count_nonzero(now_cluster)== 0: continue
if history.get(now_hash,{} ).get(delta,None):
for past_hash in history[now_hash][delta].keys() :
try:
start_cluster = history[now_hash][delta][past_hash]['start']
stop_cluster = history[now_hash][delta][past_hash]['stop']
transform_fn = solve_translation(stop_cluster, now_cluster)
past_cluster = transform_fn(start_cluster)
new_clusters[label] = past_cluster
break
except Exception as exception:
pass
if not label in new_clusters:
if blank_missing: new_clusters[label] = np.zeros(now_cluster.shape, dtype=np.int8)
else: new_clusters[label] = now_cluster
start_board = np.zeros(stop_board.shape, dtype=np.int8)
for cluster in new_clusters.values() :
start_board += cluster
    start_board = start_board.astype(bool).astype(np.int8)
return start_board
|
Conway's Reverse Game of Life 2020
|
12,141,564 |
model_LR2 = LinearRegression().fit(train2,target.to_numpy().flatten())
model_Autofeat2 = LinearRegression().fit(X_train_feature_creation2, target.to_numpy().flatten() )<predict_on_test>
|
submission_df = image_segmentation_dataframe_solver(test_df[:dataset_size], history=cluster_history_lookup, exact=False)
submission_df.to_csv('submission.csv')
|
Conway's Reverse Game of Life 2020
|
12,141,564 |
test2['Survived_LR'] = np.clip(model_LR2.predict(test2),0,1)
test2['Survived_AF'] = np.clip(model_Autofeat2.predict(X_test_feature_creation2),0,1 )<find_best_params>
|
!(for FILE in $(find ./ ../input/ -name 'submission.csv' | sort); do cat $FILE | grep ',1' | wc -l | tr '\n' ' '; echo $FILE; done) | sort -n;
!find ./ ../input/ -name 'submission.csv' | xargs cat | sort -nr | uniq | awk -F',' '!a[$1]++' | sort -n > ./submission.csv
!(for FILE in $(find ./ ../input/ -name 'submission.csv' | sort); do cat $FILE | grep ',1' | wc -l | tr '\n' ' '; echo $FILE; done) | sort -n;
!PYTHONPATH='.' python3 ./constraint_satisfaction/fix_submission.py
|
Conway's Reverse Game of Life 2020
|
11,962,542 |
def hyperopt_gb_score(params):
    clf = GradientBoostingClassifier(**params)
    current_score = cross_val_score(clf, X_train_feature_creation2, target, cv=10).mean()
    print(current_score, params)
    # as above: fmin minimizes, so negate the CV accuracy
    return -current_score
space_gb = {
'n_estimators': hp.choice('n_estimators', range(100, 1000)) ,
'max_depth': hp.choice('max_depth', np.arange(2, 10, dtype=int))
}
best = fmin(fn=hyperopt_gb_score, space=space_gb, algo=tpe.suggest, max_evals=10)
print('best:')
print(best )<compute_train_metric>
|
sample_submission_df = pd.read_csv('../input/conways-reverse-game-of-life-2020/sample_submission.csv', index_col='id')
test_df = pd.read_csv('../input/conways-reverse-game-of-life-2020/test.csv', index_col='id')
deltas = test_df['delta'].values
boards = csv_to_numpy_list(test_df)
|
Conway's Reverse Game of Life 2020
|
11,962,542 |
params2 = space_eval(space_gb, best)
params2<train_model>
|
SIZE = 25
empty_board = np.zeros((SIZE, SIZE), dtype=bool)
def v(c):
    # map cell coordinates to a SAT literal id (negative by this kernel's convention)
    return -(SIZE * c[0] + c[1] + 1)
def dead_clauses(res, c, x):
for i1 in range(0, 6):
for i2 in range(i1+1, 7):
for i3 in range(i2+1, 8):
a = [v(x[i])for i in range(8)]
a[i1], a[i2], a[i3] = -a[i1], -a[i2], -a[i3]
res.append(a)
for i1 in range(0, 7):
for i2 in range(i1+1, 8):
a = [v(x[i])if i < 8 else -v(c)for i in range(9)]
a[i1], a[i2] = -a[i1], -a[i2]
res.append(a)
def live_clauses(res, c, x):
for i1 in range(0, 5):
for i2 in range(i1+1, 6):
for i3 in range(i2+1, 7):
for i4 in range(i3+1, 8):
res.append([-v(x[i1]), -v(x[i2]), -v(x[i3]), -v(x[i4])])
for i1 in range(0, 7):
for i2 in range(i1+1, 8):
a = [v(x[i])if i < 8 else v(c)for i in range(9)if i != i1 and i != i2]
res.append(a)
for i1 in range(0, 8):
a = [v(x[i])for i in range(8)if i != i1]
res.append(a)
def board_clauses(board, use_opt=True):
    res, opt1, opt2 = [], [], []
    for i in range(SIZE):
        for j in range(SIZE):
            x = [((i + k % 3 - 1) % SIZE, (j + k // 3 - 1) % SIZE) for k in range(9) if k != 4]
            if board[i, j]:
                live_clauses(res, (i, j), x)
            else:
                dead_clauses(res, (i, j), x)
            if use_opt:
                y = [((i + k % 5 - 2) % SIZE, (j + k // 5 - 2) % SIZE) for k in range(25)]
                if sum(board[ii, jj] for ii, jj in y) < 1:
                    res.append([-v((i, j))])
                elif sum(board[ii, jj] for ii, jj in x) < 1:
                    opt1.append([-v((i, j))])
                elif sum(board[ii, jj] for ii, jj in x) < 2:
                    opt2.append([-v((i, j))])
    return res, opt1, opt2
|
Conway's Reverse Game of Life 2020
|
11,962,542 |
gradient_boosting2 = GradientBoostingClassifier(**params2)
gradient_boosting2.fit(X_train_feature_creation2, target)
Y_pred2 = gradient_boosting2.predict(X_test_feature_creation2 ).astype(int)
gradient_boosting2.score(X_train_feature_creation2, target)
acc_gradient_boosting2 = round(gradient_boosting2.score(X_train_feature_creation2, target)* 100, 2)
acc_gradient_boosting2<save_to_csv>
|
N = len(deltas)
score = 0
for n in tqdm(range(N), total=N):
clauses, opt1, opt2 = board_clauses(boards[n], use_opt = False)
solution = pycosat.solve(clauses)
if isinstance(solution, str):
print(f'{n} not solved!')
continue
    board = np.array(solution[:SIZE**2]) < 0
sample_submission_df.loc[test_df.index[n]] = 1 * board
board = life_step(board.reshape(SIZE,SIZE))
d = np.sum(board ^ boards[n])
score += d / 625
print(score / N)
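# life_step() used above is never defined in this extract. A standard toroidal
# Game of Life step (a sketch; np.roll wraparound is assumed to match the
# competition's board semantics):
def life_step(board):
    nbrs = sum(np.roll(np.roll(board, i, 0), j, 1)
               for i in (-1, 0, 1) for j in (-1, 0, 1) if i or j)
    return (nbrs == 3) | (board & (nbrs == 2))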
|
Conway's Reverse Game of Life 2020
|
11,962,542 |
pd.DataFrame({'Survived': test2['Survived_LR'].astype(int)}, \
index=testdf.index ).reset_index().to_csv('survived_LR3.csv', index=False)
pd.DataFrame({'Survived': test2['Survived_AF'].astype(int)}, \
index=testdf.index ).reset_index().to_csv('survived_Autofeat3.csv', index=False)
pd.DataFrame({'Survived': Y_pred2}, \
index=testdf.index ).reset_index().to_csv('survived_GBC3.csv', index=False )<init_hyperparams>
|
N = 1
for n in tqdm(range(N), total=N):
T = min(deltas[n], 3)
board = np.tile(empty_board,(T+1, 1, 1))
board[0] = boards[n]
solvers = [None for _ in range(T)]
opt = [None for _ in range(T)]
    os = [0 for _ in range(T)]   # note: this list shadows the 'os' module inside this cell
    oe = [0 for _ in range(T)]
t = 0
while 0 <= t and t < T:
if solvers[t] is None:
clauses, opt1, opt2 = board_clauses(board[t])
solution = pycosat.solve(clauses)
if not isinstance(solution, str):
if t == T - 1:
print(t, '!!', end=" ")
t += 1
board[t] = np.array(solution[:SIZE**2] ).reshape(SIZE,SIZE)< 0
continue
else:
print(t, '??', end=" ")
random.shuffle(opt1)
random.shuffle(opt2)
opt[t] = opt1 + opt2
os[t] = len(opt[t])+1
oe[t] = len(opt[t])+1
solvers[t] = pycosat.itersolve(clauses + opt[t])
print(len(opt[t]), end=" ")
try:
solution = next(solvers[t])
if oe[t] - os[t] > 1:
os[t] =(os[t]+oe[t])//2
solvers[t] = pycosat.itersolve(clauses + opt[t][:(os[t]+oe[t])//2])
print(( os[t]+oe[t])//2, end=" ")
else:
print(t, '++', end=" ")
t += 1
board[t] = np.array(solution[:SIZE**2] ).reshape(SIZE,SIZE)< 0
except Exception as err:
if solvers[t] is not None and(os[t]+oe[t])//2 > 0:
if os[t] == oe[t]:
os[t] = 0
oe[t] = len(opt[t])+1
elif oe[t] - os[t] > 1:
oe[t] =(os[t]+oe[t])//2
else:
os[t] -= 1
oe[t] -= 1
solvers[t] = pycosat.itersolve(clauses + opt[t][:(os[t]+oe[t])//2])
print(( os[t]+oe[t])//2, end=" ")
else:
print(t, '--', end=" ")
solvers[t] = None
opt[t] = None
t -= 1
if t == T:
sample_submission_df.loc[test_df.index[n]] = 1 * board[T].ravel()
        plot_3d(board, index)  # note: 'index' is not defined in this extract; likely the puzzle id
|
Conway's Reverse Game of Life 2020
|
11,962,542 |
LB_LR3 = 0.67942
LB_Autofeat3 = 0.69377
LB_GBC3 = 0.83253<create_dataframe>
|
sample_submission_df.to_csv("submission.csv", index=True )
|
Conway's Reverse Game of Life 2020
|
11,962,542 |
models = pd.DataFrame({
'Model': ['Simple rule','Linear Regression without Autofeat', 'Linear Regression with Autofeat',
'GradientBoostingClassifier with Autofeat'],
'LB_for_16_features': [LB_simple_rule, LB_LR16, LB_Autofeat16, LB_GBC16],
'LB_for_3opt_features': [LB_simple_rule, LB_LR3, LB_Autofeat3, LB_GBC3]} )<sort_values>
|
submission_df = pd.read_csv('./submission.csv', index_col='id' )
|
Conway's Reverse Game of Life 2020
|
11,658,012 |
models.sort_values(by=['LB_for_3opt_features', 'LB_for_16_features'], ascending=False )<sort_values>
|
neighbor_alive2_cell_alive = {}
neighbor_alive2_cell_dead = {}
neighbor_alive3_cell_alive = {}
neighbor_alive3_cell_dead = {}
for cell in range(8):
neighbor_alive2_cell_alive[cell] = [(cell,j)for j in range(8)if j!=cell]
neighbor_alive2_cell_alive[cell] = [([j]+[8+k for k in range(8)if(k!=i and k!=j)])for i,j in neighbor_alive2_cell_alive[cell]]
neighbor_alive2_cell_dead[cell] = [(i,j)for i in range(8)for j in range(i)if i!=cell and j!=cell]
neighbor_alive2_cell_dead[cell] = [([i,j]+[8+k for k in range(8)if(k!=i and k!=j and k!=cell)])for i,j in neighbor_alive2_cell_dead[cell]]
neighbor_alive3_cell_alive[cell] = [(i,j,cell)for i in range(8)for j in range(i)if i!=cell and j!=cell]
neighbor_alive3_cell_alive[cell] = [([i,j]+[8+l for l in range(8)if(l!=i and l!=j and l!=k)])for i,j,k in neighbor_alive3_cell_alive[cell]]
neighbor_alive3_cell_dead[cell] = [(i,j,k)for i in range(8)for j in range(i)for k in range(j)if i!=cell and j!=cell and k!=cell]
neighbor_alive3_cell_dead[cell] = [([i,j,k]+[8+l for l in range(8)if(l!=i and l!=j and l!=k and l!=cell)])for i,j,k in neighbor_alive3_cell_dead[cell]]
def get_neighbors_backward(grad_output):
return torch.stack([torch.roll(torch.roll(grad_output[idx], -i, 2), -j, 3)for idx,(i,j)in enumerate(neighbors_roll_axes)] ).sum(dim=0)
def n_neigbors_nearby_prob_backward(grad_output, neighbors, neighbor_nearby=2):
if neighbor_nearby==2:
combination_cell_alive = neighbor_alive2_cell_alive
combination_cell_dead = neighbor_alive2_cell_dead
else:
combination_cell_alive = neighbor_alive3_cell_alive
combination_cell_dead = neighbor_alive3_cell_dead
neighbors = torch.cat([neighbors, 1 - neighbors])
coef = []
for cell in range(8):
cell_live_coef = torch.stack([neighbors[l].prod(dim=0)for l in combination_cell_alive[cell]] ).sum(dim=0)
cell_dead_coef = torch.stack([neighbors[d].prod(dim=0)for d in combination_cell_dead[cell]] ).sum(dim=0)
coef.append(cell_live_coef-cell_dead_coef)
coef = torch.stack(coef)
return coef*grad_output
class ProbabilisticForwardIteration(torch.autograd.Function):
@staticmethod
def forward(ctx, grid, delta=1):
ctx.grid = grid
return probabilistic_forward_iteration_autograd(grid)
@staticmethod
def backward(ctx, grad_out):
grid = ctx.grid
neighbors = get_neighbors(grid)
neighbors_p2 = n_neigbors_nearby_prob(neighbors, neighbor_nearby=2)
grad_n2_out = grad_out*grid
grad_n3_out = grad_out
grad_n2_inp = n_neigbors_nearby_prob_backward(grad_n2_out, neighbors, neighbor_nearby=2)
grad_n3_inp = n_neigbors_nearby_prob_backward(grad_n3_out, neighbors, neighbor_nearby=3)
grad_neighbors_out = grad_n2_inp + grad_n3_inp
grad_neighbors_inp = get_neighbors_backward(grad_neighbors_out)
grad_inp = grad_neighbors_inp + neighbors_p2*grad_out
return grad_inp, None
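# binary_forward_iteration() and get_neighbors() are used by these cells but never
# defined in this extract. Hedged sketches inferred from usage (tensors shaped
# (N, 1, 25, 25); neighbors_roll_axes is the 8-offset list defined in a later cell):
import torch

def get_neighbors(grid):
    # the 8 toroidally shifted copies of the grid, stacked on a new leading dim
    return torch.stack([torch.roll(torch.roll(grid, i, 2), j, 3)
                        for i, j in neighbors_roll_axes])

def binary_forward_iteration(grid):
    # one hard Game of Life step on a {0,1}/bool tensor
    nbrs = get_neighbors(grid.float()).sum(dim=0)
    return (nbrs == 3) | ((grid > 0.5) & (nbrs == 2))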
|
Conway's Reverse Game of Life 2020
|
11,658,012 |
models.sort_values(by=['LB_for_16_features', 'LB_for_3opt_features'], ascending=False )<import_modules>
|
def probabilistic_forward_iteration(grid, delta=1, autograd=True):
if autograd:
for _ in range(delta):
grid = probabilistic_forward_iteration_autograd(grid)
else:
for _ in range(delta):
grid = ProbabilisticForwardIteration.apply(grid)
return grid
|
Conway's Reverse Game of Life 2020
|
11,658,012 |
import pandas as pd
import numpy as np
import os
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import accuracy_score<load_from_csv>
|
neighbors_roll_axes = [(i, j) for i in range(-1, 2) for j in range(-1, 2) if not (i == 0 and j == 0)]
def generate_random_start_batch(batch_size):
    return np.random.randint(low=0, high=2, size=(batch_size, 1, 25, 25), dtype=bool)
def straight_iter_binary_numpy(grid, delta=1):
    for _ in range(delta):
        neighbor_sum = np.concatenate([np.roll(np.roll(grid, i, 2), j, 3) for i, j in neighbors_roll_axes], axis=1)
        neighbor_sum = neighbor_sum.sum(axis=1, keepdims=True)
        grid = ((neighbor_sum == 3) | ((grid == 1) & (neighbor_sum == 2)))
    return grid
class DataStream():
    def __init__(self, delta=None, batch_size=128, drop_empty=False, drop_ch_dim=False):
        self.init_delta = delta
        self.batch_size = batch_size
        self.drop_empty = drop_empty
        self.drop_ch_dim = drop_ch_dim
    def __iter__(self):
        while True:
            x = generate_random_start_batch(self.batch_size)
            delta = self.init_delta if self.init_delta else np.random.randint(1, 6)
            # play the random boards forward 5 + delta steps; the result is used as the stop state
            x = straight_iter_binary_numpy(x, 5 + delta)
            if self.drop_empty:
                x = x[x.any(axis=2).any(axis=2).reshape(-1)]
            if self.drop_ch_dim:
                x = x[:, 0, :, :]
            yield x.astype(float), delta
|
Conway's Reverse Game of Life 2020
|
11,658,012 |
train=pd.read_csv(".. /input/titanic/train.csv")
tit1=train.select_dtypes(include=['float64','int64','object'])
train.info()
test=pd.read_csv(".. /input/titanic/test.csv")
tit2=test.select_dtypes(include=['float64','int64','object'])
test.info()<feature_engineering>
|
class DataStreamTorch(IterableDataset):
def __init__(self, delta=None, batch_size=128, drop_empty=False, drop_ch_dim=False):
self.ds = DataStream(delta, batch_size, drop_empty, drop_ch_dim)
def __iter__(self):
for x, delta in self.ds:
yield FloatTensor(x), delta
def pass_collate(batch):
return batch[0]
def get_datastream_loader(delta=None, batch_size=128, drop_empty=False, drop_ch_dim=False, num_workers=1):
dataset = DataStreamTorch(delta, batch_size, drop_empty, drop_ch_dim)
dataloader = DataLoader(dataset, batch_size=1, collate_fn=pass_collate, num_workers=num_workers)
return dataloader
|
Conway's Reverse Game of Life 2020
|
11,658,012 |
tit2['survived']=np.nan
tit2.head()<feature_engineering>
|
class Model(nn.Module):
def __init__(self):
super().__init__()
self.relu = nn.ReLU()
self.conv1 = nn.Conv2d(1, 512, 7, padding=3, padding_mode='circular')
self.conv2 = nn.Conv2d(512, 256, 5, padding=2, padding_mode='circular')
self.conv3 = nn.Conv2d(256, 256, 3, padding=1, padding_mode='circular')
self.conv4 = nn.Conv2d(256, 1, 1)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
x = self.relu(self.conv1(x))
x = self.relu(self.conv2(x))
x = self.relu(self.conv3(x))
x = self.sigmoid(self.conv4(x))
return x
|
Conway's Reverse Game of Life 2020
|
11,658,012 |
percent = round(np.mean(train['Survived']), 3) * 100
print("Percentage of Survivors:", percent)<count_values>
|
class FixPredictBlock(nn.Module):
def __init__(self):
super().__init__()
self.relu = nn.ReLU()
self.conv1 = nn.Conv2d(5, 256, 5, padding=2, padding_mode='circular')
self.conv2 = nn.Conv2d(256, 256, 3, padding=1, padding_mode='circular')
self.conv3 = nn.Conv2d(256, 256, 1)
self.conv4 = nn.Conv2d(256, 1, 3, padding=1, padding_mode='circular')
self.sigmoid = nn.Sigmoid()
def forward(self, x, x_prev_pred):
        with torch.no_grad():
            x_prev_pred_bin = x_prev_pred > 0.5
            x_pred_bin = binary_forward_iteration(x_prev_pred_bin)
            x_pred = probabilistic_forward_iteration(x_prev_pred)
        x = torch.cat([x, x_prev_pred, x_prev_pred_bin.float(), x_pred, x_pred_bin.float()], dim=1)
x = self.relu(self.conv1(x))
x = self.relu(self.conv2(x))
x = self.relu(self.conv3(x))
x = self.sigmoid(self.conv4(x))
return x
class Model(nn.Module):
def __init__(self):
super().__init__()
self.fix_pred = FixPredictBlock()
def forward(self, x, n_it=5):
x_prev_pred = x
for i in range(n_it):
x_prev_pred = self.fix_pred(x, x_prev_pred)
return x_prev_pred
|
Conway's Reverse Game of Life 2020
|
11,658,012 |
total=train['Survived'].sum()
total
men=train[train['Sex']=='male']
women=train[train['Sex']=='female']
m=men['Sex'].count()
w=women['Sex'].count()
print("male:",m)
print("female:",w)
print("percentage of women:",round(w/(m+w)*100))
print("percentage of men:",round(m/(m+w)*100))<count_missing_values>
|
N_iter = 2000
device = 'cuda'
loader = get_datastream_loader(batch_size=128, num_workers=8, drop_empty=True, delta=1)
model = Model().to(device)
criterion = BCELoss()
optimizer = Adam(model.parameters() , lr=1e-3)
tqdm_loader = tqdm(loader)
for i, (stop_state, _) in enumerate(tqdm_loader):
stop_state = stop_state.to(device)
optimizer.zero_grad()
start_state_prediction = model(stop_state)
stop_state_prediction = probabilistic_forward_iteration(start_state_prediction)
loss = criterion(stop_state_prediction, stop_state)
loss.backward()
optimizer.step()
    with torch.no_grad():
        bce = loss.item()
        start_state_alive = (start_state_prediction > 0.5).float().mean().item()
        accuracy = ((stop_state_prediction > 0.5) == (stop_state > 0.5)).float().mean().item()
        accuracy_true = (binary_forward_iteration(start_state_prediction > 0.5) == (stop_state > 0.5)).float().mean().item()
tqdm_loader.postfix = 'bce: {:0.10f} | start_state_alive: {:0.5f} | accuracy: {:0.10f} | accuracy_true: {:0.10f}'\
.format(bce, start_state_alive, accuracy, accuracy_true)
if i > N_iter:
tqdm_loader.close()
break
for param in model.parameters() :
param.requires_grad = False
model.eval()
|
Conway's Reverse Game of Life 2020
|
11,658,012 |
train.isnull().sum()<count_missing_values>
|
for batch in loader:
    stop_state = batch[0].cuda()
    break
for n_iter in [1, 10, 100]:
    acc = (stop_state == binary_forward_iteration(model(stop_state, n_iter) > 0.5)).float().mean().item()
    print(f'model n_iter={n_iter} accuracy: {acc}')
|
Conway's Reverse Game of Life 2020
|
11,658,012 |
train.isnull().sum()<data_type_conversions>
|
def direct_gradient_optimization(batch, n_iter, lr, device='cuda', reduce_alive=False):
    stop_state = batch
    start_state = nn.Parameter(torch.rand(stop_state.shape).to(device) - 1)
criterion = BCELoss()
optimizer = Adam([start_state], lr=lr,)
tqdm_loader = trange(n_iter)
for _ in tqdm_loader:
optimizer.zero_grad()
start_state_prob = torch.sigmoid(start_state)
stop_state_prediction = probabilistic_forward_iteration(start_state_prob, autograd=False)
bce_loss = criterion(stop_state_prediction, stop_state)
start_state_alive = start_state_prob.mean()
        if reduce_alive and start_state_alive.item() > 0:
loss = bce_loss + start_state_alive
else:
loss = bce_loss
loss.backward()
optimizer.step()
with torch.no_grad() :
bce = bce_loss.item()
alive_cells = start_state_alive.item()
accuracy =(( stop_state_prediction > 0.5)==(stop_state>0.5)).float().mean().item()
accuracy_true =(binary_forward_iteration(start_state_prob>0.5)==(stop_state>0.5)).float().mean().item()
tqdm_loader.postfix = 'bce: {:0.10f} | start_state_alive: {:0.5f} | accuracy: {:0.10f} | accuracy_true: {:0.10f}'.format(bce, alive_cells, accuracy, accuracy_true)
return torch.sigmoid(start_state.detach())
def direct_gradient_optimization_predict(data, delta, n_iter=100, lr=1, device='cuda'):
    data = FloatTensor(np.array(data)).reshape((-1, 1, 25, 25)).to(device)
    for i in range(delta - 1):
        data = direct_gradient_optimization(data, n_iter, lr, reduce_alive=True)
        data = (data > 0.5).float()
    data = direct_gradient_optimization(data, n_iter, 1, reduce_alive=False)
    return (data > 0.5).detach().cpu().int().reshape(-1, 625).numpy()
|
Conway's Reverse Game of Life 2020
|
11,658,012 |
train['Cabin'] = train['Cabin'].fillna('X')
test['Cabin'] = test['Cabin'].fillna('X')<categorify>
|
test = pd.read_csv('../input/conways-reverse-game-of-life-2020/test.csv', index_col='id')
submission = pd.read_csv('../input/conways-reverse-game-of-life-2020/sample_submission.csv', index_col='id')
|
Conway's Reverse Game of Life 2020
|
11,658,012 |
train.loc[train.Age.isnull(), 'Age'] = train.groupby("Pclass").Age.transform('median')
test.loc[test.Age.isnull(), 'Age'] = test.groupby("Pclass").Age.transform('median')
print(train['Age'].isnull().sum())<categorify>
|
for delta in range(1, 6):
    mask = test['delta'] == delta
    data = test[mask].iloc[:, 1:]
    submission[mask] = direct_gradient_optimization_predict(data, delta, 100, 1)
|
Conway's Reverse Game of Life 2020
|
11,658,012 |
category = {'A':1, 'B':2, 'C':3, 'D':4, 'E':5, 'F':6, 'G':7, 'X':8, 'T':9}
train['Cabin'] = train['Cabin'].map(category)
train['Cabin'].unique().tolist()<count_missing_values>
|
submission.to_csv('submission.csv')
submission
|
Conway's Reverse Game of Life 2020
|
11,658,012 |
print(train.isnull().sum() )<feature_engineering>
|
def evaluate_results(test, submission):
    test = test.copy()
    test['score'] = 0
    for delta in range(1, 6):
        mask = test['delta'] == delta
        data = FloatTensor(submission[mask].to_numpy()).reshape(-1, 1, 25, 25)
        for _ in range(delta):
            data = binary_forward_iteration(data)
        data = data.reshape(-1, 625).numpy()
        result = test.loc[mask].iloc[:, 1:-1] == data
        test.loc[mask, 'score'] = result.mean(axis=1)
        print(f"Delta {delta} score: {test.loc[mask, 'score'].mean()}")
        test.loc[mask, 'score'].hist(bins=30)
        plt.show()
    print(f"LB : {1 - test['score'].mean()}")
|
Conway's Reverse Game of Life 2020
|
11,658,012 |
<count_missing_values><EOS>
|
evaluate_results(test, submission )
|
Conway's Reverse Game of Life 2020
|
12,320,368 |
<SOS> metric: PostProcessorKernel Kaggle data source: conways-reverse-game-of-life-2020<feature_engineering>
|
import numpy as np
import pandas as pd
|
Conway's Reverse Game of Life 2020
|
12,320,368 |
train["Sex"][train["Sex"] == "male"] = 0
train["Sex"][train["Sex"] == "female"] = 1
test["Sex"][test["Sex"] == "male"] = 0
test["Sex"][test["Sex"] == "female"] = 1
train["Embarked"][train["Embarked"] == "S"] = 0
train["Embarked"][train["Embarked"] == "C"] = 1
train["Embarked"][train["Embarked"] == "Q"] = 2
test["Embarked"][test["Embarked"] == "S"] = 0
test["Embarked"][test["Embarked"] == "C"] = 1
test["Embarked"][test["Embarked"] == "Q"] = 2
<feature_engineering>
|
sub = pd.read_csv('/kaggle/input/genetic-algorithm-submission-file/submission.csv')
sub.to_csv('submission.csv', index=False )
|
Conway's Reverse Game of Life 2020
|
13,658,702 |
train['fam']=train['SibSp']+train['Parch']+1
test['fam']=test['SibSp']+test['Parch']+1<data_type_conversions>
|
d1 = pd.read_csv(".. /input/the-game-of-life-cnn-delta-1/submit1.csv", index_col=0)
print("Numbers of delta1's data:{}".format(d1.shape[0]))
|
Conway's Reverse Game of Life 2020
|
13,658,702 |
train['Age']=train['Age'].astype(str)
test['Age']=test['Age'].astype(str )<feature_engineering>
|
d2 = pd.read_csv(".. /input/the-game-of-life-cnn-delta-2/submit2.csv", index_col=0)
print("Numbers of delta2's data:{}".format(d2.shape[0]))
|
Conway's Reverse Game of Life 2020
|
13,658,702 |
cat={'2':1, '3':2 , '5':3, '1':4 ,'4':5,'8':6,'6':7,'7':8,'0':9,'9':10}
test['Age']=test['Age'].map(cat)
test['Age'].unique().tolist()<feature_engineering>
|
d3 = pd.read_csv(".. /input/the-game-of-life-cnn-delta-3/submit3.csv", index_col=0)
print("Numbers of delta3's data:{}".format(d3.shape[0]))
|
Conway's Reverse Game of Life 2020
|
13,658,702 |
train['Title'] = train['Name'].map(lambda x: re.compile("([A-Za-z]+)\." ).search(x ).group())
test['Title'] = test['Name'].map(lambda x: re.compile("([A-Za-z]+)\." ).search(x ).group())
print(train['Title'].unique())
<feature_engineering>
|
d4 = pd.read_csv(".. /input/the-game-of-life-cnn-delta-4/submit4.csv", index_col=0)
print("Numbers of delta4's data:{}".format(d4.shape[0]))
|
Conway's Reverse Game of Life 2020
|
13,658,702 |
train['Title'] = train['Title'].replace(['Lady.', 'Capt.', 'Col.',
'Don.', 'Dr.', 'Major.', 'Rev.', 'Jonkheer.', 'Dona.'], 'Rare.')
train['Title'] = train['Title'].replace(['Countess.', 'Lady', 'Sir'], 'Royal')
train['Title'] = train['Title'].replace('Mlle.', 'Miss.')
train['Title'] = train['Title'].replace('Ms.', 'Miss.')
train['Title'] = train['Title'].replace('Mme.', 'Mrs.')
train[['Title', 'Survived']].groupby(['Title'], as_index=False ).mean()<feature_engineering>
|
d5 = pd.read_csv(".. /input/the-game-of-life-cnn-delta-5/submit5.csv", index_col=0)
print("Numbers of delta5's data:{}".format(d5.shape[0]))
|
Conway's Reverse Game of Life 2020
|
13,658,702 |
<categorify><EOS>
|
# concatenate the per-delta predictions into one submission frame
submitData = pd.concat([d1, d2, d3, d4, d5]).sort_index()
submitData.to_csv("submission.csv")
submitData
|
Conway's Reverse Game of Life 2020
|
12,793,594 |
<SOS> metric: PostProcessorKernel Kaggle data source: conways-reverse-game-of-life-2020<categorify>
|
# Merge every submission found on disk: rows are sorted, de-duplicated, and
# awk's '!a[$1]++' keeps only the first row seen for each board id.
!find ./ ../input/ /ai-games/puzzles/game_of_life/ -name 'submission.csv' | xargs cat | sort -nr | uniq | awk -F',' '!a[$1]++' | sort -n > ./submission_previous.csv
!find ./ ../input/ /ai-games/puzzles/game_of_life/ -name 'submission.csv' | xargs cat | sort -nr | uniq | awk -F',' '!a[$1]++' | sort -n > ./submission.csv
!find ./ ../input/ /ai-games/puzzles/game_of_life/ -name 'timeouts.csv' | xargs cat | sort -nr | uniq | awk -F',' '!a[$1]++' | sort -n > ./timeouts.csv
!(for FILE in $(find ./ ../input/ /ai-games/puzzles/game_of_life/ -name '*submission.csv' | sort); do cat $FILE | grep ',1' | wc -l | tr '
' ' '; echo $FILE; done) | sort -n;
|
Conway's Reverse Game of Life 2020
|
12,793,594 |
<drop_column><EOS>
|
!find ./ -name '__pycache__' -or -name '*.py[cod]' -delete
!(for FILE in *.csv; do cat $FILE | grep ',1,' | wc -l | tr '
' ' '; echo $FILE; done) | sort -n;
|
Conway's Reverse Game of Life 2020
|
11,546,266 |
<SOS> metric: PostProcessorKernel Kaggle data source: conways-reverse-game-of-life-2020<split>
|
import numpy as np
import pandas as pd
import os
import tensorflow as tf
import matplotlib.pyplot as plt
|
Conway's Reverse Game of Life 2020
|
11,546,266 |
X_train, X_test, y_train, y_test = train_test_split(train.drop(['Survived','PassengerId'], axis=1),
train['Survived'], test_size = 0.2,
random_state = 0 )<train_model>
|
train_df = pd.read_csv('/kaggle/input/conways-reverse-game-of-life-2020/train.csv')
test_df = pd.read_csv("/kaggle/input/conways-reverse-game-of-life-2020/test.csv")
sample_submission = pd.read_csv("/kaggle/input/conways-reverse-game-of-life-2020/sample_submission.csv" )
|
Conway's Reverse Game of Life 2020
|
11,546,266 |
logisticRegression = LogisticRegression(max_iter = 30000)
logisticRegression.fit(X_train, y_train)
<predict_on_test>
|
start_features = [f for f in train_df.columns if "start" in f]
stop_features = [f for f in train_df.columns if "stop" in f]
features_in = stop_features + ["delta"]
|
Conway's Reverse Game of Life 2020
|
11,546,266 |
predictions = logisticRegression.predict(X_test)
acc_LOG = round(accuracy_score(predictions, y_test)* 100, 2)
print(acc_LOG)
print(predictions)
<compute_test_metric>
|
delta_train, delta_validation, stop_train, stop_validation, Y_train, Y_valid = train_test_split(train_df["delta"].values,
train_df[stop_features].values.reshape(-1, 25, 25, 1 ).astype(float),
train_df[start_features].values.reshape(-1, 25, 25, 1 ).astype(float),
test_size=0.33,
)
|
Conway's Reverse Game of Life 2020
|
11,546,266 |
print(confusion_matrix(y_test, predictions))<compute_test_metric>
|
X_train = [delta_train, stop_train]
X_valid = [delta_validation, stop_validation]
X_test = [test_df["delta"].values, test_df[stop_features].values.reshape(-1, 25, 25, 1 ).astype(float)]
X_all_train = [train_df["delta"].values, train_df[stop_features].values.reshape(-1, 25, 25, 1 ).astype(float)]
Y_all_train = train_df[start_features].values.reshape(-1, 25, 25, 1 ).astype(float )
|
Conway's Reverse Game of Life 2020
|
11,546,266 |
accuracy = (88 + 50) / (88 + 50 + 22 + 19)
print('accuracy is:', round(accuracy * 100, 2))
|
def conv_block(inputs, filters, index, activation='relu'):
    x = layers.Conv2D(filters, kernel_size=(3, 3), padding="SAME", name=f'conv{index}')(inputs)
    x = layers.BatchNormalization()(x)
    return layers.Activation(activation, name=activation + str(index))(x)
|
Conway's Reverse Game of Life 2020
|
11,546,266 |
randomforest = RandomForestClassifier(random_state = 5, criterion = 'gini', max_depth = 10, max_features = 'auto', n_estimators = 500)
randomforest.fit(X_train, y_train)
pred = randomforest.predict(X_test)
acc_randomforest = round(accuracy_score(pred, y_test)* 100, 2)
print(acc_randomforest)
<train_model>
|
def create_model(dropout_prob=0.3):
    input_delta = layers.Input(shape=(1,), name="input_delta")
    dense_delta = layers.Dense(25 * 25, name='dense_delta')(input_delta)
    dense_reshape = layers.Reshape((25, 25, 1), name='reshape_delta')(dense_delta)
    input_image = layers.Input(shape=(25, 25, 1), name="input_images")
    all_inputs = layers.Concatenate(axis=3, name='concatenate')([input_image, dense_reshape])
    x = conv_block(all_inputs, 32, index=1)
    x = layers.Dropout(dropout_prob)(x)
    x = conv_block(x, 128, index=2)
    x = layers.Dropout(dropout_prob)(x)
    x = conv_block(x, 256, index=3)
    x = layers.Dropout(dropout_prob)(x)
    x = conv_block(x, 64, index=4)
    x = layers.Dropout(dropout_prob)(x)
    out = conv_block(x, 1, index=5, activation='sigmoid')
    return Model(inputs=[input_delta, input_image], outputs=out)
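A design note on create_model: the scalar delta is lifted through a Dense(25*25) layer, reshaped into a 25x25 plane, and concatenated with the stop-state image as a second channel, so every convolution sees the step count at every pixel. A quick sanity check (a sketch, assuming the Keras imports used elsewhere in this kernel):

m = create_model(dropout_prob=0.3)
m.summary()  # expect two inputs: input_delta (1,) and input_images (25, 25, 1)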
|
Conway's Reverse Game of Life 2020
|
11,546,266 |
gbk = GradientBoostingClassifier()
gbk.fit(X_train, y_train)
pred = gbk.predict(X_test)
acc_gbk = round(accuracy_score(pred, y_test)* 100, 2)
print(acc_gbk )<create_dataframe>
|
model = create_model()
model.compile(loss="bce", optimizer=tf.keras.optimizers.Adam() , metrics=["accuracy"] )
|
Conway's Reverse Game of Life 2020
|
11,546,266 |
see={'TECHNIQUE':['RANDOM FOREST','LOGISTIC REGRESSION','GRADIENT BOOSTING'],'ACCURACY':[acc_randomforest,acc_LOG,acc_gbk]}
mod=pd.DataFrame(see)
mod<save_to_csv>
|
history = model.fit(x=X_train,
y=Y_train,
batch_size=128,
epochs=25,
validation_data=(X_valid, Y_valid))
|
Conway's Reverse Game of Life 2020
|
11,546,266 |
ids = test['PassengerId']
predictions = randomforest.predict(test.drop('PassengerId', axis=1))
output = pd.DataFrame({ 'PassengerId' : ids, 'Survived': predictions })
output.to_csv('submission.csv', index=False )<import_modules>
|
model = create_model()
model.compile(loss="bce", optimizer=tf.keras.optimizers.Adam() , metrics=["accuracy"] )
|
Conway's Reverse Game of Life 2020
|
11,546,266 |
import pandas as pd
import numpy as np
import os
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import accuracy_score<load_from_csv>
|
history = model.fit(x=X_all_train,
y=Y_all_train,
batch_size=128,
epochs=20,
)
|
Conway's Reverse Game of Life 2020
|
11,546,266 |
train=pd.read_csv(".. /input/titanic/train.csv")
tit1=train.select_dtypes(include=['float64','int64','object'])
train.info()
test=pd.read_csv(".. /input/titanic/test.csv")
tit2=test.select_dtypes(include=['float64','int64','object'])
test.info()<feature_engineering>
|
test_prediction = model.predict(X_test)
test_prediction =(test_prediction > 0.5 ).astype(int ).reshape(test_df.shape[0], -1)
sub = test_df[["id"]].copy()
tmp = pd.DataFrame(test_prediction, columns=start_features)
submission = sub.join(tmp )
|
Conway's Reverse Game of Life 2020
|
11,546,266 |
tit2['survived']=np.nan
tit2.head()<feature_engineering>
|
submission.to_csv("submission.csv", index=False )
|
Conway's Reverse Game of Life 2020
|
11,546,266 |
<count_values><EOS>
|
submission.to_csv("submission.csv", index=False )
|
Conway's Reverse Game of Life 2020
|
11,632,795 |
<SOS> metric: PostProcessorKernel Kaggle data source: conways-reverse-game-of-life-2020<count_missing_values>
|
class OneIterationReverseNet(nn.Module):
    def __init__(self, info_ch, ch):
        super().__init__()
        self.relu = nn.ReLU()
        self.conv1 = nn.Conv2d(info_ch, ch, 5, padding=4, padding_mode='circular')
        self.conv2 = nn.Conv2d(ch, ch, 3)
        self.conv3 = nn.Conv2d(ch, info_ch, 3)

    def forward(self, inp):
        x = self.relu(self.conv1(inp))
        x = self.relu(self.conv2(x))
        x = self.relu(self.conv3(x))
        return x

class ReverseModel(nn.Module):
    def __init__(self, info_ch=64, ch=128):
        super().__init__()
        self.relu = nn.ReLU()
        self.encoder = nn.Conv2d(1, info_ch, 7, padding=3, padding_mode='circular')
        self.reverse_one_iter = OneIterationReverseNet(info_ch, ch)
        self.decoder = nn.Conv2d(info_ch, 1, 3, padding=1, padding_mode='circular')

    def forward(self, stop, delta):
        x = self.relu(self.encoder(stop - 0.5))
        for i in range(delta.max().item()):
            y = self.reverse_one_iter(x)
            mask = (delta > i).reshape(-1, 1, 1, 1)
            x = x * (~mask).float() + y * mask.float()
        x = self.decoder(x)
        return x
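The reverse block is applied delta times via the mask in forward: samples whose delta is already exhausted keep their features unchanged, while the rest take one more reverse step. A quick shape check (a sketch, assuming torch is imported as elsewhere in this kernel):

model = ReverseModel()
stop = torch.rand(2, 1, 25, 25)
delta = torch.tensor([1, 3])
print(model(stop, delta).shape)  # torch.Size([2, 1, 25, 25]) - start-state logits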
|
Conway's Reverse Game of Life 2020
|
11,632,795 |
train.isnull().sum()<count_missing_values>
|
import pandas as pd
from sklearn.model_selection import train_test_split
|
Conway's Reverse Game of Life 2020
|
11,632,795 |
train.isnull().sum()<data_type_conversions>
|
train_val = pd.read_csv('/kaggle/input/conways-reverse-game-of-life-2020/train.csv', index_col='id')
test = pd.read_csv('/kaggle/input/conways-reverse-game-of-life-2020/test.csv', index_col='id')
train, val = train_test_split(train_val, test_size=0.2, shuffle=True, random_state=42, stratify=train_val['delta'] )
|
Conway's Reverse Game of Life 2020
|
11,632,795 |
train['Cabin'] = train['Cabin'].fillna('X')
test['Cabin']=test['Cabin'].fillna('X' )<categorify>
|
def line2grid_tensor(data, device='cuda'):
    grid = data.to_numpy().reshape((data.shape[0], 1, 25, 25))
    return FloatTensor(grid).to(device)

class TaskDataset(Dataset):
    def __init__(self, data, device='cuda'):
        self.delta = LongTensor(data['delta'].to_numpy()).to(device)
        if data.shape[1] == 1251:  # train rows: 'delta' + 625 start + 625 stop cells
            self.start = line2grid_tensor(data.iloc[:, 1:626], device)
            self.stop = line2grid_tensor(data.iloc[:, 626:], device)
        else:
            self.start = None
            self.stop = line2grid_tensor(data.iloc[:, 1:], device)

    def __len__(self):
        return len(self.delta)

    def __getitem__(self, idx):
        if self.start is None:
            return {'stop': self.stop[idx], 'delta': self.delta[idx]}
        return {'start': self.start[idx], 'stop': self.stop[idx], 'delta': self.delta[idx]}
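A quick usage sketch for the dataset (device='cpu' keeps it runnable without a GPU; train is the frame split above):

ds = TaskDataset(train, device='cpu')
item = ds[0]
print(item['start'].shape, item['stop'].shape, item['delta'])  # (1, 25, 25) grids plus a scalar delta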
|
Conway's Reverse Game of Life 2020
|
11,632,795 |
train.loc[train.Age.isnull() ,'Age']=train.groupby("Pclass" ).Age.transform('median')
test.loc[test.Age.isnull() ,'Age']=test.groupby("Pclass" ).Age.transform('median')
print(train['Age'].isnull().sum() )<categorify>
|
dataset_train = TaskDataset(train)
dataloader_train = DataLoader(dataset_train, batch_size=128, shuffle=True)
dataset_val = TaskDataset(val)
dataloader_val = DataLoader(dataset_val, batch_size=128, shuffle=False)
dataset_test = TaskDataset(test)
dataloader_test = DataLoader(dataset_test, batch_size=128, shuffle=False )
|
Conway's Reverse Game of Life 2020
|
11,632,795 |
category = {'A':1, 'B':2, 'C':3, 'D':4, 'E':5, 'F':6, 'G':7, 'X':8, 'T':9}
train['Cabin'] = train['Cabin'].map(category)
train['Cabin'].unique().tolist()<count_missing_values>
|
runner = SupervisedRunner(device='cuda', input_key=['stop', 'delta'],)
loaders = {'train': dataloader_train, 'valid': dataloader_val}
model = ReverseModel()
optimizer = Lookahead(RAdam(params=model.parameters() , lr=1e-3))
criterion = {"bce": nn.BCEWithLogitsLoss() }
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=0.25, patience=2)
callbacks = [
CriterionCallback(input_key='start', prefix="loss", criterion_key="bce"),
EarlyStoppingCallback(patience=5),
]
runner.train(
model=model,
criterion=criterion,
optimizer=optimizer,
scheduler=scheduler,
loaders=loaders,
callbacks=callbacks,
logdir="./logs",
num_epochs=999,
main_metric="loss",
minimize_metric=True,
verbose=True,
)
|
Conway's Reverse Game of Life 2020
|
11,632,795 |
print(train.isnull().sum() )<feature_engineering>
|
best_model = ReverseModel().to('cuda')
best_model.load_state_dict(torch.load('logs/checkpoints/best.pth')['model_state_dict'] )
|
Conway's Reverse Game of Life 2020
|
11,632,795 |
train["Embarked"] = train["Embarked"].fillna(mode(train["Embarked"]))<count_missing_values>
|
%load_ext Cython
|
Conway's Reverse Game of Life 2020
|
11,632,795 |
print(train.isnull().sum() )<feature_engineering>
|
%%cython
# assumed imports: the functions below need the cython decorators and numpy
cimport cython
import numpy as np
@cython.cdivision(True)
@cython.boundscheck(False)
@cython.nonecheck(False)
@cython.wraparound(False)
cdef int calc_neighs(unsigned char[:, :] field, int i, int j, int n, int k):
    # Count the live neighbours of cell (i, j); out-of-bounds cells count as dead.
    cdef:
        int neighs = 0
        int i_min = i - 1
        int i_pl = i + 1
        int j_min = j - 1
        int j_pl = j + 1
    if i_min >= 0:
        if j_min >= 0:
            neighs += field[i_min, j_min]
        neighs += field[i_min, j]
        if j_pl < k:
            neighs += field[i_min, j_pl]
    if j_min >= 0:
        neighs += field[i, j_min]
    if j_pl < k:
        neighs += field[i, j_pl]
    if i_pl < n:
        if j_min >= 0:
            neighs += field[i_pl, j_min]
        neighs += field[i_pl, j]
        if j_pl < k:
            neighs += field[i_pl, j_pl]
    return neighs

@cython.cdivision(True)
@cython.boundscheck(False)
@cython.nonecheck(False)
@cython.wraparound(False)
cpdef make_move_cython(unsigned char[:, :] field, int moves):
    cdef:
        int _, i, j, neighs
        int n, k
        int switch = 0
        unsigned char[:, :] cur_field
        unsigned char[:, :] next_field
    # Double-buffer between cur_field and next_field, flipping `switch`
    # each step instead of reallocating arrays.
    cur_field = np.copy(field)
    next_field = np.zeros_like(field, 'uint8')
    n = field.shape[0]
    k = field.shape[1]
    for _ in range(moves):
        if switch == 0:
            for i in range(n):
                for j in range(k):
                    neighs = calc_neighs(cur_field, i, j, n, k)
                    if cur_field[i, j] and neighs == 2:
                        next_field[i, j] = 1
                    elif neighs == 3:
                        next_field[i, j] = 1
                    else:
                        next_field[i, j] = 0
        else:
            for i in range(n):
                for j in range(k):
                    neighs = calc_neighs(next_field, i, j, n, k)
                    if next_field[i, j] and neighs == 2:
                        cur_field[i, j] = 1
                    elif neighs == 3:
                        cur_field[i, j] = 1
                    else:
                        cur_field[i, j] = 0
        switch = (switch + 1) % 2
    return np.array(next_field if switch else cur_field)
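Once the cell above is compiled, the stepper can be exercised from plain Python (a usage sketch; note that calc_neighs treats out-of-border neighbours as dead, unlike the circular padding used by the PyTorch models in these kernels):

board = (np.random.rand(25, 25) > 0.5).astype(np.uint8)
evolved = make_move_cython(board, 1)  # advance one Life step
print(evolved.shape, evolved.dtype)   # (25, 25) uint8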
|
Conway's Reverse Game of Life 2020
|
11,632,795 |
train["Sex"][train["Sex"] == "male"] = 0
train["Sex"][train["Sex"] == "female"] = 1
test["Sex"][test["Sex"] == "male"] = 0
test["Sex"][test["Sex"] == "female"] = 1
train["Embarked"][train["Embarked"] == "S"] = 0
train["Embarked"][train["Embarked"] == "C"] = 1
train["Embarked"][train["Embarked"] == "Q"] = 2
test["Embarked"][test["Embarked"] == "S"] = 0
test["Embarked"][test["Embarked"] == "C"] = 1
test["Embarked"][test["Embarked"] == "Q"] = 2
<feature_engineering>
|
def make_move(board: Union[torch.Tensor, np.ndarray], moves: Union[int, List[int]] = 1):
    if isinstance(board, torch.Tensor):
        return torch.tensor(_evolve_board(board.detach().cpu().numpy(), moves),
                            dtype=board.dtype, device=board.device)
    return np.array(_evolve_board(board, moves), dtype=board.dtype)

def _evolve_board(board, moves):
    board = np.array(board).astype(np.uint8)
    if len(board.shape) == 4:
        assert board.shape[1:] == (1, 25, 25)
        board_evolved = _move_board_3dim(board[:, 0], moves)[:, None]
    elif len(board.shape) == 3:
        board_evolved = _move_board_3dim(board, moves)
    else:
        assert board.shape == (25, 25)
        board_evolved = make_move_cython(board, moves)
    return np.array(board_evolved, dtype=np.float32)

def _move_board_3dim(board, moves):
    if board.shape == (1, 25, 25):
        board_evolved = make_move_cython(board[0], moves)[None]  # restore the batch axis
    else:
        assert board.shape[1:] == (25, 25)
        if isinstance(moves, int):
            moves = [moves] * len(board)
        assert len(moves) == len(board)
        board_evolved = np.array([make_move_cython(b, move) for b, move in zip(board, moves)])
    return board_evolved
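make_move is a thin dispatcher over the Cython kernel: it accepts 2D (25, 25), 3D (N, 25, 25), or 4D (N, 1, 25, 25) boards, torch or numpy, and either a single move count or one per board. A usage sketch (assuming the Cython cell above is compiled):

batch = (np.random.rand(4, 25, 25) > 0.5).astype(np.float32)
evolved = make_move(batch, [1, 2, 3, 1])  # per-board step counts
print(evolved.shape)  # (4, 25, 25)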
|
Conway's Reverse Game of Life 2020
|
11,632,795 |
train['fam']=train['SibSp']+train['Parch']+1
test['fam']=test['SibSp']+test['Parch']+1<data_type_conversions>
|
def batch_accuracy(x, y_pred):
    return torch.sum(torch.abs(x - y_pred), dim=(1, 2, 3))

def postprocessor(y_pred, x, moves):
    # Scan 11 thresholds; for each sample keep the one whose forward
    # evolution best matches the observed stop state x.
    thresholds = torch.linspace(0, 1, 11, device=x.device)
    errors = torch.cat([batch_accuracy(x.float(),
                                       make_move((y_pred > threshold).float(), moves))[None]
                        for threshold in thresholds], 0)
    best_thresholds = thresholds[torch.argmin(errors, 0)][:, None, None, None]
    return (y_pred > best_thresholds).float()
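Usage sketch for the postprocessor: given sigmoid probabilities, observed stop boards, and per-sample deltas, it returns binarised start boards at each sample's best threshold (assumes the Cython cell above is compiled so make_move works):

y_prob = torch.rand(4, 1, 25, 25)                # model probabilities
stop = (torch.rand(4, 1, 25, 25) > 0.5).float()  # observed stop states
start_bin = postprocessor(y_prob, stop, moves=[1, 2, 3, 1])
print(start_bin.shape)  # torch.Size([4, 1, 25, 25])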
|
Conway's Reverse Game of Life 2020
|
11,632,795 |
train['Age']=train['Age'].astype(str)
test['Age']=test['Age'].astype(str )<feature_engineering>
|
def predict_batch(model, batch):
    model.eval()
    with torch.no_grad():
        prediction = model(batch['stop'], batch['delta'])
        prediction = torch.sigmoid(prediction).detach().cpu().numpy()
    return prediction

def predict_loader(model, loader):
    predict = [predict_batch(model, batch) for batch in loader]
    return np.concatenate(predict)
def validate_loader(model, loader, lb_delta=None, threshold=0.5):
    prediction_val = predict_loader(model, loader)  # use the model passed in, not a global
    y_val = loader.dataset.start.detach().cpu().numpy()
    score_unoptimized = ((prediction_val > threshold) == y_val).mean(axis=(1, 2, 3))
    delta_val = loader.dataset.delta.detach().cpu().numpy()
    prediction_val = postprocessor(torch.tensor(prediction_val),
                                   x=loader.dataset.stop.cpu(),
                                   moves=delta_val).cpu().numpy()
    score = (prediction_val == y_val).mean(axis=(1, 2, 3))
    print(f'All data accuracy (global threshold): {score_unoptimized.mean()}')
    print(f'All data accuracy (optimized threshold): {score.mean()}')
    delta_score = {}
    for i in range(1, 6):
        delta_score[i] = score[delta_val == i].mean()
        print(f'delta={i} accuracy: {delta_score[i]}')
    if lb_delta is not None:
        lb_delta = lb_delta.value_counts(normalize=True)
        test_score = sum([lb_delta[i] * delta_score[i] for i in range(1, 6)])
        print(f'VAL score: {1 - score.mean()}')
        print(f'LB score estimate: {1 - test_score}')
def make_submission(prediction, x, moves, sample_submission_path='/kaggle/input/conways-reverse-game-of-life-2020/sample_submission.csv'):
    prediction = postprocessor(prediction, x, moves).numpy().astype(int).reshape(-1, 625)
    sample_submission = pd.read_csv(sample_submission_path, index_col='id')
    sample_submission.iloc[:] = prediction
    return sample_submission
|
Conway's Reverse Game of Life 2020
|
11,632,795 |
cat={'2':1, '3':2 , '5':3, '1':4 ,'4':5,'8':6,'6':7,'7':8,'0':9,'9':10}
test['Age']=test['Age'].map(cat)
test['Age'].unique().tolist()<feature_engineering>
|
validate_loader(best_model, dataloader_val, test['delta'] )
|
Conway's Reverse Game of Life 2020
|
11,632,795 |
train['Title'] = train['Name'].map(lambda x: re.compile("([A-Za-z]+)\." ).search(x ).group())
test['Title'] = test['Name'].map(lambda x: re.compile("([A-Za-z]+)\." ).search(x ).group())
print(train['Title'].unique())
<feature_engineering>
|
prediction_test = predict_loader(best_model, dataloader_test)
submission = make_submission(torch.tensor(prediction_test),
x=dataloader_test.dataset.stop.cpu() ,
moves=dataloader_test.dataset.delta.detach().cpu().numpy())
submission.to_csv('submission.csv')
submission.head()
|
Conway's Reverse Game of Life 2020
|
11,614,110 |
train['Title'] = train['Title'].replace(['Lady.', 'Capt.', 'Col.',
'Don.', 'Dr.', 'Major.', 'Rev.', 'Jonkheer.', 'Dona.'], 'Rare.')
train['Title'] = train['Title'].replace(['Countess.', 'Lady', 'Sir'], 'Royal')
train['Title'] = train['Title'].replace('Mlle.', 'Miss.')
train['Title'] = train['Title'].replace('Ms.', 'Miss.')
train['Title'] = train['Title'].replace('Mme.', 'Mrs.')
train[['Title', 'Survived']].groupby(['Title'], as_index=False ).mean()<feature_engineering>
|
sample = pd.read_csv('/kaggle/input/conways-reverse-game-of-life-2020/sample_submission.csv')
test = pd.read_csv('/kaggle/input/conways-reverse-game-of-life-2020/test.csv')
train = pd.read_csv('/kaggle/input/conways-reverse-game-of-life-2020/train.csv')
|
Conway's Reverse Game of Life 2020
|